| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
|---|---|---|---|---|---|---|
simple_parser.py
|
galaxyChen/dl4ir-webnav
| 14
|
12785351
|
<reponame>galaxyChen/dl4ir-webnav
'''
Simple parser that extracts a webpage's content and hyperlinks.
'''
import urllib2
import re
class Parser():
def __init__(self):
pass
def parse(self, url):
f = urllib2.urlopen(url)
        text = f.read()  # fetch the page's contents
        # use re.findall to extract all href link targets
        links = re.findall(r'href=[\'"]?([^\'" >]+)', text)
return text, links
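# A minimal usage sketch (Python 2, matching the urllib2 import; assumes
# network access to a reachable URL):
# parser = Parser()
# text, links = parser.parse('http://example.com')
# print links[:5]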
| 3.390625
| 3
|
tests/test_comment.py
|
muze-interviews/muze-lark-interview
| 0
|
12785352
|
<filename>tests/test_comment.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from lark.lark import Lark
def DynamicEarleyLark(grammar, **kwargs):
return Lark(grammar, lexer='dynamic', parser='earley', **kwargs)
class TestParserComments(unittest.TestCase):
def make_test_parser(self):
        # Create a parser for a grammar that treats `#` as an ignored "comment"
        # character and expects zero or more integers
return DynamicEarleyLark(r"""
%import common.INT
%ignore "#"
start : INT*
""")
def test_happy_path_with_child(self):
parser = self.make_test_parser()
# Accepts a single digit
tree = parser.parse("2")
self.assertEqual(tree.children, ['2'])
# Accepts multiple digits
tree = parser.parse("23")
self.assertEqual(tree.children, ['2', '3'])
# Accepts no digits
tree = parser.parse("")
self.assertEqual(tree.children, [])
# Accepts digits with ignored `#` character
tree = parser.parse("#2")
self.assertEqual(tree.children, ['2'])
def test_comment_without_child(self):
parser = self.make_test_parser()
# This parse should ignore all `#` characters and return an empty tree.
tree = parser.parse("##")
self.assertEqual(tree.children, [])
if __name__ == '__main__':
unittest.main()
| 2.9375
| 3
|
test/cpu/test_flip.py
|
HiroakiMikami/benchmarks-of-deep-learning-libraries
| 0
|
12785353
|
<filename>test/cpu/test_flip.py
import json
import os
import subprocess
import tempfile
import pytest
import torch
@pytest.mark.parametrize("env", ["pytorch", "torchscript", "albumentations"])
def test_flip(env: str) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
img = (
torch.arange(10).reshape(1, 1, 1, 10).expand(-1, 3, 10, -1).to(torch.uint8)
)
data_path = os.path.join(tmpdir, "input")
torch.save(img, data_path)
cmd = [
"python",
os.path.join("cpu", env, "flip.py"),
"--input-path",
data_path,
"--n-warmup",
"0",
"--n-measure",
"1",
"--out",
tmpdir,
]
        subprocess.run(cmd, check=True)  # fail fast if the benchmark script errors
output = torch.load(os.path.join(tmpdir, "output")) # type: ignore
expected = img.clone()
for i in range(img.shape[2]):
expected[:, :, :, i] = img[:, :, :, img.shape[2] - i - 1]
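        # Equivalent one-liner (assuming a reasonably recent PyTorch):
        # expected = img.flip([3])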
assert torch.all(output == expected)
with open(os.path.join(tmpdir, "out.json")) as f:
out = json.load(f)
assert "time_sec" in out
| 1.96875
| 2
|
raspagem/random/exemplo01.py
|
sslppractice/propython
| 0
|
12785354
|
import urllib.request
url = 'http://www.ifce.edu.br'
# Fetch the page content
pagina = urllib.request.urlopen(url)
texto1 = pagina.read().decode('utf-8')
# Another way to do the same thing
import requests
page = requests.get(url)
texto2 = page.content.decode('utf-8')
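# Note: requests can also decode for you: page.text returns the body as str,
# using the encoding inferred from the response headers.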
# Check that all lines are identical
print(texto1.split('\n') == texto2.split('\n'))
| 3.34375
| 3
|
catalog/bindings/csw/title_1.py
|
NIVANorge/s-enda-playground
| 0
|
12785355
|
from dataclasses import dataclass, field
__NAMESPACE__ = "http://www.opengis.net/ows"
@dataclass
class Title1:
"""
Title of this resource, normally used for display to a human.
"""
class Meta:
name = "Title"
namespace = "http://www.opengis.net/ows"
value: str = field(
default="",
metadata={
"required": True,
},
)
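# A minimal usage sketch (assuming xsdata-style dataclass bindings; the title
# string is a hypothetical example):
# title = Title1(value="Surface water quality measurements")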
| 2.734375
| 3
|
test4~6.py
|
songgiwoo/python1
| 0
|
12785356
|
# Print * from 1 to 5.
a = "*"
#for i in range(1, 6, 1):
# for j in range(1, i+1, 1):
# print(a, end="")
# print()
#for i in range(5, 0, -1):
# for j in range(i-1, 0, -1):
# print(a, end="")
# print()
#line = int(input("Enter the diamond's length (2~30) : "))
#for x in range(1, line * 2, 2):
# print((" " * ((line * 2 - 1 - x) // 2)) + ("*" * x))
#for y in range(line * 2 - 3, 0, -2):
# print((" " * ((line * 2 - 1 - y) // 2)) + "*" * y)
| 3.875
| 4
|
project/user/inputs.py
|
jasmine95dn/flask_best_worst_scaling
| 0
|
12785357
|
# -*- coding: utf-8 -*-
"""
Inputs
#################
*Module* ``project.user.inputs``
This module defines routes that manage new project inputs from users.
"""
import re, boto3, string
from flask import render_template, redirect, url_for, Response, current_app
from flask_login import login_required, current_user
from . import user_app
from .forms import ProjectInformationForm
from .helpers import upload_file, generate_keyword, convert_into_seconds
from .. import db
from ..models import Project, Annotator, Batch, Tuple, Item
# User - Upload project
@user_app.route('/upload-project', methods=['GET','POST'])
@login_required
def upload_project():
"""
Provide information of a new project from user at ``/user/upload-project``.
Returns:
user profile page at ``/user/<some_name>`` if new valid project is submitted.
Note:
Upload project on Mechanical Turk Platform or use local annotator system.
Error:
Error message emerges if there are invalid fields or there is no logged in user.
"""
# information about project
project_form = ProjectInformationForm()
if project_form.validate_on_submit():
# get data from uploaded file
data = upload_file(project_form.upload.data)
        # check if the user uploaded only empty file(s)
if not data:
project_form.upload.errors.append(u'You uploaded only empty file(s)!')
return render_template('user/upload-project.html', form=project_form,
name=current_user.username)
# check if user uploaded too few items
elif data == 1:
project_form.upload.errors.append(u'There are fewer than 5 items!')
return render_template('user/upload-project.html', form=project_form,
name=current_user.username)
# get project name
        # if no name was given, the project is named 'project-<number of existing projects>'
if not project_form.name.data:
name = 'project-%d'%(len(Project.query.all()))
else:
name = project_form.name.data.strip()
# add link to project page for user to view project information
p_name = ("%s"%(re.sub('[^\w]+', '-', name))).strip('-').strip('_')
        # if this name already exists (more than one user has the same project name)
if Project.query.filter_by(name=name).first():
# create link for this new project by adding its id
p_name = '%s-%d'%(p_name,len(Project.query.all()))
            # rename the project if this user uses the same name for more than
            # one of their projects (which rarely happens)
while Project.query.filter_by(name=name, user=current_user).first():
name = '%s (%d)'%(name,len([project for project in current_user.projects \
if project.name==name]))
        # if the project name is unique but the link (p_name) already exists;
        # this should happen at most once, but make sure
while Project.query.filter_by(p_name=p_name).first():
p_name = '%s-%d'%(p_name,len(Project.query.all()))
# add new project
current_project = Project(name=name, description=project_form.description.data, \
anno_number=project_form.anno_number.data, \
best_def=project_form.best_def.data, worst_def=project_form.worst_def.data, \
n_items=len(data.items), user=current_user, p_name=p_name, \
mturk=project_form.mturk.data)
# user wants to upload this project on Mechanical Turk Market
if project_form.mturk.data:
            # use the aws_access_key_id and aws_secret_access_key given by the user
            # if they are not found in the configuration
aws_access_key_id = current_app.config['AWS_ACCESS_KEY_ID'] \
if current_app.config['AWS_ACCESS_KEY_ID'] \
else project_form.aws_access_key_id.data
aws_secret_access_key = current_app.config['AWS_SECRET_ACCESS_KEY'] \
if current_app.config['AWS_SECRET_ACCESS_KEY'] \
else project_form.aws_secret_access_key.data
            # check if the user kept the default values, which never exist
check = []
if aws_access_key_id == project_form.aws_access_key_id.default:
project_form.aws_access_key_id.errors.append("""You must specify your own
aws_access_key_id, default does not exist!""")
check.append(True)
if aws_secret_access_key == project_form.aws_secret_access_key.default:
project_form.aws_secret_access_key.errors.append("""You must specify your own
aws_secret_access_key, default does not exist!""")
check.append(True)
if any(check):
return render_template('user/upload-project.html', form=project_form,
name=current_user.username)
mturk = boto3.client(service_name='mturk',
aws_access_key_id = aws_access_key_id,
aws_secret_access_key = aws_secret_access_key,
region_name='us-east-1',
endpoint_url = current_app.config['MTURK_URL'])
# define endpoint to a HIT using generated hit_id
hit_ids = set()
hit_code = generate_keyword(chars=string.ascii_letters, k_length=3)
# user wants to choose annotators themselves (they want to use our local system)
else:
# add keywords for annotators in local system
for num_anno in range(project_form.anno_number.data):
new_keyword = generate_keyword()
                # make sure the newly created keyword has never been used for any annotator of any project
while Annotator.query.filter_by(keyword=new_keyword).first():
new_keyword = generate_keyword()
# add new key word
Annotator(keyword=new_keyword, project=current_project)
# add batches, tuples and items
for i, tuples_ in data.batches.items():
# create keyword for each batch to upload this project on Mechanical Turk Market
if project_form.mturk.data:
new_keyword = generate_keyword()
                # make sure the newly created keyword has never been used for any batch of any project
while Batch.query.filter_by(keyword=new_keyword).first():
new_keyword = generate_keyword()
# create this HIT on MTurk
# create HIT_ID for the batch in local system (has nothing to do with HITID on MTurk)
new_hit_id = hit_code+generate_keyword(chars=string.digits)
while new_hit_id in hit_ids:
new_hit_id = hit_code+generate_keyword(chars=string.digits)
hit_ids.add(new_hit_id)
                # get the url for the HIT, to be stored on the corresponding HIT on MTurk
url = url_for('mturk.hit', p_name=p_name, hit_id=new_hit_id, _external=True)
# define the questions.xml template with the type box for keyword
response = Response(render_template('questions.xml', title=project_form.name.data,
description=project_form.description.data, url=url),
mimetype='text/plain')
response.implicit_sequence_conversion = False
question = response.get_data(as_text=True)
# get information from user for creating hit on MTurk
p_keyword = project_form.keywords.data
p_reward = project_form.reward.data
lifetime = convert_into_seconds(duration=project_form.lifetime.data, \
unit=project_form.lifetimeunit.data)
hit_duration = convert_into_seconds(duration=project_form.hit_duration.data, \
unit=project_form.duration_unit.data)
# create new hit on MTurk
new_hit = mturk.create_hit(
Title = project_form.name.data,
Description = project_form.description.data,
Keywords = p_keyword,
Reward = p_reward,
MaxAssignments = project_form.anno_number.data,
LifetimeInSeconds = lifetime,
AssignmentDurationInSeconds = hit_duration,
Question = question,
AssignmentReviewPolicy = {
'PolicyName':'ScoreMyKnownAnswers/2011-09-01',
'Parameters': [
{'Key':'AnswerKey',
'MapEntries':[{ 'Key':'keyword',
'Values':[new_keyword]
}]
},
{'Key':'ApproveIfKnownAnswerScoreIsAtLeast',
'Values':['1']
},
{'Key':'RejectIfKnownAnswerScoreIsLessThan',
'Values':['1']
},
{'Key':'RejectReason',
'Values':['''Sorry, we could not approve your submission
as you did not type in the right keyword.''']
}
]
}
)
# no need to create keyword and hit_id for batch as this is for the local process
else:
new_keyword = new_hit_id = None
# add new batch
current_batch = Batch(project=current_project, size=len(tuples_),
keyword=new_keyword, hit_id=new_hit_id)
for tuple_ in tuples_:
# add new tuple
current_tuple = Tuple(batch=current_batch)
for item in tuple_:
# check if this item is already saved in the database
if Item.query.filter_by(item=item).first():
current_tuple.items.append(Item.query.filter_by(item=item).first())
else:
new_item = Item(item=item)
current_tuple.items.append(new_item)
db.session.commit()
return redirect(url_for('user.profile', some_name=current_user.username))
return render_template('user/upload-project.html', form=project_form, name=current_user.username)
| 3.09375
| 3
|
openpnm/integrators/_scipy.py
|
lixuekai2001/OpenPNM
| 2
|
12785358
|
<gh_stars>1-10
from scipy.integrate import solve_ivp
from openpnm.integrators import Integrator
from openpnm.algorithms._solution import TransientSolution
__all__ = ['ScipyRK45']
class ScipyRK45(Integrator):
"""Brief description of 'ScipyRK45'"""
def __init__(self, atol=1e-6, rtol=1e-6, verbose=False, linsolver=None):
self.atol = atol
self.rtol = rtol
self.verbose = verbose
self.linsolver = linsolver
def solve(self, rhs, x0, tspan, saveat, **kwargs):
"""
Solves the system of ODEs defined by dy/dt = rhs(t, y).
Parameters
----------
rhs : function handle
RHS vector in the system of ODEs defined by dy/dt = rhs(t, y)
x0 : array_like
Initial value for the system of ODEs
tspan : array_like
2-element tuple (or array) representing the timespan for the
system of ODEs
saveat : float or array_like
If float, defines the time interval at which the solution is
to be stored. If array_like, defines the time points at which
the solution is to be stored.
**kwargs : keyword arguments
Other keyword arguments that might get used by the integrator
Returns
-------
TransientSolution
Solution of the system of ODEs stored in a subclass of numpy's
ndarray with some added functionalities (ex. you can get the
solution at intermediate time points via: y = soln(t_i)).
"""
options = {
"atol": self.atol,
"rtol": self.rtol,
"t_eval": saveat,
# FIXME: uncomment next line when/if scipy#11815 is merged
# "verbose": self.verbose,
}
sol = solve_ivp(rhs, tspan, x0, method="RK45", **options)
if sol.success:
return TransientSolution(sol.t, sol.y)
raise Exception(sol.message)
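# A minimal usage sketch (hypothetical ODE dy/dt = -y; saveat is passed as
# explicit time points, matching how it is forwarded to solve_ivp's t_eval):
# integrator = ScipyRK45(atol=1e-8, rtol=1e-8)
# soln = integrator.solve(lambda t, y: -y, x0=[1.0], tspan=(0.0, 5.0),
#                         saveat=[0.0, 1.0, 2.0, 3.0, 4.0, 5.0])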
| 2.890625
| 3
|
wk1_functions.py
|
RitRa/Programming-for-Data-Analysis
| 0
|
12785359
|
# <NAME> 21/09/18 week 1
# function for the Greatest Common Divisor
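# Euclid's algorithm: repeatedly replace (a, b) with (b, a % b) until b is 0;
# the remaining a is the greatest common divisor.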
def gcd(a, b):
while b > 0:
a, b = b, a % b
    return a
print(gcd(50, 20))
print(gcd(22, 143))
| 3.90625
| 4
|
splat/helpers/purgeUser.py
|
samskivert/splatd
| 0
|
12785360
|
# purgeUser.py vi:ts=4:sw=4:expandtab:
#
# LDAP User Purging Helper.
# Author:
# <NAME> <<EMAIL>>
#
# Copyright (c) 2006 Three Rings Design, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright owner nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import logging
import shutil
import tarfile
import time
import errno
import homeutils
import splat
from splat import plugin
logger = logging.getLogger(splat.LOG_NAME)
# Child process exit codes
PURGE_ERR_NONE = 0
PURGE_ERR_PRIVSEP = 1
PURGE_ERR_RM = 2
class WriterContext(object):
def __init__(self):
self.home = None
self.minuid = None
self.mingid = None
self.archiveHomeDir = True
self.purgeHomeDir = True
self.purgeHomeArchive = True
self.archiveDest = '/home'
self.purgeArchiveWait = 14
class Writer(plugin.Helper):
@classmethod
def attributes(self):
return ('pendingPurge', 'uid') + homeutils.requiredAttributes()
@classmethod
def parseOptions(self, options):
context = WriterContext()
for key in options.iterkeys():
if (key == 'home'):
context.home = str(options[key])
if (context.home[0] != '/'):
raise plugin.SplatPluginError, "Relative paths for the home option are not permitted"
continue
if (key == 'minuid'):
context.minuid = int(options[key])
continue
if (key == 'mingid'):
context.mingid = int(options[key])
continue
if (key == 'archivehomedir'):
context.archiveHomeDir = self._parseBooleanOption(str(options[key]))
continue
if (key == 'purgehomedir'):
context.purgeHomeDir = self._parseBooleanOption(str(options[key]))
continue
if (key == 'purgehomearchive'):
context.purgeHomeArchive = self._parseBooleanOption(str(options[key]))
continue
if (key == 'archivedest'):
context.archiveDest = os.path.abspath(options[key])
continue
if (key == 'purgearchivewait'):
context.purgeArchiveWait = int(options[key])
continue
raise plugin.SplatPluginError, "Invalid option '%s' specified." % key
# Validation of some options.
if (context.purgeHomeArchive and not context.archiveHomeDir):
raise plugin.SplatPluginError, "Cannot purge home directory archives if the archives are never created. Set archivehomedir to true."
if (context.archiveHomeDir):
if (context.archiveDest[0] != '/'):
raise plugin.SplatPluginError, "Relative paths for the archivedest option are not permitted."
if (not os.path.isdir(context.archiveDest)):
raise plugin.SplatPluginError, "Archive destination directory %s does not exist or is not a directory" % context.archiveDest
return context
# Creates a tarred and gzipped archive of a home directory.
def _archiveHomeDir(self, home, archiveFile):
# Create new gzipped tar file. Have to use os.open() to create it,
# close, then use tarfile.open() because tarfile.open() does not let
# you set file permissions.
try:
fd = os.open(archiveFile, os.O_CREAT, 0600)
os.close(fd)
archive = tarfile.open(archiveFile, 'w:gz')
except (IOError, OSError), e:
raise plugin.SplatPluginError, "Cannot create archive file %s: %s" % (archiveFile, str(e))
# Strip any trailing / characters from home
home = os.path.normpath(home)
# Add all files in homedir to tar file
try:
archive.add(home, arcname=os.path.basename(home))
# Keep close in the try block too, because it will throw an
# exception if we run out of space.
archive.close()
logger.info("Archive %s created." % archiveFile)
except (IOError, OSError), e:
raise plugin.SplatPluginError, "Unable to add all files to archive %s: %s" % (archiveFile, e)
    # Drops privileges to the owner of the home directory, then recursively
    # removes all files in it. If this succeeds, the (probably empty) home
    # directory will be removed by the privileged user splatd runs as.
def _purgeHomeDir(self, home, uidNumber, gidNumber):
# File descriptors to use for error strings from child process
pipe = os.pipe()
infd = os.fdopen(pipe[0], 'r')
outfd = os.fdopen(pipe[1], 'w')
# Fork and drop privileges
pid = os.fork()
if (pid == 0):
try:
os.setgid(gidNumber)
os.setuid(uidNumber)
except OSError, e:
outfd.write(str(e) + '\n')
outfd.close()
os._exit(PURGE_ERR_PRIVSEP)
# Recursively remove home directory contents
try:
for filename in os.listdir(home):
absPath = os.path.join(home, filename)
if (os.path.isdir(absPath)):
shutil.rmtree(absPath)
else:
os.remove(absPath)
except OSError, e:
outfd.write(str(e) + '\n')
outfd.close()
os._exit(PURGE_ERR_RM)
os._exit(PURGE_ERR_NONE)
# Wait for child to exit
else:
while True:
try:
result = os.waitpid(pid, 0)
except OSError, e:
if (e.errno == errno.EINTR):
continue
raise
break
# Check exit status of child process
status = os.WEXITSTATUS(result[1])
if (status == PURGE_ERR_NONE):
outfd.close()
infd.close()
# If everything went ok, delete home directory
try:
os.rmdir(home)
except OSError, e:
raise plugin.SplatPluginError, "Unable to remove directory %s: %s" % (home, str(e))
logger.info("Home directory %s purged successfully." % home)
# Deal with error conditions
else:
error = infd.readline()
infd.close()
if (status == PURGE_ERR_PRIVSEP):
raise plugin.SplatPluginError, "Unable to drop privileges to uid number %d, gid number %d and purge %s: %s" % (uidNumber, gidNumber, home, error)
elif (status == PURGE_ERR_RM):
raise plugin.SplatPluginError, "Unable to remove all files in %s: %s" % (home, error)
# Unlink the specified file archive, which should be an archived homedir.
def _purgeHomeArchive(self, archive):
try:
os.remove(archive)
except OSError, e:
raise plugin.SplatPluginError, "Unable to remove archive %s: %s" % (archive, str(e))
logger.info("Archive %s removed successfully." % archive)
def work(self, context, ldapEntry, modified):
# Get all needed LDAP attributes, and verify we have what we need
attributes = ldapEntry.attributes
if (not attributes.has_key('pendingPurge')):
raise plugin.SplatPluginError, "Required attribute pendingPurge not found for dn %s." % ldapEntry.dn
if (not attributes.has_key('uid')):
raise plugin.SplatPluginError, "Required attribute uid not found for dn %s." % ldapEntry.dn
pendingPurge = attributes.get('pendingPurge')[0]
username = attributes.get('uid')[0]
(home, uidNumber, gidNumber) = homeutils.getLDAPAttributes(ldapEntry, context.home, context.minuid, context.mingid)
# Get current time (in GMT).
now = int(time.strftime('%Y%m%d%H%M%S', time.gmtime(time.time())))
# Do nothing if pendingPurge is still in the future.
if (now < int(pendingPurge.rstrip('Z'))):
return
# If archiveHomeDir and not already archived or purged, archive homedir.
archiveFile = os.path.join(context.archiveDest, os.path.basename(home) + '.tar.gz')
if (context.archiveHomeDir and (not os.path.isfile(archiveFile)) and os.path.isdir(home)):
self._archiveHomeDir(home, archiveFile)
# If purgeHomeDir and not already purged, purge homedir.
if (context.purgeHomeDir and os.path.isdir(home)):
self._purgeHomeDir(home, uidNumber, gidNumber)
# Purge archive if it is old enough, and we are supposed to purge them.
if (context.purgeHomeArchive and os.path.isfile(archiveFile)):
# Number of seconds since archiveFile was last modified.
archiveModifiedAge = int(time.time()) - os.path.getmtime(archiveFile)
if ((archiveModifiedAge / 86400) > context.purgeArchiveWait):
self._purgeHomeArchive(archiveFile)
| 1.632813
| 2
|
python/physical/unit/statute.py
|
afrl-quantum/physical
| 1
|
12785361
|
from ..const import const
class statute(const):
def __init__(self,prefix,unit):
const.__init__(self,prefix + 'statute')
self.mile = 5280.0*unit.feet
self.miles = self.mile
self.mi = self.mile
self.league = 3.0*self.miles
self.leagues = self.league
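        # For reference: 1 statute league = 3 statute miles = 15,840 feet.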
| 2.921875
| 3
|
scripts/helm-version.py
|
mapster/k8ssandra
| 0
|
12785362
|
#!/usr/bin/env python3
import subprocess
from ruamel.yaml import YAML
import glob
import re
import argparse
class Semver:
def __init__(self, major: int, minor: int, patch: int):
self.major = major
self.minor = minor
self.patch = patch
def incr_major(self):
self.major = self.major + 1
self.patch = 0
self.minor = 0
def incr_minor(self):
self.minor = self.minor + 1
self.patch = 0
def incr_patch(self):
self.patch = self.patch + 1
def to_string(self) -> str:
return F'{self.major}.{self.minor}.{self.patch}'
@classmethod
    def parse(cls, input_str: str):
# Parse and validate, return new instance of Semver
if re.fullmatch(r'^([0-9]+)\.([0-9]+)\.([0-9]+)$', input_str):
split_list = input_str.split('.')
split_list = [int(i) for i in split_list]
            return cls(*split_list)
raise Exception(F'Invalid input version value: {input_str}')
def update_charts(update_func):
yaml = YAML()
    yaml.indent(mapping=2, sequence=4, offset=2)
main_dir = subprocess.run(["git", "rev-parse", "--show-toplevel"], check=True, stdout=subprocess.PIPE).stdout.strip().decode('utf-8')
search_path = F'{main_dir}/charts/**/Chart.yaml'
for path in glob.glob(search_path, recursive=True):
if re.match('^.*cass-operator.*', path):
continue
with open(path) as f:
chart = yaml.load(f)
semver = Semver.parse(chart['version'])
update_func(semver)
chart['version'] = semver.to_string()
with open(path, 'w') as f:
yaml.dump(chart, f)
print(F'Updated {path} to {semver.to_string()}')
def main():
parser = argparse.ArgumentParser(description='Update Helm chart versions in k8ssandra project')
parser.add_argument('--incr', choices=['major', 'minor', 'patch'], help='increase part of semver by one')
args = parser.parse_args()
if args.incr:
if args.incr == 'major':
update_charts(Semver.incr_major)
elif args.incr == 'minor':
update_charts(Semver.incr_minor)
elif args.incr == 'patch':
update_charts(Semver.incr_patch)
if __name__ == "__main__":
main()
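# Usage sketch (assuming the script is run from within the git repository):
#   python scripts/helm-version.py --incr patch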
| 2.40625
| 2
|
src/dugulib/sort.py
|
Peefy/CLRS_dugu_code-master
| 3
|
12785363
|
<filename>src/dugulib/sort.py
'''
Collection of sorting algorithms
First
=====
Bubble sort `O(n^2)` ok
Cocktail sort (bidirectional bubble sort) `O(n^2)`
Insertion sort `O(n^2)` ok
Bucket sort `O(n)` ok
Counting sort `O(n + k)` ok
Merge sort `O(nlgn)` ok
In-place merge sort `O(n^2)` ok
Binary search tree sort `O(nlgn)` ok
Pigeonhole sort `O(n+k)`
Radix sort `O(nk)` ok
Gnome sort `O(n^2)`
Library sort `O(nlgn)`
Second
======
Selection sort `O(n^2)` ok
Shell sort `O(nlgn)`
Comb sort `O(nlgn)`
Heapsort `O(nlgn)` ok
Smoothsort `O(nlgn)`
Quicksort `O(nlgn)`
Introsort `O(nlgn)`
Patience sort `O(nlgn + k)`
Third
=====
Bogosort `O(n*n!)`
Stupid sort `O(n^3)`
Bead sort `O(n) or O(sqrt(n))`
Pancake sort `O(n)`
Stooge sort `O(n^2.7)` ok
'''
# python src/dugulib/sort.py
# python3 src/dugulib/sort.py
from __future__ import division, absolute_import, print_function
import math as _math
import random as _random
from copy import deepcopy as _deepcopy
from numpy import arange as _arange
# list.sort() returns None; sorted() keeps the export list intact
__all__ = sorted(['insertsort', 'selectsort', 'bubblesort',
                  'mergesort', 'heapsort', 'quicksort',
                  'stoogesort', 'shellsort'])
class Sort:
'''
    Collection class of sorting algorithms
'''
def insertsort(self, array : list) -> list:
        '''
        Summary
        ===
        Insertion sort in ascending order, time complexity `O(n^2)`
        Parameter
        ===
        `array` : a list like
        Return
        ===
        `sortedarray` : the sorted array
        Example
        ===
        ```python
        >>> import sort
        >>> array = [1, 3, 5, 2, 4, 6]
        >>> sort.insertsort(array)
        >>> [1, 2, 3, 4, 5, 6]
        ```
        '''
        A = array
        n = len(A)
        for j in range(1, n):
            ## Insert A[j] into the sorted sequence A[1...j-1] (the cards already in hand)
            # Index j marks the card currently being inserted, so it starts
            # at the second element of the array (the newly drawn card)
            key = A[j]
            # Largest index of the already-sorted cards in hand
            i = j - 1
            # Find the insertion position, shifting cards as we go
            while(i >= 0 and A[i] > key):
                # Shift the card one position to the right
                A[i + 1] = A[i]
                # Move on to the previous card
                i = i - 1
            # Drop the newly drawn card into its slot
            A[i + 1] = key
        # Return the cards in ascending order
        return A
    def selectsort(self, array : list) -> list:
        '''
        Summary
        ===
        Selection sort in ascending order, time complexity `O(n^2)`
        Args
        ===
        `array` : a list like
        Return
        ===
        `sortedArray` : the sorted array
        Example
        ===
        ```python
        >>> import sort
        >>> array = [1, 3, 5, 2, 4, 6]
        >>> sort.selectsort(array)
        >>> [1, 2, 3, 4, 5, 6]
        ```
        '''
        A = array
        length = len(A)
        for j in range(length):
            minIndex = j
            # Find the minimum among the elements from index j to the end of A
            # (only needs to run over the first n-1 elements)
            for i in range(j, length):
                if A[i] <= A[minIndex]:
                    minIndex = i
            # Swap the minimum element with the front element
            A[minIndex], A[j] = A[j], A[minIndex]
        return A
def bubblesort(self, array : list) -> list:
        '''
        Bubble sort, time complexity `O(n^2)`
        Args
        ====
        `array` : the array before sorting
        Return
        ======
        `sortedArray` : the array sorted with bubble sort
        Example
        ===
        ```python
        >>> import sort
        >>> A = [6, 5, 4, 3, 2, 1]
        >>> sort.bubblesort(A)
        >>> [1, 2, 3, 4, 5, 6]
        ```
        '''
nums = _deepcopy(array)
for i in range(len(nums) - 1):
for j in range(len(nums) - i - 1):
if nums[j] > nums[j + 1]:
nums[j], nums[j + 1] = nums[j + 1], nums[j]
return nums
def __mergeSortOne(self, array : list, p : int ,q : int, r : int) -> list:
        '''
        One merge step: merges two sorted piles of cards
        Args
        ===
        `array` : a array like
        Returns
        ===
        `sortedArray` : the sorted array
        Raises
        ===
        `None`
        '''
        # In Python, variable names and objects are separate:
        # here A is just a reference to array
        A = array
        # Compute the array length, then split into two piles ([p..q],[q+1..r]) ([0..q],[q+1..n-1])
        n = r + 1
        # Validate the input arguments
        if q < 0 or q > n - 1:
            raise Exception("arg 'q' must be in the range [0, len(array))")
        # n1 + n2 = n
        # Compute the lengths of the two piles
        n1 = q - p + 1
        n2 = r - q
        # Build the two piles (with room for the "sentinel" cards)
        L = _arange(n1 + 1, dtype=float)
        R = _arange(n2 + 1, dtype=float)
        # Deal A into the two piles
        for i in range(n1):
            L[i] = A[p + i]
        for j in range(n2):
            R[j] = A[q + j + 1]
        # Add infinite "sentinel" cards, which neatly handles uneven piles
        L[n1] = _math.inf
        R[n2] = _math.inf
        # Merging assumes both piles are already sorted; keep comparing the
        # tops of the two piles and move the smaller card into the new pile
        i, j = 0, 0
        for k in range(p, n):
            if L[i] <= R[j]:
                A[k] = L[i]
                i += 1
            else:
                A[k] = R[j]
                j += 1
        return A
def __mergeSort(self, array : list, start : int, end : int) -> list:
        '''
        The overall merge sort procedure
        Args
        ===
        `array` : the array to sort
        `start` : starting index of the sort
        `end` : ending index of the sort
        Return
        ===
        `sortedArray` : the sorted array
        Example
        ===
        ```python
        >>> import sort
        >>> sort.mergeSort([6, 5, 4, 3, 2, 1])
        >>> [1, 2, 3, 4, 5, 6]
        ```
        '''
        # In Python everything is an object and a reference, so copy...
        # especially in recursive calls
        r = _deepcopy(end)
        p = _deepcopy(start)
        if p < r:
            # Split the sequence to sort into two halves
            middle = (r + p) // 2
            q = _deepcopy(middle)
            # Recursive call on the left half
            self.__mergeSort(array, p, q)
            # Recursive call on the right half
            self.__mergeSort(array, q + 1, r)
            # Merge the two sorted halves
            self.__mergeSortOne(array, p, q, r)
        return array
def mergesort(self, array : list) -> list:
        '''
        Merge sort: optimal comparison-sort complexity `O(n * log2(n))`, space complexity `O(n)`
        Args
        ===
        array : the array to sort
        Returns
        ===
        sortedArray : the sorted array
        Example
        ===
        ```python
        >>> import sort
        >>> sort.mergesort([6, 5, 4, 3, 2, 1])
        >>> [1, 2, 3, 4, 5, 6]
        ```
        '''
return self.__mergeSort(array, 0, len(array) - 1)
def left(self, i : int) -> int:
'''
        Return the index of the left child of node i in a binary heap
'''
return int(2 * i + 1)
def right(self, i : int) -> int:
'''
        Return the index of the right child of node i in a binary heap
'''
return int(2 * i + 2)
def parent(self, i : int) -> int:
'''
        Return the index of the parent of node i in a binary heap
'''
return (i + 1) // 2 - 1
def heapsize(self, A : list) -> int:
'''
        Return the heap size of a binary heap stored as an array
'''
return len(A) - 1
def maxheapify(self, A : list, i : int) -> list:
'''
        Maintain the max-heap property at node i (whose subtrees are already max-heaps); non-recursive version
'''
count = len(A)
largest = count
while largest != i:
l = self.left(i)
r = self.right(i)
if l <= self.heapsize(A) and A[l] >= A[i]:
largest = l
else:
largest = i
if r <= self.heapsize(A) and A[r] >= A[largest]:
largest = r
if largest != i:
A[i], A[largest] = A[largest], A[i]
i, largest = largest, count
return A
def buildmaxheap(self, A : list) -> list:
'''
        Build a max-heap from an array, time cost O(n)
'''
count = int(len(A) // 2)
for i in range(count + 1):
self.maxheapify(A, count - i)
return A
def heapsort(self, A : list) -> list:
        '''
        Heapsort procedure, time cost O(nlgn)
        Args
        ===
        A : the array A to sort
        Return
        ====
        sortedA : the sorted array
        Example
        ====
        ```python
        >>> import sort
        >>> sort.heapsort([7, 6, 5, 4, 3, 2, 1])
        >>> [1, 2, 3, 4, 5, 6, 7]
        ```
        '''
heapsize = len(A) - 1
def left(i : int):
'''
            Return the index of the left child of node i in a binary heap
'''
return int(2 * i + 1)
def right(i : int):
'''
            Return the index of the right child of node i in a binary heap
'''
return int(2 * i + 2)
def parent(i : int):
'''
            Return the index of the parent of node i in a binary heap
'''
return (i + 1) // 2 - 1
def __maxheapify(A : list, i : int):
count = len(A)
largest = count
while largest != i:
l = left(i)
r = right(i)
if l <= heapsize and A[l] >= A[i]:
largest = l
else:
largest = i
if r <= heapsize and A[r] >= A[largest]:
largest = r
if largest != i:
A[i], A[largest] = A[largest], A[i]
i, largest = largest, count
return A
self.buildmaxheap(A)
length = len(A)
for i in range(length - 1):
j = length - 1 - i
A[0], A[j] = A[j], A[0]
heapsize = heapsize - 1
__maxheapify(A, 0)
return A
def partition(self, A : list, p : int, r : int):
'''
        Array partition subroutine for quicksort
'''
x = A[r]
i = p - 1
j = p - 1
for j in range(p, r):
if A[j] <= x:
i = i + 1
A[i], A[j] = A[j], A[i]
if A[j] == x:
j = j + 1
A[i + 1], A[r] = A[r], A[i + 1]
if j == r:
return (p + r) // 2
return i + 1
def __quicksort(self, A : list, p : int, r : int):
left = _deepcopy(p)
right = _deepcopy(r)
if left < right:
middle = _deepcopy(self.partition(A, left, right))
self.__quicksort(A, left, middle - 1)
self.__quicksort(A, middle + 1, right)
def quicksort(self, A : list):
        '''
        Quicksort: worst-case time complexity `O(n^2)`, but the expected average time is a better `Θ(nlgn)`
        Args
        ====
        `A` : the array before sorting `(sorted in place)`
        Return
        ======
        `A` : the array sorted with quicksort `(sorted in place)`
        Example
        ===
        ```python
        >>> import sort
        >>> A = [6, 5, 4, 3, 2, 1]
        >>> sort.quicksort(A)
        >>> [1, 2, 3, 4, 5, 6]
        ```
        '''
self.__quicksort(A, 0, len(A) - 1)
return A
def __stoogesort(self, A, i, j):
if A[i] > A[j]:
A[i], A[j] = A[j], A[i]
if i + 1 >= j:
return A
k = (j - i + 1) // 3
        self.__stoogesort(A, i, j - k)
        self.__stoogesort(A, i + k, j)
        return self.__stoogesort(A, i, j - k)
def stoogesort(self, A : list) -> list:
        '''
        Stooge sort, in place, time complexity O(n^2.7)
        Args
        ===
        `A` : the array before sorting (sorted in place)
        '''
        return self.__stoogesort(A, 0, len(A) - 1)
def shellsort(self, A : list):
"""
        Shell sort, in place; complexity depends on the gap sequence (roughly O(n^1.5) for this halving sequence)
"""
n = len(A)
fraction = n // 2
while fraction > 0:
for i in range(fraction, n):
for j in range(i - fraction, -1, -fraction):
if A[j] > A[j + fraction]:
A[j], A[j + fraction] = A[j + fraction], A[j]
else:
break
fraction //= 2
return A
def countingsort2(self, A):
        '''
        Counting sort: no comparisons, not in place, time complexity `Θ(n)`
        Args
        ===
        `A` : the array to sort
        Return
        ===
        `sortedarray` : the sorted array
        Example
        ===
        ```python
        >>> countingsort2([0,1,1,3,4,6,5,3,5])
        >>> [0,1,1,3,3,4,5,5,6]
        ```
        '''
return self.countingsort(A, max(A) + 1)
def countingsort(self, A, k):
        '''
        Counting sort of array `A`: no comparisons, not in place; when `k=O(n)` the
        algorithm's time complexity is `Θ(n)` (three O(n) for-loops).
        Requires knowing in advance that no element of the array exceeds `k`
        Args
        ===
        `A` : the array to sort
        `k` : no element of the array is greater than k
        Return
        ===
        `sortedarray` : the sorted array
        Example
        ===
        ```python
        >>> countingsort([0,1,1,3,4,6,5,3,5], 6)
        >>> [0,1,1,3,3,4,5,5,6]
        ```
        '''
C = []
B = _deepcopy(A)
for i in range(k):
C.append(0)
length = len(A)
for j in range(length):
C[A[j]] = C[A[j]] + 1
for i in range(1, k):
C[i] = C[i] + C[i - 1]
for i in range(length):
j = length - 1 - i
B[C[A[j]] - 1] = A[j]
C[A[j]] = C[A[j]] - 1
return B
def getarraystr_subarray(self, A, k):
        '''
        Extract the subarray formed by the k-th digit of each element of an array
        Args
        ===
        `A` : the array to extract from
        `k` : digit 1 is the lowest digit, digit d is the highest
        Return
        ===
        `subarray` : the extracted subarray
        Example
        ===
        ```python
        getarraystr_subarray([123, 456, 789], 1)
        [3, 6, 9]
        ```
        '''
B = []
length = len(A)
for i in range(length):
B.append(int(str(A[i])[-k]))
return B
def radixsort(self, A, d):
        '''
        Radix sort, time complexity `Θ(d(n + k))`
        Args
        ===
        `A` : the array to sort
        `d` : every element of A has d digits, where digit 1 is the lowest and digit d is the highest
        Return
        ===
        `sortedarray` : the sorted array
        Example
        ===
        ```python
        >>> radixsort([54,43,32,21,11], 2)
        >>> [11, 21, 32, 43, 54]
        ```
        '''
length = len(A)
B = []
for i in range(d):
B.append(self.getarraystr_subarray(A, i + 1))
for k in range(d):
B[k] = self.countingsort(B[k], max(B[k]) + 1)
C = _arange(length)
for j in range(length):
for i in range(d):
C[j] += B[i][j] * 10 ** i
C[j] = C[j] - j
return C
def bucketsort(self, A):
        '''
        Bucket sort, expected time complexity `Θ(n)` (given input uniformly
        distributed over `[0,1)`)
        Needs an extra `list` data structure and storage
        Args
        ===
        `A` : the array to sort
        Return
        ===
        `sortedarray` : the sorted array
        Example
        ===
        ```python
        >>> bucketsort([0.5, 0.4, 0.3, 0.2, 0.1])
        >>> [0.1, 0.2, 0.3, 0.4, 0.5]
        ```
        '''
n = len(A)
B = []
for i in range(n):
B.insert(int(n * A[i]), A[i])
return self.insertsort(B)
def __find_matching_kettle(self, kettles1, kettles2):
        '''
        Problem 8-4: find the matching water jugs and return the matching index pairs
        Example
        ===
        ```python
        >>> list(find_matching_kettle([1,2,3,4,5], [5,4,3,2,1]))
        [(0, 4), (1, 3), (2, 2), (3, 1), (4, 0)]
        ```
        '''
assert len(kettles1) == len(kettles2)
n = len(kettles1)
for i in range(n):
for j in range(n):
if kettles1[i] == kettles2[j]:
yield (i, j)
def find_matching_kettle(self, kettles1, kettles2):
        '''
        Problem 8-4: find the matching water jugs and return the list of matching index pairs
        Example
        ===
        ```python
        >>> list(find_matching_kettle([1,2,3,4,5], [5,4,3,2,1]))
        [(0, 4), (1, 3), (2, 2), (3, 1), (4, 0)]
        ```
        '''
return list(self.__find_matching_kettle(kettles1, kettles2))
def quicksort_oneline(arr):
return arr if len(arr) < 2 else (quicksort_oneline([i for i in arr[1:] if i <= arr[0]]) + [arr[0]] + quicksort_oneline([i for i in arr[1:] if i > arr[0]]))
def merge(a, b):
ret = []
i = j = 0
while len(a) >= i + 1 and len(b) >= j + 1:
if a[i] <= b[j]:
ret.append(a[i])
i += 1
else:
ret.append(b[j])
j += 1
if len(a) > i:
ret += a[i:]
if len(b) > j:
ret += b[j:]
return ret
def mergesort_easy(arr):
if len(arr) < 2:
return arr
else:
left = mergesort_easy(arr[0 : len(arr) // 2])
right = mergesort_easy(arr[len(arr) // 2:])
return merge(left, right)
_inst = Sort()
insertsort = _inst.insertsort
selectsort = _inst.selectsort
bubblesort = _inst.bubblesort
mergesort = _inst.mergesort
heapsort = _inst.heapsort
quicksort = _inst.quicksort
stoogesort = _inst.stoogesort
shellsort = _inst.shellsort
def test():
'''
sort.insertsort test
sort.selectsort test
sort.bubblesort test
sort.mergesort test
sort.heapsort test
sort.quicksort test
'''
print(insertsort([8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]))
print(selectsort([8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]))
print(bubblesort([8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]))
print(mergesort([8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]))
print(heapsort([8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]))
print(quicksort([8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]))
print(shellsort([8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]))
print(quicksort_oneline([8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]))
print(mergesort_easy([8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0]))
print('module sort test successful!!')
if __name__ == '__main__':
    # python src/dugulib/sort.py
    # python3 src/dugulib/sort.py
    test()
| 3.25
| 3
|
src/sagemaker_huggingface_inference_toolkit/serving.py
|
oconnoat/sagemaker-huggingface-inference-toolkit
| 46
|
12785364
|
<reponame>oconnoat/sagemaker-huggingface-inference-toolkit
# Copyright 2021 The HuggingFace Team, Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import CalledProcessError
from retrying import retry
from sagemaker_huggingface_inference_toolkit import handler_service, mms_model_server
HANDLER_SERVICE = handler_service.__name__
def _retry_if_error(exception):
    # isinstance needs a tuple of types; `CalledProcessError or OSError`
    # would only ever test against CalledProcessError
    return isinstance(exception, (CalledProcessError, OSError))
@retry(stop_max_delay=1000 * 50, retry_on_exception=_retry_if_error)
def _start_mms():
mms_model_server.start_model_server(handler_service=HANDLER_SERVICE)
def main():
_start_mms()
| 1.757813
| 2
|
0055.jump_game/solution.py
|
WZMJ/Algorithms
| 5
|
12785365
|
<filename>0055.jump_game/solution.py
from typing import List
class Solution:
def can_jump(self, nums: List[int]) -> bool:
last = len(nums) - 1
for i in range(len(nums) - 1, -1, -1):
if i + nums[i] >= last:
last = i
return last == 0
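# Greedy idea: scan right to left, tracking the leftmost index from which the
# end is reachable; the answer is whether index 0 reaches that index.
# Usage sketch:
# Solution().can_jump([2, 3, 1, 1, 4])  # True: jump 0 -> 1 -> 4
# Solution().can_jump([3, 2, 1, 0, 4])  # False: every path gets stuck at index 3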
| 3.484375
| 3
|
board.py
|
adri326/ia41-project
| 0
|
12785366
|
<reponame>adri326/ia41-project<filename>board.py<gh_stars>0
class Board:
def __init__(self, width, height):
self.cars = []
self.width = width
self.height = height
self.exit_y = 2
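        # Each car is stored as an (x, y, length, horizontal) tuple;
        # cars[0] is the target car, which must reach the right edge (see solved()).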
# Finds and returns the car at (x, y). If there isn't any, returns None
def get_car(self, x, y):
for n in range(0, len(self.cars)):
if self.cars[n][3]:
if x >= self.cars[n][0] and \
x < self.cars[n][0] + self.cars[n][2] and \
y == self.cars[n][1]:
return n
else:
if y >= self.cars[n][1] and \
y < self.cars[n][1] + self.cars[n][2] and \
x == self.cars[n][0]:
return n
return None
# Hash function
def __hash__(self):
res = hash((self.width, self.height))
for car in self.cars:
# I don't know how to rotate `res`, so this shall do
res = res ^ hash(car)
return res
# Returns true if the position is solved
def solved(self):
return len(self.cars) > 0 and self.cars[0][0] + self.cars[0][2] == self.width
# Returns an instance of Board where the `index`-th car was moved to `(x, y)`
def move(self, index, x, y):
        res = Board(self.width, self.height) # Create a new Board instance
        res.exit_y = self.exit_y
        for n in range(0, len(self.cars)): # The cars are cloned,
            if n == index: # except for car `index`, which gets a new position
                res.cars.append((x, y, self.cars[n][2], self.cars[n][3]))
            else:
                res.cars.append(self.cars[n])
        return res
    # Returns a list of child states and the movements to get to these
    def next_states(self):
        res = [] # The list to return, holding pairs of positions and movements
        for n in range(0, len(self.cars)): # For each car in self.cars
            car = self.cars[n]
            if car[3]: # If the car is horizontal
                for x in range(0, car[0]): # For x in [0; car.x[
                    if self.get_car(car[0] - x - 1, car[1]) is None: # If the cell is empty:
                        # Append the new position and the movement made to res
                        res.append((self.move(n, car[0] - x - 1, car[1]), (n, car[0] - x - 1, car[1])))
                    else: # Otherwise, stop
                        break
                for x in range(car[0], self.width - car[2]): # For x in [car.x; width - car.length[
                    if self.get_car(x + car[2], car[1]) is None:
                        res.append((self.move(n, x + 1, car[1]), (n, x + 1, car[1])))
                    else:
                        break
            else: # If the car is vertical
                for y in range(0, car[1]): # For y in [0; car.y[
                    if self.get_car(car[0], car[1] - y - 1) is None:
                        res.append((self.move(n, car[0], car[1] - y - 1), (n, car[0], car[1] - y - 1)))
                    else:
                        break
                for y in range(car[1], self.height - car[2]): # For y in [car.y; height - car.length[
                    if self.get_car(car[0], y + car[2]) is None:
                        res.append((self.move(n, car[0], y + 1), (n, car[0], y + 1)))
                    else:
                        break
        return res
| 3.640625
| 4
|
threedi_modelchecker/simulation_templates/laterals/extractor.py
|
nens/threedi-modelchecker
| 0
|
12785367
|
<filename>threedi_modelchecker/simulation_templates/laterals/extractor.py<gh_stars>0
from typing import List
from threedi_modelchecker.simulation_templates.exceptions import SchematisationError
from threedi_modelchecker.threedi_model.models import Lateral1d, Lateral2D
from sqlalchemy.orm import Query
from threedi_api_client.openapi.models.lateral import Lateral
from sqlalchemy.orm.session import Session
from threedi_modelchecker.simulation_templates.utils import strip_dict_none_values
from threedi_modelchecker.simulation_templates.utils import parse_timeseries
def lateral_1d_to_api_lateral(lateral_1d: Lateral1d) -> Lateral:
try:
values = parse_timeseries(lateral_1d.timeseries)
except (ValueError, TypeError):
raise SchematisationError(
f"Incorrect timeseries format for lateral 1D with id: {lateral_1d.id}"
)
offset = values[0][0]
if offset > 0:
# Shift timeseries to start at t=0
values = [[x[0] - offset, x[1]] for x in values]
return Lateral(
connection_node=int(lateral_1d.connection_node_id),
offset=int(values[0][0]),
values=values,
units="m3/s",
interpolate=False,
)
def lateral_2d_to_api_lateral(lateral_2d: Lateral2D, session: Session) -> Lateral:
try:
values = parse_timeseries(lateral_2d.timeseries)
except (ValueError, TypeError):
raise SchematisationError(
f"Incorrect timeseries format for lateral 2D with id: {lateral_2d.id}"
)
offset = values[0][0]
if offset > 0:
# Shift timeseries to start at t=0
values = [[x[0] - offset, x[1]] for x in values]
    # The (x, y) order is correct, e.g. (4.294348493375471, 52.033176579129936), until we alter the API
point = session.scalar(lateral_2d.the_geom.ST_AsGeoJSON())
return Lateral(
offset=int(values[0][0]),
values=values,
point=point,
units="m3/s",
interpolate=False,
)
class LateralsExtractor(object):
def __init__(self, session: Session):
self.session = session
self._laterals_2d = None
self._laterals_1d = None
@property
def laterals_2d(self) -> List[Lateral]:
if self._laterals_2d is None:
laterals_2d = Query(Lateral2D).with_session(self.session).all()
self._laterals_2d = [
lateral_2d_to_api_lateral(x, self.session) for x in laterals_2d
]
return self._laterals_2d
@property
def laterals_1d(self) -> List[Lateral]:
if self._laterals_1d is None:
laterals_1d = Query(Lateral1d).with_session(self.session).all()
self._laterals_1d = [lateral_1d_to_api_lateral(x) for x in laterals_1d]
return self._laterals_1d
def all_laterals(self) -> List[Lateral]:
return self.laterals_2d + self.laterals_1d
def as_list(self) -> List[dict]:
json_laterals = []
for lateral in self.all_laterals():
json_lateral = lateral.to_dict()
strip_dict_none_values(json_lateral)
json_laterals.append(json_lateral)
return json_laterals
| 2.171875
| 2
|
tests/preprocessors/test_variables_preprocessor.py
|
swquinn/hon
| 0
|
12785368
|
<filename>tests/preprocessors/test_variables_preprocessor.py
import pytest
from hon.preprocessors.variables import (
VariablesPreprocessor
)
| 1.21875
| 1
|
alipay/aop/api/domain/SceneOrder.py
|
articuly/alipay-sdk-python-all
| 0
|
12785369
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MallDiscountDetail import MallDiscountDetail
class SceneOrder(object):
def __init__(self):
self._buyer_user_id = None
self._discount_detail = None
self._order_id = None
self._order_type = None
self._out_order_no = None
self._real_amount = None
self._scene_code = None
self._seller_user_id = None
self._status = None
self._subject = None
self._total_amount = None
self._trade_no = None
self._trade_success_time = None
self._trade_time = None
@property
def buyer_user_id(self):
return self._buyer_user_id
@buyer_user_id.setter
def buyer_user_id(self, value):
self._buyer_user_id = value
@property
def discount_detail(self):
return self._discount_detail
@discount_detail.setter
def discount_detail(self, value):
if isinstance(value, list):
self._discount_detail = list()
for i in value:
if isinstance(i, MallDiscountDetail):
self._discount_detail.append(i)
else:
self._discount_detail.append(MallDiscountDetail.from_alipay_dict(i))
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def order_type(self):
return self._order_type
@order_type.setter
def order_type(self, value):
self._order_type = value
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def real_amount(self):
return self._real_amount
@real_amount.setter
def real_amount(self, value):
self._real_amount = value
@property
def scene_code(self):
return self._scene_code
@scene_code.setter
def scene_code(self, value):
self._scene_code = value
@property
def seller_user_id(self):
return self._seller_user_id
@seller_user_id.setter
def seller_user_id(self, value):
self._seller_user_id = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def subject(self):
return self._subject
@subject.setter
def subject(self, value):
self._subject = value
@property
def total_amount(self):
return self._total_amount
@total_amount.setter
def total_amount(self, value):
self._total_amount = value
@property
def trade_no(self):
return self._trade_no
@trade_no.setter
def trade_no(self, value):
self._trade_no = value
@property
def trade_success_time(self):
return self._trade_success_time
@trade_success_time.setter
def trade_success_time(self, value):
self._trade_success_time = value
@property
def trade_time(self):
return self._trade_time
@trade_time.setter
def trade_time(self, value):
self._trade_time = value
def to_alipay_dict(self):
params = dict()
if self.buyer_user_id:
if hasattr(self.buyer_user_id, 'to_alipay_dict'):
params['buyer_user_id'] = self.buyer_user_id.to_alipay_dict()
else:
params['buyer_user_id'] = self.buyer_user_id
if self.discount_detail:
if isinstance(self.discount_detail, list):
for i in range(0, len(self.discount_detail)):
element = self.discount_detail[i]
if hasattr(element, 'to_alipay_dict'):
self.discount_detail[i] = element.to_alipay_dict()
if hasattr(self.discount_detail, 'to_alipay_dict'):
params['discount_detail'] = self.discount_detail.to_alipay_dict()
else:
params['discount_detail'] = self.discount_detail
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.order_type:
if hasattr(self.order_type, 'to_alipay_dict'):
params['order_type'] = self.order_type.to_alipay_dict()
else:
params['order_type'] = self.order_type
if self.out_order_no:
if hasattr(self.out_order_no, 'to_alipay_dict'):
params['out_order_no'] = self.out_order_no.to_alipay_dict()
else:
params['out_order_no'] = self.out_order_no
if self.real_amount:
if hasattr(self.real_amount, 'to_alipay_dict'):
params['real_amount'] = self.real_amount.to_alipay_dict()
else:
params['real_amount'] = self.real_amount
if self.scene_code:
if hasattr(self.scene_code, 'to_alipay_dict'):
params['scene_code'] = self.scene_code.to_alipay_dict()
else:
params['scene_code'] = self.scene_code
if self.seller_user_id:
if hasattr(self.seller_user_id, 'to_alipay_dict'):
params['seller_user_id'] = self.seller_user_id.to_alipay_dict()
else:
params['seller_user_id'] = self.seller_user_id
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.subject:
if hasattr(self.subject, 'to_alipay_dict'):
params['subject'] = self.subject.to_alipay_dict()
else:
params['subject'] = self.subject
if self.total_amount:
if hasattr(self.total_amount, 'to_alipay_dict'):
params['total_amount'] = self.total_amount.to_alipay_dict()
else:
params['total_amount'] = self.total_amount
if self.trade_no:
if hasattr(self.trade_no, 'to_alipay_dict'):
params['trade_no'] = self.trade_no.to_alipay_dict()
else:
params['trade_no'] = self.trade_no
if self.trade_success_time:
if hasattr(self.trade_success_time, 'to_alipay_dict'):
params['trade_success_time'] = self.trade_success_time.to_alipay_dict()
else:
params['trade_success_time'] = self.trade_success_time
if self.trade_time:
if hasattr(self.trade_time, 'to_alipay_dict'):
params['trade_time'] = self.trade_time.to_alipay_dict()
else:
params['trade_time'] = self.trade_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SceneOrder()
if 'buyer_user_id' in d:
o.buyer_user_id = d['buyer_user_id']
if 'discount_detail' in d:
o.discount_detail = d['discount_detail']
if 'order_id' in d:
o.order_id = d['order_id']
if 'order_type' in d:
o.order_type = d['order_type']
if 'out_order_no' in d:
o.out_order_no = d['out_order_no']
if 'real_amount' in d:
o.real_amount = d['real_amount']
if 'scene_code' in d:
o.scene_code = d['scene_code']
if 'seller_user_id' in d:
o.seller_user_id = d['seller_user_id']
if 'status' in d:
o.status = d['status']
if 'subject' in d:
o.subject = d['subject']
if 'total_amount' in d:
o.total_amount = d['total_amount']
if 'trade_no' in d:
o.trade_no = d['trade_no']
if 'trade_success_time' in d:
o.trade_success_time = d['trade_success_time']
if 'trade_time' in d:
o.trade_time = d['trade_time']
return o
| 2.125
| 2
|
fetch/api.py
|
andrewhead/tutorial-data
| 2
|
12785370
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import requests
import logging
import time
import re
logger = logging.getLogger('data')
USER_AGENT = "<NAME> (for academic research) <<EMAIL>>"
default_requests_session = requests.Session()
default_requests_session.headers['User-Agent'] = USER_AGENT
def make_request(method, *args, **kwargs):
    # We read max_attempts and retry_delay from the kwargs dictionary instead of
    # named kwargs because we want to preserve the order of the "request"
    # method's positional arguments for clients of this method; pop them so they
    # are not forwarded to the wrapped request method.
    max_attempts = kwargs.pop('max_attempts', 2)
    retry_delay = kwargs.pop('retry_delay', 10)
try_again = True
attempts = 0
res = None
def log_error(err_msg):
logger.warning(
"Error (%s) For API call %s, Args: %s, Kwargs: %s",
str(err_msg), str(method), str(args), str(kwargs)
)
while try_again and attempts < max_attempts:
try:
res = method(*args, **kwargs)
if hasattr(res, 'status_code') and res.status_code not in [200]:
log_error(str(res.status_code))
res = None
try_again = False
except requests.exceptions.ConnectionError:
log_error("ConnectionError")
except requests.exceptions.ReadTimeout:
log_error("ReadTimeout")
if try_again:
logger.warning("Waiting %d seconds for before retrying.", int(retry_delay))
time.sleep(retry_delay)
attempts += 1
return res
def _get_mendeley_item_count(response):
'''
    Returns an integer if a count was found, otherwise returns None.
See https://dev.mendeley.com/reference/topics/pagination.html for more documentation.
'''
if 'mendeley-count' not in response.headers:
return None
count = response.headers['mendeley-count']
count_int = None
try:
count_int = int(count)
except ValueError as e:
logger.warning("Unexpected item count %s: %s", count, e)
return count_int
def _get_next_page_url(response):
# If there is no "Link" header, then there is no next page
header = None
if 'Link' in response.headers:
header = response.headers['Link']
elif 'link' in response.headers:
header = response.headers['link']
if header is None:
return None
# Extract the next URL from the Link header.
next_url = None
next_url_match = re.search("<([^>]*)>; rel=\"next\"", header)
if next_url_match is not None:
next_url = next_url_match.group(1)
return next_url
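# Usage sketch (hypothetical URL):
# res = make_request(default_requests_session.get, 'https://api.example.com/docs',
#                    max_attempts=3, retry_delay=5)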
| 2.9375
| 3
|
04/script.py
|
has-ctrl/advent-of-code-2021
| 0
|
12785371
|
import numpy as np
with open("data.txt") as f:
draws = np.array([int(d) for d in f.readline().split(",")])
boards = np.array([[[int(n) for n in r.split()] for r in b.split("\n")] for b in f.read()[1:].split("\n\n")])
def bingo(data: np.ndarray, fill: int):
"""
Returns horizontal (rows) and vertical (columns) bingo. TRUE if bingo. FALSE if not.
"""
transposed_data = np.transpose(data)
return any(np.equal(data, [fill for _ in range(5)]).all(1)) or \
any(np.equal(transposed_data, [fill for _ in range(5)]).all(1))
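# Note: np.equal(data, fill) broadcasts the scalar fill directly, so
# np.equal(data, fill).all(1) is equivalent to the five-element comparison above.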
def one(d_data: np.ndarray, b_data: np.ndarray) -> int:
"""
To guarantee victory against the giant squid, figure out which board will win first.
What will your final score be if you choose that board?
"""
# If number is drawn, replace with {fill}
fill = -1
for draw in d_data:
# Replace drawn number by -1
b_data = np.where(b_data == draw, fill, b_data)
for board in b_data:
if bingo(board, fill):
return np.sum(np.where(board == fill, 0, board)) * draw
return -1
def two(d_data: np.ndarray, b_data: np.ndarray) -> int:
"""
Figure out which board will win last. Once it wins, what would its final score be?
"""
# If number is drawn, replace with {fill}
fill = -1
# List of completed bingo boards
completed_idx = []
for draw in d_data:
# Replace drawn number by -1
b_data = np.where(b_data == draw, fill, b_data)
        for i, board in enumerate(b_data):
if bingo(board, fill) and i not in completed_idx:
completed_idx.append(i)
if len(completed_idx) == len(b_data):
return np.sum(np.where(board == fill, 0, board)) * draw
return -1
print(f"1. {one(draws, boards)}")
print(f"2. {two(draws, boards)}")
| 3.515625
| 4
|
manage.py
|
FuzzyTraderExercise/Backend
| 0
|
12785372
|
<reponame>FuzzyTraderExercise/Backend
from flask.cli import FlaskGroup
from src import create_app, db
app = create_app()
cli = FlaskGroup(create_app=create_app)
# Run Flask
if __name__ == '__main__':
cli()
| 1.695313
| 2
|
backend/apps/projects/efficiency/urls.py
|
wuchaofan1654/tester
| 0
|
12785373
|
<reponame>wuchaofan1654/tester<filename>backend/apps/projects/efficiency/urls.py
# -*- coding: utf-8 -*-
"""
Created by sandy at 16:34 09/12/2021
Description: ToDo
"""
from django.urls import re_path
from rest_framework.routers import DefaultRouter
from apps.projects.efficiency.views import EfficiencyModelViewSet, ModuleModelViewSet
router = DefaultRouter()
router.register(r'efficiency', EfficiencyModelViewSet)
router.register(r'module', ModuleModelViewSet)
urlpatterns = [
re_path('module/tree/', ModuleModelViewSet.as_view({'get': 'tree_select_list'})),
re_path('module/children/', ModuleModelViewSet.as_view({'get': 'get_all'})),
]
urlpatterns += router.urls
| 1.914063
| 2
|
galaxy/main/migrations/0041_auto_20160207_2148.py
|
maxamillion/galaxy
| 1
|
12785374
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0040_auto_20160206_0921'),
]
operations = [
migrations.AlterUniqueTogether(
name='repository',
unique_together=set([('owner', 'github_user', 'github_repo')]),
),
migrations.AlterIndexTogether(
name='repository',
index_together=set([]),
)
]
| 1.492188
| 1
|
big_screen/utils/re_format.py
|
15653391491/black-broadcast-back-end
| 0
|
12785375
|
# Regex validation patterns
DATE_FORMATTER_RE = "[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}"  # datetime
DATE_FORMATTER_RE_PHONE = "[0-9]{8}.[0-9]{6}"  # device timestamp format
LNGLAT_FORMATTER_RE_PHONE = "[0-9]+.[0-9]+-[0-9]+.[0-9]+"  # coordinate format
PHONEID_FORMATTER_RE = "[0-9]{15}"
IDCARD_FORMATTER_RE = "[0-9]{18}"
INT_FORMATTER_RE = "[0-9]+"  # integer check
INT_OR_FLOAT = "^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$"
ADCODE_FORMATTER = "[0-9]{6}"
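# A small usage sketch (assuming Python's built-in re module; anchoring with
# fullmatch is the caller's choice):
#   >>> import re
#   >>> bool(re.fullmatch(DATE_FORMATTER_RE, "2021-01-02 03:04:05"))
#   True
#   >>> bool(re.fullmatch(INT_FORMATTER_RE, "12a"))
#   False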
# Date format strings
DATA_FORMATTER = "%Y-%m-%d %H:%M:%S"  # datetime format
HEATMAP_DATE_FORMATTER = '%Y/%m/%d'
# Unknown device
UNKNOW_MOBILE = "0" * 15
# Unknown person
UNKNOW_IDCARD = "0" * 18
# Unknown district adcode
UNKNOW_DISTRICT = "0"
# Unknown coordinates
UNKNOW_LNGLAT = "x,x"
# Unknown frequency point
UNKNOW_WHITELIST = 0
# Unknown whitelist category
UNKNOW_WHITECATEGORY = 4
# Unknown blacklist category
UNKNOW_BLACKCATEGORY = 1
# Unknown page number
UNKNOW_PAGE = "1"
# Unknown page size
UNKNOW_LIMIT = "10"
# Unknown time range
UNKNOW_TIMERANGE = "-1"
# Unknown administrative district code
UNKNOW_ADCODE = "0"
# Frequency point categories
# WHITE_TYPE = ["普通频点", "干扰频点", "区域频点", "未知种类", "黑广播"]
WHITE_TYPE = [{
    "name": "普通频点",  # "ordinary frequency point"
    "islegal": "1"
}, {
    "name": "区域频点",  # "regional frequency point"
    "islegal": "1"
}, {
    "name": "干扰频点",  # "interfering frequency point"
    "islegal": "1"
}, {
    "name": "未知种类",  # "unknown category"
    "islegal": "0"
}, {
    "name": "黑广播",  # "black (pirate) broadcast"
    "islegal": "0"
}]
# Black broadcast categories (unknown, fake medicine, false information,
# political sedition, terrorism, obscenity)
BC_TYPE = ["未知", "假药", "虚假信息", "政治反动", "恐怖主义", "淫秽色情"]
| 1.671875
| 2
|
test/data/generic_acceptance_poe.py
|
hung135/pythoscope
| 78
|
12785376
|
from module import main
main()
| 1.257813
| 1
|
Sipros/Scripts/configure_subdb.py
|
xyz1396/Projects
| 0
|
12785377
|
<filename>Sipros/Scripts/configure_subdb.py
import getopt, sys
from urllib import urlencode
import cookielib, urllib2, os, re, copy, string, operator
def NewSearchName(currentLine, fileId) :
allInfo = currentLine.split("=")
sSearchName = allInfo[1]
sSearchName = sSearchName.strip()
if ((sSearchName == "Null") or (sSearchName == "null") or (sSearchName == "NULL")) :
sSearchName = "subdb"+str(fileId)
else :
sSearchName = sSearchName + "_subdb"+str(fileId)
return "Search_Name = "+sSearchName
def NewDB(currentLine, fileId, output_dir = "") :
allInfo = currentLine.split("=")
filePath = allInfo[1]
filePath = filePath.strip()
(pathRoot, pathExt) = os.path.splitext(filePath)
if output_dir == "":
return "FASTA_Database = "+pathRoot+"_subfile"+str(fileId)+".fasta"
else:
# drive, path_and_file = os.path.splitdrive(pathRoot)
# path, file = os.path.split(path_and_file)
# return "FASTA_Database = "+os.path.join(output_dir, file+"_subfile"+str(fileId)+".fasta")
return "FASTA_Database = "+output_dir+"_subfile"+str(fileId)+".fasta"
OriginalConfigureFileName = sys.argv[1]
OriginalConfigureFile = open(OriginalConfigureFileName)
outputpath = sys.argv[2]
filenum = int (sys.argv[3])
dboutputpath = ""
if len(sys.argv) == 5:
dboutputpath = sys.argv[4]
ConfigureSubFiles = []
for i in range (filenum) :
#os.popen("mkdir "+outputpath+str(i))
(OriginalConfigureFileNameRoot, OriginalConfigureFileNameExt) = os.path.splitext(OriginalConfigureFileName)
    currentConfigureSubFile = open(outputpath+os.sep+os.path.basename(OriginalConfigureFileNameRoot)+"_subdb"+str(i)+".cfg", "w")
    ConfigureSubFiles.append(currentConfigureSubFile)
OriginalConfigureContent = OriginalConfigureFile.readlines()
for i in range (filenum) :
for eachLine in OriginalConfigureContent :
currentLine = eachLine.strip()
if currentLine.startswith("Search_Name") :
newLine = NewSearchName(currentLine, i)
ConfigureSubFiles[i].write(newLine+"\n")
elif currentLine.startswith("FASTA_Database") :
newLine = NewDB(currentLine, i, dboutputpath)
ConfigureSubFiles[i].write(newLine+"\n")
else :
ConfigureSubFiles[i].write(currentLine+"\n")
# close files
for i in range (filenum) :
ConfigureSubFiles[i].close()
OriginalConfigureFile.close()
| 2.5625
| 3
|
myria/test/test_workers.py
|
BrandonHaynes/myria-python
| 7
|
12785378
|
<filename>myria/test/test_workers.py
from httmock import urlmatch, HTTMock
from json import dumps as jstr
import unittest
from myria import MyriaConnection
@urlmatch(netloc=r'localhost:12345')
def local_mock(url, request):
global query_counter
if url.path == '/workers':
return jstr({'1': 'localhost:12347', '2': 'localhost:12348'})
elif url.path == '/workers/alive':
return jstr([1, 2])
elif url.path == '/workers/worker-1':
return jstr("localhost:12347")
return None
class TestQuery(unittest.TestCase):
def __init__(self, args):
with HTTMock(local_mock):
self.connection = MyriaConnection(hostname='localhost', port=12345)
unittest.TestCase.__init__(self, args)
def test_workers(self):
with HTTMock(local_mock):
workers = self.connection.workers()
self.assertEquals(workers, {'1': 'localhost:12347',
'2': 'localhost:12348'})
def test_alive(self):
with HTTMock(local_mock):
workers = self.connection.workers_alive()
self.assertEquals(set(workers), set([1, 2]))
def test_worker_1(self):
with HTTMock(local_mock):
worker = self.connection.worker(1)
self.assertEquals(worker, 'localhost:12347')
| 2.53125
| 3
|
scripts/vim/bin/generate_kinds.py
|
n13l/OpenAAA
| 10
|
12785379
|
#!/usr/bin/env python2
#-*- coding: utf-8 -*-
import re
import sys
import os.path
import clang.cindex
# you can use this dictionary to map some kinds to better
# textual representation than just the number
mapping = {
1 : 't' , # CXCursor_UnexposedDecl (A declaration whose specific kind is not
# exposed via this interface)
2 : 't' , # CXCursor_StructDecl (A C or C++ struct)
3 : 't' , # CXCursor_UnionDecl (A C or C++ union)
4 : 't' , # CXCursor_ClassDecl (A C++ class)
5 : 't' , # CXCursor_EnumDecl (An enumeration)
6 : 'm' , # CXCursor_FieldDecl (A field (in C) or non-static data member
# (in C++) in a struct, union, or C++ class)
7 : 'e' , # CXCursor_EnumConstantDecl (An enumerator constant)
8 : 'f' , # CXCursor_FunctionDecl (A function)
9 : 'v' , # CXCursor_VarDecl (A variable)
10 : 'a' , # CXCursor_ParmDecl (A function or method parameter)
20 : 't' , # CXCursor_TypedefDecl (A typedef)
21 : 'f' , # CXCursor_CXXMethod (A C++ class method)
22 : 'n' , # CXCursor_Namespace (A C++ namespace)
24 : '+' , # CXCursor_Constructor (A C++ constructor)
25 : '~' , # CXCursor_Destructor (A C++ destructor)
27 : 'a' , # CXCursor_TemplateTypeParameter (A C++ template type parameter)
28 : 'a' , # CXCursor_NonTypeTemplateParameter (A C++ non-type template
# parameter)
29 : 'a' , # CXCursor_TemplateTemplateParameter (A C++ template template
# parameter)
30 : 'f' , # CXCursor_FunctionTemplate (A C++ function template)
31 : 'p' , # CXCursor_ClassTemplate (A C++ class template)
33 : 'n' , # CXCursor_NamespaceAlias (A C++ namespace alias declaration)
36 : 't' , # CXCursor_TypeAliasDecl (A C++ alias declaration)
72 : 'u' , # CXCursor_NotImplemented
501 : 'd' , # CXCursor_MacroDefinition
601 : 'ta', # CXCursor_TypeAliasTemplateDecl (Template alias declaration).
700 : 'oc', # CXCursor_OverloadCandidate A code completion overload candidate.
}
if len(sys.argv) != 2:
print "Usage:", sys.argv[0], "<path-to-Index.h>"
exit(-1)
index = clang.cindex.Index.create()
tu = index.parse(sys.argv[1])
kinds = None
for child in tu.cursor.get_children():
if (child.spelling == "CXCursorKind"):
kinds = child
break
else:
print "Index.h doesn't contain CXCursorKind where it is expected, please report a bug."
exit(-1)
kinds_py_path = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
),
"plugin",
"kinds.py"
)
with open(kinds_py_path, "w") as f:
# First/Last pattern
fl = re.compile("CXCursor_(First|Last)[A-Z].*")
f.write("# !! GENERATED FILE, DO NOT EDIT\n")
f.write("kinds = {\n")
for kind in kinds.get_children():
# filter out First/Last markers from the enum
if fl.match(kind.spelling) is not None:
continue
text = mapping.get(kind.enum_value, kind.enum_value)
f.write("{0} : '{1}', # {2} {3}\n".format(kind.enum_value, text, kind.spelling, kind.brief_comment))
f.write("}\n")
# vim: set ts=2 sts=2 sw=2 expandtab :
| 2.578125
| 3
|
slot/w/bow.py
|
qwewqa/dl
| 0
|
12785380
|
<filename>slot/w/bow.py
from slot import WeaponBase
from slot.w import agito_buffs
class HDT1_Valkyries_Blaze(WeaponBase):
ele = ['flame']
wt = 'bow'
att = 734
s3 = {
"dmg" : 3*3.16 ,
"sp" : 6750 ,
"startup" : 0.1 ,
"recovery" : 2.73 ,
"hit" : 3 ,
} # Valkyrie's Raid
a = [('k', 0.3, 'vs HMS')]
class HDT2_Valkyries_Fire(WeaponBase):
ele = ['flame']
wt = 'bow'
att = 1468
s3 = {
"dmg" : 3*3.16 ,
"sp" : 6750 ,
"startup" : 0.1 ,
"recovery" : 2.73 ,
"hit" : 3 ,
} # Valkyrie's Heroic Raid
a = []
class HDT1_Blue_Mercurius(WeaponBase):
ele = ['water']
wt = 'bow'
att = 713
s3 = {
"dmg" : 8.54 ,
"sp" : 7267 ,
"startup" : 0.1 ,
"recovery" : 2.38 ,
"hit" : 1 ,
} # Mercurius's Knowledge
a = [('k', 0.3, 'vs HBH')]
class HDT2_Azure_Mercurius(WeaponBase):
ele = ['water']
wt = 'bow'
att = 1426
s3 = {
"dmg" : 8.54 ,
"sp" : 7267 ,
"startup" : 0.1 ,
"recovery" : 2.38 ,
"hit" : 1 ,
} # Mercurius's Transcendant Knowledge
a = []
class HDT1_Jormungands_Squall(WeaponBase):
ele = ['wind']
wt = 'bow'
att = 713
s3 = {
"dmg" : 3*3.16 ,
"sp" : 6750 ,
"startup" : 0.1 ,
"recovery" : 2.73 ,
} # Jormungand's World
a = [('k', 0.3, 'vs HMC')]
class HDT2_Jormungands_Fury(WeaponBase):
ele = ['wind']
wt = 'bow'
att = 1426
s3 = {
"dmg" : 3*3.16 ,
"sp" : 6750 ,
"startup" : 0.1 ,
"recovery" : 2.73 ,
"hit" : 3 ,
} # Jormungand's Boundless World
a = []
class HDT1_Jupiters_Light(WeaponBase):
ele = ['light']
wt = 'bow'
att = 677
s3 = {
"dmg" : 8.54 ,
"sp" : 7267 ,
"startup" : 0.1 ,
"recovery" : 2.38 ,
"hit" : 1 ,
} # Jupiter's Protection
a = [('k', 0.3, 'vs HZD')]
class HDT2_Jupiters_Sky(WeaponBase):
ele = ['light']
wt = 'bow'
att = 1354
s3 = {
"dmg" : 8.54 ,
"sp" : 7267 ,
"startup" : 0.1 ,
"recovery" : 2.38 ,
"hit" : 1 ,
} # Jupiter's Celestial Protection
a = []
class HDT1_Dark_Prophecy(WeaponBase):
ele = ['shadow']
wt = 'bow'
att = 713
s3 = {
"dmg" : 9.49 ,
"sp" : 6750 ,
"startup" : 0.1 ,
"recovery" : 1.52 ,
"hit" : 1 ,
} # Prophecy's Guidance
a = [('k', 0.3, 'vs HJP')]
class HDT2_Hellish_Prophecy(WeaponBase):
ele = ['shadow']
wt = 'bow'
att = 1426
s3 = {
"dmg" : 9.49 ,
"sp" : 6750 ,
"startup" : 0.1 ,
"recovery" : 1.52 ,
"hit" : 1 ,
} # Prophecy's Immaculate Guidance
a = []
class Chimeratech_Bow(WeaponBase):
ele = ['flame', 'shadow']
wt = 'bow'
att = 961
s3 = {} #
a = [('uo', 0.04)]
class Agito_Ydalir(WeaponBase):
ele = ['flame']
wt = 'bow'
att = 1482
s3 = agito_buffs['flame'][1]
class Agito0UB_Ydalir(Agito_Ydalir):
att = 961
s3 = agito_buffs['flame'][0]
class Agito_Longshe_Gong(WeaponBase):
ele = ['shadow']
wt = 'bow'
att = 1482
s3 = agito_buffs['shadow'][1]
class Agito0UB_Longshe_Gong(Agito_Longshe_Gong):
att = 961
s3 = agito_buffs['shadow'][0]
class UnreleasedAgitoStr_WaterBow(Agito_Ydalir):
ele = ['water']
class UnreleasedAgitoStr_WindBow(Agito_Ydalir):
ele = ['wind']
class UnreleasedAgitoStr_LightBow(Agito_Ydalir):
ele = ['light']
class UnreleasedAgitoSpd_WaterBow(Agito_Longshe_Gong):
ele = ['water']
class UnreleasedAgitoSpd_WindBow(Agito_Longshe_Gong):
ele = ['wind']
class UnreleasedAgitoSpd_LightBow(Agito_Longshe_Gong):
ele = ['light']
flame = Agito_Ydalir
water = HDT2_Azure_Mercurius
wind = HDT2_Jormungands_Fury
light = HDT2_Jupiters_Sky
shadow = HDT2_Hellish_Prophecy
| 2.15625
| 2
|
automatic_design_program.py
|
joaocarvalhoopen/Design_Asymmetrical_Inverted_Schmitt_Trigger_Single_Supply_program
| 1
|
12785381
|
##############################################################
# #
# Automatic design of an #
# Asymmetrical Inverted Schmitt-Trigger with Single Supply #
# for E24 resistors scale #
# with tolerance analysis #
# #
##############################################################
# Author: <NAME> #
# Date: 2019.08.30 #
# License: MIT Open Source License #
# Description: This is a simple program to make the #
# automatic design of an Asymmetrical Inverted #
# Schmitt-Trigger with Single Supply, with #
# resistors from E24 scale. Typically used for #
# 1%, but in this case used for 5% or 0.1% . #
# The input is V_supply, V_low_threshold, #
# V_high_threshold and Resistor_tolerance_perc. #
# It works by making the full search of all #
# combinations of values from E24 to identify #
# the best ones. #
# In this way it speeds up immensely the manual #
# experimentation. It also makes resistor #
# tolerance analysis. Please see the schematic #
# diagram on the GitHub page. #
##############################################################
#######
# Please fill the following 4 program variables to your
# specification, see schematic diagram.
# VCC voltage in volts.
VCC = 5.0
# Input Voltage low threshold in volts.
V_low_threshold_target = 0.555
# Input Voltage high threshold in volts.
V_high_threshold_target = 0.575
# Resistor tolerance percentage 5.0%, 1.0%, 0.1%, one of this values [5.0, 1.0, 0.1].
Resistor_tolerance_perc = 1.0
#######
# Start of program.
import math
# E24 Standard resistor series.
E24_values = [1.0, 1.1, 1.2, 1.3, 1.5, 1.6, 1.8, 2.0, 2.2,
2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.3, 4.7, 5.1,
5.6, 6.2, 6.8, 7.5, 8.2, 9.1]
# The scales of the resistor values so that an OpAmp circuit is stable,
# normally are between 1K and 100K, but I use a extended version from
# 100 Ohms to 1MOhms.
scales = [100, 1000, 10000, 100000]
def consistency_testing(VCC, V_low_threshold_target, V_high_threshold_target):
passed_tests = True
if not ( 0 < VCC):
print("Error in specification VCC, it has to be: 0 < VCC")
passed_tests = False
if not (V_low_threshold_target < V_high_threshold_target):
print("Error in specification, it has to be: V_low_threshold_target < V_high_threshold_target")
passed_tests = False
if not (0 <= V_low_threshold_target <= VCC):
print("Error in specification, it has to be: 0 <= V_low_threshold_target <= VCC")
passed_tests = False
if not (0 <= V_high_threshold_target <= VCC):
print("Error in specification, it has to be: 0 <= V_high_threshold_target <= VCC")
passed_tests = False
if Resistor_tolerance_perc not in [5.0, 1.0, 0.1]:
print("Error in specification Resistor_tolerance_perc, it has to be: 5.0, 1.0 or 0.1")
passed_tests = False
return passed_tests
def expansion_of_E24_values_for_range(E24_values, scales):
values_list = []
for scale in scales:
for val in E24_values:
value = val * scale
values_list.append(value)
return values_list
def calc_voltage_thresholds_for_circuit(VCC, R1, R2, R3):
V_low_threshold = 0.0
V_high_threshold = 0.0
# Calc V_low_threshold.
R_total_low = (R2 * R3) / float((R2 + R3))
V_low_threshold = VCC * R_total_low / float((R1 + R_total_low))
# Calc V_high_threshold.
R_total_high = (R1 * R3) / float((R1 + R3))
V_high_threshold = VCC * R2 / float((R2 + R_total_high))
return (V_low_threshold, V_high_threshold)
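# Worked numeric check (assumed example values, not part of the spec above):
# with VCC = 5.0 and R1 = R2 = R3 = 10000, R_total_low = 5000, so
# V_low = 5.0 * 5000 / 15000 = 1.667 V; likewise R_total_high = 5000, so
# V_high = 5.0 * 10000 / 15000 = 3.333 V.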
def calc_square_distance_error(V_low_threshold_target, V_high_threshold_target,
V_low_threshold_obtained, V_high_threshold_obtained):
res = math.sqrt( math.pow(V_low_threshold_target - V_low_threshold_obtained, 2) +
math.pow(V_high_threshold_target - V_high_threshold_obtained, 2) )
return res
def full_search_of_resistor_values(values_list, VCC, V_low_threshold_target, V_high_threshold_target):
best_error = 1000000000.0
best_V_low_threshold = -1000.0
best_V_high_threshold = -1000.0
best_R1 = -1000.0
best_R2 = -1000.0
best_R3 = -1000.0
for R1 in values_list:
for R2 in values_list:
for R3 in values_list:
res = calc_voltage_thresholds_for_circuit(VCC, R1, R2, R3)
V_low_threshold_obtained, V_high_threshold_obtained = res
error = calc_square_distance_error(V_low_threshold_target, V_high_threshold_target,
V_low_threshold_obtained, V_high_threshold_obtained)
if error < best_error:
best_error = error
best_V_low_threshold = V_low_threshold_obtained
best_V_high_threshold = V_high_threshold_obtained
best_R1 = R1
best_R2 = R2
best_R3 = R3
return (best_error, best_V_low_threshold, best_V_high_threshold, best_R1, best_R2, best_R3)
def expand_resistor_vals_tolerance(R_val, Resistor_tolerance_perc):
resistor_vals = []
delta = R_val * Resistor_tolerance_perc * 0.01
resistor_vals.append(R_val - delta)
resistor_vals.append(R_val)
resistor_vals.append(R_val + delta)
return resistor_vals
def calc_absolute_distance_error(V_low_threshold_target, V_high_threshold_target,
V_low_threshold_obtained, V_high_threshold_obtained):
res = (math.fabs(V_low_threshold_target - V_low_threshold_obtained)
+ math.fabs(V_high_threshold_target - V_high_threshold_obtained))
return res
def worst_tolerance_resistor_analysis(VCC, V_low_threshold_target, V_high_threshold_target,
R1_nominal, R2_nominal, R3_nominal, Resistor_tolerance_perc):
worst_error = 0.0
worst_V_low_threshold = 0.00000001
worst_V_high_threshold = 0.00000001
R1_values = expand_resistor_vals_tolerance(R1_nominal, Resistor_tolerance_perc)
R2_values = expand_resistor_vals_tolerance(R2_nominal, Resistor_tolerance_perc)
R3_values = expand_resistor_vals_tolerance(R3_nominal, Resistor_tolerance_perc)
for R1 in R1_values:
for R2 in R2_values:
for R3 in R3_values:
res = calc_voltage_thresholds_for_circuit(VCC, R1, R2, R3)
V_low_threshold_obtained, V_high_threshold_obtained = res
error = calc_absolute_distance_error(V_low_threshold_target, V_high_threshold_target,
V_low_threshold_obtained, V_high_threshold_obtained)
if error > worst_error:
worst_error = error
worst_V_low_threshold = V_low_threshold_obtained
worst_V_high_threshold = V_high_threshold_obtained
return (worst_error, worst_V_low_threshold, worst_V_high_threshold)
def main():
print("##############################################################")
print("# #")
print("# Automatic design of an #")
print("# Asymmetrical Inverted Schmitt-Trigger with Single Supply #")
print("# for E24 resistors scale #")
print("# with tolerance analysis #")
print("# #")
print("##############################################################")
print("")
print("### Specification:")
print("VCC: ", VCC, " Volts")
print("V_low_threshold_target: ", V_low_threshold_target, " Volts")
print("V_high_threshold_target: ", V_high_threshold_target, " Volts")
print("Resistor_tolerance_perc: ", Resistor_tolerance_perc, " %")
print("")
passed_tests = consistency_testing(VCC, V_low_threshold_target, V_high_threshold_target)
if passed_tests == False:
return
values_list = expansion_of_E24_values_for_range(E24_values, scales)
    res = full_search_of_resistor_values(values_list, VCC, V_low_threshold_target, V_high_threshold_target)
best_error, V_low_threshold_obtained, V_high_threshold_obtained, best_R1, best_R2, best_R3 = res
print("### Solution")
print("Best_error: ", best_error)
print("V_low_threshold_obtained: ", V_low_threshold_obtained, " Volts, delta: ",
math.fabs(V_low_threshold_target - V_low_threshold_obtained), " Volts" )
print("V_high_threshold_obtained: ", V_high_threshold_obtained, " Volts, delta: ",
math.fabs(V_high_threshold_target - V_high_threshold_obtained), " Volts" )
print("Best_R1: ", best_R1, " Ohms 1%")
print("Best_R2: ", best_R2, " Ohms 1%")
print("Best_R3: ", best_R3, " Ohms 1%")
print("")
res = worst_tolerance_resistor_analysis(VCC, V_low_threshold_target, V_high_threshold_target,
best_R1, best_R2, best_R3, Resistor_tolerance_perc)
worst_error, worst_V_low_threshold_obtained, worst_V_high_threshold_obtained = res
print("### Resistor tolerance analysis")
print("Worst_error: ", worst_error)
print("Worst V_low_threshold_obtained: ", worst_V_low_threshold_obtained, " Volts, delta: ",
math.fabs(V_low_threshold_target - worst_V_low_threshold_obtained), " Volts" )
print("Worst V_high_threshold_obtained: ", worst_V_high_threshold_obtained, " Volts, delta: ",
math.fabs(V_high_threshold_target - worst_V_high_threshold_obtained), " Volts" )
if __name__ == "__main__":
main()
| 2.40625
| 2
|
Python/IWannaBeTheGuy.py
|
Zardosh/code-forces-solutions
| 0
|
12785382
|
<filename>Python/IWannaBeTheGuy.py
n = int(input())
px = list(map(int, input().split()))
py = list(map(int, input().split()))
x = px[1:]
y = py[1:]
x.extend(y)
if len(set(x)) == n:
print('I become the guy.')
else:
print('Oh, my keyboard!')
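# Worked example: n=4 with px=[3, 1, 2, 3] and py=[2, 2, 4] gives x=[1, 2, 3]
# extended by y=[2, 4]; set(x) == {1, 2, 3, 4} has size 4 == n, so the program
# prints 'I become the guy.'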
| 3.609375
| 4
|
Binary_conv.py
|
arunbutte/Python_Codes
| 0
|
12785383
|
def toBinary(n):
    # The input must be strictly between 0 and 1
if(n >= 1 or n <= 0):
return "ERROR"
answer = ""
    answer = answer + "."
    while(n > 0):
# Setting a limit on length: 32 characters
if(len(answer) >= 32):
return "ERROR"
# Multiply n by 2 to check it 1 or 0
b = n * 2
print(b)
if (b >= 1):
answer = answer + "1"
n = b - 1
print(1, answer, n)
else:
answer = answer + "0"
n = b
print(2, answer, n)
return answer
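# Traced example: toBinary(0.625)
#   n=0.625 -> b=1.25 >= 1 -> answer=".1",   n=0.25
#   n=0.25  -> b=0.5  <  1 -> answer=".10",  n=0.5
#   n=0.5   -> b=1.0  >= 1 -> answer=".101", n=0.0 -> loop ends, returns ".101"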
| 3.828125
| 4
|
contrib/report_builders/json_report_builder.py
|
berndonline/flan
| 3,711
|
12785384
|
import json
from typing import Any, Dict, List
from contrib.descriptions import VulnDescriptionProvider
from contrib.internal_types import ScanResult
from contrib.report_builders import ReportBuilder
class JsonReportBuilder(ReportBuilder):
def __init__(self, description_provider: VulnDescriptionProvider):
self.description_provider = description_provider
self._buffer = {'ips': [], 'vulnerable': {}, 'not_vulnerable': {}}
def init_report(self, start_date: str, nmap_command: str):
self._buffer['start_date'] = start_date
self._buffer['nmap_command'] = nmap_command
def build(self) -> Any:
return json.dumps(self._buffer)
def add_vulnerable_services(self, scan_results: Dict[str, ScanResult]):
for app_name, result in scan_results.items():
self._buffer['vulnerable'][app_name] = {
'vulnerabilities': [],
'locations': self._serialize_locations(result.locations)
}
for v in result.vulns:
data = v.to_dict()
description = self.description_provider.get_description(v.name, v.vuln_type)
data['description'], data['url'] = description.text, description.url
self._buffer['vulnerable'][app_name]['vulnerabilities'].append(data)
def add_non_vulnerable_services(self, scan_results: Dict[str, ScanResult]):
for app_name, result in scan_results.items():
self._buffer['not_vulnerable'][app_name] = {
'locations': self._serialize_locations(result.locations)
}
def add_ip_address(self, ip: str):
self._buffer['ips'].append(ip)
@staticmethod
def _serialize_locations(locations: Dict[str, List[str]]):
return {loc: [int(port) for port in ports] for loc, ports in locations.items()}
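    # Shape sketch for _serialize_locations (hypothetical input):
    #   {'10.0.0.1': ['80', '443']} -> {'10.0.0.1': [80, 443]}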
| 2.5
| 2
|
apps/network/tests/database/test_role.py
|
AmrMKayid/PyGrid
| 0
|
12785385
|
import pytest
from src.users.role import Role
from .presets.role import role_metrics
@pytest.mark.parametrize(
("name, can_edit_settings, can_create_users," "can_edit_roles, can_manage_roles"),
role_metrics,
)
def test_create_role_object(
name,
can_edit_settings,
can_create_users,
can_edit_roles,
can_manage_roles,
database,
):
role = Role(
name=name,
can_edit_settings=can_edit_settings,
can_create_users=can_create_users,
can_edit_roles=can_edit_roles,
can_manage_roles=can_manage_roles,
)
database.session.add(role)
database.session.commit()
| 2.203125
| 2
|
app.py
|
JixunMoe/pi-cctv
| 2
|
12785386
|
# -*- coding: utf-8 -*-
import time, os, io, picamera, threading
from flask import Flask, request, session, url_for, redirect, render_template, g, Response, send_file
# configuration
DATABASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'minitwit.db')
DEBUG = False
SECRET_KEY = 'This is a very secret key.'
# PiCam, don't initialize the camera unless required.
class PiCam:
def __init__(self):
self.cam = None
self.time = 0
self.lock = threading.Lock()
self.s = io.BytesIO()
pass
def init_cam(self):
self.cam = picamera.PiCamera()
self.cam.start_preview()
self.cam.vflip = True
self.cam.hflip = True
time.sleep(2)
pass
def dup_stream(self):
_s = io.BytesIO()
self.lock.acquire()
### THREAD LOCK BEGIN
self.s.seek(0)
_s.write(self.s.read())
### THREAD LOCK END
self.lock.release()
_s.seek(0)
return _s
def capture(self):
if (self.cam is None):
self.init_cam()
_t = time.time()
        # Throttle captures: at most one every 0.02 s (~50 fps; 30 fps would be 0.0333 s)
        if (_t - self.time > 0.02):
self.time = _t
self.lock.acquire()
### THREAD LOCK BEGIN
self.s.seek(0)
self.cam.capture(self.s, 'png')
### THREAD LOCK END
self.lock.release()
return self.dup_stream()
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('PICAM_SETTINGS', silent=True)
@app.route('/')
def the_camera():
return render_template('index.html')
@app.route('/login')
def login():
return 'TODO: Login';
my_cam = PiCam()
@app.route('/capture')
def capture():
return send_file(my_cam.capture(), mimetype='image/png')
if __name__ == "__main__":
app.run(host='0.0.0.0')
while True:
pass
| 2.375
| 2
|
setup.py
|
vnrvjietlab/vnrlab
| 1
|
12785387
|
<filename>setup.py
from setuptools import setup
setup(
name='vnrlab',
version='0.1.0',
packages=['vnrlab'],
license='MIT',
include_package_data=True,
package_data={
"": ['Data/*.bin']
},
install_requires=[
'pycryptodome'
],
)
| 1.1875
| 1
|
quasimodo/tests/test_google_patterns.py
|
Aunsiels/CSK
| 16
|
12785388
|
<gh_stars>10-100
import unittest
from quasimodo.data_structures.subject import Subject
from quasimodo.patterns.pattern_google import PatternGoogle
class TestGooglePatterns(unittest.TestCase):
def test_get_str(self):
pattern = PatternGoogle("how are <SUBJS>")
self.assertEqual(pattern.to_str_subject(Subject("perl oyster")),
"how are perl oysters")
if __name__ == '__main__':
unittest.main()
| 2.625
| 3
|
src/pyxsys/storage.py
|
lmmx/pyx-sys
| 1
|
12785389
|
import pickle
from pathlib import Path
from datetime import datetime as dt
def storage_timestamp(t=None, storage_dir=Path.home() / ".pyx_store", ext="p"):
    """
    Return a timestamp suitable to create a file path as:
        yyyy/mm/dd/hh-mm-ss
    By default under `~/.pyx_store/` (`storage_dir`) with file extension `.p` (`ext`):
        ~/.pyx_store/yyyy/mm/dd/hh-mm-ss.p
    Assuming you will never pickle a workspace representation more than once per second,
    this can be used as a path into `~/.pyx_store/yyyy/mm/dd/hh-mm-ss.p`.
    """
    if t is None:  # a dt.now() default argument would be evaluated once, at import time
        t = dt.now()
    assert isinstance(t, dt), "Time isn't a datetime.datetime instance"
datestamp, timestamp = t.isoformat().split("T")
datestamp = datestamp.replace("-", "/")
    timestamp = timestamp.split(".")[0].replace(":", "-")
    storage_path = storage_dir / Path(datestamp)
storage_path.mkdir(parents=True, exist_ok=True)
file_name = f"{timestamp}.{ext}"
return storage_path / file_name
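# Illustrative result (assuming the helpers behave as written above):
#   storage_timestamp(dt(2021, 3, 4, 15, 6, 7))
#   -> Path.home() / ".pyx_store/2021/03/04/15-06-07.p"
#   (creating ~/.pyx_store/2021/03/04/ as a side effect)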
def pickle_vars(local_names=vars()):
checklist = ["ff_session", "x_session", "wm_territory", "tmux_server"]
pickle_filepath = storage_timestamp() # Create a dir for today's date under ~/.pyx_store/
storables = []
for var_name in checklist:
if var_name in local_names:
storables.append(local_names[var_name])
with open(pickle_filepath, "wb") as f:
pickle.dump(storables, file=f, protocol=-1)
return
| 2.96875
| 3
|
fugue_spark/__init__.py
|
gityow/fugue
| 0
|
12785390
|
<gh_stars>0
# flake8: noqa
from fugue.workflow import register_raw_df_type
from fugue_version import __version__
from pyspark.sql import DataFrame
from fugue_spark.dataframe import SparkDataFrame
from fugue_spark.execution_engine import SparkExecutionEngine
register_raw_df_type(DataFrame)
| 1.367188
| 1
|
lib/assembler/assemblerLibrary.py
|
DimitarYordanov17/jack-compiler
| 5
|
12785391
|
# A library file to include the machine language and dictionary specifications of the Hack language. @DimitarYordanov17
class AssemblerLibrary:
'''
Main class to map the Hack syntax to internal machine language bytecode
'''
def get_jump(jump: str):
'''
Return bytecode of jump commands
'''
bytecode = {
'' : '000',
'JGT': '001',
'JEQ': '010',
'JGE': '011',
'JLT': '100',
'JNE': '101',
'JLE': '110',
'JMP': '111',
}
return bytecode[jump]
def get_destination(destination: str):
'''
Return bytecode of destination commands
'''
bytecode = {
'' : '000',
'M' : '001',
'D' : '010',
'MD' : '011',
'A' : '100',
'AM' : '101',
'AD' : '110',
'AMD': '111',
}
return bytecode[destination]
def get_computation(computation):
'''
Return bytecode of computation commands
'''
bytecode = {
'0' : '0101010',
'1' : '0111111',
'-1' : '0111010',
'D' : '0001100',
'A' : '0110000',
'!D' : '0001101',
'!A' : '0110001',
'-D' : '0001111',
'-A' : '0110011',
'D+1': '0011111',
'A+1': '0110111',
'D-1': '0001110',
'A-1': '0110010',
'D+A': '0000010',
'D-A': '0010011',
'A-D': '0000111',
'D&A': '0000000',
'D|A': '0010101',
'M' : '1110000',
'!M' : '1110001',
'-M' : '1110011',
'M+1': '1110111',
'M-1': '1110010',
'D+M': '1000010',
'D-M': '1010011',
'M-D': '1000111',
'D&M': '1000000',
'D|M': '1010101',
}
        try:  # Handle operand order differences, e.g. 'M+D' == 'D+M'
            return bytecode[computation]
        except KeyError:
            return bytecode[computation[::-1]]
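        # e.g. get_computation('A+D') misses the direct lookup, then the
        # reversed key succeeds: bytecode['D+A'] == '0000010'.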
def get_register(register):
'''
Return bytecode of built-in registers
'''
bytecode_mnemonics = {
'SP' : 0,
'LCL' : 1,
'ARG' : 2,
'THIS' : 3,
'THAT' : 4,
'SCREEN': 0x4000,
'KBD' : 0x6000,
}
if register in ['R' + str(n) for n in range(0, 16)]:
if len(register) == 2:
return int(register[1])
return int(register[1] + register[2])
elif register in bytecode_mnemonics.keys():
return bytecode_mnemonics[register]
else:
return "VARIABLE"
| 3.171875
| 3
|
lib/eco/renderers.py
|
softdevteam/eco
| 54
|
12785392
|
# Copyright (c) 2013--2014 King's College London
# Created by the Software Development Team <http://soft-dev.org/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
try:
# checkout https://github.com/Brittix1023/mipy into eco/lib/
sys.path.append("../mipy")
from mipy import kernel, request_listener
has_mipy = True
except ImportError:
has_mipy = False
from incparser.astree import TextNode, EOS #BOS, ImageNode, FinishSymbol
from grammar_parser.gparser import IndentationTerminal # BOS, MagicTerminal, Nonterminal
from PyQt5 import QtCore
from PyQt5.QtGui import QPen, QColor, QImage
from PyQt5.QtWidgets import QApplication
import math, os
class Renderer(object):
def __init__(self, fontwt, fontht, fontd):
self.fontwt = fontwt
self.fontht = fontht
self.fontd = fontd
def paint_node(self, paint, node, x, y, highlighter):
raise NotImplementedError
    def nextNode(self, node):
        node = node.next_term
        if isinstance(node, EOS):
            return None
        return node
def update_image(self, node):
pass
def setStyle(self, paint, style):
f = paint.font()
if style == "italic":
f.setItalic(True)
f.setBold(False)
elif style == "bold":
f.setItalic(False)
f.setBold(True)
else:
f.setItalic(False)
f.setBold(False)
paint.setFont(f)
class NormalRenderer(Renderer):
def paint_node(self, paint, node, x, y, highlighter):
dx, dy = (0, 0)
if node.symbol.name == "\r" or isinstance(node, EOS):
return dx, dy
if isinstance(node.symbol, IndentationTerminal):
paint.setPen(QPen(QColor("#aa3333")))
self.setStyle(paint, highlighter.get_style(node))
if QApplication.instance().showindent is True:
if node.symbol.name == "INDENT":
text = ">"
elif node.symbol.name == "DEDENT":
text = "<"
else:
return dx, dy
paint.drawText(QtCore.QPointF(x, 3 + self.fontht + y*self.fontht - self.fontd), text)
return 1*self.fontwt, dy
else:
return dx, dy
if isinstance(node, TextNode):
paint.setPen(QPen(QColor(highlighter.get_color(node))))
self.setStyle(paint, highlighter.get_style(node))
text = node.symbol.name
if not (node.lookup == "<ws>" and node.symbol.name.startswith(" ")): # speedhack: don't draw invisible nodes
paint.drawText(QtCore.QPointF(x, 3 + self.fontht + y*self.fontht - self.fontd), text)
dx = len(text) * self.fontwt
dy = 0
return dx, dy
def doubleClick(self):
pass # select/unselect
class ImageRenderer(NormalRenderer):
def paint_node(self, paint, node, x, y, highlighter):
self.update_image(node)
dx, dy = (0, 0)
if node.image is not None and not node.plain_mode:
paint.drawImage(QtCore.QPoint(x, 3 + y * self.fontht), node.image)
dx = int(math.ceil(node.image.width() * 1.0 / self.fontwt) * self.fontwt)
dy = int(math.ceil(node.image.height() * 1.0 / self.fontht))
else:
dx, dy = NormalRenderer.paint_node(self, paint, node, x, y, highlighter)
return dx, dy
def get_filename(self, node):
return node.symbol.name
def update_image(self, node):
filename = self.get_filename(node)
if node.image_src == filename:
return
if os.path.isfile(filename):
node.image = QImage(filename)
node.image_src = filename
else:
node.image = None
node.image_src = None
def doubleClick(self):
pass # switch between display modes
class ChemicalRenderer(ImageRenderer):
def get_filename(self, node):
return "chemicals/" + node.symbol.name + ".png"
if not has_mipy:
class IPythonRenderer(NormalRenderer):
pass
else:
class IPythonRenderer(NormalRenderer):
proc = kernel.IPythonKernelProcess()
def paint_node(self, paint, node, x, y, highlighter):
lbox = node.get_root().get_magicterminal()
if lbox.plain_mode:
return NormalRenderer.paint_node(self, paint, node, x, y, highlighter)
else:
dx, dy = NormalRenderer.paint_node(self, paint, node, x, y, highlighter)
if isinstance(node.next_term, EOS):
content = self.get_content(lbox)
try:
krn = IPythonRenderer.proc.connection
if krn is not None:
listener = IPythonExecuteListener()
krn.execute_request(content, listener=listener)
while not listener.finished:
krn.poll(-1)
text = str(listener.result)
                    except Exception as e:
                        text = str(e)  # Python 3 exceptions have no .message attribute
paint.drawText(QtCore.QPointF(x+100, self.fontht + y*self.fontht), " | "+text)
return dx, dy
def get_content(self, lbox):
node = lbox.symbol.ast.children[0].next_term
l = []
while not isinstance(node, EOS):
if not isinstance(node.symbol, IndentationTerminal):
l.append(node.symbol.name)
node = node.next_term
return "".join(l)
class IPythonExecuteListener(request_listener.ExecuteRequestListener):
def __init__(self):
self.result = None
self.finished = False
def on_execute_result(self, execution_count, data, metadata):
self.result = data['text/plain']
def on_execute_finished(self):
self.finished = True
def on_error(self, ename, value, traceback):
raise Exception(ename)
def get_renderer(parent, fontwt, fontht, fontd):
if parent == "Chemicals":
return ChemicalRenderer(fontwt, fontht, fontd)
if parent == "Image":
return ImageRenderer(fontwt, fontht, fontd)
if parent == "IPython":
return IPythonRenderer(fontwt, fontht, fontd)
return NormalRenderer(fontwt, fontht, fontd)
| 1.703125
| 2
|
scraper/views.py
|
jazzify/newsify_back
| 0
|
12785393
|
<filename>scraper/views.py
from rest_framework import viewsets, mixins
from rest_framework.response import Response
from scraper.models import Post
from scraper.serializers import PostSerializer
class PostViewSet(viewsets.GenericViewSet, mixins.RetrieveModelMixin, mixins.ListModelMixin):
queryset = Post.objects.all()
serializer_class = PostSerializer
def list(self, request, *args, **kwargs):
post_types = [post_type[0] for post_type in Post.TYPE_CHOICES]
posts = {post_type:[] for post_type in post_types}
for post_type in post_types:
queryset = Post.objects.filter(post_type=post_type).order_by('-created_at')[:4]
serialized_posts = self.get_serializer(queryset, many=True).data
posts[post_type] = serialized_posts
return Response(posts)
| 2.328125
| 2
|
abstractions.py
|
Koruption/CSV-Dataset-Generator
| 0
|
12785394
|
<gh_stars>0
from random_username.generate import generate_username
from random_address import real_random_address
from typing import Any, Dict, List, Tuple
from prettytable import PrettyTable
from datetime import timedelta
from datetime import datetime
from json import load
import random
import names
import uuid
import csv
def random_index(arr: List):
return random.randint(0, len(arr) - 1)
def string_to_date(_date_string: str):
return datetime.strptime(_date_string, '%m/%d/%Y %I:%M %p')
def date_to_string(_datetime):
return datetime.strftime(_datetime, '%m/%d/%Y %I:%M %p')
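# Quick sanity examples for the two helpers above (format '%m/%d/%Y %I:%M %p'):
#   string_to_date('12/31/2021 11:59 PM') -> datetime(2021, 12, 31, 23, 59)
#   date_to_string(datetime(2021, 12, 31, 23, 59)) -> '12/31/2021 11:59 PM'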
class DataGenerator:
def __init__(self, amount: int = 25, from_list=None):
self.data: List[Any] = []
self.amount = amount
self.from_list = from_list
return
def generate(self):
for i in range(0, self.amount):
self.data.append(self.from_list[random_index(self.from_list)])
return self
def set_amount(self, amount: int):
self.amount = amount
def getData(self):
return self.data
class BooleanGenerator(DataGenerator):
def __init__(self, amount: int = 25, from_list=None):
super().__init__(amount, from_list)
def generate(self):
for i in range(0, self.amount):
self.data.append(bool(random.getrandbits(1)))
return self
class NameGenerator(DataGenerator):
def __init__(self, amount: int = 25, full_name: bool = True, from_list=None):
super().__init__(amount, from_list)
self.full_name = full_name
return
def generate(self):
'''
Generates a number of unique names based on the amount provided.
'''
if self.from_list:
super().generate()
return self
name_pool = self.__gen_names(self.full_name, self.amount)
for i in range(0, self.amount):
self.data.append(name_pool[i])
return self
def __gen_names(self, full_name: bool, name_pool_count: int):
return [names.get_full_name() for i in range(0, name_pool_count)] if full_name else [names.get_first_name() for i in range(0, name_pool_count)]
class EmailGenerator(DataGenerator):
def __init__(self, providers=['gmail', 'hotmail', 'outlook', 'yahoo', 'icloud'], amount: int = 25, from_list=None):
super().__init__(amount, from_list)
self.providers = list(
map(lambda provider: f'@{provider}.<EMAIL>', providers))
def generate(self):
usernames = generate_username(self.amount)
for i in range(0, self.amount):
self.data.append(
f'{usernames[i]}{self.providers[random_index(self.providers)]}')
return self
class NumberGenerator(DataGenerator):
def __init__(self, amount: int = 25, num_range: Tuple = (0, 10), use_floats: bool = False, decimal_cutoff: int = 2, from_list=None):
super().__init__(amount, from_list)
self.range: Tuple = num_range
self.use_floats = use_floats
self.decimal_cutoff = decimal_cutoff
return
def generate(self):
'''
Generates a list of random numbers (ints of floats) based on the amount provided.
'''
if self.from_list:
            super().generate()
return self
for i in range(0, self.amount):
self.data.append(round(random.uniform(self.range[0], self.range[1]), self.decimal_cutoff)) if self.use_floats else self.data.append(
random.randint(self.range[0], self.range[1]))
return self
class IDGenerator(DataGenerator):
def __init__(self, amount: int = 25, max_length: float = 3, from_list=None):
super().__init__(amount, from_list)
self.max_length = max_length
def generate(self):
'''
Generates unique ids based on the amount provided
'''
if self.from_list:
            super().generate()
return self
id_pool = self.__gen_ids(self.amount, self.max_length)
for i in range(0, self.amount):
self.data.append(id_pool[i])
return self
def __gen_ids(self, id_pool_count: int, maxLength: int):
return [str(uuid.uuid4())[0:maxLength] for i in range(0, id_pool_count)]
class PercentageGenerator(DataGenerator):
def __init__(self, amount: int = 25, from_list=None):
super().__init__(amount, from_list)
def generate(self):
'''
Generates a list of percentages, with possible duplicates.
'''
if self.from_list:
            super().generate()
return self
for i in range(0, self.amount):
            self.data.append(random.uniform(0.01, 99.9))
return self
class CategoryGenerator(DataGenerator):
def __init__(self, categories: List, amount: int = 25):
super().__init__(amount, categories)
def generate(self):
'''
Generates a list of categories, with duplicates.
'''
super().generate()
return self
class DateGenerator(DataGenerator):
def __init__(self, start_date, end_date, amount: int = 25, from_list=None):
super().__init__(amount, from_list)
self.start_date = string_to_date(start_date)
self.end_date = string_to_date(end_date)
def generate(self):
if self.from_list:
            super().generate()
return self
for i in range(0, self.amount):
self.data.append(self.__get_date(self.start_date, self.end_date))
return self
def __get_date(self, start_date, end_date):
delta = end_date - start_date
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = random.randrange(int_delta)
date_time = start_date + timedelta(seconds=random_second)
return date_to_string(date_time)
class AddressGenerator(DataGenerator):
def __init__(self, amount: int = 25, with_zips: bool = True, from_list=None):
super().__init__(amount, from_list)
self.with_zips = with_zips
def generate(self):
'''
Generates unique addresses based on the amount provided.
'''
if self.from_list:
            super().generate()
return self
address_pool = self.__gen_addresses(self.amount, self.with_zips)
for i in range(0, self.amount):
self.data.append(address_pool[i])
return self
def __gen_addresses(self, address_pool_count: int, with_zips: bool):
addresses = []
for i in range(0, address_pool_count):
address_obj = real_random_address()
addresses.append({'address': address_obj['address1'], 'zip': address_obj['postalCode']}
) if with_zips else addresses.append(address_obj['address1'])
return addresses
class ZipCodeGenerator(DataGenerator):
def __init__(self, amount: int = 25, from_list=None):
super().__init__(amount, from_list)
def generate(self):
'''
Generates unique zip codes based on the amount provided
'''
if self.from_list:
            super().generate()
return self
zipcodes_pool = self.__gen_zipcodes(self.amount)
for i in range(0, self.amount):
self.data.append(zipcodes_pool[i])
return self
def __gen_zipcodes(self, zip_pool_count: int):
return [real_random_address()['postalCode'] for i in range(0, zip_pool_count)]
class CurrencyGenerator(DataGenerator):
def __init__(self, currency_symbol: str, amount_range=(0, 100000), amount: int = 25, from_list=None):
super().__init__(amount, from_list)
self.currency_symbol: str = currency_symbol
self.amount_range = amount_range
def generate(self):
currencies = NumberGenerator(
self.amount, self.amount_range, True, 2).generate().getData()
for curr in currencies:
self.data.append(f'{self.currency_symbol}{str(curr)}')
return self
class UserGenerator(DataGenerator):
def __init__(self, amount: int = 25, full_names: bool = False, id_length: int = 4, from_list=None):
super().__init__(amount, from_list)
self.full_names = full_names
self.id_length = id_length
def generate(self):
'''
Generates a list of user objects based on the amount provided.
'''
if (self.from_list):
            super().generate()
return self
'''
user = {
name: '',
address: '',
zip: '',
id: ''
}
'''
        name_pool = NameGenerator(self.amount, self.full_names).generate().getData()
        address_pool = AddressGenerator(self.amount).generate().getData()
        id_pool = IDGenerator(self.amount, self.id_length).generate().getData()
for i in range(0, self.amount):
self.data.append({
'name': name_pool[i],
'address': address_pool[i]['address'],
'zip': address_pool[i]['zip'],
'id': id_pool[i]
})
return self
class Column:
def __init__(self, name: str, data_generator: DataGenerator) -> None:
self.name = name
self.generator = data_generator
return
class ColumnFactory:
def create(params: Dict[str, Any]):
_type = params.get('type')
if _type == 'categorical':
config_params = params.get('config_params')
return Column(params.get('name'), CategoryGenerator(categories=config_params['categories'], amount=config_params.get('amount') if config_params.get('amount') else 25))
if _type == 'boolean':
config_params = params.get('config_params')
return Column(params.get('name'), BooleanGenerator(config_params.get('amount') if config_params.get('amount') else 25))
if _type == 'name':
config_params = params.get('config_params')
full_name = config_params.get(
'full_name') if 'full_name' in config_params else True
amount = config_params.get(
'amount') if 'amount' in config_params else 25
from_list = config_params.get(
'from_list') if 'from_list' in config_params else None
return Column(params.get('name'), NameGenerator(amount, full_name, from_list))
if _type == 'number':
config_params = params.get('config_params')
amount = config_params.get(
'amount') if 'amount' in config_params else 25
from_list = config_params.get(
'from_list') if 'from_list' in config_params else None
range = (config_params.get('range')[0], config_params.get(
'range')[1]) if 'range' in config_params else (0, 10)
use_floats = config_params.get(
'use_floats') if 'use_floats' in config_params else False
decimal_cutoff = config_params.get(
'decimal_cutoff') if 'decimal_cutoff' in config_params else 25
return Column(params.get('name'), NumberGenerator(amount, range, use_floats, decimal_cutoff, from_list))
if _type == 'currency':
config_params = params.get('config_params')
amount = config_params.get(
'amount') if 'amount' in config_params else 25
from_list = config_params.get(
'from_list') if 'from_list' in config_params else None
range = (config_params.get('range')[0], config_params.get(
'range')[1]) if 'range' in config_params else (0, 10)
currency_symbol = config_params.get(
'symbol') if 'symbol' in config_params else "$"
return Column(params.get('name'), CurrencyGenerator(currency_symbol, range, amount, from_list))
if _type == 'date':
config_params = params.get('config_params')
start_date = config_params.get('range')[0]
end_date = config_params.get('range')[1] if config_params.get(
'range')[1] != start_date else date_to_string(datetime.now())
amount = config_params.get(
'amount') if 'amount' in config_params else 25
from_list = config_params.get(
'from_list') if 'from_list' in config_params else None
return Column(params.get('name'), DateGenerator(start_date, end_date, amount, from_list))
if _type == 'email':
config_params = params.get('config_params')
amount = config_params.get(
'amount') if 'amount' in config_params else 25
from_list = config_params.get(
'from_list') if 'from_list' in config_params else None
providers = config_params.get(
'providers') if 'providers' in config_params else ['gmail', 'hotmail', 'outlook', 'yahoo', 'icloud']
return Column(params.get('name'), EmailGenerator(providers, amount, from_list))
if _type == 'identification':
config_params = params.get('config_params')
amount = config_params.get(
'amount') if 'amount' in config_params else 25
from_list = config_params.get(
'from_list') if 'from_list' in config_params else None
            max_length = config_params.get(
                'length') if 'length' in config_params else 3
return Column(params.get('name'), IDGenerator(amount, max_length, from_list))
if _type == 'percentage':
config_params = params.get('config_params')
amount = config_params.get(
'amount') if 'amount' in config_params else 25
from_list = config_params.get(
'from_list') if 'from_list' in config_params else None
return Column(params.get('name'), PercentageGenerator(amount, from_list))
if _type == 'address':
config_params = params.get('config_params')
amount = config_params.get(
'amount') if 'amount' in config_params else 25
from_list = config_params.get(
'from_list') if 'from_list' in config_params else None
include_zips = config_params.get(
'include_zips') if 'include_zips' in config_params else False
return Column(params.get('name'), AddressGenerator(amount, include_zips, from_list))
if _type == 'postal_code':
config_params = params.get('config_params')
amount = config_params.get(
'amount') if 'amount' in config_params else 25
from_list = config_params.get(
'from_list') if 'from_list' in config_params else None
return Column(params.get('name'), ZipCodeGenerator(amount, from_list))
if _type == 'user':
config_params = params.get('config_params')
amount = config_params.get(
'amount') if 'amount' in config_params else 25
from_list = config_params.get(
'from_list') if 'from_list' in config_params else None
full_name = config_params.get(
'full_name') if 'full_name' in config_params else True
id_length = config_params.get(
'id_length') if 'id_length' in config_params else 3
return Column(params.get('name'), UserGenerator(amount, full_name, id_length, from_list))
return None
class SchemaLoader:
def _load(schema_path: str) -> List[Dict]:
json_file = open(schema_path)
return load(json_file).get('columns')
class DataTable:
def __init__(self, cols: List[Column] = None, schema_path: str = None, row_count: int = 25):
self.cols: List[Column] = []
if schema_path:
_cols = SchemaLoader._load(schema_path)
self.cols = [ColumnFactory.create(_col) for _col in _cols]
elif cols:
for col in cols:
self.add_col(col)
self.row_count: int = row_count
self.rows: List[List[Any]] = []
self.__set_headers()
return
def __set_headers(self):
self.headers = [col.name for col in self.cols]
def add_col(self, column: Column):
self.cols.append(column)
self.__set_headers()
return
def del_col(self, name: str):
self.cols = list(filter(lambda col: col.name != name, self.cols))
self.__set_headers()
return
def set_rows(self, row_count: int):
self.row_count = row_count
def generate_rows(self, reuse_anchor_col: str = None, anchor_count: int = None):
'''
Generates row data based on the columns specified
and the number of rows defined. If reuse_anchor_col
is provided, multiple rows will be attributed
to anchor points e.g., ['john', 'asdf', 'asdfasd
', '12/12/23'], ... ['john', ..., '02/10/24']
'''
if (reuse_anchor_col):
# create a pool from anchors
return
for i in range(0, self.row_count):
row_data = []
for col in self.cols:
col.generator.set_amount(self.row_count)
row_data.append(col.generator.generate().getData()[i])
self.rows.append(row_data)
return
def print_rows(self, pretty=True):
if (not pretty):
print(self.headers)
for row in self.rows:
print(row)
return
p_table = PrettyTable(field_names=self.headers)
for row in self.rows:
p_table.add_row(row)
print(p_table)
class CSVWriter:
def write_to(file_name: str, table: DataTable):
with open(file_name, 'w') as csvfile:
fwriter = csv.writer(
csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL
)
# write the column header first
fwriter.writerow(table.headers)
for row in table.rows:
fwriter.writerow(row)
print(f'Completed writing data to: ${file_name}.csv')
return
| 3.09375
| 3
|
mysuper/users/forms.py
|
oreon/mysuper
| 0
|
12785395
|
<gh_stars>0
from django import forms
class ActorSearchForm(forms.Form):
name = forms.CharField(
required = False,
label='Search name or surname!',
widget=forms.TextInput(attrs={'placeholder': 'search here!'})
)
search_cap_exact = forms.IntegerField(
required = False,
label='Search age (exact match)!'
)
search_cap_min = forms.IntegerField(
required = False,
label='Min age'
)
search_cap_max = forms.IntegerField(
required = False,
label='Max age'
)
| 2.421875
| 2
|
teleband/submissions/api/views.py
|
JMU-CIME/CPR-Music-Backend
| 2
|
12785396
|
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet, ModelViewSet
from .serializers import (
GradeSerializer,
SubmissionSerializer,
AttachmentSerializer,
TeacherSubmissionSerializer,
)
from teleband.courses.models import Course
from teleband.submissions.models import Grade, Submission, SubmissionAttachment
from teleband.assignments.models import Assignment
class SubmissionViewSet(
ListModelMixin, RetrieveModelMixin, CreateModelMixin, GenericViewSet
):
serializer_class = SubmissionSerializer
queryset = Submission.objects.all()
def get_queryset(self):
return self.queryset.filter(assignment_id=self.kwargs["assignment_id"])
def perform_create(self, serializer):
serializer.save(
assignment=Assignment.objects.get(pk=self.kwargs["assignment_id"])
)
# @action(detail=False)
# def get_
class AttachmentViewSet(
ListModelMixin, RetrieveModelMixin, CreateModelMixin, GenericViewSet
):
serializer_class = AttachmentSerializer
queryset = SubmissionAttachment.objects.all()
def get_queryset(self):
return self.queryset.filter(submission_id=self.kwargs["submission_pk"])
def perform_create(self, serializer):
serializer.save(
submission=Submission.objects.get(pk=self.kwargs["submission_pk"])
)
class TeacherSubmissionViewSet(ListModelMixin, RetrieveModelMixin, GenericViewSet):
serializer_class = TeacherSubmissionSerializer
queryset = Submission.objects.all()
# def get_queryset(self,):
# pass
@action(detail=False)
def recent(self, request, **kwargs):
if "piece_slug" not in request.GET or "activity_name" not in request.GET:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
"error": "Missing piece_slug or activity_name (figure it out!) in get data"
},
)
course_id = self.kwargs["course_slug_slug"]
piece_slug = request.GET["piece_slug"]
activity_name = request.GET["activity_name"]
queryset = (
Submission.objects.filter(
assignment__enrollment__course__slug=course_id,
assignment__activity__activity_type__name=activity_name,
assignment__part__piece__slug=piece_slug,
)
.order_by("assignment__enrollment", "-submitted")
.distinct("assignment__enrollment")
)
serializer = self.serializer_class(
queryset, many=True, context={"request": request}
)
return Response(status=status.HTTP_200_OK, data=serializer.data)
class GradeViewSet(ModelViewSet):
queryset = Grade.objects.all()
serializer_class = GradeSerializer
| 1.914063
| 2
|
app/users/models.py
|
prapeller/blackemployer_api
| 0
|
12785397
|
<gh_stars>0
import datetime
import hashlib
from random import random
from django.conf import settings
from django.contrib import auth
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager, AbstractUser
from django.core.mail import send_mail
from django.db import models
from django.urls import reverse_lazy, reverse
from rest_framework.authtoken.models import Token
class UserManager(BaseUserManager):
def create_user(self, email, password, **extra_fields):
"""Creates and returns a new user with Token"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
username = extra_fields.get('username')
user.username = username if username else user.email
user.set_password(password)
user.is_active = False
user.save(using=self._db)
Token.objects.create(user=user)
return user
def create_superuser(self, email, password, **extra_fields):
"""Creates and returns a new superuser with Token"""
user = self.create_user(email, password, **extra_fields)
user.is_active = True
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractUser):
"""Custom user model with unique username and email"""
email = models.EmailField(max_length=64, unique=True,)
first_name = models.CharField(max_length=64, blank=True)
last_name = models.CharField(max_length=64, blank=True)
username = models.CharField(max_length=64, unique=True, blank=True)
age = models.PositiveIntegerField(null=True)
activation_key = models.CharField(max_length=128, null=True, blank=True)
activation_key_expiration_date = models.DateTimeField(null=True, blank=True)
objects = UserManager()
# EMAIL_FIELD = 'email'
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def __str__(self):
if self.username != self.email:
return self.username
else:
return f'{self.date_joined}'
def set_activation_key(self):
salt = hashlib.sha1(str(random()).encode('utf8')).hexdigest()[:6]
self.activation_key = hashlib.sha1((self.email + salt).encode('utf8')).hexdigest()
self.activation_key_expiration_date = timezone.now() + datetime.timedelta(hours=48)
def activation_key_is_valid(self):
return True if self.activation_key_expiration_date > timezone.now() else False
def activate(self):
self.is_active = True
self.save()
def send_verify_link(self):
site = settings.DOMAIN_NAME
verify_path = reverse('users:verify', args=[self.email, self.activation_key])
        verify_link = f'{site}{verify_path}'
        subject = f'{self.username}, activate your "blackemployer.com" account!'
        message = f'Please follow this link for that:\n{verify_link}'
        send_mail(subject, message, settings.EMAIL_HOST_USER,
                  [self.email], fail_silently=False)
        return verify_link
# @receiver(models.signals.post_save, sender=settings.AUTH_USER_MODEL)
# def create_auth_token(sender, instance, created, **kwargs):
# if created:
# Token.objects.create(user=instance)
| 2.25
| 2
|
app/main.py
|
Tehsurfer/image-history
| 0
|
12785398
|
import json
import base64
import requests
from flask import Flask, abort, jsonify, request
from app.config import Config
import os
import schedule
import threading
import time
from pathlib import Path
app = Flask(__name__)
# set environment variable
app.config["ENV"] = Config.DEPLOY_ENV
times = []
image_counter = 0
image_count = 20
image_path = Path.cwd() / 'images'
def create_database():
image = get_image()
for i in range(0, image_count):
save_image(image, f'takapuna{i}.png')
create_database()
def schedule_check():
while True:
schedule.run_pending()
time.sleep(5)
def image_run():
try:
update_images()
except Exception as e:
print('hit exception!')
print(e)
pass
def get_image():
r = requests.get('http://www.windsurf.co.nz/webcams/takapuna.jpg')
if r.status_code == 200:
return r.content
else:
print(r.status_code)
print(r.text)
def save_image(image, filename):
f = open(image_path / filename, 'wb')  # open the target file for binary writing
f.write(image)
f.close()
def update_images():
global image_counter
image = get_image()
save_image(image, f'takapuna{image_counter}.png')
image_counter += 1
schedule.every(5).minutes.do(image_run)
x = threading.Thread(target=schedule_check, daemon=True)
x.start()
@app.errorhandler(404)
def resource_not_found(e):
return jsonify(error=str(e)), 404
@app.route("/health")
def health():
return json.dumps({"status": "healthy"})
@app.route("/")
def cam():
return create_page_from_images( get_latest_images() )
def get_latest_images():
image_list = []
for i in range(0,image_count):
data_uri = base64.b64encode(open( image_path / f'takapuna{i}.png', 'rb').read()).decode('utf-8')
img_tag = '<img src="data:image/png;base64,{0}">'.format(data_uri)
image_list.append(img_tag)
return image_list
def create_page_from_images(image_list):
page = ''
for im in image_list:
page += im
page += '\n'
return page
| 2.421875
| 2
|
plan/tests.py
|
18892021125/NWU-ACM-MIS-backend
| 5
|
12785399
|
from django.test import TestCase, Client
from plan.models import Announcement
class AnnouncementTestCase(TestCase):
def setUp(self):
self.client = Client()
Announcement.objects.create(title='test1', content='test content1')
Announcement.objects.create(title='test2', content='test content2')
Announcement.objects.create(title='test3', content='test content3')
def tearDown(self):
Announcement.objects.all().delete()
def test_get_one_announcement(self):
"""测试获取单个公告"""
for anno in Announcement.objects.all():
response = self.client.get(f'/plan/announcement/{anno.id}/')
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content, {
'id': anno.id,
'title': anno.title,
'created_date': str(anno.created_date),
'changed_date': str(anno.changed_date),
'content': anno.content,
})
def test_get_all_announcements(self):
"""测试获取所有公告"""
response = self.client.get('/plan/announcement/')
self.assertEqual(response.status_code, 200)
expected_data = list()
for anno in Announcement.objects.all():
expected_data.append({
'id': anno.id,
'title': anno.title,
'created_date': str(anno.created_date),
'changed_date': str(anno.changed_date),
})
self.assertJSONEqual(response.content, expected_data)
| 2.390625
| 2
|
Desafios/Desafio 015 (Aluguel de carros).py
|
Kimberly07-Ernane/Pythondesafios
| 0
|
12785400
|
<filename>Desafios/Desafio 015 (Aluguel de carros).py
#Challenge 15
# Write a program that asks for the number of km driven by a rented car
#and the number of days it was rented for.
#Compute the amount to pay, knowing the car costs R$60 per day plus R$0.15 per km driven.
dias=int(input('Number of days the car was rented for: '))
km=float(input('Number of km driven: '))
pago=dias*60 + (km * 0.15)
print('The total to pay is R${:.2f}'.format(pago))
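# Worked check with assumed figures: 5 days and 300 km
# -> 5*60 + 300*0.15 = 300.00 + 45.00 = R$345.00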
| 3.890625
| 4
|
BGWpy/core/task.py
|
GkAntonius/BGWpy
| 27
|
12785401
|
<reponame>GkAntonius/BGWpy
from __future__ import print_function
import sys
import os
import warnings
import subprocess
import pickle
import contextlib
from ..config import default_mpi
from .util import exec_from_dir, last_lines_contain
from .runscript import RunScript
# Public
__all__ = ['Task', 'MPITask', 'IOTask']
class Task(object):
"""Task executed from a single directory, from a single script."""
_dirname = '.'
_TASK_NAME = 'Task'
_STATUS_COMPLETED = 'Completed'
_STATUS_UNSTARTED = 'Unstarted'
_STATUS_UNFINISHED = 'Unfinished'
_STATUS_UNKNOWN = 'Unknown'
_report_colors = {
_STATUS_COMPLETED : '\033[92m',
_STATUS_UNSTARTED : '\033[94m',
_STATUS_UNFINISHED : '\033[91m',
_STATUS_UNKNOWN : '\033[95m',
}
_end_color = '\033[0m'
def __init__(self, dirname='./', runscript_fname='run.sh', store_variables=False, *args, **kwargs):
"""
Keyword arguments
-----------------
dirname : str ('./')
Main directory from which the scripts are executed.
runscript_fname : str ('run.sh')
Name of the main execution script.
store_variables : bool (False)
Write all the initialization variables in a pkl file
at writing time. Must be set at initialization
in order to be effective.
"""
self.dirname = dirname
self.runscript = RunScript()
self.runscript.fname = runscript_fname
self.variables = kwargs if store_variables else dict()
self._TASK_NAME = '{:<10} : {}'.format(self._TASK_NAME, self.dirname)
@property
def dirname(self):
return self._dirname
@dirname.setter
def dirname(self, value):
self._dirname = value
# TODO: Update the links
@property
def runscript_fname(self):
basename = self.runscript.fname
return os.path.join(self.dirname, basename)
@runscript_fname.setter
def runscript_fname(self, value):
if os.path.basename(value) != value:
raise Exception('Cannot use a path for runscript_fname')
self.runscript.fname = value
def exec_from_dirname(self):
"""Execute commands from the main directory, then come back."""
return exec_from_dir(self.dirname)
def run(self):
with self.exec_from_dirname():
self.runscript.run()
def write(self):
subprocess.call(['mkdir', '-p', self.dirname])
with self.exec_from_dirname():
self.runscript.write()
if self.variables:
with open('variables.pkl', 'wb') as f:
pickle.dump(self.variables, f)
def update_link(self, target, dest):
"""
Modify or add a symbolic link.
If dest is already defined in a link, then the target is replaced.
The target will be expressed relative to the dirname.
The destination *must* be relative to the dirname.
If target is empty or None, the link is suppressed.
"""
if not target:
self.remove_link(dest)
return
reltarget = os.path.relpath(
target, os.path.join(self.dirname, os.path.dirname(dest)))
for link in self.runscript.links:
if link[1] == dest:
link[0] = reltarget
break
else:
self.runscript.add_link(reltarget, dest)
def remove_link(self, dest):
"""Remove a link from the name of the destination."""
for i, link in enumerate(self.runscript.links):
if link[1] == dest:
del self.runscript.links[i]
break
def update_copy(self, source, dest):
"""
Modify or add a file to copy.
If dest is already defined in a link, then the source is replaced.
The source will be expressed relative to the dirname.
The destination *must* be relative to the dirname.
"""
relsource = os.path.relpath(source, os.path.realpath(self.dirname))
for copy in self.runscript.copies:
if copy[1] == dest:
copy[0] = relsource
break
else:
self.runscript.add_copy(relsource, dest)
def get_status(self):
"""
Return the status of the task. Possible status are:
Completed, Unstarted, Unfinished, Unknown.
"""
return self._STATUS_UNKNOWN
def is_complete(self):
"""True if the task reports a completed status."""
status = self.get_status()
return (status is self._STATUS_COMPLETED)
def report(self, file=None, color=True, **kwargs):
"""
Report whether the task completed normally.
Keyword arguments
-----------------
file: (sys.stdout)
Write the task status in an open file.
check_time: bool (False)
Consider a task as unstarted if output is older than input.
color: bool (True)
Color the output. Use this flag to disable the colors
e.g. if you want to pipe the output to a file.
No color are used whenever a 'file' argument is given.
"""
status = self.get_status(**kwargs)
if file is None and color:
col = self._report_colors[status]
status = col + str(status) + self._end_color
s = ' {:<50} - Status : {}'.format(self._TASK_NAME, status)
file = file if file is not None else sys.stdout
print(s, file=file)
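# --- Hedged usage sketch (not part of the original module) ---
# Minimal lifecycle of a Task, with made-up paths:
# task = Task(dirname='Runs/t1', runscript_fname='run.sh')
# task.update_link('Data/input.in', 'input.in')
# task.write()   # creates the directory, the run script and the links
# task.run()
# task.report()  # prints e.g. "Task       : Runs/t1 - Status : Unknown"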
# =========================================================================== #
class MPITask(Task):
"""Task whose run script defines MPI variables for the main execution."""
_mpirun = default_mpi['mpirun']
_nproc = default_mpi['nproc']
_nproc_flag = default_mpi['nproc_flag']
_nproc_per_node = default_mpi['nproc_per_node']
_nproc_per_node_flag = default_mpi['nproc_per_node_flag']
_nodes = default_mpi['nodes']
_nodes_flag = default_mpi['nodes_flag']
def __init__(self, *args, **kwargs):
"""
Keyword arguments
-----------------
dirname : str ('./')
Main directory from which the scripts are executed.
runscript_fname : str ('run.sh')
Name of the main execution script.
mpirun : str ('mpirun')
Command to call the mpi runner.
nproc : int (1)
Number of processors or number of parallel executions.
nproc_flag: str ('-n')
Flag to specify nproc to the mpi runner.
nproc_per_node : int (nproc)
Number of processors (parallel executions) per node.
nproc_per_node_flag : str ('--npernode')
Flag to specify the number of processors per node.
nodes : int (1)
Number of nodes.
nodes_flag: str ('-n')
Flag to specify the number of nodes to the mpi runner.
"""
super(MPITask, self).__init__(*args, **kwargs)
self.mpirun = default_mpi['mpirun']
self.nproc_flag = default_mpi['nproc_flag']
self.nproc_per_node_flag = default_mpi['nproc_per_node_flag']
self.nproc = default_mpi['nproc']
self.nproc_per_node = default_mpi['nproc_per_node']
for key in ('mpirun', 'nproc', 'nproc_flag',
'nproc_per_node', 'nproc_per_node_flag',
'nodes', 'nodes_flag'):
if key in kwargs:
setattr(self, key, kwargs[key])
# This is mostly for backward compatibility
if 'mpirun_n' in kwargs:
self.mpirun_n = kwargs['mpirun_n']
def _declare_mpirun(self):
self.runscript['MPIRUN'] = self.mpirun_variable
@property
def mpirun_variable(self):
if not self.mpirun:
return ''
variable = str(self.mpirun)
if self.nproc_flag and self.nproc:
variable += ' {} {}'.format(self.nproc_flag, self.nproc)
if self.nproc_per_node_flag and self.nproc_per_node:
variable += ' {} {}'.format(self.nproc_per_node_flag, self.nproc_per_node)
if self.nodes_flag and self.nodes:
variable += ' {} {}'.format(self.nodes_flag, self.nodes)
return variable
@property
def mpirun(self):
return self._mpirun
@mpirun.setter
def mpirun(self, value):
self._mpirun = value
self._declare_mpirun()
@property
def nproc(self):
return self._nproc
@nproc.setter
def nproc(self, value):
self._nproc = value
self._declare_mpirun()
@property
def nproc_flag(self):
return self._nproc_flag
@nproc_flag.setter
def nproc_flag(self, value):
self._nproc_flag = value
self._declare_mpirun()
@property
def nproc_per_node(self):
return self._nproc_per_node
@nproc_per_node.setter
def nproc_per_node(self, value):
self._nproc_per_node = value
self._declare_mpirun()
@property
def nproc_per_node_flag(self):
return self._nproc_per_node_flag
@nproc_per_node_flag.setter
def nproc_per_node_flag(self, value):
self._nproc_per_node_flag = value
self._declare_mpirun()
@property
def nodes(self):
return self._nodes
@nodes.setter
def nodes(self, value):
self._nodes = value
self._declare_mpirun()
@property
def nodes_flag(self):
return self._nodes_flag
@nodes_flag.setter
def nodes_flag(self, value):
self._nodes_flag = value
self._declare_mpirun()
@property
def mpirun_n(self):
return self.mpirun + ' ' + self.nproc_flag
@mpirun_n.setter
def mpirun_n(self, value):
if not value:
self.nproc_flag = ''
self.mpirun = ''
return
parts = value.split()
if len(parts) == 0:
self.nproc_flag = ''
self.mpirun = ''
elif len(parts) == 1:
self.nproc_flag = ''
self.mpirun = parts[0]
elif len(parts) == 2:
self.nproc_flag = parts[1]
self.mpirun = parts[0]
else:
self.nproc_flag = parts[1]
self.mpirun = parts[0]
# =========================================================================== #
class IOTask(Task):
"""
Task that depends on an input and that produces an output,
which might be checked for completion.
"""
_input_fname = ''
_output_fname = ''
_TAG_JOB_COMPLETED = 'JOB COMPLETED'
# It is important that this task has no __init__ function,
# because it is mostly used with multiple-inheritance classes.
def get_status(self, check_time=False):
if self._input_fname:
if not os.path.exists(self.input_fname):
return self._STATUS_UNSTARTED
if self._output_fname:
if not os.path.exists(self.output_fname):
return self._STATUS_UNSTARTED
if check_time:
input_creation_time = os.path.getmtime(self.input_fname)
output_creation_time = os.path.getmtime(self.output_fname)
if input_creation_time > output_creation_time:
return self._STATUS_UNSTARTED
if not self._TAG_JOB_COMPLETED:
return self._STATUS_UNKNOWN
if last_lines_contain(self.output_fname, self._TAG_JOB_COMPLETED):
return self._STATUS_COMPLETED
return self._STATUS_UNFINISHED
@property
def input_fname(self):
basename = self._input_fname
if 'input' in dir(self):
basename = self.input.fname
return os.path.join(self.dirname, basename)
@input_fname.setter
def input_fname(self, value):
if os.path.basename(value) != value:
raise Exception('Cannot use a path for input_fname')
self._input_fname = value
if 'input' in dir(self):
self.input.fname = value
@property
def output_fname(self):
return os.path.join(self.dirname, self._output_fname)
| 2.25
| 2
|
rabbitai/db_engine_specs/hana.py
|
psbsgic/rabbitai
| 0
|
12785402
|
from datetime import datetime
from typing import Optional
from rabbitai.db_engine_specs.base import LimitMethod
from rabbitai.db_engine_specs.postgres import PostgresBaseEngineSpec
from rabbitai.utils import core as utils
class HanaEngineSpec(PostgresBaseEngineSpec):
engine = "hana"
engine_name = "SAP HANA"
limit_method = LimitMethod.WRAP_SQL
force_column_alias_quotes = True
max_column_name_length = 30
_time_grain_expressions = {
None: "{col}",
"PT1S": "TO_TIMESTAMP(SUBSTRING(TO_TIMESTAMP({col}),0,20))",
"PT1M": "TO_TIMESTAMP(SUBSTRING(TO_TIMESTAMP({col}),0,17) || '00')",
"PT1H": "TO_TIMESTAMP(SUBSTRING(TO_TIMESTAMP({col}),0,14) || '00:00')",
"P1D": "TO_DATE({col})",
"P1M": "TO_DATE(SUBSTRING(TO_DATE({col}),0,7)||'-01')",
"P0.25Y": "TO_DATE(SUBSTRING( \
TO_DATE({col}), 0, 5)|| LPAD(CAST((CAST(SUBSTRING(QUARTER( \
TO_DATE({col}), 1), 7, 1) as int)-1)*3 +1 as text),2,'0') ||'-01')",
"P1Y": "TO_DATE(YEAR({col})||'-01-01')",
}
@classmethod
def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
tt = target_type.upper()
if tt == utils.TemporalType.DATE:
return f"TO_DATE('{dttm.date().isoformat()}', 'YYYY-MM-DD')"
if tt == utils.TemporalType.TIMESTAMP:
return f"""TO_TIMESTAMP('{dttm
.isoformat(timespec="microseconds")}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')"""
return None
| 2.234375
| 2
|
oop12/lesson12h.py
|
ay1011/MITx-6.00.1x-Introduction-to-Computer-Science-and-Programming-Using-Python
| 0
|
12785403
|
import math
import time
def genPrimes():
primes = [] # primes generated so far
last = 1 # last number tried
while True:
last += 1
temp = math.sqrt(last)
for p in primes:
if p <= temp:
if last % p == 0 :
break
else:
primes.append(last)
yield last
start = time.time()
foo = genPrimes()
for n in range(30000):
print(next(foo))
#for n in genPrimes(): print n
end = time.time()
print(end - start)
| 3.59375
| 4
|
ros_compatibility/src/ros_compatibility/exceptions.py
|
SebastianHuch/ros-bridge
| 314
|
12785404
|
#!/usr/bin/env python
#
# Copyright (c) 2021 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
from ros_compatibility.core import get_ros_version
ROS_VERSION = get_ros_version()
if ROS_VERSION == 1:
import rospy
class ROSException(rospy.ROSException):
pass
class ROSInterruptException(rospy.ROSInterruptException):
pass
class ServiceException(rospy.ServiceException):
pass
elif ROS_VERSION == 2:
import rclpy.exceptions
class ROSException(Exception):
pass
class ROSInterruptException(rclpy.exceptions.ROSInterruptException):
pass
class ServiceException(Exception):
pass
| 2.234375
| 2
|
src/irm_undirected.py
|
FreeTheOtter/BNP-Net
| 0
|
12785405
|
import igraph as ig
import numpy as np
from scipy.special import betaln
g = ig.Graph.Read_GML('karate.txt')
X = np.array(g.get_adjacency().data)
def irm(X, T, a, b, A, random_seed = 42):
N = len(X)
z = np.ones([N,1])
Z = []
np.random.seed(random_seed)
for t in range(T): # for T iterations
for n in range(N): # for each node n
#nn = index mask without currently sampled node n
nn = [_ for _ in range(N)]
nn.remove(n)
X_ = X[np.ix_(nn,nn)] #adjacency matrix without currently sampled node
# K = n. of components
K = len(z[0])
# Delete empty component if present
if K > 1:
idx = np.argwhere(np.sum(z[nn], 0) == 0)
z = np.delete(z, idx, axis=1)
K -= len(idx)
# m = n. of nodes in each component
m = np.sum(z[nn], 0)[np.newaxis]
M = np.tile(m, (K, 1))
# M1 = n. of links between components without current node
M1 = z[nn].T @ X_ @ z[nn] - np.diag(np.sum(X_@z[nn]*z[nn], 0) / 2)
# M0 = n. of non-links between components without current node
M0 = m.T@m - np.diag((m*(m+1) / 2).flatten()) - M1
# r = n. of links from current node to components
r = z[nn].T @ X[nn, n]
R = np.tile(r, (K, 1))
# lik matrix of current node sampled to each component
likelihood = betaln(M1+R+a, M0+M-R+b) - betaln(M1+a, M0+b)
# lik of current node to new component
likelihood_n = betaln(r+a, m-r+b) - betaln(a,b)
logLik = np.sum(np.concatenate([likelihood, likelihood_n]), 1)
logPrior = np.log(np.append(m, A))
logPost = logPrior + logLik
# Convert from log probabilities, normalized to max
P = np.exp(logPost-max(logPost))
# Assignment through random draw fron unif(0,1), taking first value from prob. vector
draw = np.random.rand()
i = np.argwhere(draw<np.cumsum(P)/sum(P))[0]
# Assignment of current node to component i
z[n,:] = 0
if i == K: # If new component: add new column to partition matrix
z = np.hstack((z, np.zeros((N,1))))
z[n,i] = 1
# Delete empty component if present
idx = np.argwhere(np.all(z[..., :] == 0, axis=0))
z = np.delete(z, idx, axis=1)
Z.append(z)
print(z)
print(m)
return Z
T = 500
a = 1
b = 1
A = 10
Z = irm(X, T, a, b, A)
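# Column sums of a sampled z give the number of nodes in each component,
# so the loop below prints the community sizes of the last 10 samples.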
for i in range(1, 11):
print(np.sum(Z[-i], 0))
| 2.140625
| 2
|
python-fastapi/api/app.py
|
duijf/boilerplates
| 0
|
12785406
|
from __future__ import annotations
import logging
import typing as t
from fastapi import APIRouter, FastAPI
from fastapi.param_functions import Depends
from starlette.responses import (
JSONResponse,
Response,
)
from api.config import get_config
from api.postgres import Connection, Postgres, connect_and_migrate
logger = logging.getLogger(__name__)
router = APIRouter()
@router.get("/")
async def get_home(conn: Connection = Depends(Postgres.connection)) -> Response:
users = await conn.fetch("SELECT username FROM users;")
return JSONResponse(content={"users": [user[1] for user in users]})
def create_app() -> FastAPI:
config = get_config()
async def on_startup() -> None:
await connect_and_migrate(config.postgres)
async def on_shutdown() -> None:
await Postgres.disconnect()
app = FastAPI(
openapi_url=None,
on_startup=[on_startup],
on_shutdown=[on_shutdown],
# CUSTOMIZE
# exception_handlers={},
)
# CUSTOMIZE
# app.mount(
# "/static", app=StaticFiles(directory=config.static_dir, html=True)
# )
app.include_router(router)
return app
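# --- Hedged usage note (not part of the original module) ---
# With the factory above, the app can be served for local testing via e.g.:
#   uvicorn "api.app:create_app" --factory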
| 2.328125
| 2
|
authors/apps/comments/signals.py
|
andela/ah-backend-summer
| 1
|
12785407
|
<reponame>andela/ah-backend-summer<gh_stars>1-10
from django.db.models.signals import (
post_save, m2m_changed as model_field_changed_signal)
from django.dispatch import receiver, Signal
from authors.apps.comments.models import Comment
# custom signal we shall send when a comment is published
# the rationale for this custom signal is discussed in the articles app
comment_published_signal = Signal(providing_args=["comment"])
comment_liked_signal = Signal(
providing_args=["comment", "user_model", "id"]
)
class CommentsSignalSender:
pass
@receiver(post_save, sender=Comment)
def on_comment_post_save(sender, **kwargs):
if kwargs['created']:
comment_published_signal.send(CommentsSignalSender,
comment=kwargs['instance'])
@receiver(model_field_changed_signal, sender=Comment.liked_by.through)
def on_like_comment(sender, **kwargs):
"""
on_like_comment is run when a user likes a comment. Then calls a signal
to notify the user
"""
comment = kwargs.get("instance")
user_model = kwargs.get("model")
action = kwargs.get("action")
pk_set = kwargs.get("pk_set")
if action == "post_add":
user_id = [pk for pk in pk_set]
comment_liked_signal.send(CommentsSignalSender,
comment=comment,
user_model=user_model,
id=user_id[0])
| 2.265625
| 2
|
rastervision/evaluation/object_detection_evaluation.py
|
ValRat/raster-vision
| 4
|
12785408
|
<gh_stars>1-10
from rastervision.evaluation import ClassEvaluationItem
from rastervision.evaluation import ClassificationEvaluation
class ObjectDetectionEvaluation(ClassificationEvaluation):
"""Evaluation for object detection.
Requires TensorFlow Object Detection to be available as it utilizes
some of its evalution utility functions.
"""
def __init__(self, class_map):
super().__init__()
self.class_map = class_map
def compute(self, ground_truth_labels, prediction_labels):
nb_classes = len(self.class_map)
od_eval = ObjectDetectionEvaluation.compute_od_eval(
ground_truth_labels, prediction_labels, nb_classes)
self.class_to_eval_item = ObjectDetectionEvaluation.parse_od_eval(
od_eval, self.class_map)
self.compute_avg()
@staticmethod
def compute_od_eval(ground_truth_labels, prediction_labels, nb_classes):
# Lazy import of TFOD
from object_detection.utils import object_detection_evaluation
matching_iou_threshold = 0.5
od_eval = object_detection_evaluation.ObjectDetectionEvaluation(
nb_classes, matching_iou_threshold=matching_iou_threshold)
image_key = 'image'
od_eval.add_single_ground_truth_image_info(
image_key, ground_truth_labels.get_npboxes(),
ground_truth_labels.get_class_ids() - 1)
od_eval.add_single_detected_image_info(
image_key, prediction_labels.get_npboxes(),
prediction_labels.get_scores(),
prediction_labels.get_class_ids() - 1)
od_eval.evaluate()
return od_eval
@staticmethod
def parse_od_eval(od_eval, class_map):
class_to_eval_item = {}
score_ind = -1
for class_id in range(1, len(class_map) + 1):
gt_count = int(od_eval.num_gt_instances_per_class[class_id - 1])
class_name = class_map.get_by_id(class_id).name
if gt_count == 0:
eval_item = ClassEvaluationItem(
class_id=class_id, class_name=class_name)
else:
# precisions_per_class has an element appended to it for each
# class_id that has gt_count > 0. This means that the length of
# precision_per_class can be shorter than the total number of
# classes in the class_map. Therefore, we use score_ind to index
# into precisions_per_class instead of simply using class_id - 1.
score_ind += 1
# Precisions and recalls across a range of detection thresholds.
precisions = od_eval.precisions_per_class[score_ind]
recalls = od_eval.recalls_per_class[score_ind]
if len(precisions) == 0 or len(recalls) == 0:
# No predicted boxes.
eval_item = ClassEvaluationItem(
precision=None,
recall=0,
gt_count=gt_count,
class_id=class_id,
class_name=class_name)
else:
# If we use the lowest detection threshold (ie. use all
# detected boxes as defined by score_thresh in the predict
# protobuf), that means we use all detected boxes, or the last
# element in the precisions array.
precision = float(precisions[-1])
recall = float(recalls[-1])
f1 = 0.
if precision + recall != 0.0:
f1 = (2 * precision * recall) / (precision + recall)
pred_count = len(recalls)
count_error = pred_count - gt_count
norm_count_error = None
if gt_count > 0:
norm_count_error = count_error / gt_count
eval_item = ClassEvaluationItem(
precision=precision,
recall=recall,
f1=f1,
count_error=norm_count_error,
gt_count=gt_count,
class_id=class_id,
class_name=class_name)
class_to_eval_item[class_id] = eval_item
return class_to_eval_item
| 2.46875
| 2
|
src/Solu9.py
|
wsdmakeup/codePractice
| 0
|
12785409
|
# -*- coding:utf-8 -*-
'''
Determine whether an integer is a palindrome. Do this without extra space.
'''
'''
Converting the number to a str is not allowed.
Instead, compare the leftmost and rightmost digits on each pass, then strip
both ends (shrink the leading divisor by 100) and repeat.
'''
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
if x < 0:
return False
a = x
c = a
n = 0
flag = True
while(c>0):
n += 1
c = c/10
print n
for idx in range(n):
if idx > n -1 -idx:
break
else:
n1 = (a % (10**(idx+1)))/(10**idx)
n2 = (a / (10**(n-idx-1)))%10
print n1,n2
if n1!=n2:
flag = False
return flag
def anotherTry(self, x):
if x < 0:
return False
d = 1
while x/d >= 10:
d = d*10
while(x !=0):
l = x / d
r = x % 10
if l != r:
return False
x = x % d
x = x /10
d = d /100
return True
if __name__ == '__main__':
x = 123
print Solution().anotherTry(x)
x = 12321
print Solution().anotherTry(x)
x = 123321
print Solution().anotherTry(x)
x = -2147447412
print Solution().anotherTry(x)
x = 0
print Solution().anotherTry(x)
| 3.734375
| 4
|
NetworkX/Example_2_Drawing.py
|
Farhad-UPC/Zero-Touch_Network_Slicing
| 0
|
12785410
|
import networkx as nx
import matplotlib.pyplot as plt
G=nx.cubical_graph() #Return the 3-regular Platonic Cubical graph.
plt.subplot(121) #subplot(nrows, ncols, index)
nx.draw(G) # default spring_layout
plt.subplot(122)
nx.draw(G, pos=nx.circular_layout(G), node_color='r', edge_color='b')
plt.show()
| 3.40625
| 3
|
airflow/migrations/versions/c2091a80ac70_create_scheduler_state_table.py
|
TriggerMail/incubator-airflow
| 1
|
12785411
|
<reponame>TriggerMail/incubator-airflow<gh_stars>1-10
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""create scheduler_state table
Revision ID: c2091a80ac70
Revises: <PASSWORD>
Create Date: 2019-11-14 16:22:10.159454
"""
# revision identifiers, used by Alembic.
revision = 'c2091a80ac70'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'scheduler_state',
sa.Column('state', sa.String(50), primary_key=True)
)
def downgrade():
op.drop_table("scheduler_state")
| 1.421875
| 1
|
prajwal/__init__.py
|
Ishita-Sharma1/Prajwal
| 1
|
12785412
|
from flask import Flask
from flask import render_template
app = Flask(__name__)
# main routes
@app.route('/')
def index():
return render_template('index.html')
@app.route('/home')
def home():
return render_template('home.html')
| 2.46875
| 2
|
algorithms/implementation/taum_and_bday.py
|
avenet/hackerrank
| 0
|
12785413
|
<gh_stars>0
t = int(
input().strip()
)
for a0 in range(t):
b, w = input().strip().split(' ')
b, w = [int(b),int(w)]
x, y, z = input().strip().split(' ')
x, y, z = int(x), int(y), int(z)
min_black_price = min(b*x, b*(y+z))
min_white_price = min(w*y, w*(x+z))
print(min_black_price + min_white_price)
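# Worked check with assumed input b=3, w=5 and x=1, y=2, z=4:
# black: min(3*1, 3*(2+4)) = 3; white: min(5*2, 5*(1+4)) = 10 -> prints 13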
| 2.96875
| 3
|
AbstractSearchEngine/utils/stemmer.py
|
canuse/arXiv_explorer
| 0
|
12785414
|
<gh_stars>0
from Stemmer import Stemmer
from AbstractSearchEngine.db.StemHistory import update_stem_history, query_origin_word
from functools import lru_cache
stemmer = Stemmer('porter')
def stem(term, record=False):
"""
stem word
"""
stemmed_word = stemmer.stemWord(term)
if record:
update_stem_history(term, stemmed_word)
return stemmed_word
@lru_cache(maxsize=1024)
def unstem(stemmed_term):
"""
unstem word
"""
return query_origin_word(stemmed_term)
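# --- Hedged usage sketch (not part of the original module) ---
# stem('running', record=True) would return 'run' via the Porter stemmer and
# persist the (original, stemmed) pair; that stored history is what lets
# unstem('run') recover an original word later.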
| 2.453125
| 2
|
api.py
|
ignaloidas/savetheplanet-backend
| 0
|
12785415
|
<filename>api.py<gh_stars>0
from datetime import datetime
import pytz
from firebase_admin import auth
from flask import Blueprint, abort, jsonify, request
from flask_login import login_user
from app import db
from models import User, FirebaseSubscription
from utils import generate_random_string, require_authentication
api = Blueprint("api", __name__)
@api.errorhandler(Exception)
def resource_not_found(e):
return jsonify(error=str(e)), 400
@api.route("/update_location", methods=["POST"])
@require_authentication
def update_location(user: User):
if not request.is_json:
abort(400, "Not JSON")
data = request.json
lat = data.get("lat")
lon = data.get("lon")
if lat is None or lon is None:
abort(400, "No lat/lon")
time = datetime.now(tz=pytz.UTC)
user.last_seen_lat = lat
user.last_seen_lon = lon
user.last_seen_time = time
db.session.add(user)
db.session.commit()
return jsonify(msg="success"), 200
@api.route("/register", methods=["POST"])
def register():
if not request.is_json:
abort(400, "Not JSON")
data = request.json
user = User()
password = data.get("password")
email = data.get("email")
if not (password and email):
abort(400, "Password and email not given")
if User.query.filter(User.email == email).count() > 0:
abort(400, "email already registered")
if len(password) < 6:
abort(400, "password too short")
token = generate_random_string(64)
firebase_user = auth.create_user(
email=email, email_verified=False, password=password, disabled=False
)
user.password = password
user.email = email
user.token = token
user.firebase_uid = firebase_user.uid
db.session.add(user)
db.session.commit()
login_user(user)
return jsonify(token=token), 200
@api.route("/login", methods=["POST"])
def login():
if not request.is_json:
abort(400, "Not JSON")
data = request.json
password = data.get("password")
email = data.get("email")
if not (password and email):
abort(400, "Password and email not given")
user = User.query.filter(User.email == email).one_or_none()
if user and user.verify_password(password):
login_user(user)
return jsonify(token=user.token)
abort(400, "Incorrect username or password")
@api.route("/get_firebase_token")
@require_authentication
def get_firebase_token(user: User):
uid = user.firebase_uid
custom_token = auth.create_custom_token(uid)
return jsonify(firebase_token=custom_token.decode()), 200
@api.route("/register_fcm_token", methods=["POST"])
@require_authentication
def register_fcm_token(user: User):
if not request.is_json:
abort(400, "Not JSON")
data = request.json
token = data.get("token")
if not token:
abort(400, "No token provided")
subscription = FirebaseSubscription()
subscription.user = user
subscription.firebase_token = token
db.session.add(subscription)
db.session.commit()
return "", 200
| 2.515625
| 3
|
data_processing/study_constants.py
|
calico/catnap
| 0
|
12785416
|
import datetime
END_DATE = datetime.date(2019, 4, 25)
STATE2DESCRIPTOR = {0: 'REST', 1: 'SLEEP', 2: 'ACTIVE', 3: 'RUN', 4: 'EAT&DRINK', 5: 'EAT'}
DAY_NIGHTS = ['day', 'night']
STATES = list(STATE2DESCRIPTOR.values())
STATE_AGG_BASE_FEATURES = ['VO2', 'VCO2', 'VH2O', 'KCal_hr', 'RQ', 'Food', 'PedMeters', 'AllMeters']
STATE_AGGREGATE_FNS = ['nanmean', 'pp99']
CIRCADIAN_AGG_BASE_FEATURES = ["VO2", "RQ", "KCal_hr", "Food", "Water", "BodyMass", "WheelSpeed",
"WheelMeters", 'PedMeters', "AllMeters", "XBreak", "YBreak", "ZBreak"]
CIRCADIAN_AGGREGATE_FNS = ['mean', 'pp99']
INDEX_VARS = ['mouse_id', 'trace_id', 'date', 'age_in_months', 'day_of_week']
AGE_ORDER = ['00-03 months', '03-06 months', '06-09 months', '09-12 months',
'12-15 months', '15-18 months', '18-21 months', '21-24 months',
'24-27 months', '27-30 months', '30-33 months',
'33 months or older']
| 2.0625
| 2
|
setup.py
|
stuwilkins/antplus
| 1
|
12785417
|
import os
import re
import sys
import platform
import subprocess
import versioneer
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
cpus = os.cpu_count()
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
with open(os.path.join(here, 'requirements.txt')) as f:
requirements = f.read().split()
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following"
"extensions: , ".join(
e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(
re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
# cfg = 'Debug' if self.debug else 'Release'
cfg = 'RelWithDebInfo'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'
.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--target', '_pyantplus']
build_args += ['--', '-j{}'.format(cpus)]
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DPY_VERSION_INFO=\\"{}\\"' .format(
env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
cmdclass = versioneer.get_cmdclass()
cmdclass['build_ext'] = CMakeBuild
setup(
name='pyantplus',
author='<NAME>',
author_email='<EMAIL>',
description='ANT+ Utilities',
long_description=long_description,
long_description_content_type='text/markdown',
license='MIT',
url='https://github.com/stuwilkins/ant-recorder',
packages=find_packages(),
install_requires=requirements,
setup_requires=["pytest-runner"],
tests_require=["pytest"],
ext_modules=[CMakeExtension('_pyantplus')],
cmdclass=cmdclass,
zip_safe=False,
version=versioneer.get_version(),
)
| 2
| 2
|
examples/auth/app.py
|
Alma-field/twitcaspy
| 0
|
12785418
|
# Twitcaspy
# Copyright 2021 Alma-field
# See LICENSE for details.
from twitcaspy import API, AppAuthHandler
# The client id and/or secret can be found on your application's Details page
# located at select app in https://twitcasting.tv/developer.php
# (in "details" tab)
CLIENT_ID = ''
CLIENT_SECRET = ''
auth = AppAuthHandler(CLIENT_ID, CLIENT_SECRET)
api = API(auth)
# Target User ID and screen ID
user_id = '182224938'
screen_id = 'twitcasting_jp'
# If the authentication was successful, you should
# see the name of the account print out
print(api.get_user_info(id=user_id).user.name)
result = api.get_webhook_list()
print(result.all_count)
for webhook in result.webhooks:
print(f'{webhook.user_id}: {webhook.event}')
| 2.65625
| 3
|
libs/http.py
|
BreakUnrealGod/TanTan
| 1
|
12785419
|
from django.conf import settings
from django.http import JsonResponse
from common import errors
def render_json(code=errors.OK, data=None):
"""
Custom JSON response helper.
:param code:
:param data:
:return:
"""
result = {
'code': code
}
if data:
result['data'] = data
if settings.DEBUG:
json_dumps_params = {'indent': 4, 'ensure_ascii': False}
else:
json_dumps_params = {'separators': (',', ':')}
return JsonResponse(result, safe=False, json_dumps_params=json_dumps_params)
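# --- Hedged usage sketch (not part of the original module) ---
# A view can wrap its payload in the shared envelope like so:
#   def profile(request):
#       return render_json(data={'nickname': 'demo'})
#   # -> {"code": errors.OK, "data": {"nickname": "demo"}}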
| 2.203125
| 2
|
src/GTV/Gtv.py
|
ErnstKapp/postchain-client
| 0
|
12785420
|
"""
This class holds the Merkle-root hash that will be used
for chaining blocks together.
"""
class GTV:
def __init__(self):
self.merkleRoot = bytearray(0)
| 1.757813
| 2
|
utils.py
|
jiahuei/bottom-up-attention-tf
| 0
|
12785421
|
<reponame>jiahuei/bottom-up-attention-tf<filename>utils.py
import errno
import os
import numpy as np
from PIL import Image
EPS = 1e-7
def assert_eq(real, expected):
assert real == expected, '%s (true) vs %s (expected)' % (real, expected)
def assert_array_eq(real, expected):
assert (np.abs(real - expected) < EPS).all(), \
'%s (true) vs %s (expected)' % (real, expected)
def load_folder(folder, suffix):
imgs = []
for f in sorted(os.listdir(folder)):
if f.endswith(suffix):
imgs.append(os.path.join(folder, f))
return imgs
def load_imageid(folder):
images = load_folder(folder, 'jpg')
img_ids = set()
for img in images:
img_id = int(img.split('/')[-1].split('.')[0].split('_')[-1])
img_ids.add(img_id)
return img_ids
def pil_loader(path):
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
def create_dir(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def convert_entries(entries):
new_entries = {}
entry_keys = list(entries[0].keys())
for key in entry_keys:
temp = [entry[key] for entry in entries]
new_entries[key] = np.array(temp)
return new_entries
def get_h5py_path(dataroot, name):
assert name in ['train', 'val']
h5_path = os.path.join(dataroot, '%s36.hdf5' % name)
return h5_path
if __name__ == '__main__':
entry = {'hi': 123, 'test': 456}
entries = [entry, entry, entry]
test = convert_entries(entries)
assert type(test) == type(entry)
assert list(test.keys()) == list(entry.keys())
print(test)
| 2.34375
| 2
|
train.py
|
oumayb/NeighConsensus
| 1
|
12785422
|
import sys
sys.path.append('./model')
import argparse
import torch
import numpy as np
from model.model import NCNet
import torchvision.transforms as transforms
from dataloader import TrainLoader, ValLoader
from loss import WeakLoss
import torch.optim as optim
import json
import os
## Parameters
parser = argparse.ArgumentParser(description='Nc-Net Training')
## Input / Output
parser.add_argument('--outDir', type=str, help='output model directory')
parser.add_argument('--resumePth', type=str, help='resume model path')
parser.add_argument('--featExtractorPth', type=str, default = 'model/FeatureExtractor/resnet18.pth', help='feature extractor path')
parser.add_argument('--imgDir', type=str, default = 'data/pf-pascal/JPEGImages/', help='image Directory')
parser.add_argument('--trainCSV', type=str, default = 'data/pf-pascal/train.csv', help='train csv')
parser.add_argument('--valCSV', type=str, default = 'data/pf-pascal/val.csv', help='val csv')
parser.add_argument('--imgSize', type=int, default = 400, help='train image size')
## learning parameter
parser.add_argument('--lr', type=float, default=5e-4, help='learning rate')
parser.add_argument('--batchSize', type=int, default=16, help='batch size')
parser.add_argument('--nbEpoch', type=int, default=5, help='number of training epochs')
parser.add_argument('--neighConsKernel', nargs='+', type=int, default=[5,5,5], help='kernels sizes in neigh. cons.')
parser.add_argument('--neighConsChannel', nargs='+', type=int, default=[16,16,1], help='channels in neigh. cons')
parser.add_argument('--finetuneFeatExtractor', action='store_true', help='whether fine-tuning feature extractor')
parser.add_argument('--featExtractor', type=str, default='ResNet18Conv4', choices=['ResNet18Conv4', 'ResNet18Conv5'], help='feature extractor')
parser.add_argument('--cuda', action='store_true', help='GPU setting')
parser.add_argument('--softmaxMM', action='store_true', help='whether use softmax Mutual Matching')
args = parser.parse_args()
print(args)
## Set seed
torch.manual_seed(1)
if args.cuda:
torch.cuda.manual_seed(1)
else :
raise RuntimeError('CPU Version is not supported yet.')
np.random.seed(1)
## Initial Model
model = NCNet(kernel_sizes=args.neighConsKernel,
channels=args.neighConsChannel,
featExtractor = args.featExtractor,
featExtractorPth = args.featExtractorPth,
finetuneFeatExtractor = args.finetuneFeatExtractor,
softmaxMutualMatching = args.softmaxMM)
if not args.finetuneFeatExtractor:
msg = '\nIgnore the gradient for the parameters in the feature extractor'
print (msg)
for p in model.featExtractor.parameters():
p.requires_grad=False
if args.resumePth :
msg = '\nResume from {}'.format(args.resumePth)
model.load_state_dict(torch.load(args.resumePth))
if args.cuda :
model.cuda()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
## Train Val DataLoader
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # ImageNet normalization
trainTransform = transforms.Compose([transforms.RandomResizedCrop(args.imgSize),
transforms.ToTensor(),
normalize,])
valTransform = transforms.Compose([transforms.Resize(args.imgSize),
transforms.CenterCrop(args.imgSize),
transforms.ToTensor(),
normalize,])
trainLoader = TrainLoader(batchSize=args.batchSize,
pairCSV=args.trainCSV,
imgDir = args.imgDir,
trainTransform = trainTransform)
valLoader = ValLoader(batchSize=args.batchSize,
pairCSV=args.valCSV,
imgDir = args.imgDir,
valTransform = valTransform)
if not os.path.exists(args.outDir) :
os.mkdir(args.outDir)
# Train
bestValLoss = np.inf
history = {'TrainLoss' : [], 'ValLoss' : []}
outHistory = os.path.join(args.outDir, 'history.json')
outModel = os.path.join(args.outDir, 'netBest.pth')
for epoch in range(1, args.nbEpoch + 1) :
trainLoss = 0.
valLoss = 0.
for i, batch in enumerate(trainLoader) :
optimizer.zero_grad()
if args.cuda :
batch['source_image'] = batch['source_image'].cuda()
batch['target_image'] = batch['target_image'].cuda()
loss = WeakLoss(model, batch, args.softmaxMM)
loss.backward()
optimizer.step()
trainLoss += loss.item()
if i % 30 == 29 :
msg = '\nEpoch {:d}, Batch {:d}, Train Loss : {:.4f}'.format(epoch, i + 1, trainLoss / (i + 1))
print (msg)
## Validation
trainLoss = trainLoss / len(trainLoader)
with torch.no_grad() :
for i, batch in enumerate(valLoader) :
if args.cuda :
batch['source_image'] = batch['source_image'].cuda()
batch['target_image'] = batch['target_image'].cuda()
loss = WeakLoss(model, batch, args.softmaxMM)
valLoss += loss.item()
valLoss = valLoss / len(valLoader)
msg = 'Epoch {:d}, Train Loss : {:.4f}, Val Loss : {:.4f}'.format(epoch, trainLoss, valLoss)
history['TrainLoss'].append(trainLoss)
history['ValLoss'].append(valLoss)
with open(outHistory, 'w') as f :
json.dump(history, f)
print (msg)
if valLoss < bestValLoss :
msg = 'Validation Loss Improved from {:.4f} to {:.4f}'.format(bestValLoss, valLoss)
print (msg)
bestValLoss = valLoss
torch.save(model.state_dict(), outModel)
finalOut = os.path.join(args.outDir, 'netBest{:.3f}.pth'.format(bestValLoss))
cmd = 'mv {} {}'.format(outModel, finalOut)
os.system(cmd)
| 2.078125
| 2
|
lang/Python/loops-for-1.py
|
ethansaxenian/RosettaDecode
| 0
|
12785423
|
import sys
for i in range(5):
for j in range(i+1):
sys.stdout.write("*")
print()
| 2.828125
| 3
|
run_concat.py
|
folguinch/GoContinuum
| 1
|
12785424
|
<reponame>folguinch/GoContinuum
#!casa -c
import argparse
def main():
# Command line options
parser = argparse.ArgumentParser()
parser.add_argument('-c', nargs=1,
help='Casa parameter.')
parser.add_argument('concatvis', nargs=1, type=str,
help='Concatenated ms name')
parser.add_argument('uvdata', nargs='*', type=str,
help='uv data ms files')
args = parser.parse_args()
# Concat
concat(vis=args.uvdata, concatvis=args.concatvis[0])
if __name__=="__main__":
main()
| 2.453125
| 2
|
Spark_AccessCodeLog_Sort.py
|
MLSTS/SparkRDD
| 0
|
12785425
|
<reponame>MLSTS/SparkRDD
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
lines = sc.textFile("/home/student/CSS333/dataset/accesslog.csv",4)
codes = lines.map(lambda str: (str.split(",")[7],1))
counts = codes.reduceByKey(lambda i,j: i+j)
sorted_counts = counts.sortBy(lambda x: x[1], ascending=False, numPartitions=1)
output = sorted_counts.collect()
for (code, count) in output:
print("%s: %i" %(code, count))
spark.stop()
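# --- Hedged note (not part of the original script) ---
# Assumes the access code sits in CSV column 8 (index 7); the job counts
# occurrences per code and prints them in descending order of frequency.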
| 3.078125
| 3
|
src/abaqus/Interaction/IncidentWaveState.py
|
Haiiliin/PyAbaqus
| 7
|
12785426
|
from abaqusConstants import *
from .InteractionState import InteractionState
class IncidentWaveState(InteractionState):
"""The IncidentWaveState object stores the propagating data of an IncidentWave object in a
step. One instance of this object is created internally by the IncidentWave object for
each step. The instance is also deleted internally by the IncidentWave object.
The IncidentWaveState object has no constructor or methods.
The IncidentWaveState object is derived from the InteractionState object.
Attributes
----------
status: SymbolicConstant
A SymbolicConstant specifying the propagation state of the :py:class:`~abaqus.Interaction.InteractionState.InteractionState` object.
Possible values are: NOT_YET_ACTIVE, CREATED, PROPAGATED, MODIFIED, DEACTIVATED,
NO_LONGER_ACTIVE, TYPE_NOT_APPLICABLE, INSTANCE_NOT_APPLICABLE, BUILT_INTO_BASE_STATE
Notes
-----
This object can be accessed by:
.. code-block:: python
import interaction
mdb.models[name].steps[name].interactionStates[name]
The corresponding analysis keywords are:
- INCIDENT WAVE INTERACTION
"""
# A SymbolicConstant specifying the propagation state of the InteractionState object.
# Possible values are: NOT_YET_ACTIVE, CREATED, PROPAGATED, MODIFIED, DEACTIVATED,
# NO_LONGER_ACTIVE, TYPE_NOT_APPLICABLE, INSTANCE_NOT_APPLICABLE, BUILT_INTO_BASE_STATE
status: SymbolicConstant = None
| 2.609375
| 3
|
aiohttp_swagger3/swagger_route.py
|
grandmetric/aiohttp-swagger3
| 0
|
12785427
|
import cgi
import json
from types import FunctionType
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, cast
import attr
from aiohttp import web
from .context import COMPONENTS
from .swagger import Swagger
from .validators import MISSING, Validator, ValidatorError, schema_to_validator, security_to_validator
_SwaggerHandler = Callable[..., Awaitable[web.StreamResponse]]
class RequestValidationFailed(web.HTTPBadRequest):
"""This exception can be caught in a aiohttp middleware.
:param dict errors: This dict stores validation errors.
"""
def __init__(self, errors: Dict, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.errors = errors
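# --- Hedged usage sketch (not part of the original module) ---
# An aiohttp middleware could turn the validation errors into a JSON body:
#
# @web.middleware
# async def validation_middleware(request, handler):
#     try:
#         return await handler(request)
#     except RequestValidationFailed as exc:
#         return web.json_response({"errors": exc.errors}, status=400)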
def _get_fn_parameters(fn: _SwaggerHandler) -> Tuple[str, ...]:
func = cast(FunctionType, fn)
if func.__closure__ is None:
arg_count = func.__code__.co_argcount + func.__code__.co_kwonlyargcount
return func.__code__.co_varnames[:arg_count]
return _get_fn_parameters(func.__closure__[0].cell_contents)
@attr.attrs(slots=True, auto_attribs=True)
class Parameter:
name: str
validator: Validator
required: bool
class SwaggerRoute:
__slots__ = (
"_swagger",
"method",
"path",
"handler",
"qp",
"pp",
"hp",
"cp",
"bp",
"auth",
"params",
)
def __init__(self, method: str, path: str, handler: _SwaggerHandler, *, swagger: Swagger) -> None:
self.method = method
self.path = path
self.handler = handler
self.qp: List[Parameter] = []
self.pp: List[Parameter] = []
self.hp: List[Parameter] = []
self.cp: List[Parameter] = []
self.bp: Dict[str, Parameter] = {}
self.auth: Optional[Parameter] = None
self._swagger = swagger
method_section = self._swagger.spec["paths"][path][method]
parameters = method_section.get("parameters")
body = method_section.get("requestBody")
security = method_section.get("security")
components = self._swagger.spec.get("components", {})
COMPONENTS.set(components)
if security is not None:
parameter = Parameter("", security_to_validator(security), True)
self.auth = parameter
if parameters is not None:
for param in parameters:
if "$ref" in param:
if not components:
raise Exception("file with components definitions is missing")
# '#/components/parameters/Month'
*_, section, obj = param["$ref"].split("/")
param = components[section][obj]
parameter = Parameter(
param["name"],
schema_to_validator(param["schema"]),
param.get("required", False),
)
if param["in"] == "query":
self.qp.append(parameter)
elif param["in"] == "path":
self.pp.append(parameter)
elif param["in"] == "header":
parameter.name = parameter.name.lower()
self.hp.append(parameter)
elif param["in"] == "cookie":
self.cp.append(parameter)
if body is not None:
for media_type, value in body["content"].items():
# check that we have handler for media_type
self._swagger._get_media_type_handler(media_type)
value = body["content"][media_type]
self.bp[media_type] = Parameter(
"body",
schema_to_validator(value["schema"]),
body.get("required", False),
)
self.params = set(_get_fn_parameters(self.handler))
async def parse(self, request: web.Request) -> Dict:
params = {}
if "request" in self.params:
params["request"] = request
request_key = self._swagger.request_key
request[request_key] = {}
errors: Dict = {}
# check auth
if self.auth:
try:
values = self.auth.validator.validate(request, True)
except ValidatorError as e:
if isinstance(e.error, str):
errors["authorization"] = e.error
else:
errors = e.error
raise RequestValidationFailed(reason=json.dumps(errors), errors=errors)
for key, value in values.items():
request[request_key][key] = value
# query parameters
if self.qp:
for param in self.qp:
if param.required:
try:
v: Any = request.rel_url.query.getall(param.name)
except KeyError:
errors[param.name] = "is required"
continue
if len(v) == 1:
v = v[0]
else:
v = request.rel_url.query.getall(param.name, MISSING)
if v != MISSING and len(v) == 1:
v = v[0]
try:
value = param.validator.validate(v, True)
except ValidatorError as e:
errors[param.name] = e.error
continue
if value != MISSING:
request[request_key][param.name] = value
if param.name in self.params:
params[param.name] = value
# body parameters
if self.bp:
if "Content-Type" not in request.headers:
if next(iter(self.bp.values())).required:
errors["body"] = "is required"
else:
media_type, _ = cgi.parse_header(request.headers["Content-Type"])
if media_type not in self.bp:
errors["body"] = f"no handler for {media_type}"
else:
handler = self._swagger._get_media_type_handler(media_type)
param = self.bp[media_type]
try:
v, has_raw = await handler(request)
except ValidatorError as e:
errors[param.name] = e.error
else:
try:
value = param.validator.validate(v, has_raw)
except ValidatorError as e:
errors[param.name] = e.error
else:
request[request_key][param.name] = value
if param.name in self.params:
params[param.name] = value
# header parameters
if self.hp:
for param in self.hp:
if param.required:
try:
v = request.headers.getone(param.name)
except KeyError:
errors[param.name] = "is required"
continue
else:
v = request.headers.get(param.name, MISSING)
try:
value = param.validator.validate(v, True)
except ValidatorError as e:
errors[param.name] = e.error
continue
if value != MISSING:
request[request_key][param.name] = value
if param.name in self.params:
params[param.name] = value
# path parameters
if self.pp:
for param in self.pp:
v = request.match_info[param.name]
try:
value = param.validator.validate(v, True)
except ValidatorError as e:
errors[param.name] = e.error
continue
request[request_key][param.name] = value
if param.name in self.params:
params[param.name] = value
# cookie parameters
if self.cp:
for param in self.cp:
if param.required:
try:
v = request.cookies[param.name]
except KeyError:
errors[param.name] = "is required"
continue
else:
v = request.cookies.get(param.name, MISSING)
try:
value = param.validator.validate(v, True)
except ValidatorError as e:
errors[param.name] = e.error
continue
if value != MISSING:
request[request_key][param.name] = value
if param.name in self.params:
params[param.name] = value
if errors:
raise RequestValidationFailed(reason=json.dumps(errors), errors=errors)
return params
| 2.1875
| 2
|
big_screen/redisOpration/AllOpration.py
|
15653391491/black-broadcast-back-end
| 0
|
12785428
|
from django_redis import get_redis_connection
import json
from .BaseOpration import BaseOpration
from big_screen.utils import tools as t
from big_screen.serialization.allSerialization import serMobile
from big_screen.utils import re_format as f
class defaultOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("default")
class sessionOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("session")
class isworkingOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("isworking")
self.mob = serMobile()
def formatter_info(self, info):
"""
Format conversion: resolve the address for lnglat and return (mobile, info).
:param info:
:return:
"""
lnglat = info.get("lnglat")
address = t.getaddress(lnglat)
if address == 0:
address = {
"status": 1,
"district": f.UNKNOW_DISTRICT,
'formatted_address': "",
"data_from": "",
"adcode": f.UNKNOW_DISTRICT
}
info["address"] = address
return (info.get("mobile"), info)
class massmarkOp(BaseOpration):
def __init__(self):
"""
Redis store no. 3.
"""
BaseOpration.__init__(self)
self.con = get_redis_connection("massmark")
def resetMassMarkData(self, content):
"""
Reset the mass-marker data.
:param content:
:return:
"""
self.flush_db()
for con in content:
self.pushMassMarkData(con)
# ----------------- Data insertion -----------------
def pushMassMarkData(self, info):
"""
Insert one mass-marker record.
:param info:
:return:
"""
k, v = info
self.list_push(k, v)
# --------------- Legacy methods ---------------------
def formatter_data_from_ser(self, info):
"""
Organize data queried through the serializer.
:param info:
:return:
"""
content = dict()
lnglat = info.get("lnglat")
content["time"] = info.get("time")
content["address"] = info.get("address")
content["category"] = info.get("category__name")
content["freq"] = info.get("freq")
return (lnglat, content)
def formmater_data(self, info):
"""
Process the data.
:param info:
:return:
"""
content = dict()
lnglat = info.get("lnglat")
content["time"] = info.get("time")
content["address"] = info.get("address")
content["category"] = info.get("category").name
content["freq"] = info.get("freq")
return (lnglat, content)
def get_for_view(self):
"""
Build the payload for the home-page websocket.
:return:
"""
content = list()
keys = self.get_keys()
if len(keys) == 0:
return content
else:
for key in keys:
info = dict()
info["lnglat"] = key.split(",")
data = self.list_get(key)
data = list(map(lambda info: json.loads(info), data))
info["address"] = list(map(lambda info: info.pop("address"), data))[0]
info["id_count"] = len(data)
info["data"] = data
content.append(info)
return content
class broadcastOp(BaseOpration):
def __init__(self):
"""
Redis store no. 4.
"""
BaseOpration.__init__(self)
self.con = get_redis_connection("broadcast")
self.scrollKey = "scroll_n"
self.heatmapKey = "heatmap_n"
# -------------- Reset ----------------
def resetScrollData(self, content):
"""
Reset the scrolling-table data.
:param content:
:return:
"""
self.del_key(self.scrollKey)
for con in content:
self.pushScrollData(con)
def resetHeatMapData(self, content):
"""
Reset the heatmap data.
:param content:
:return:
"""
self.del_key(self.heatmapKey)
for con in content:
self.pushHeatMapData(con)
# -------------- Data insertion ---------------
def pushScrollData(self, info):
"""
Insert one scrolling-table record.
:param info:
:return:
"""
self.list_push(self.scrollKey, info)
def pushHeatMapData(self, info):
"""
Insert one heatmap record.
:param info:
:return:
"""
self.list_push(self.heatmapKey, info)
# ------------- Legacy methods -----------------
def formatter_scroll_info(self, info):
"""
Format scrolling-table data.
:param info:
:return:
"""
content = list()
content.append(info.get("time"))
content.append(info.get("freq"))
content.append(info.get("category").name)
content.append(info.get("address"))
return content
def formatter_heatmap_info(self, info):
"""
Format heatmap data.
:param info:
:return:
"""
content = dict()
content["time"] = info.get("time")
lnglat = info.get("lnglat").split(",")
content["lng"] = lnglat[0]
content["lat"] = lnglat[1]
content["count"] = 1
return content
def formatter_scroll_info_from_ser(self, info):
"""
Format scrolling-table data, sourced from the serializer.
:param info:
:return:
"""
content = list()
content.append(info.get("time"))
content.append(info.get("freq"))
content.append(info.get("category__name"))
content.append(info.get("address"))
return content
class chartOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("chart")
class whdisOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("whdis")
class MobListOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("mob-list")
self.mob = serMobile()
def get_mob_list(self):
"""
Get the list of valid mobile ids.
:return:
"""
result = self.kv_get("mob-list")
if result == "no this key":
mob_list = self.mob.get_mobile_list()
result = mob_list
self.kv_set("mob-list", mob_list)
return result
def update_mob_list(self):
mob_list = self.mob.get_mobile_list()
self.kv_set("mob-list", mob_list)
class ObjectOp(BaseOpration):
def __init__(self):
BaseOpration.__init__(self)
self.con = get_redis_connection("object")
self.mob = serMobile()
def get_mob_list(self):
"""
Get the list of valid mobile ids.
:return:
"""
result = self.kv_get("mob-list")
if result == "no this key":
mob_list = self.mob.get_mobile_list()
result = mob_list
self.kv_set("mob-list", mob_list)
return result
def update_mob_list(self):
mob_list = self.mob.get_mobile_list()
self.kv_set("mob-list", mob_list)
| 2.109375
| 2
|
mapnik-tiler.py
|
eyeNsky/mapnik-tiler
| 0
|
12785429
|
#!/usr/bin/env python
'''
Based on:
https://trac.openstreetmap.org/browser/applications/rendering/mapnik/generate_tiles_multiprocess.py
With snippet from:
http://www.klokan.cz/projects/gdal2tiles/gdal2tiles.py
The MIT License (MIT)
Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from math import pi, cos, sin, log, exp, atan
from subprocess import call
import sys, os
import multiprocessing
import getopt
from osgeo import gdal
try:
import mapnik2 as mapnik
except ImportError:
import mapnik
USAGE = '''
Usage: mapniktiler.py
--image-in srcfile Input image -Required
--tile-dir /path/to/dir Output directory -Optional
--p value Number of threads (2) -Optional
--z-min value Min Zoom (0) -Optional
--z-max value Max Zoom (Based on GSD) -Optional
--bbox 'xmin ymin xmax ymax' Calculated from extent -Optional
bbox needs quotes!!
The only required argument is --image-in
eg: --image-in src.tif --p 4 --z-min 0 --z-max 6 --bbox '-170 15 -52.0 74.0'
'''
try :
args,byaa = getopt.getopt(sys.argv[1:], '', ['image-in=','tile-dir=','z-min=','z-max=','bbox=','p=',])
#print byaa
#print args
args = dict(args)
imageIn = args.get('--image-in')
tile_dir = args.get('--tile-dir')
zMin = args.get('--z-min')
zMax = args.get('--z-max')
p = args.get('--p')
b_box = args.get('--bbox')
except:
print(USAGE)
#sys.exit()
'''
Start def's
'''
def calcImgExt(img):
dataset = gdal.Open(img)
# get epsg code
try:
epsg = dataset.GetProjectionRef().split(',')[-1].split('"')[1]
except:
epsg = 0
print(dataset.GetDescription(), 'has no projection')
geot = dataset.GetGeoTransform()
# Get image height width and heigth in pixels
rastX = dataset.RasterXSize
rastY = dataset.RasterYSize
# Get pixel sizes
pixelX = geot[1]
pixelY = geot[5]
# Get ULX,ULY
ULX = geot[0] + pixelX/2
ULY = geot[3] + pixelY/2
# Calculate LRX,LRY
LRX = ULX+(pixelX * rastX)
LRY = ULY+(pixelY * rastY)
dataset = None
imgBounds = ULX,ULY,LRX,LRY
#print imgBounds
return imgBounds
def calcImgRes(img):
dataset = gdal.Open(img)
geot = dataset.GetGeoTransform()
# Get pixel size x and assume y is the same
pixelX = geot[1]
# release dataset
dataset = None
return pixelX
def jsonTemplate():
jt = '''{
"description": "",
"bounds": "%s,%s,%s,%s",
"minzoom": "%s",
"version": "1.0.0",
"template": "",
"maxzoom": "%s",
"name": "%s"
}'''
return jt
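# Example of a rendered metadata.json (values are illustrative only):
# {
#     "description": "", "bounds": "-170,15,-52.0,74.0",
#     "minzoom": "0", "version": "1.0.0", "template": "",
#     "maxzoom": "6", "name": "src_tif"
# }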
def xmlTemplate():
mapTemp = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE Map[]>
<Map srs="+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over" background-color="transparent" maximum-extent="-20037508.34,-20037508.34,20037508.34,20037508.34" >
<Style name="bluemarble" filter-mode="first" >
<Rule>
<RasterSymbolizer opacity="1" />
</Rule>
</Style>
<Layer name="bluemarble"
srs="+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs">
<StyleName>bluemarble</StyleName>
<Datasource>
<Parameter name="file"><![CDATA[%s]]></Parameter>
<Parameter name="type"><![CDATA[gdal]]></Parameter>
</Datasource>
</Layer>
</Map>'''
return mapTemp
def testMap():
theTest = '''<!DOCTYPE html>
<html>
<head>
<title>TestMap</title>
<!-- Local
<script type="text/javascript" src="../leaflet/leaflet.js"></script>
<link rel="stylesheet" href="../leaflet/leaflet.css"/>
-->
<link rel="stylesheet" href="http://cdn.leafletjs.com/leaflet-0.7.2/leaflet.css" />
<script src="http://cdn.leafletjs.com/leaflet-0.7.2/leaflet.js"></script>
<style>
body { margin:0; padding:0; }
#map { position:absolute; top:0; bottom:0; width:100%%; }
</style>
</head>
<body>
<div id="map"></div>
<script type='text/javascript'>
var map = L.map("map", {
center: [%s,%s],
zoom: %s,
fadeAnimation: false
});
var mqOAM = new L.tileLayer("http://{s}.mqcdn.com/tiles/1.0.0/map/{z}/{x}/{y}.jpg",{
type: 'base',
tileSize:256,
minZoom: 0,
maxZoom: 18,
attribution:'Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="http://developer.mapquest.com/content/osm/mq_logo.png">',
subdomains: ['otile1','otile2','otile3','otile4']
}).addTo(map);
var tileset = new L.tileLayer("{z}/{x}/{y}.png",{
tileSize:256,
minZoom: %s,
maxZoom: %s
}).addTo(map);
</script>
</body>
</html>'''
return theTest ##expects lat,lon,zoom,minZoom ,maxzoom
## Start klokkan snippet ##
initialResolution = 156543.033928041
def Resolution(zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
return initialResolution / (2**zoom)
def ZoomForPixelSize(pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(30):
if pixelSize > Resolution(i):
return i-1 if i!=0 else 0 # We don't want to scale up
## End klokkan snippet ##
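# Worked example (sanity check, not from the original script): at zoom 0 the
# resolution is ~156543 m/px, so a 10 m/px source yields ZoomForPixelSize(10) == 13,
# because Resolution(13) ~= 19.11 >= 10 > Resolution(14) ~= 9.55.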
'''
end 'o defs
'''
# handle missing zooms
# only works on decimal degree projections....
if zMin:
zMin = int(zMin)
else:
zMin = 0
if zMax:
zMax = int(zMax)
else:
dd = calcImgRes(imageIn)
m = dd*100000
zMax = ZoomForPixelSize(m)
if b_box:
# cast to float up front so the leaflet test-map arithmetic below also works
xMin = float(b_box.split()[0])
yMin = float(b_box.split()[1])
xMax = float(b_box.split()[2])
yMax = float(b_box.split()[3])
else:
xMin,yMax,xMax,yMin = calcImgExt(imageIn)
bbox = (float(xMin),float(yMin), float(xMax), float(yMax))
#bbox = (-180,-90,180,90)
print(bbox)
#zMin = 0
#zMax = 6
### Done gathering cli arguments
imgPath = os.path.abspath(imageIn)
basename = os.path.basename(imageIn).replace('.','_')
## these need a way to get all extensions!!!!!!!!!
fName,theExt = os.path.splitext(imgPath)
xmlPath = imgPath.replace(theExt,'.xml')
if tile_dir is None:
tile_dir = imgPath.replace(theExt,'')
# open mapnik xml
xOut = open(xmlPath,'w')
xmlText = xmlTemplate()%imgPath
xOut.write(xmlText)
xOut.close()
theMapFile = xmlPath
## Begin what was mostly
DEG_TO_RAD = pi/180
RAD_TO_DEG = 180/pi
# Default number of rendering threads to spawn, should be roughly equal to number of CPU cores available
NUM_THREADS = 2
if p:
NUM_THREADS = int(p)
def minmax (a,b,c):
a = max(a,b)
a = min(a,c)
return a
class GoogleProjection:
def __init__(self,levels=18):
self.Bc = []
self.Cc = []
self.zc = []
self.Ac = []
c = 256
for d in range(0,levels):
e = c/2;
self.Bc.append(c/360.0)
self.Cc.append(c/(2 * pi))
self.zc.append((e,e))
self.Ac.append(c)
c *= 2
def fromLLtoPixel(self,ll,zoom):
d = self.zc[zoom]
e = round(d[0] + ll[0] * self.Bc[zoom])
f = minmax(sin(DEG_TO_RAD * ll[1]),-0.9999,0.9999)
g = round(d[1] + 0.5*log((1+f)/(1-f))*-self.Cc[zoom])
return (e,g)
def fromPixelToLL(self,px,zoom):
e = self.zc[zoom]
f = (px[0] - e[0])/self.Bc[zoom]
g = (px[1] - e[1])/-self.Cc[zoom]
h = RAD_TO_DEG * ( 2 * atan(exp(g)) - 0.5 * pi)
return (f,h)
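# Quick sanity check (illustrative): at zoom 0 the world is a single 256px tile,
# so fromLLtoPixel((0, 0), 0) == (128.0, 128.0) and fromPixelToLL((128, 128), 0)
# maps back to (0.0, 0.0).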
class RenderThread:
def __init__(self, tile_dir, mapfile, q, printLock, maxZoom):
self.tile_dir = tile_dir
self.q = q
self.mapfile = mapfile
self.maxZoom = maxZoom
self.printLock = printLock
def render_tile(self, tile_uri, x, y, z):
# Calculate pixel positions of bottom-left & top-right
p0 = (x * 256, (y + 1) * 256)
p1 = ((x + 1) * 256, y * 256)
# Convert to LatLong (EPSG:4326)
l0 = self.tileproj.fromPixelToLL(p0, z);
l1 = self.tileproj.fromPixelToLL(p1, z);
# Convert to map projection (e.g. mercator co-ords EPSG:900913)
c0 = self.prj.forward(mapnik.Coord(l0[0],l0[1]))
c1 = self.prj.forward(mapnik.Coord(l1[0],l1[1]))
# Bounding box for the tile
if hasattr(mapnik,'mapnik_version') and mapnik.mapnik_version() >= 800:
bbox = mapnik.Box2d(c0.x,c0.y, c1.x,c1.y)
else:
bbox = mapnik.Envelope(c0.x,c0.y, c1.x,c1.y)
render_size = 256
self.m.resize(render_size, render_size)
self.m.zoom_to_box(bbox)
if(self.m.buffer_size < 128):
self.m.buffer_size = 128
# Render image with default Agg renderer
im = mapnik.Image(render_size, render_size)
mapnik.render(self.m, im)
im.save(tile_uri, 'png256')
def loop(self):
self.m = mapnik.Map(256, 256)
# Load style XML
mapnik.load_map(self.m, self.mapfile, True)
# Obtain <Map> projection
self.prj = mapnik.Projection(self.m.srs)
# Projects between tile pixel co-ordinates and LatLong (EPSG:4326)
self.tileproj = GoogleProjection(self.maxZoom+1)
while True:
#Fetch a tile from the queue and render it
r = self.q.get()
if (r == None):
self.q.task_done()
break
else:
(name, tile_uri, x, y, z) = r
exists= ""
if os.path.isfile(tile_uri):
exists= "exists"
else:
self.render_tile(tile_uri, x, y, z)
size = os.stat(tile_uri).st_size
empty = ''
if size == 103:
empty = " Empty Tile "
self.printLock.acquire()
#print name, ":", z, x, y, exists, empty
self.printLock.release()
self.q.task_done()
def render_tiles(bbox, mapfile, tile_dir, minZoom=1,maxZoom=18, name="unknown", num_threads=NUM_THREADS):
print "render_tiles(",bbox, mapfile, tile_dir, minZoom,maxZoom, name,")"
# Launch rendering threads
queue = multiprocessing.JoinableQueue(32)
printLock = multiprocessing.Lock()
renderers = {}
for i in range(num_threads):
renderer = RenderThread(tile_dir, mapfile, queue, printLock, maxZoom)
render_thread = multiprocessing.Process(target=renderer.loop)
render_thread.start()
#print "Started render thread %s" % render_thread.getName()
renderers[i] = render_thread
if not os.path.isdir(tile_dir):
os.mkdir(tile_dir)
gprj = GoogleProjection(maxZoom+1)
ll0 = (bbox[0],bbox[3])
ll1 = (bbox[2],bbox[1])
for z in range(minZoom,maxZoom + 1):
px0 = gprj.fromLLtoPixel(ll0,z)
px1 = gprj.fromLLtoPixel(ll1,z)
# check if we have directories in place
zoom = "%s" % z
if not os.path.isdir(tile_dir + zoom):
os.mkdir(tile_dir + zoom)
for x in range(int(px0[0]/256.0),int(px1[0]/256.0)+1):
# Validate x co-ordinate
if (x < 0) or (x >= 2**z):
continue
# check if we have directories in place
str_x = "%s" % x
if not os.path.isdir(tile_dir + zoom + '/' + str_x):
os.mkdir(tile_dir + zoom + '/' + str_x)
for y in range(int(px0[1]/256.0),int(px1[1]/256.0)+1):
# Validate x co-ordinate
if (y < 0) or (y >= 2**z):
continue
str_y = "%s" % y
tile_uri = tile_dir + zoom + '/' + str_x + '/' + str_y + '.png'
# Submit tile to be rendered into the queue
t = (name, tile_uri, x, y, z)
queue.put(t)
# Signal render threads to exit by sending empty request to queue
for i in range(num_threads):
queue.put(None)
# wait for pending rendering jobs to complete
queue.join()
for i in range(num_threads):
renderers[i].join()
if __name__ == "__main__":
home = os.environ['HOME']
try:
mapfile = os.environ['MAPNIK_MAP_FILE']
except KeyError:
mapfile = theMapFile
try:
tile_dir = os.environ['MAPNIK_TILE_DIR']
except KeyError:
tile_dir = tile_dir
if not tile_dir.endswith('/'):
tile_dir = tile_dir + '/'
#-------------------------------------------------------------------------
#
# Change the following for different bounding boxes and zoom levels
#
# Start with an overview
# World
# Europe+
#bbox = (-170, 15, -52.0, 74.0)
render_tiles(bbox, mapfile, tile_dir, zMin, zMax , basename)
metaTxt = jsonTemplate()%(xMin,yMin,xMax, yMax,zMin,zMax,basename)
metaPath = tile_dir+'/metadata.json'
m = open(metaPath,'w')
m.write(metaTxt)
m.close()
testPath = tile_dir+'/leaflet.html'
testTxt = testMap() % (((yMin+yMax)/2), ((xMin+xMax)/2), int((zMin+zMax)/2), zMin, zMax)
t = open(testPath,'w')
t.write(testTxt)
t.close()
| 1.789063
| 2
|
sciencebeam_gym/preprocess/blockify_annotations.py
|
elifesciences/sciencebeam-gym
| 25
|
12785430
|
import logging
from collections import deque
from abc import ABC, abstractmethod
import math
from lxml import etree
from pyqtree import Index as PqtreeIndex
from PIL import Image, ImageDraw, ImageColor
from sciencebeam_gym.structured_document.svg import (
SVG_NSMAP,
SVG_DOC,
SVG_RECT,
)
DEFAULT_NEARBY_TOLERANCE = 5
def get_logger():
return logging.getLogger(__name__)
class AnnotationBlock(object):
def __init__(self, tag, bounding_box):
self.tag = tag
self.bounding_box = bounding_box
def merge_with(self, other):
return AnnotationBlock(
self.tag,
self.bounding_box.include(other.bounding_box)
)
def __str__(self):
return 'AnnotationBlock({}, {})'.format(self.tag, self.bounding_box)
def __repr__(self):
return str(self)
class BlockPoint(object):
def __init__(self, block, x, y):
self.block = block
self.point = (x, y)
def __str__(self):
return 'BlockPoint({}, {})'.format(self.block, self.point)
def __repr__(self):
return str(self)
def __len__(self):
return len(self.point)
def __getitem__(self, index):
return self.point[index]
def _to_bbox(bb):
return (bb.x, bb.y, bb.x + bb.width - 1, bb.y + bb.height - 1)
class DeletableWrapper(object):
def __init__(self, data):
self.data = data
self.deleted = False
def __hash__(self):
return hash(self.data)
def __eq__(self, other):
return self.data == other.data
class BlockSearch(object):
def __init__(self, blocks):
bboxs = [block.bounding_box for block in blocks]
xmax = max([bb.x + bb.width for bb in bboxs])
ymax = max([bb.y + bb.height for bb in bboxs])
self.spindex = PqtreeIndex(bbox=(0, 0, xmax, ymax))
self.wrapper_map = {}
for block in blocks:
wrapper = DeletableWrapper(block)
self.wrapper_map[block] = wrapper
self.spindex.insert(wrapper, _to_bbox(block.bounding_box))
def find_intersection_with(self, search_bounding_box):
return [
wrapper.data
for wrapper in self.spindex.intersect(_to_bbox(search_bounding_box))
if not wrapper.deleted
]
def remove(self, block):
wrapper = self.wrapper_map.get(block)
if wrapper is not None:
wrapper.deleted = True
def merge_blocks(blocks, nearby_tolerance=0):
if len(blocks) <= 1:
return blocks
merged_blocks = deque()
logger = get_logger()
logger.debug('nearby_tolerance: %s', nearby_tolerance)
logger.debug('blocks: %s', blocks)
logger.debug('bboxs: %s', [_to_bbox(block.bounding_box) for block in blocks])
tags = sorted({b.tag for b in blocks})
logger.debug('tags: %s', tags)
remaining_blocks = deque(blocks)
search_by_tag = {
tag: BlockSearch([b for b in remaining_blocks if b.tag == tag])
for tag in tags
}
while len(remaining_blocks) >= 2:
merged_block = remaining_blocks.popleft()
search = search_by_tag[merged_block.tag]
search.remove(merged_block)
search_bounding_box = merged_block.bounding_box.with_margin(1 + nearby_tolerance, 0)
logger.debug('search_bounding_box: %s (%s)',
search_bounding_box, _to_bbox(search_bounding_box))
neighbours = search.find_intersection_with(search_bounding_box)
logger.debug('neighbours: %s', neighbours)
neighbours_blocks_count = 0
for neighbour in neighbours:
if neighbour.tag == merged_block.tag:
merged_block = merged_block.merge_with(neighbour)
search.remove(neighbour)
remaining_blocks.remove(neighbour)
neighbours_blocks_count += 1
if neighbours_blocks_count == 0 or len(remaining_blocks) == 0:
logger.debug(
'no or all remaining blocks merged, mark block as merged: %d',
neighbours_blocks_count
)
merged_blocks.append(merged_block)
else:
logger.debug(
'some but not all remaining blocks merged, continue search: %d',
neighbours_blocks_count
)
remaining_blocks.appendleft(merged_block)
result = list(merged_blocks) + list(remaining_blocks)
return result
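# Illustrative behaviour (coordinates made up): two blocks tagged 'title' whose
# bounding boxes touch within the nearby tolerance collapse into one block whose
# box is the union of the two; blocks with different tags are never merged.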
def expand_bounding_box(bb):
return bb.with_margin(4, 2)
def expand_block(block):
return AnnotationBlock(block.tag, expand_bounding_box(block.bounding_box))
def expand_blocks(blocks):
return [expand_block(block) for block in blocks]
def annotation_document_page_to_annotation_blocks(structured_document, page):
tags_and_tokens = (
(structured_document.get_tag_value(token), token)
for line in structured_document.get_lines_of_page(page)
for token in structured_document.get_tokens_of_line(line)
)
tags_and_bounding_boxes = (
(tag, structured_document.get_bounding_box(token))
for tag, token in tags_and_tokens
if tag
)
return [
AnnotationBlock(tag, bounding_box)
for tag, bounding_box in tags_and_bounding_boxes
if bounding_box
]
def annotation_document_page_to_merged_blocks(structured_document, page, **kwargs):
return merge_blocks(
annotation_document_page_to_annotation_blocks(structured_document, page),
**kwargs
)
def extend_color_map_for_tags(color_map, tags):
updated_color_map = dict(color_map)
for tag in tags:
if tag not in updated_color_map:
updated_color_map[tag] = (
max(updated_color_map.values()) + 1 if len(updated_color_map) > 0 else 1
)
return updated_color_map
def extend_color_map_for_blocks(color_map, blocks):
return extend_color_map_for_tags(
color_map,
sorted({b.tag for b in blocks})
)
class AbstractSurface(ABC):
@abstractmethod
def rect(self, bounding_box, color, tag=None):
pass
class SvgSurface(AbstractSurface):
def __init__(self, width, height, background):
if not (width and height):
raise AttributeError('width and height are required')
self.svg_root = etree.Element(SVG_DOC, nsmap=SVG_NSMAP, attrib={
'width': str(width),
'height': str(height)
})
if background:
self.svg_root.append(etree.Element(SVG_RECT, attrib={
'width': '100%',
'height': '100%',
'fill': background,
'class': 'background'
}))
def rect(self, bounding_box, color, tag=None):
attrib = {
'class': str(tag),
'shape-rendering': 'crispEdges',
'x': str(bounding_box.x),
'y': str(bounding_box.y),
'width': str(bounding_box.width),
'height': str(bounding_box.height)
}
if color:
attrib['fill'] = str(color)
rect = etree.Element(SVG_RECT, attrib=attrib)
self.svg_root.append(rect)
return rect
def color_to_tuple(color):
if isinstance(color, tuple):
return color
return ImageColor.getrgb(color)
class ImageSurface(AbstractSurface):
def __init__(self, width, height, background):
if not (width and height):
raise AttributeError('width and height are required')
width = int(math.ceil(width))
height = int(math.ceil(height))
if background:
self.image = Image.new('RGB', (width, height), color_to_tuple(background))
else:
self.image = Image.new('RGBA', (width, height), (255, 255, 255, 0))
self._draw = ImageDraw.Draw(self.image)
def rect(self, bounding_box, color, tag=None):
if color is None:
return
self._draw.rectangle(
(
(bounding_box.x, bounding_box.y),
(bounding_box.x + bounding_box.width, bounding_box.y + bounding_box.height)
),
fill=color_to_tuple(color)
)
def annotated_blocks_to_surface(blocks, surface, color_map):
for block in blocks:
color = color_map.get(block.tag)
surface.rect(block.bounding_box, color, block.tag)
def annotated_blocks_to_svg(blocks, color_map, width=None, height=None, background=None):
surface = SvgSurface(width, height, background)
annotated_blocks_to_surface(blocks, surface, color_map)
return surface.svg_root
def annotated_blocks_to_image(
blocks, color_map, width=None, height=None, background=None,
scale_to_size=None):
surface = ImageSurface(width, height, background)
annotated_blocks_to_surface(blocks, surface, color_map)
image = surface.image
if scale_to_size:
image = image.resize(scale_to_size, Image.NEAREST)
return image
| 2.515625
| 3
|
closure/2.py
|
janusnic/py-21v
| 0
|
12785431
|
# -*- coding:utf-8 -*-
def attribution(name):
return lambda x: x + ' -- ' + name
pp = attribution('John')
print pp('Dinner is in the fridge')
| 3.296875
| 3
|
time_calculator.py
|
trsilva32/TimeCalculator
| 0
|
12785432
|
import datetime
def add_time(start, duration, weekday_name = ' '):
start = datetime.datetime.strptime(start,'%I:%M %p')
h_duration,m_duration = duration.split(':')
t_duration = int(h_duration)*60 + int(m_duration)
calc_time = start + datetime.timedelta(minutes=int(t_duration))
day = calc_time.day
time = calc_time.strftime('%I:%M %p')
if day == 1:
day_text = ''
elif day == 2:
day_text = '(next day)'
else:
day_text = '('+ str(day-1) + ' days later)'
list_weekdays = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
if weekday_name != ' ' and day >= 2:
weekday_name = weekday_name.lower().capitalize()
weekday_name_index = list_weekdays.index(weekday_name)
i=1
indexw = weekday_name_index
while i != day:
if indexw == 6:
indexw = 0
i+=1
else:
indexw = indexw + 1
i+=1
new_weekday_name = list_weekdays[indexw]
new_time = str(time + ', ' + new_weekday_name + ' ' + day_text)
elif weekday_name != ' ' and day == 1:
new_time = str(time + ', '+ weekday_name.lower().capitalize())
else:
new_time = str(time + ' ' + day_text)
if new_time[0] == '0':
new_time = new_time[1:]
return new_time.strip()
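# Usage sketch (outputs verified by hand against the logic above; not part of
# the original file):
#   add_time("3:00 PM", "3:10") -> "6:10 PM"
#   add_time("11:43 PM", "24:20", "tueSday") -> "12:03 AM, Thursday (2 days later)"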
| 3.4375
| 3
|
home/migrations/0001_initial.py
|
MarkJaroski/aho-dev-dct
| 0
|
12785433
|
<reponame>MarkJaroski/aho-dev-dct
# Generated by Django 2.1.2 on 2020-07-23 04:37
from django.db import migrations, models
import django.db.models.deletion
import parler.fields
import parler.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='StgCategoryoption',
fields=[
('categoryoption_id', models.AutoField(primary_key=True, serialize=False)),
('date_created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Date Created')),
('date_lastupdated', models.DateTimeField(auto_now=True, null=True, verbose_name='Date Modified')),
],
options={
'verbose_name': 'Disaggregation Option',
'verbose_name_plural': 'Disaggregation Options',
'db_table': 'stg_categoryoption',
'managed': True,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='StgCategoryoptionTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('name', models.CharField(max_length=230, verbose_name='Modality Name')),
('shortname', models.CharField(blank=True, max_length=50, null=True, verbose_name='Short Name')),
('code', models.CharField(blank=True, max_length=230, unique=True)),
('description', models.TextField(blank=True, null=True)),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='home.StgCategoryoption')),
],
options={
'verbose_name': 'Disaggregation Option Translation',
'db_table': 'stg_categoryoption_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
migrations.CreateModel(
name='StgCategoryParent',
fields=[
('category_id', models.AutoField(primary_key=True, serialize=False, verbose_name='Category Name')),
('date_created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Date Created')),
('date_lastupdated', models.DateTimeField(auto_now=True, null=True, verbose_name='Date Modified')),
],
options={
'verbose_name': 'Disaggregation Category',
'verbose_name_plural': 'Disaggregation Categories',
'db_table': 'stg_category_parent',
'managed': True,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='StgCategoryParentTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('name', models.CharField(max_length=230, verbose_name='Category')),
('shortname', models.CharField(blank=True, max_length=50, null=True, verbose_name='Short Name')),
('code', models.CharField(blank=True, max_length=50, null=True, unique=True)),
('description', models.TextField(blank=True, null=True)),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='home.StgCategoryParent')),
],
options={
'verbose_name': 'Disaggregation Category Translation',
'db_table': 'stg_category_parent_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
migrations.CreateModel(
name='StgDatasource',
fields=[
('datasource_id', models.AutoField(primary_key=True, serialize=False)),
('date_created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Date Created')),
('date_lastupdated', models.DateTimeField(auto_now=True, null=True, verbose_name='Date Modified')),
],
options={
'verbose_name': 'Data Source',
'verbose_name_plural': 'Data Sources',
'db_table': 'stg_datasource',
'managed': True,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='StgDatasourceTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('name', models.CharField(max_length=230, verbose_name='Data Source')),
('shortname', models.CharField(blank=True, max_length=50, null=True, verbose_name='Short Name')),
('code', models.CharField(blank=True, max_length=50, null=True, unique=True)),
('description', models.TextField(default='No specific definition')),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='home.StgDatasource')),
],
options={
'verbose_name': 'Data Source Translation',
'db_table': 'stg_datasource_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
migrations.CreateModel(
name='StgMeasuremethod',
fields=[
('measuremethod_id', models.AutoField(primary_key=True, serialize=False)),
('date_created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Date Created')),
('date_lastupdated', models.DateTimeField(auto_now=True, null=True, verbose_name='Date Modified')),
],
options={
'verbose_name': 'Measure Type',
'verbose_name_plural': 'Measure Types',
'db_table': 'stg_measuremethod',
'managed': True,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='StgMeasuremethodTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('name', models.CharField(max_length=230, verbose_name='Measure Name')),
('measure_value', models.DecimalField(blank=True, decimal_places=0, max_digits=50, null=True, verbose_name='Indicator Type')),
('code', models.CharField(blank=True, max_length=50, unique=True)),
('description', models.TextField(blank=True, max_length=200, null=True)),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='home.StgMeasuremethod')),
],
options={
'verbose_name': 'Measure Type Translation',
'db_table': 'stg_measuremethod_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
migrations.CreateModel(
name='StgValueDatatype',
fields=[
('valuetype_id', models.AutoField(primary_key=True, serialize=False)),
('date_created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Date Created')),
('date_lastupdated', models.DateTimeField(auto_now=True, null=True, verbose_name='Date Modified')),
],
options={
'verbose_name': ' Value Type',
'verbose_name_plural': 'Value Types',
'db_table': 'stg_value_datatype',
'managed': True,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='StgValueDatatypeTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('name', models.CharField(max_length=50, verbose_name='Data Value Type')),
('code', models.CharField(max_length=50, unique=True)),
('shortname', models.CharField(blank=True, max_length=50, null=True, verbose_name='Short Name')),
('description', models.TextField(blank=True, null=True)),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='home.StgValueDatatype')),
],
options={
'verbose_name': ' Value Type Translation',
'db_table': 'stg_value_datatype_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
migrations.AddField(
model_name='stgcategoryoption',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='home.StgCategoryParent', verbose_name='Disaggregation Category'),
),
migrations.AlterUniqueTogether(
name='stgvaluedatatypetranslation',
unique_together={('language_code', 'master')},
),
migrations.AlterUniqueTogether(
name='stgmeasuremethodtranslation',
unique_together={('language_code', 'master')},
),
migrations.AlterUniqueTogether(
name='stgdatasourcetranslation',
unique_together={('language_code', 'master')},
),
migrations.AlterUniqueTogether(
name='stgcategoryparenttranslation',
unique_together={('language_code', 'master')},
),
migrations.AlterUniqueTogether(
name='stgcategoryoptiontranslation',
unique_together={('language_code', 'master')},
),
]
| 1.632813
| 2
|
certbot-dns-dnspod/certbot_dns_dnspod/dns_dnspod_test.py
|
realkcn/certbot
| 0
|
12785434
|
<reponame>realkcn/certbot
"""Tests for certbot_dns_dnspod.dns_dnspod."""
import os
import unittest
import mock
from certbot import errors
from certbot.plugins import dns_test_common
from certbot.plugins.dns_test_common import DOMAIN
from certbot.tests import util as test_util
ID = 1201
TOKEN = 'a-token'
class AuthenticatorTest(test_util.TempDirTestCase, dns_test_common.BaseAuthenticatorTest):
def setUp(self):
from certbot_dns_dnspod.dns_dnspod import Authenticator
super(AuthenticatorTest, self).setUp()
path = os.path.join(self.tempdir, 'file.ini')
dns_test_common.write({"dnspod_token": TOKEN, "dnspod_id": ID}, path)
self.config = mock.MagicMock(dnspod_credentials=path,
dnspod_propagation_seconds=0) # don't wait during tests
self.auth = Authenticator(self.config, "dnspod")
self.mock_client = mock.MagicMock()
# _get_dnspod_client | pylint: disable=protected-access
self.auth._get_dnspod_client = mock.MagicMock(return_value=self.mock_client)
def test_perform(self):
self.mock_client.domain_list.return_value = {DOMAIN: mock.ANY}
self.auth.perform([self.achall])
expected = [mock.call.domain_list(),
mock.call.ensure_record(DOMAIN, '_acme-challenge.'+DOMAIN, 'TXT', mock.ANY)]
self.assertEqual(expected, self.mock_client.mock_calls)
def test_perform_fail_to_find_domain(self):
self.mock_client.domain_list.return_value = {}
self.assertRaises(errors.PluginError,
self.auth.perform,
[self.achall])
def test_cleanup(self):
# _attempt_cleanup | pylint: disable=protected-access
self.auth._attempt_cleanup = True
self.auth._find_domain = mock.MagicMock(return_value=DOMAIN)
self.auth.cleanup([self.achall])
expected = [mock.call.remove_record_by_sub_domain(DOMAIN, '_acme-challenge.'+DOMAIN, 'TXT')]
self.assertEqual(expected, self.mock_client.mock_calls)
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 2.234375
| 2
|
pdm/retriever.py
|
trollfist20/python-download-manager
| 0
|
12785435
|
import time
from pdm.constants import BUFFER
from urllib.request import (
build_opener,
urlopen,
Request
)
from http.client import HTTPResponse, HTTPMessage
from threading import Thread, Event
from pdm.utils import get_filename
from concurrent.futures import Future
from pdm.hooker import ProgressDownloadHooker
class _Retriever:
def __init__(
self,
http_response: HTTPResponse,
file: str,
part: int,
single_mode=False
):
self.http = http_response
if single_mode:
self.file = file
else:
self.file = file + '.part' + str(part)
self.speed_download = 0
def get_file(self):
return open(self.file, 'wb')
def download(self):
file = self.get_file()
chunk_size = int(BUFFER)  # adaptive: doubled/halved below based on read latency
while True:
# adapted from https://github.com/choldgraf/download/blob/master/download/download.py#L380
# with some modifications
t0 = time.time()
chunk = self.http.read(chunk_size)
dt = time.time() - t0
if dt < 0.005:
chunk_size *= 2
elif dt > 0.1 and chunk_size > BUFFER:
chunk_size = chunk_size // 2
if not chunk:
break
file.write(chunk)
self.speed_download = len(chunk) * 8
# 'finished' is a sentinel value; the progress hooker presumably checks for it
self.speed_download = 'finished'
file.close()
return self.file
class Retriever1Connections:
def __init__(self, url: str, info_length: int, filename: str=None):
self.opener = build_opener()
self.filename = filename
self.url = url
self.length = info_length
def download(self):
res = self.opener.open(self.url)
filename = self.filename or get_filename(res)
r = _Retriever(res, filename, 0, True)
r.download()
return filename
class Retriever2Connections:
def __init__(self, url: str, length: int, filename: str=None):
self.opener = build_opener()
self.filename = filename
self.url = url
self.length = self.get_length(length)
def get_length(self, length: int):
divided = length / 2
if not divided.is_integer():
final = [0, divided - 0.5, divided + 0.5, length]
elif divided.is_integer():
final = [0, divided - 1, divided, length]
return final
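# Worked example (illustrative): get_length(10) -> [0, 4.0, 5.0, 10], i.e. the
# two inclusive byte ranges 0-4 and 5-10; get_length(9) -> [0, 4.0, 5.0, 9].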
def _download(self, part: int, start_from: int, end_from: int, future: Future):
req = Request(self.url)
req.headers['Range'] = 'bytes=%s-%s' % (int(start_from), int(end_from))
res = self.opener.open(req)
filename = self.filename or get_filename(res)
r = _Retriever(res, filename, part)
future.set_result(r.download())
def download(self):
fut1 = Future()
thread = Thread(target=self._download, name='worker_pdm_0', daemon=True, args=(
0,
self.length[0],
self.length[1],
fut1
))
thread.start()
fut2 = Future()
thread = Thread(target=self._download, name='worker_pdm_1', daemon=True, args=(
1,
self.length[2],
self.length[3],
fut2
))
thread.start()
return [
fut1.result(),
fut2.result()
]
class Retriever3Connections:
def __init__(self, url: str, length: int, filename: str=None):
self.opener = build_opener()
self.filename = filename
self.url = url
self.length = self.get_length(length)
def get_length(self, length: int):
final = [0, int(length / 3), int(length / 3 + length / 3), length]
return final
def _download(
self,
part: int,
start_from: int,
end_from: int,
future: Future,
progress_bar: ProgressDownloadHooker
):
req = Request(self.url)
req.headers['Range'] = 'bytes=%s-%s' % (int(start_from), int(end_from))
res = self.opener.open(req)
filename = self.filename or get_filename(res)
r = _Retriever(res, filename, part)
progress_bar.add_worker(r)
future.set_result(r.download())
def download(self):
fut1 = Future()
print('Download Using 3 Connections')
progress_bar = ProgressDownloadHooker()
thread = Thread(target=self._download, name='worker_pdm_0', daemon=True, args=(
0,
self.length[0],
self.length[1],
fut1,
progress_bar
))
thread.start()
fut2 = Future()
thread = Thread(target=self._download, name='worker_pdm_1', daemon=True, args=(
1,
self.length[1] + 1,  # +1: HTTP ranges are inclusive, avoid re-fetching the boundary byte
self.length[2],
fut2,
progress_bar
))
thread.start()
fut3 = Future()
thread = Thread(target=self._download, name='worker_pdm_2', daemon=True, args=(
2,
self.length[2] + 1,  # +1: see the inclusive-range note above
self.length[3],
fut3,
progress_bar
))
thread.start()
progress_bar.start()
result = [
fut1.result(),
fut2.result(),
fut3.result()
]
progress_bar.stop()
return result
class Retriever:
def __init__(
self,
url: str,
filename: str,
timeout: int=None,
connections: int=2
):
# Testing Connection to URL given
tester = urlopen(url, timeout=timeout)
tester.close()
self.filename = filename
self.url = url
self._connections = connections
def _download_single_conn(self):
# Retriever1Connections expects (url, info_length, filename); pass the length
# placeholder explicitly so filename is not consumed as info_length
r = Retriever1Connections(self.url, None, self.filename)
return r.download()
def _download_multi_conn(self, info_length):
if self._connections < 1 or self._connections > 3:
raise ValueError('invalid connections value, maximum connections allowed is 3')
else:
if self._connections == 2:
r = Retriever2Connections(self.url, info_length, self.filename)
return r.download()
elif self._connections == 3:
r = Retriever3Connections(self.url, info_length, self.filename)
return r.download()
def get_info_length(self):
return urlopen(self.url).length
def retrieve(self):
info_length = self.get_info_length()
# some hosts (e.g. google-drive style links) do not report a Content-Length;
# multi-connection downloads require knowing the length of the file
if info_length is None:
# if pdm can't retrieve Content-Length info
# force download to single connection
return self._download_single_conn()
else:
if self._connections == 1:
return self._download_single_conn()
else:
return self._download_multi_conn(info_length)
# def _retrieve(self, part, filename, start_from, end_from, event, single_mode=False):
# r = Request(self.url)
# if not single_mode:
# r.headers['Range'] = 'bytes=%s-%s' % (int(start_from), int(end_from))
# print(r.headers)
# http_response = self.opener.open(r)
# print(http_response.headers['Content-Disposition'])
# print(http_response.length, part)
# if single_mode:
# _ = _Retriever(self.url, http_response, filename, part, True)
# _.download()
# event.set()
# else:
# _ = _Retriever(
# self.url,
# http_response,
# filename,
# part
# )
# _.download()
# event.set()
# def get_length(self, length: int):
# divided = length / 2
# if not divided.is_integer():
# final = [0, divided - 0.5, divided + 0.5, length]
# elif divided.is_integer():
# final = [0, divided - 1, divided, length]
# return final
# def retrieve(self):
# info_length = self.get_info_length()
# # for doesn't support get length file like google-drive
# # multi connection require to see length of the file
# if info_length is None:
# return self._download_single_conn()
# else:
# return self._download_multi_conn(info_length)
# def _download_single_conn(self):
# e = Event()
# self._retrieve(None, self.filename, None, None, e, True)
# return [self.filename]
# def _download_multi_conn(self, info_length):
# i = 0
# length = self.get_length(info_length)
# wait_event1 = Event()
# thread = Thread(target=self._retrieve, name='worker_pdm_' + str(i), daemon=True, args=(
# i,
# self.filename,
# length[0],
# length[1],
# wait_event1
# ))
# thread.start()
# i += 1
# wait_event2= Event()
# thread = Thread(target=self._retrieve, name='worker_pdm_' + str(i), daemon=True, args=(
# i,
# self.filename,
# length[2],
# length[3],
# wait_event2
# ))
# thread.start()
# wait_event1.wait()
# wait_event2.wait()
# return [
# self.filename + '.part0',
# self.filename + '.part1'
# ]
| 2.6875
| 3
|
analyzer.py
|
Chibbluffy/TwitterKeywordPerceptionAnalysis
| 0
|
12785436
|
<filename>analyzer.py
import sys
import pandas as pd
import json
df_list = []
with open(sys.argv[1], 'r') as f:
for line in f:
loaded = json.loads(line)
label = loaded["label"]
text = loaded["text"]
df_list.append({'text': text, 'label': label})
df = pd.DataFrame(df_list)
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df=0, lowercase=False)
vectorizer.fit([x['text'] for x in df_list])
from sklearn.model_selection import train_test_split
sentences = [x['text'] for x in df_list]
labels = [y['label'] for y in df_list]
sentences_train, sentences_test, \
label_train, label_test = train_test_split(sentences, labels,
test_size=.10, random_state=1000)
vectorizer = CountVectorizer()
vectorizer.fit(sentences_train)
X_train = vectorizer.transform(sentences_train)
X_test = vectorizer.transform(sentences_test)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(solver='lbfgs', multi_class='auto')
classifier.fit(X_train, label_train)
score = classifier.score(X_test, label_test)
# print("Accuracy: ", score)
from sklearn.metrics import precision_score, classification_report, confusion_matrix, recall_score, f1_score
from sklearn.svm import SVC
svclassifier = SVC(kernel='linear', C=1000, gamma='auto')
svclassifier.fit(X_train, label_train)
label_pred = svclassifier.predict(X_test)
# print("True positives: %s" % confusion_matrix(label_test, label_pred)[0,0])
# print("False positives: %s" % confusion_matrix(label_test, label_pred)[0,1])
# print("True negatives: %s" % confusion_matrix(label_test, label_pred)[1,0])
# print("False negatives: %s" % confusion_matrix(label_test, label_pred)[1,1])
print()
print("Precision: %s" % precision_score(label_pred, label_test, average='macro'))
print("Recall: %s" % recall_score(label_pred, label_test, average='macro'))
print("F1_score: %s" % f1_score(label_pred, label_test, average='macro'))
print()
| 2.859375
| 3
|
clipper-cli/src/test/resources/lubm-ex-20/execQuery.py
|
ghxiao/clipper
| 5
|
12785437
|
import os
univs = 100
ndeparts = 15
q = 2
shcmd = r'''
dlv -filter=ans
'''
#shcmd += "q" + q + ".dlv "
#for univ in range(univs):
shcmd += '''
q2.dlv LUBM-ex-20.owl.dl University0_0.owl.dl University0_1.owl.dl University0_2.owl.dl \
University0_3.owl.dl University0_4.owl.dl University0_5.owl.dl University0_6.owl.dl \
University0_7.owl.dl University0_8.owl.dl University0_9.owl.dl University0_10.owl.dl \
University0_7.owl.dl University0_8.owl.dl University0_9.owl.dl University0_10.owl.dl \
University0_11.owl.dl University0_12.owl.dl University0_13.owl.dl University0_14.owl.dl \
University0_7.owl.dl University0_8.owl.dl University0_9.owl.dl University0_10.owl.dl \
'''
print(shcmd)
#os.system(shcmd)
| 1.570313
| 2
|
menu_definitions.py
|
mheydasch/graph_app
| 0
|
12785438
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 11 12:06:05 2019
@author: max
Holds the menu definition, such as buttons and dropdown menus
"""
import dash_core_components as dcc
import dash_html_components as html
import dash_table
#%% data
def Upload_data():
return dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop or ',
html.A('Select Files')
]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '1px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
'margin': '10px'
},
# Allow multiple files to be uploaded
multiple=False
)
#The following functions define several menus and are called in the initial
#layout when the app is launched
#radio item layout
def RadioItems():
return dcc.RadioItems(
options=[
{'label': 'lineplot', 'value': 'lineplot'},
{'label':'migration_distance', 'value': 'migration_distance'},
{'label':'time_series', 'value':'time_series'},
{'label':'corel plot', 'value': 'corel_plot'},
{'label':'flag_count', 'value':'flag_count'},
{'label':'boxplot', 'value':'boxplot'}],
value='lineplot',
id='graph_selector')
def graph_reuse():
return dcc.RadioItems(
options=[
{'label':'yes', 'value' : 'yes'},
{'label':'no', 'value' : 'no'}],
value='yes',
id='graph_reuse')
#table layout
def generate_table(df):
'''
called when data is uploaded
'''
return dash_table.DataTable(
data=df.to_dict('records'),
columns=[{'name': i, 'id': i} for i in df.columns],
fixed_rows={'headers':True, 'data':0},
style_cell={'width' :'150px'}
)
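# These factories are meant to be composed into the Dash layout, e.g.
# (illustrative; assumes a dash.Dash instance named `app`):
# app.layout = html.Div([Upload_data(), RadioItems(), plot_button()])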
def plot_button():
return html.Button(id='plot_button', n_clicks=0, children='Display plots')
def plot_save_button():
return html.Button(id='plot_save_button', n_clicks=0, children='Download plot as svg')
#dropdown layout
def classifier_choice(df):
'''
dropdown menu to select which column should be used as the classifier
'''
columns=df.columns
classifieroptions= [{'label' :k, 'value' :k} for k in columns]
return dcc.Dropdown(
#label='Classifier Column',
id='classifier_choice',
options=classifieroptions,
placeholder='select the classifier column',
value='Classifier')
def identifier_selector(df):
'''
dropdown menu to select the column which identifies individual cells
'''
columns=df.columns
identifieroptions= [{'label' :k, 'value' :k} for k in columns]
return dcc.Dropdown(
id='identifier_selector',
options=identifieroptions,
placeholder='select the identifier column',
value='unique_id')
def unique_time_selector(df):
'''
dropdown menu to select which column contains the information about timepoints
'''
columns=df.columns
timepointoptions= [{'label' :k, 'value' :k} for k in columns]
return dcc.Dropdown(
id='unique_time_selector',
options=timepointoptions,
placeholder='select a column where the value is unique for timepoint and cell',
value='unique_time')
def timepoint_selector(df):
'''
dropdown menu to select which column contains the information about timepoints
'''
columns=df.columns
timepointoptions= [{'label' :k, 'value' :k} for k in columns]
return dcc.Dropdown(
id='timepoint_selector',
options=timepointoptions,
placeholder='select the timepoint column',
value='Metadata_Timepoint')
def data_selector(df):
columns=df.columns
data_options= [{'label' :k, 'value' :k} for k in columns]
return dcc.Dropdown(
id='data_selector',
options=data_options,
multi=True,
value=['Location_Center_X_Zeroed', 'Location_Center_Y_Zeroed'])
def coordinate_selector(df):
columns=df.columns
data_options= [{'label' :k, 'value' :k} for k in columns]
return dcc.Dropdown(
id='coordinate_selector',
options=data_options,
multi=True,
value=['Location_Center_X', 'Location_Center_Y'])
def average_button():
return html.Button(id='average_button', n_clicks=0, children='Calculate Average')
def average_selector(df):
'''
dropdown menu to select which column you want to average
'''
columns=df.columns
timepointoptions= [{'label' :k, 'value' :k} for k in columns]
return dcc.Dropdown(
id='average_selector',
options=timepointoptions,
placeholder='select the column which you want to average',
value='None'
)
def average_grouper(df):
'''
dropdown menu to select which measurement you want to group the average by
'''
columns=df.columns
timepointoptions= [{'label' :k, 'value' :k} for k in columns]
return dcc.Dropdown(
id='average_grouper',
options=timepointoptions,
placeholder='select the column to group the average by',
value='None'
)
#%% Filters
def track_length_selector():
'''
input box for minimum track length
'''
return dcc.Input(placeholder='Enter a value...',
type='number',
value=20,
id='track_length_selector')
# =============================================================================
# def track_length_selector():
#
# '''
# slider to select data cleaning method
# '''
# return dcc.Slider(
# id='track_length_selector',
# min=0,
# max=10,
# step=1,
# value=7,
# marks={0:'0',
# 5:'5',
# 10:'10'})
# =============================================================================
def distance_filter():
'''
input box for minum travelled distance
'''
return dcc.Input(placeholder='Enter a value...',
type='number',
value=30,
id='distance_filter')
def custom_filter_dropdown(df):
'''
dropdown menu to select which data type to filter by
'''
columns=df.columns
identifieroptions= [{'label' :k, 'value' :k} for k in columns]
identifieroptions.append({'label':'none', 'value':'none'})
return dcc.Dropdown(
id='custom_filter_dropdown',
options=identifieroptions,
placeholder='select a column as filter',
value='none')
def custom_filter_numeric():
'''
input box for custom filter
'''
return dcc.Input(placeholder='Enter a value...',
type='number',
value=0,
id='custom_filter_numeric')
def datatype_selector():
return dcc.RadioItems(options=[
{'label': 'X, Y coordinates', 'value': 'xy'},
{'label': 'individual features', 'value' : 'features'}],
value='xy',
id='datatype_selector')
#%% Patterns
def ID_pattern():
return dcc.Textarea(
value='(?P<Site_ID>W[A-Z][0-9]+_S[0-9]{4})(?P<TrackID>_E[0-9]+)(?P<Timepoint>_T[0-9]+)',
style={'width':'100%'},
id='ID_pattern',
)
def timeless_ID_pattern():
return dcc.Textarea(
value='(?P<Well>W[A-Z][0-9]+)(?P<Site>_S[0-9]{4})(?P<TrackID>_E[0-9]+)',
style={'width':'100%'},
id='timeless_ID_pattern',
)
def ID_submit():
return html.Button(id='ID_submit', n_clicks=0, children='Submit pattern')
def save_path():
return dcc.Textarea(
placeholder='save the datatable at the following location',
value='/Users/max/Desktop/Office/Phd/pythoncode/graph_app/flagged_datatable.csv',
style={'width':'100%'},
id='save_path')
def save_button():
return html.Button(id='save_button', n_clicks=0, children='Download datatable')
#%% images
def Image_folder():
return dcc.Textarea(
placeholder='Enter the path to your images',
value='enter full path to your image',
style={'width':'100%'},
id='Image_folder')
def Folder_submit():
return html.Button(id='Folder_submit', n_clicks=0, children='upload images')
def Image_selector():
return dcc.RadioItems(options=[
{'label' : 'Yes', 'value': 'Yes'},
{'label': 'No', 'value' : 'No'}],
value='No',
id='Image_selector')
def image_slider():
'''
slider to select image to display
'''
return dcc.Slider(
id='image_slider',
min=0,
max=10,
step=1,
value=0,
marks={0:'0',
5:'5',
10:'10'},
updatemode='drag')
def brightness_slider():
'''
slider to adjust brightness of image
'''
return dcc.Slider(
id='brightness_slider',
min=0,
max=15,
step=None,
value=1,
marks={0:'0',
0.5:'0.5',
1:'1',
1.5:'1.5',
2:'2',
3: '3',
4:'4',
5:'5',
6: '6',
7: '7',
8: '8',
10: '10',
15: '15',})
#%% track filtering
def track_comment():
return dcc.Textarea(
placeholder='Do you want to flag the track',
value='enter a comment to the track',
style={'width':'100%'},
id='track_comment',
)
def comment_submit():
return html.Button(id='comment_submit', n_clicks=0, children='Add Comment')
def flag_options():
return dcc.RadioItems(
options=[
{'label':'all', 'value' : 'all'},
{'label':'single', 'value' : 'single'}],
value='all',
id='flag_options')
def flag_filter():
return dcc.Dropdown(
id='flag_filter',
options=[{'label':'placeholder', 'value':'placeholder'}],
multi=True,
)
def plot_hider():
return dcc.RadioItems(options=[
{'label' : 'Yes', 'value': 'Yes'},
{'label': 'No', 'value' : 'No'}],
value='No',
id='plot_hider')
def exclude_seen():
return dcc.RadioItems(options=[
{'label' : 'Yes', 'value': 'Yes'},
{'label': 'No', 'value' : 'No'}],
value='No',
id='exclude_seen')
| 1.953125
| 2
|
pymarkdown/plugins/rule_md_004.py
|
scop/pymarkdown
| 0
|
12785439
|
<filename>pymarkdown/plugins/rule_md_004.py
"""
Module to implement a plugin that checks unordered list markers for a consistent style.
"""
from pymarkdown.plugin_manager import Plugin, PluginDetails
class RuleMd004(Plugin):
"""
Class to implement a plugin that checks unordered list markers for a consistent style.
"""
__consistent_style = "consistent"
__asterisk_style = "asterisk"
__plus_style = "plus"
__dash_style = "dash"
__sublist_style = "sublist"
__valid_styles = [
__consistent_style,
__asterisk_style,
__plus_style,
__dash_style,
__sublist_style,
]
def __init__(self):
super().__init__()
self.__style_type = None
self.__actual_style_type = None
self.__current_list_level = None
def get_details(self):
"""
Get the details for the plugin.
"""
return PluginDetails(
# bullet, ul
plugin_name="ul-style",
plugin_id="MD004",
plugin_enabled_by_default=True,
plugin_description="Inconsistent Unordered List Start style",
plugin_version="0.5.0",
plugin_interface_version=1,
plugin_url="https://github.com/jackdewinter/pymarkdown/blob/main/docs/rules/rule_md004.md",
plugin_configuration="style",
) # https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md004---unordered-list-style
# Parameters: style ("consistent", "asterisk", "plus", "dash", "sublist"; default "consistent")
@classmethod
def __validate_configuration_style(cls, found_value):
if found_value not in RuleMd004.__valid_styles:
raise ValueError(f"Allowable values: {str(RuleMd004.__valid_styles)}")
def initialize_from_config(self):
"""
Event to allow the plugin to load configuration information.
"""
self.__style_type = self.plugin_configuration.get_string_property(
"style",
default_value=RuleMd004.__consistent_style,
valid_value_fn=self.__validate_configuration_style,
)
def starting_new_file(self):
"""
Event that the a new file to be scanned is starting.
"""
self.__actual_style_type = {}
self.__current_list_level = 0
if self.__style_type not in (
RuleMd004.__consistent_style,
RuleMd004.__sublist_style,
):
self.__actual_style_type[0] = self.__style_type
@classmethod
def __get_sequence_type(cls, token):
if token.list_start_sequence == "*":
return RuleMd004.__asterisk_style
if token.list_start_sequence == "+":
return RuleMd004.__plus_style
assert token.list_start_sequence == "-"
return RuleMd004.__dash_style
def next_token(self, context, token):
"""
Event that a new token is being processed.
"""
if token.is_unordered_list_start:
if self.__current_list_level not in self.__actual_style_type:
if self.__style_type in (RuleMd004.__sublist_style,) or (
self.__style_type == RuleMd004.__consistent_style
and not self.__actual_style_type
):
self.__actual_style_type[
self.__current_list_level
] = self.__get_sequence_type(token)
else:
self.__actual_style_type[
self.__current_list_level
] = self.__actual_style_type[0]
this_start_style = self.__get_sequence_type(token)
if self.__actual_style_type[self.__current_list_level] != this_start_style:
extra_data = f"Expected: {self.__actual_style_type[self.__current_list_level]}; Actual: {this_start_style}"
self.report_next_token_error(context, token, extra_data)
self.__current_list_level += 1
elif token.is_unordered_list_end:
self.__current_list_level -= 1
| 2.65625
| 3
|
tests/unit/cartography/intel/gsuite/test_api.py
|
sckevmit/cartography
| 2,322
|
12785440
|
from unittest import mock
from unittest.mock import patch
from cartography.intel.gsuite import api
def test_get_all_users():
client = mock.MagicMock()
raw_request_1 = mock.MagicMock()
raw_request_2 = mock.MagicMock()
user1 = {'primaryEmail': '<EMAIL>'}
user2 = {'primaryEmail': '<EMAIL>'}
user3 = {'primaryEmail': '<EMAIL>'}
client.users().list.return_value = raw_request_1
client.users().list_next.side_effect = [raw_request_2, None]
raw_request_1.execute.return_value = {'users': [user1, user2]}
raw_request_2.execute.return_value = {'users': [user3]}
result = api.get_all_users(client)
emails = [user['primaryEmail'] for response_object in result for user in response_object['users']]
expected = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
]
assert sorted(emails) == sorted(expected)
def test_get_all_groups():
client = mock.MagicMock()
raw_request_1 = mock.MagicMock()
raw_request_2 = mock.MagicMock()
group1 = {'email': '<EMAIL>'}
group2 = {'email': '<EMAIL>'}
group3 = {'email': '<EMAIL>'}
client.groups().list.return_value = raw_request_1
client.groups().list_next.side_effect = [raw_request_2, None]
raw_request_1.execute.return_value = {'groups': [group1, group2]}
raw_request_2.execute.return_value = {'groups': [group3]}
result = api.get_all_groups(client)
emails = [group['email'] for response_object in result for group in response_object['groups']]
expected = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
]
assert sorted(emails) == sorted(expected)
@patch('cartography.intel.gsuite.api.cleanup_gsuite_users')
@patch('cartography.intel.gsuite.api.load_gsuite_users')
@patch(
'cartography.intel.gsuite.api.get_all_users', return_value=[
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
],
)
def test_sync_gsuite_users(get_all_users, load_gsuite_users, cleanup_gsuite_users):
client = mock.MagicMock()
gsuite_update_tag = 1
session = mock.MagicMock()
common_job_param = {
"UPDATE_TAG": gsuite_update_tag,
}
api.sync_gsuite_users(session, client, gsuite_update_tag, common_job_param)
users = api.transform_users(get_all_users())
load_gsuite_users.assert_called_with(
session, users, gsuite_update_tag,
)
cleanup_gsuite_users.assert_called_once()
@patch('cartography.intel.gsuite.api.sync_gsuite_members')
@patch('cartography.intel.gsuite.api.cleanup_gsuite_groups')
@patch('cartography.intel.gsuite.api.load_gsuite_groups')
@patch(
'cartography.intel.gsuite.api.get_all_groups', return_value=[
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
],
)
def test_sync_gsuite_groups(all_groups, load_gsuite_groups, cleanup_gsuite_groups, sync_gsuite_members):
admin_client = mock.MagicMock()
session = mock.MagicMock()
gsuite_update_tag = 1
common_job_param = {
"UPDATE_TAG": gsuite_update_tag,
}
api.sync_gsuite_groups(session, admin_client, gsuite_update_tag, common_job_param)
groups = api.transform_groups(all_groups())
load_gsuite_groups.assert_called_with(session, groups, gsuite_update_tag)
cleanup_gsuite_groups.assert_called_once()
sync_gsuite_members.assert_called_with(groups, session, admin_client, gsuite_update_tag)
def test_load_gsuite_groups():
ingestion_qry = """
UNWIND {GroupData} as group
MERGE (g:GSuiteGroup{id: group.id})
ON CREATE SET
g.firstseen = {UpdateTag}
ON MATCH SET
g.group_id = group.id,
g.admin_created = group.adminCreated,
g.description = group.description,
g.direct_members_count = group.directMembersCount,
g.email = group.email,
g.etag = group.etag,
g.kind = group.kind,
g.name = group.name,
g.lastupdated = {UpdateTag}
"""
groups = []
update_tag = 1
session = mock.MagicMock()
api.load_gsuite_groups(session, groups, update_tag)
session.run.assert_called_with(
ingestion_qry,
GroupData=groups,
UpdateTag=update_tag,
)
def test_load_gsuite_users():
ingestion_qry = """
UNWIND {UserData} as user
MERGE (u:GSuiteUser{id: user.id})
ON CREATE SET
u.firstseen = {UpdateTag}
ON MATCH SET
u.user_id = user.id,
u.agreed_to_terms = user.agreedToTerms,
u.archived = user.archived,
u.change_password_at_next_login = user.changePasswordAtNextLogin,
u.creation_time = user.creationTime,
u.customer_id = user.customerId,
u.etag = user.etag,
u.include_in_global_address_list = user.includeInGlobalAddressList,
u.ip_whitelisted = user.ipWhitelisted,
u.is_admin = user.isAdmin,
u.is_delegated_admin = user.isDelegatedAdmin,
u.is_enforced_in_2_sv = user.isEnforcedIn2Sv,
u.is_enrolled_in_2_sv = user.isEnrolledIn2Sv,
u.is_mailbox_setup = user.isMailboxSetup,
u.kind = user.kind,
u.last_login_time = user.lastLoginTime,
u.name = user.name.fullName,
u.family_name = user.name.familyName,
u.given_name = user.name.givenName,
u.org_unit_path = user.orgUnitPath,
u.primary_email = user.primaryEmail,
u.email = user.primaryEmail,
u.suspended = user.suspended,
u.thumbnail_photo_etag = user.thumbnailPhotoEtag,
u.thumbnail_photo_url = user.thumbnailPhotoUrl,
u.lastupdated = {UpdateTag}
"""
users = []
update_tag = 1
session = mock.MagicMock()
api.load_gsuite_users(session, users, update_tag)
session.run.assert_called_with(
ingestion_qry,
UserData=users,
UpdateTag=update_tag,
)
def test_transform_groups():
param = [
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
]
expected = [
{'email': '<EMAIL>'}, {'email': 'group<EMAIL>'},
{'email': '<EMAIL>'}, {'email': '<EMAIL>'},
]
result = api.transform_groups(param)
assert result == expected
def test_transform_users():
param = [
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
]
expected = [
{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'},
{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'},
]
result = api.transform_users(param)
assert result == expected
| 2.640625
| 3
|
apple/AppleOAuth2.py
|
rajeshwariC/WeVoteServer
| 0
|
12785441
|
<reponame>rajeshwariC/WeVoteServer
import jwt
import requests
from config.base import get_environment_variable
from datetime import timedelta
from django.utils import timezone
from social_core.backends.oauth import BaseOAuth2
from social_core.utils import handle_http_errors
class AppleOAuth2(BaseOAuth2):
"""apple authentication backend
see https://github.com/truffls/sign-in-with-apple-using-django
"""
name = 'apple'
ACCESS_TOKEN_URL = 'https://appleid.apple.com/auth/token'
SCOPE_SEPARATOR = ','
ID_KEY = 'uid'
@handle_http_errors
def do_auth(self, access_token, *args, **kwargs):
"""
Finish the auth process once the access_token was retrieved
Get the email from ID token received from apple
"""
response_data = {}
client_id, client_secret = self.get_key_and_secret()
headers = {'content-type': "application/x-www-form-urlencoded"}
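        # Exchange the authorization code for tokens at Apple's token endpoint.
        # Apple validates that redirect_uri matches the value registered for the
        # service ID, so the placeholder URL below must match that registration.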
data = {
'client_id': client_id,
'client_secret': client_secret,
'code': access_token,
'grant_type': 'authorization_code',
'redirect_uri': 'https://example-app.com/redirect'
}
res = requests.post(AppleOAuth2.ACCESS_TOKEN_URL, data=data, headers=headers)
response_dict = res.json()
id_token = response_dict.get('id_token', None)
if id_token:
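            # The id_token was received directly from Apple over TLS, so the
            # signature check is skipped here (PyJWT 1.x verify=False API).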
decoded = jwt.decode(id_token, '', verify=False)
            if 'email' in decoded:
                response_data['email'] = decoded['email']
            if 'sub' in decoded:
                response_data['uid'] = decoded['sub']
        response = kwargs.get('response') or {}
        response.update(response_data)
        response.setdefault('access_token', access_token)
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def get_user_details(self, response):
email = response.get('email', None)
details = {
'email': email,
}
return details
def get_key_and_secret(self):
headers = {
'kid': get_environment_variable("SOCIAL_AUTH_APPLE_KEY_ID")
}
payload = { 'iss': get_environment_variable("SOCIAL_AUTH_APPLE_TEAM_ID"),
'iat': timezone.now(),
'exp': timezone.now() + timedelta(days=180),
'aud': 'https://appleid.apple.com',
'sub': get_environment_variable("SOCIAL_AUTH_APPLE_CLIENT_ID"),
}
client_secret = jwt.encode(
payload,
get_environment_variable("SOCIAL_AUTH_APPLE_PRIVATE_KEY"),
algorithm='ES256',
headers=headers
).decode("utf-8")
return get_environment_variable("SOCIAL_AUTH_APPLE_CLIENT_ID"), client_secret
| 2.1875
| 2
|
pyschism/outputs/stations.py
|
pmav99/pyschism
| 17
|
12785442
|
<reponame>pmav99/pyschism<gh_stars>10-100
from datetime import datetime, timezone, timedelta
import pathlib
from typing import Union, List
import warnings
import f90nml
import matplotlib.pyplot as plt
import numpy as np
from pyschism.utils.coops import CoopsDataCollector
from pyschism.enums import StationOutputIndex, StationOutputVariables
class StationsOutput:
def __init__(self, outputs: Union[str, pathlib.Path],
stations_file: Union[str, pathlib.Path] = None,
):
outputs = pathlib.Path(outputs)
if not outputs.is_dir():
raise Exception(f'{str(outputs)} is not a directory.')
        self._station_id: List = []
        # plot() checks self._obs but the original never initializes it;
        # default to False so plotting works without CO-OPS observations.
        self._obs = False
if stations_file is None:
stations_file = outputs.resolve() / '../station.in'
if not stations_file.is_file():
stations_file = None
else:
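                # station.in layout: line 1 holds output flags, line 2 the
                # station count; a trailing "! name" comment on a station line
                # is used here as its station id.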
with open(stations_file) as f:
f.readline()
for station in range(int(f.readline())):
line = f.readline()
if '!' in line:
self._station_id.append(line.split('!')[-1])
else:
self._station_id.append(None)
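        # SCHISM writes one staout_<n> file per output variable; column 0 of
        # each file is elapsed simulation time in seconds.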
self._manifest = list(outputs.glob('staout_*'))
# find the start_date. Let's first check if it's on the param.nml file
param_file = outputs.resolve() / '../param.nml'
if param_file.is_file():
param = f90nml.read(param_file)
start_year = param['opt'].get('start_year')
start_month = param['opt'].get('start_month')
start_day = param['opt'].get('start_day')
start_hour = param['opt'].get('start_hour')
utc_start = param['opt'].get('utc_start')
if None in [start_year, start_month, start_day, start_hour,
utc_start]:
warnings.warn('Could not determine start date automatically.')
self._start_date = None
else:
self._start_date = datetime(start_year, start_month,
start_day, int(start_hour), 0,
tzinfo=timezone(
timedelta(hours=-utc_start)))
self._rndays = None
for file in self._manifest:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
data = np.loadtxt(file)
if len(w) != 0:
pass
else:
self._rndays = timedelta(seconds=data[-1, 0])
break
if self.rndays is None:
raise Exception("Ouptut directory doesn't contain any station "
"output data.")
def set_start_date(self, start_date: datetime):
self._start_date = start_date
def get_station_id(self, index):
return self._station_id[index].strip()
def plot(self, variable, station_index=None, show=False):
filenames = [path.name for path in self._manifest]
var_index = StationOutputIndex[
StationOutputVariables(variable).name].value + 1
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
data = np.loadtxt(
self._manifest[filenames.index(f'staout_{var_index}')])
if len(w) != 0:
raise AttributeError(f'Empty record for variable {variable}.')
if self.start_date is not None:
start_date = self.start_date
dates = [start_date + timedelta(seconds=t) for t in data[:, 0]]
else:
dates = data[:, 0]
coops = CoopsDataCollector()
if station_index is None:
for i in range(data.shape[1] - 2):
plt.figure(i)
if self._obs:
station_id = self.get_station_id(i)
if station_id is not None:
obs = coops.fetch(
station_id, variable, self.start_date,
self.rndays)
plt.plot(obs['datetime'], obs['values'])
plt.title(obs['name'])
# fig, ax = plt.subplots(figsize=(8, 3))
# import matplotlib.dates as mdates
# plt.plot_date(obs['datetime'], obs['values'], ls='solid', lw=1.5, aa=True,marker='None',color='g')
# ax = plt.gca()
# dayFmt = mdates.DateFormatter('%a-%b-%d')
# hrFmt = mdates.DateFormatter('%H:00')
# ax.xaxis.set_major_formatter(dayFmt)
# ax.xaxis.set_major_locator(mdates.DayLocator())
# ax.xaxis.set_minor_locator(mdates.HourLocator(byhour=[12]))
# ax.xaxis.set_minor_formatter(hrFmt)
# plt.grid(b=True, which='both', linestyle='dotted')
# plt.show()
plt.plot(dates, data[:, i+1])
else:
plt.plot(data[:, 0], data[:, station_index])
if show is True:
plt.show()
# import pdb; pdb.set_trace()
# with open(self.manifest[filenames.index(f'staout_{index}')]) as f:
# for line in f:
# print(len(line.split()))
# exit()
@property
def start_date(self):
return self._start_date
@property
def rndays(self):
return self._rndays
| 2.5
| 2
|
notifications/notifications.py
|
uktrade/directory-api
| 2
|
12785443
|
from django.conf import settings
from notifications import constants, email, helpers
def verification_code_not_given():
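    """Send both the first and second reminder emails to unverified suppliers."""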
verification_code_not_given_first_reminder()
    verification_code_not_given_second_reminder()
def verification_code_not_given_first_reminder():
days_ago = settings.VERIFICATION_CODE_NOT_GIVEN_DAYS
category = constants.VERIFICATION_CODE_NOT_GIVEN
company_users = (
helpers.get_unverified_suppliers(days_ago)
.filter(
company__is_uk_isd_company=False,
)
.exclude(
supplieremailnotification__category=category,
)
)
for company_user in company_users:
notification = email.VerificationWaitingNotification(company_user)
notification.send()
def verification_code_not_given_second_reminder():
days_ago = settings.VERIFICATION_CODE_NOT_GIVEN_DAYS_2ND_EMAIL
category = constants.VERIFICATION_CODE_2ND_EMAIL
company_users = (
helpers.get_unverified_suppliers(days_ago)
.filter(
company__is_uk_isd_company=False,
)
.exclude(
supplieremailnotification__category=category,
)
)
for company_user in company_users:
notification = email.VerificationStillWaitingNotification(company_user)
notification.send()
def new_companies_in_sector():
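    """Notify anonymous subscribers about new companies in the industries they follow."""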
companies_grouped_by_industry = helpers.group_new_companies_by_industry()
for subscriber in helpers.get_new_companies_anonymous_subscribers():
companies = set()
for industry in subscriber['industries']:
companies.update(companies_grouped_by_industry[industry])
if companies:
notification = email.NewCompaniesInSectorNotification(subscriber=subscriber, companies=companies)
notification.send()
def company_user_unsubscribed(company_user):
notification = email.SupplierUbsubscribed(company_user)
notification.send()
def anonymous_unsubscribed(recipient_email):
recipient = {'email': recipient_email, 'name': None}
notification = email.AnonymousSubscriberUbsubscribed(recipient)
notification.send()
| 2.046875
| 2
|
SDKs/Aspose.Imaging-Cloud-SDK-for-Python/asposeimagingcloud/models/JpegProperties.py
|
naeem244/Aspose.Imaging-for-Cloud
| 0
|
12785444
|
<reponame>naeem244/Aspose.Imaging-for-Cloud
#!/usr/bin/env python
class JpegProperties(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'Comment': 'str',
'JpegExifData': 'JpegExifData',
'JpegJfifData': 'JfifData'
}
        self.attributeMap = {
            'Comment': 'Comment',
            'JpegExifData': 'JpegExifData',
            'JpegJfifData': 'JpegJfifData'
        }
self.Comment = None # str
self.JpegExifData = None # JpegExifData
self.JpegJfifData = None # JfifData
| 2.140625
| 2
|
ffa/webapp/api.py
|
kschweizer/fresnofieldarchers
| 0
|
12785445
|
<gh_stars>0
from rest_framework import permissions, viewsets
from rest_framework.permissions import BasePermission, IsAdminUser, SAFE_METHODS
from django.http import HttpResponse
from django.core import serializers
from webapp.models import Blogpost, Image, Album, Event, About, PinnedEvent
from knox.auth import TokenAuthentication
import json
from .serializers import AboutSerializer, BlogpostSerializer, ImageSerializer, AlbumSerializer, EventSerializer, PinnedEventSerializer
class ReadOnly(BasePermission):
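    """Grant access to unauthenticated users for safe (read-only) HTTP methods."""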
def has_permission(self, request, view):
return request.method in SAFE_METHODS
# Blogpost Viewset
class BlogpostViewSet(viewsets.ModelViewSet):
queryset = Blogpost.objects.all().order_by('-created_at')
authentication_classes = (TokenAuthentication, )
permission_classes = [IsAdminUser | ReadOnly]
serializer_class = BlogpostSerializer
# Album Viewset
class AlbumViewSet(viewsets.ModelViewSet):
pagination_class = None
queryset = Album.objects.all().order_by('-created_at')
authentication_classes = (TokenAuthentication, )
permission_classes = [IsAdminUser | ReadOnly]
serializer_class = AlbumSerializer
def retrieve(self, request, *args, **kwargs):
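        # Return the album's own fields with its images nested under "photos".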
album = self.get_object()
images = album.image_set.all()
album_json = json.loads(serializers.serialize('json', [album]))
        # Flatten the serializer output so the JSON root starts at the album's fields
        album_json = album_json[0]['fields']
serializer = ImageSerializer(images, many=True)
album_json['photos'] = serializer.data
return HttpResponse(json.dumps(album_json), content_type="application/json", status=200)
# Image Viewset
class ImageViewSet(viewsets.ModelViewSet):
pagination_class = None
queryset = Image.objects.all()
    authentication_classes = (TokenAuthentication, )
permission_classes = [IsAdminUser | ReadOnly]
serializer_class = ImageSerializer
def create(self, request):
post_data = request.data
image = post_data['image']
album_id = post_data['album']
album = Album.objects.get(id=album_id)
Image.objects.create(image=image, album=album)
return HttpResponse(str(post_data), status=200)
class EventViewSet(viewsets.ModelViewSet):
pagination_class = None
queryset = Event.objects.all().order_by('-created_at')
    authentication_classes = (TokenAuthentication, )
permission_classes = [IsAdminUser | ReadOnly]
serializer_class = EventSerializer
def create(self, request):
post_data = request.data
name = post_data['name']
        description = post_data.get('description')
        date = post_data.get('date')
        flyer = post_data.get('flyer')
event = Event.objects.create(name=name, description=description, flyer=flyer, date=date)
event_data = json.loads(serializers.serialize('json', [event]))[0]
return HttpResponse(json.dumps(event_data), content_type="application/json", status=200)
class AboutViewSet(viewsets.ModelViewSet):
pagination_class = None
queryset = About.objects.all()
authentication_classes = (TokenAuthentication, )
permission_classes = [IsAdminUser | ReadOnly]
serializer_class = AboutSerializer
class PinnedEventViewSet(viewsets.ModelViewSet):
pagination_class = None
queryset = PinnedEvent.objects.all()
authentication_classes = (TokenAuthentication, )
permission_classes = [IsAdminUser | ReadOnly]
serializer_class = PinnedEventSerializer
| 2.046875
| 2
|
data/transforms/build.py
|
moranxiachong/PersonReID-VAAL
| 2
|
12785446
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: <EMAIL>
"""
import torchvision.transforms as T
import random
import numpy as np
import PIL
from .transforms import RandomErasing
class AddGaussianNoise(object):
    def __call__(self, img):
        std = random.uniform(0, 1.0)
        if std > 0.5:  # skip the augmentation on roughly half of the calls
            return img
        # Convert to ndarray and work in float: casting Gaussian noise to uint8
        # (as the original did) wraps negative values around instead of adding them.
        img = np.asarray(img).astype(np.float32)
        noise = np.random.normal(size=img.shape, scale=std)
        img = np.clip(img + noise, 0, 255).astype(np.uint8)
        # Convert back to PIL image
        img = PIL.Image.fromarray(img)
        return img
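# Three train-time pipelines of decreasing aggressiveness: "hard" (down/upscale,
# rotation, colour jitter), "easy" (milder jitter), and "init" (flip/crop only).
# The test-time branch of all three is identical.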
def build_transforms(cfg, is_train=True):
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
if is_train:
print('++++ hard train')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_DOWN),
T.Resize(cfg.INPUT.SIZE_UP),
T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
T.Pad(padding=cfg.INPUT.PADDING),
T.RandomRotation(cfg.INPUT.DEGREE),
T.ColorJitter(0.6,0.9,0.7),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
#AddGaussianNoise(),
T.ToTensor(),
normalize_transform,
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
else:
print('++++ init test')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.ToTensor(),
normalize_transform
])
return transform
def build_transforms2(cfg, is_train=True):
#print('++++ easy')
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
if is_train:
print('++++ easy train')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TRAIN),
T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
#T.Pad(cfg.INPUT.PADDING),
T.ColorJitter(0.4,0.6,0.7),
T.RandomRotation(cfg.INPUT.DEGREE),
#T.ColorJitter(0.4,0.6,0.7),
T.Pad(cfg.INPUT.PADDING),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
T.ToTensor(),
normalize_transform,
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
else:
print('++++ easy test')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.ToTensor(),
normalize_transform
])
return transform
def build_transforms3(cfg, is_train=True):
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
if is_train:
print('++++ init train')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TRAIN),
T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
T.Pad(cfg.INPUT.PADDING),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
T.ToTensor(),
normalize_transform,
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
else:
print('++++ init test')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.ToTensor(),
normalize_transform
])
return transform
| 2.375
| 2
|
tutorial/2 - Design Optimization/2 - Wing Drag Minimization, with Aerostructures.py
|
askprash/AeroSandbox
| 0
|
12785447
|
<reponame>askprash/AeroSandbox
"""
In our last example, we had a cautionary tale about using bad models and assumptions, and how you can easily find
yourself with nonsensical solutions if you throw together models without thinking about how they can be exploited.
Let's try doing another wing drag minimization problem, except this time let's model some important coupled effects,
such as:
* the mass of the wing, as well as how that scales with wing size and shape
* fuselage drag
* takeoff lift constraints
Problem is taken from Section 3 of "Geometric Programming for Aircraft Design Optimization" by <NAME> and <NAME>. http://web.mit.edu/~whoburg/www/papers/hoburgabbeel2014.pdf
GPKit implementation available at: https://gpkit.readthedocs.io/en/latest/examples.html#simple-wing
"""
import aerosandbox as asb
import aerosandbox.numpy as np
### Constants
form_factor = 1.2 # form factor [-]
oswalds_efficiency = 0.95 # Oswald efficiency factor [-]
viscosity = 1.78e-5 # viscosity of air [kg/m/s]
density = 1.23 # density of air [kg/m^3]
airfoil_thickness_fraction = 0.12 # airfoil thickness to chord ratio [-]
ultimate_load_factor = 3.8 # ultimate load factor [-]
airspeed_takeoff = 22 # takeoff speed [m/s]
CL_max = 1.5 # max CL with flaps down [-]
wetted_area_ratio = 2.05 # wetted area ratio [-]
W_W_coeff1 = 8.71e-5 # Wing Weight Coefficient 1 [1/m]
W_W_coeff2 = 45.24 # Wing Weight Coefficient 2 [Pa]
drag_area_fuselage = 0.031 # fuselage drag area [m^2]
weight_fuselage = 4940.0 # aircraft weight excluding wing [N]
opti = asb.Opti() # initialize an optimization environment
### Variables
aspect_ratio = opti.variable(init_guess=10) # aspect ratio
wing_area = opti.variable(init_guess=200) # total wing area [m^2]
airspeed = opti.variable(init_guess=100) # cruising speed [m/s]
weight = opti.variable(init_guess=10000) # total aircraft weight [N]
CL = opti.variable(init_guess=1) # Lift coefficient of wing [-]
### Constraints
# Aerodynamics model
CD_fuselage = drag_area_fuselage / wing_area
Re = (density / viscosity) * airspeed * (wing_area / aspect_ratio) ** 0.5
Cf = 0.074 / Re ** 0.2
CD_profile = form_factor * Cf * wetted_area_ratio
CD_induced = CL ** 2 / (np.pi * aspect_ratio * oswalds_efficiency)
CD = CD_fuselage + CD_profile + CD_induced
dynamic_pressure = 0.5 * density * airspeed ** 2
drag = dynamic_pressure * wing_area * CD
lift_cruise = dynamic_pressure * wing_area * CL
lift_takeoff = 0.5 * density * wing_area * CL_max * airspeed_takeoff ** 2
# Wing weight model
weight_wing_structural = W_W_coeff1 * (
ultimate_load_factor * aspect_ratio ** 1.5 *
(weight_fuselage * weight * wing_area) ** 0.5
) / airfoil_thickness_fraction
weight_wing_surface = W_W_coeff2 * wing_area
weight_wing = weight_wing_surface + weight_wing_structural
# Other constraints
opti.subject_to([
weight <= lift_cruise,
weight <= lift_takeoff,
weight == weight_fuselage + weight_wing
])
# Objective
opti.minimize(drag)
sol = opti.solve()
# Output
aspect_ratio_opt = sol.value(aspect_ratio)
wing_area_opt = sol.value(wing_area)
drag_opt = sol.value(drag)
print(f"Minimum drag = {drag_opt} N")
print(f"Aspect ratio = {aspect_ratio_opt}")
print(f"Wing area = {wing_area_opt} m^2")
"""
Now, we get a much more reasonable solution, with:
* Minimum drag = 303.07477260455386 N
* Aspect ratio = 8.459983145854816
* Wing area = 16.44179489398983 m^2
We also see that we get an L/D of around 24.2 - much more reasonable.
This illustrates just how important accurate modeling is when doing engineering design optimization - just like when
coding, an optimizer solves the problem that you actually give it, which is not necessarily the problem that you may
mean to solve.
"""
| 2.75
| 3
|
python_basics/string_manipulation.py
|
Cristobalz2/pythonLearning
| 0
|
12785448
|
# print("var=10\nif var > 10:\vprint('la casa esta vacia')\nfin del programa" )
def mix_up(a, b):
    # Swap the first two characters of each string. Slicing is used rather
    # than str.replace, which would swap every occurrence of the leading pair.
    strA = b[:2] + a[2:]
    strB = a[:2] + b[2:]
    return strA + " " + strB
a= 'mix'
b= 'pod'
print(mix_up(a,b))
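# expected output: pox mid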
def fix_start(s):
    # Replace every later occurrence of the first character with '*'.
    return s[0] + s[1:].replace(s[0], '*')
print(fix_start('babble'))  # expected output: ba**le
| 3.65625
| 4
|
main.py
|
MarcelloEdocia/cellphone_purchase
| 0
|
12785449
|
import tkinter
import sys
import time
import json
from config import Config
from login_page import LoginPage
from menu import MenuPage
from register import Register
from admin import Admin
from purchases import Purchase
from add_purchase import AddPurchase
class Window(tkinter.Tk):
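    """Top-level Tk window: builds every page up front and switches between
    them by raising the requested frame."""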
def __init__(self, App):
self.app = App
self.config = App.config
super().__init__()
self.title(self.config.app_title)
self.geometry(self.config.screen)
self.resizable(False, False)
self.create_container()
self.pages = {}
self.users = self.config.load_user()
self.purchases = self.config.load_purchase()
self.create_add_purchase()
self.create_admin_page()
self.create_purchase_page()
self.create_register_page()
self.create_menu_page()
self.create_login_page()
def create_container(self):
self.container = tkinter.Frame(self, bg="grey")
self.container.pack(fill="both", expand=True)
def create_login_page(self):
self.pages["LoginPage"] = LoginPage(self.container, self)
def create_menu_page(self):
self.pages["MenuPage"] = MenuPage(self.container, self)
def create_register_page(self):
self.pages["RegisterPage"] = Register(self.container, self)
def create_admin_page(self):
self.pages["AdminPage"] = Admin(self.container, self)
def create_purchase_page(self):
self.pages["PurchasePage"] = Purchase(self.container, self)
def create_add_purchase(self):
self.pages["AddPurchase"] = AddPurchase(self.container, self)
def create_falselogin(self):
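        # A tiny invisible PhotoImage attached to a Button makes Tk interpret
        # width/height in pixels rather than in characters.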
self.pixelVirtual = tkinter.PhotoImage(width=2, height=1)
self.button_width = 80
self.button_height = 30
pop = tkinter.Toplevel(self)
pop.title("Warning")
pop.geometry("250x150+700+300")
pop.config(bg="cyan")
# Text warning
pop_warning = tkinter.Label(pop, text="Wrong username or password !", font=("roboto sans-serif", 12), bg="cyan")
pop_warning.pack(pady=10)
# Warning button
pop_frame = tkinter.Frame(pop, bg="cyan")
pop_frame.pack(pady=5)
button1 = tkinter.Button(pop_frame, text="Okay", image=self.pixelVirtual, width=self.button_width,
height=self.button_height, compound="c", command=pop.destroy)
button1.pack(pady=15)
def create_register_warning(self):
self.pixelVirtual = tkinter.PhotoImage(width=2, height=1)
self.button_width = 80
self.button_height = 30
pop = tkinter.Toplevel(self)
pop.title("Warning")
pop.geometry("250x150+700+300")
pop.config(bg="cyan")
# Text warning
pop_warning = tkinter.Label(pop, text="Please include all field!", font=("roboto sans-serif", 12),
bg="cyan")
pop_warning.pack(pady=10)
# Warning button
pop_frame = tkinter.Frame(pop, bg="cyan")
pop_frame.pack(pady=5)
button1 = tkinter.Button(pop_frame, text="Okay", image=self.pixelVirtual, width=self.button_width,
height=self.button_height, compound="c", command=pop.destroy)
button1.pack(pady=15)
def create_usernamewarning(self):
self.buttonVirtual = tkinter.PhotoImage(width=3, height=1)
button_width = 80
button_height = 30
userwarning = tkinter.Toplevel(self)
userwarning.title("Warning")
userwarning.geometry("250x190+700+300")
userwarning.config(bg="cyan")
used_warning = tkinter.Label(userwarning, text="This Username has already been used",
font=("roboto sans-serif", 10, "bold"), bg="cyan")
used_warning.pack(expand=True, pady=10)
used_warning_button = tkinter.Button(userwarning, text="Return", font=("Arial", 17),
image=self.buttonVirtual, compound="c", height=button_height,
width=button_width, command=userwarning.destroy)
used_warning_button.pack(expand=True, pady=10)
def change_page(self, page):
new_page = self.pages[page]
new_page.tkraise()
def exit_button(self):
time.sleep(1)
sys.exit()
def check_login(self):
username = self.pages["LoginPage"].username_text.get()
password = self.pages["LoginPage"].password_text.get()
granted = self.config.login(username, password)
if granted:
self.change_page("MenuPage")
else:
self.create_falselogin()
def gotoadmin(self):
self.change_page("AdminPage")
def gotopurchase(self):
self.change_page("PurchasePage")
def gotologin(self):
self.change_page("LoginPage")
def register(self):
self.change_page("RegisterPage")
def gotomenu(self):
self.change_page("MenuPage")
def addpurchase(self):
self.change_page("AddPurchase")
def exit_popup(self):
self.pixelVirtual = tkinter.PhotoImage(width=2, height=1)
self.button_width = 80
self.button_height = 30
pop = tkinter.Toplevel(self)
pop.title("Warning")
pop.geometry("250x150+700+300")
pop.config(bg="cyan")
# Text warning
pop_warning = tkinter.Label(pop, text="Are you sure to exit?", font=("roboto sans-serif", 14), bg="cyan")
pop_warning.pack(pady=10)
# Warning button
pop_frame = tkinter.Frame(pop, bg="cyan")
pop_frame.pack(pady=5)
button1 = tkinter.Button(pop_frame, text="Yes", image=self.pixelVirtual, width=self.button_width,
height=self.button_height, compound="c", command=self.exit_button)
button1.pack(side="left", pady=10, padx=10)
button2 = tkinter.Button(pop_frame, text="No", image=self.pixelVirtual, width=self.button_width,
height=self.button_height, compound="c", command=pop.destroy)
button2.pack(side="right", pady=10, padx=10)
def save_user(self):
first_name = self.pages["RegisterPage"].first_name_text.get()
last_name = self.pages["RegisterPage"].last_name_text.get()
new_username = self.pages["RegisterPage"].username_text.get()
new_password = self.pages["RegisterPage"].password_text.get()
phone_number = self.pages["RegisterPage"].phone_text.get()
email = self.pages["RegisterPage"].email_text.get()
if first_name != "" and last_name != "" and new_username != "" and new_password != "" and email != "" and phone_number != "":
if new_username in self.users:
self.create_usernamewarning()
else:
self.users[new_username] = {
"first_name": first_name,
"last_name" : last_name,
"password" : <PASSWORD>,
"email": email,
"phone" : phone_number,
"level": "admin"
}
with open("./data/users.json", "w") as file:
json.dump(self.users, file)
self.create_admin_page()
self.change_page("LoginPage")
else:
self.create_register_warning()
def add_purchase(self):
customer = self.pages["AddPurchase"].customer_name_text.get()
product = self.pages["AddPurchase"].product_name_text.get()
amount = self.pages["AddPurchase"].amount_text.get()
address = self.pages["AddPurchase"].address_text.get()
date = self.pages["AddPurchase"].date_text.get()
price = self.pages["AddPurchase"].price_text.get()
retailer = self.pages["AddPurchase"].retailer_text.get()
#print(customer)
#print(len(self.purchases))
new_purchase = f"purchase{len(self.purchases)+1}"
#print(new_purchase)
if customer != "" and product != "" and amount != "" and address != "" and date != "" and price != "" and retailer != "":
self.purchases[new_purchase] = {
"customer_name": customer,
"product_name": product,
"amount": amount,
"address": address,
"transact_date": date,
"product_price": price,
"retailer_name": retailer
}
with open("./data/purchases.json", "w") as file:
json.dump(self.purchases, file)
self.create_purchase_page()
self.change_page("PurchasePage")
class App:
def __init__(self):
self.config = Config()
self.window = Window(self)
def run(self):
self.window.mainloop()
def main():
my_app = App()
my_app.run()
if __name__ == "__main__":
main()
| 2.625
| 3
|
modules/scripts/align_getMapStats.py
|
baigal628/CHIPS
| 10
|
12785450
|
<gh_stars>1-10
#!/usr/bin/env python
"""Script to collect the mapping statistics from across all samples.
outputs to stdout:
Sample,Mapped,Total,Percentage
"""
import os
import sys
from optparse import OptionParser
def main():
usage = "USAGE: %prog -f [FPKM FILE_1] -f [FPKM FILE_2] ...-f [FPKM FILE_N]"
optparser = OptionParser(usage=usage)
optparser.add_option("-f", "--files", action="append", help="list of sample_mapping.txt files (note: these are snakemake temp files)")
(options, args) = optparser.parse_args(sys.argv)
if not options.files:
optparser.print_help()
sys.exit(-1)
print(",".join(["Sample","Total","Mapped","UniquelyMapped"]))
for f in options.files:
#UGH: this script is ugly!!
#TRY to infer the SAMPLE NAMES--SAMPLE.virusseq.ReadsPerGene.out.tab
sampleID = f.strip().split("/")[-1].split('.')[0]
#ALSO remove the suffix '_mapping' from the sampleID name
if sampleID.endswith('_mapping'):
sampleID = sampleID.replace("_mapping","")
        with open(f) as fh:
            total = int(fh.readline().strip().split()[0])
            # skip 3 lines
            for _ in range(3):
                fh.readline()
            mapped = int(fh.readline().strip().split()[0])
            # skip 8 lines
            for _ in range(8):
                fh.readline()
            uniq_mapped = int(fh.readline().strip())
print(",".join([sampleID,str(total),str(mapped),str(uniq_mapped)]))#"%.2f" % (float(mapped)/total *100)]))
if __name__=='__main__':
main()
| 2.34375
| 2
|