| max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
|---|---|---|---|---|---|---|
core/engine/transform.py
|
yuwilbur/birthday29
| 0
|
12781651
|
<filename>core/engine/transform.py
from ..engine.vector import Vector
from ..engine.component import Component
class Transform(Component):
def __init__(self, game_object):
self.position = Vector()
self.rotation = Vector()
| 1.804688
| 2
|
test/prediction_variations.py
|
dsosnoski/irvideo-classification
| 0
|
12781652
|
<gh_stars>0
import copy
import numpy as np
class PredictionVariation:
def __init__(self, description, evaluatefn):
self.description = description
self.evaluatefn = evaluatefn
self.accumulated_results = []
def evaluate(self, predicts, certainties, track):
self.accumulated_results.append(np.argmax(self.evaluatefn(predicts, certainties, track)))
def reset(self):
self.accumulated_results = []
def _sum_weighted(predicts, weights):
return np.matmul(weights.T, predicts)
_GENERAL_PREDICTIONS = [
PredictionVariation('Mean prediction', lambda p, c, t: p.sum(axis=0)),
PredictionVariation('Mean squared prediction', lambda p, c, t: (p**2).sum(axis=0)),
PredictionVariation('Pixels weighted mean squared prediction', lambda p, c, t: _sum_weighted(p**2, np.array(t.pixels))),
PredictionVariation('Mass weighted mean squared prediction', lambda p, c, t: _sum_weighted(p**2, np.array(t.masses)))
]
_CERTAINTY_PREDICTIONS = [
PredictionVariation('Certainty weighted mean prediction', lambda p, c, t: _sum_weighted(p, c)),
PredictionVariation('Certainty weighted mean squared prediction', lambda p, c, t: _sum_weighted(p**2, c)),
PredictionVariation('Certainty * pixels weighted mean squared prediction', lambda p, c, t: _sum_weighted(p ** 2, c.flatten()*np.array(t.pixels)))
]
def get_general_predictions():
return copy.deepcopy(_GENERAL_PREDICTIONS)
def get_predictions_with_certainty():
return copy.deepcopy(_GENERAL_PREDICTIONS + _CERTAINTY_PREDICTIONS)
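# --- Editor's sketch (not part of the original file): minimal usage ---
# This hedged example shows how the variations above could be driven for a
# single track. `DummyTrack` and the toy arrays are made up for illustration;
# only `get_general_predictions` and `PredictionVariation.evaluate` come from
# the code above.
if __name__ == "__main__":
    class DummyTrack:
        pixels = [120, 90]   # per-frame pixel counts, as the lambdas expect
        masses = [1.0, 0.7]  # per-frame masses
    predicts = np.array([[0.1, 0.9], [0.4, 0.6]])  # per-frame class scores
    certainties = np.array([[0.8], [0.5]])         # per-frame certainties
    for variation in get_general_predictions():
        variation.evaluate(predicts, certainties, DummyTrack())
        print(variation.description, variation.accumulated_results)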
| 2.390625
| 2
|
conftest.py
|
davidholiday/pyrest
| 0
|
12781653
|
<gh_stars>0
# pytest fixture file for pyrest
#
import os
import glob
import shutil
import time
import pytest
import json
def load_params_from_json(json_paths):
"""Takes a list of json file paths and returns an aggregated list of the
contents of all of them
Args:
json_paths(list[str]): list of paths to parameter json files
Returns:
return_list: list[str]
"""
return_list = []
for json_path in json_paths:
with open(json_path) as f:
return_list += json.load(f)
print("json is: " + str(return_list))
return return_list
def load_ids_from_json(json_paths):
"""
Args:
json_paths:
Returns:
return_list:
"""
return_list = []
for json_path in json_paths:
with open(json_path) as f:
param_list = json.load(f)
# get suite tag from filename
filename = os.path \
.split(json_path)[1]
name_list = os.path \
.splitext(filename)[0] \
.split('_')
suite_name = ' '.join(name_list) \
.upper()
for param in param_list:
# make sure to include notice of skipped tests in the test ID
# this way it gets reported both in the log and to the IDE
if param['skip']:
tag = "*SKIPPED* " + suite_name + ": " + param['tag']
else:
tag = suite_name + ": " + param['tag']
return_list += [tag]
print("tag list is: " + str(return_list))
return return_list
def get_test_parameter_file_list():
"""Contains the test parameter file paths
Returns:
list[str]
"""
absolute_current_file_path = os.path.abspath(__file__)
current_directory = os.path.dirname(absolute_current_file_path)
test_directory = os.path.join(current_directory, 'test_parameter_files')
search_pattern = test_directory + "/*.json"
test_parameter_file_list = glob.glob(search_pattern)
print("returning test_parameter_file_list: " + str(test_parameter_file_list))
return test_parameter_file_list
@pytest.fixture(
params=load_params_from_json(get_test_parameter_file_list()),
ids=load_ids_from_json(get_test_parameter_file_list())
)
def test_data(request):
"""Loads test params from JSON files then issues to test (one by one)
Args:
request: pytest request object
Returns:
request.param: Values for individual test
"""
print("request.param is: {}", request.param)
yield request.param
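# --- Editor's sketch (not part of the original conftest): example usage ---
# A test module in the same suite could consume the fixture like this. The
# 'skip' and 'tag' keys follow load_ids_from_json above; any further keys
# depend on the JSON parameter files and are not assumed here.
#
# def test_endpoint(test_data):
#     if test_data['skip']:
#         pytest.skip('marked as skipped in the parameter file')
#     assert test_data['tag']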
| 2.53125
| 3
|
add-two-numbers/add-two-numbers.py
|
Javran/leetcode
| 3
|
12781654
|
<filename>add-two-numbers/add-two-numbers.py
#!/usr/bin/env python3
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# "Add Two Numbers", recursive version
# for some reason it seems this non-tail recursive version
# works fast and the code itself is simple, so let's keep it this way.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def show(self):
print(self.val)
if self.next:
self.next.show()
class Solution:
zero = ListNode(0)
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
ret = Solution.addTwoNumsAux(l1,l2,0)
if ret is None:
# this situation only happens
# when the result of adding them together is zero, which requires
# a non-empty representation.
return Solution.zero
else:
return ret
@staticmethod
def get(v):
if v is None:
return (0, None)
else:
return (v.val, v.next)
@staticmethod
def addTwoNumsAux(l1, l2, carry):
# short-circuiting:
# when either l1 or l2 is missing and no carry
# we can just use the non-zero one right away
if carry == 0:
if l1 is None:
return l2
if l2 is None:
return l1
(l1Val, l1Next) = Solution.get(l1)
(l2Val, l2Next) = Solution.get(l2)
carry += l1Val + l2Val
if carry == 0 and l1Next is None and l2Next is None:
return None
else:
(q,r) = divmod(carry, 10)
curNode = ListNode(r)
curNode.next = Solution.addTwoNumsAux(l1Next, l2Next, q)
return curNode
def mk(xs):
init = ListNode(None)
curr = init
for x in xs:
newNode = ListNode(x)
curr.next = newNode
curr = curr.next
return init.next
#a = mk([0,0,0,1])
#b = mk([0,0,1,2,3,4])
#Solution().addTwoNumbers(a,b).show()
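# Editor's note on the commented example above: mk() builds the digits in
# least-significant-first order, so a represents 1000 and b represents
# 432100; their sum is 433100, and show() would print the digits
# 0, 0, 1, 3, 3, 4, one per line.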
| 4.25
| 4
|
monitoring/migrations/0002_communitysetting_jobsetting.py
|
hyphae/apis-service_center
| 0
|
12781655
|
<filename>monitoring/migrations/0002_communitysetting_jobsetting.py<gh_stars>0
# Generated by Django 2.2.11 on 2020-05-25 06:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monitoring', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CommunitySetting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('community_id', models.CharField(max_length=128, unique=True)),
('notify_to', models.EmailField(default='<EMAIL>', max_length=254)),
],
),
migrations.CreateModel(
name='JobSetting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('community_id', models.CharField(max_length=128)),
('cluster_id', models.CharField(max_length=128)),
('type', models.CharField(max_length=128)),
('is_active', models.BooleanField(default=True)),
],
options={
'unique_together': {('community_id', 'cluster_id', 'type')},
},
),
]
| 1.640625
| 2
|
pages/themes/beginners/loops/examples/slides/user_input_DO-WHILE_shorter_than_3.py
|
ProgressBG-Python-Course/ProgressBG-VC2-Python
| 0
|
12781656
|
<gh_stars>0
# ask the user to enter a name (string) until it contains at least 3 symbols
while True:
user_name = input("Enter a name (at least 3 symbols): ")
user_name_length = len(user_name)
if user_name_length >= 3:
break
print("Thank you, {}!".format(user_name))
| 4.1875
| 4
|
landslide_pipeline/planet_mosaic_loader.py
|
chrisleboa/landslidePipeline
| 0
|
12781657
|
<filename>landslide_pipeline/planet_mosaic_loader.py
import os
from shapely.geometry import shape, Polygon, box
from shapely.ops import transform
import pyproj
import requests
from datetimerange import DateTimeRange
from functools import partial
def query_planet_mosaic(location, times, api_key):
def handle_page(response, ul, lr, start, end):
return_items = []
for items in response['mosaics']:
bd = items['bbox']
mosgeom = shape(Polygon(box(bd[0], bd[1], bd[2], bd[3]).exterior.coords))
boundgeom = shape(Polygon(box(ul[0], lr[1], lr[0], ul[1])))
proj = partial(pyproj.transform, pyproj.Proj(init='epsg:4326'), pyproj.Proj(init='epsg:3857'))
boundgeom = transform(proj, boundgeom)
mosgeom = transform(proj, mosgeom)
if boundgeom.intersection(mosgeom).is_empty:
pass
else:
id = items['id']
quad_ids = []
r = requests.get('https://api.planet.com/basemaps/v1/mosaics/' + str(id) + '/quads?bbox=' +
str(ul[0])+'%2C'+str(lr[1])+'%2C'+str(lr[0])+'%2C'+str(ul[1]) +
'&api_key=' + api_key)
resp = r.json()
if len(resp['items']) > 0:
time_range = DateTimeRange(items['first_acquired'].split('T')[0],
items['last_acquired'].split('T')[0])
x = DateTimeRange(start, end)
if time_range.is_intersection(x) is True:
quad_ids += [it['id'] for it in resp['items']]
while resp['_links'].get('_next') is not None:
r = requests.get(resp['_links'].get('_next'))
resp = r.json()
time_range = DateTimeRange(items['first_acquired'].split('T')[0],
items['last_acquired'].split('T')[0])
x = DateTimeRange(start, end)
if time_range.is_intersection(x) is True:
quad_ids += [it['id'] for it in resp['items']]
if len(quad_ids) > 0:
return_items += [{"name": str(items['name']),
"mosaic_id": str(items['id']),
"quad_ids": tuple(set(quad_ids)),
"first_acquired": str(items['first_acquired']).split('T')[0],
"last_acquired": str(items['last_acquired']).split('T')[0],
"coordinate_system": int(items['coordinate_system'].split(':')[1]),
"resolution": format(float(str(items['grid']['resolution'])),'.3f')}]
return return_items
def metadata(ul, lr, start, end):
r = requests.get('https://api.planet.com/basemaps/v1/mosaics?api_key=' + api_key)
response = r.json()
final_list = []
try:
if response['mosaics'][0]['quad_download']:
final_list += handle_page(response, ul, lr, start, end)
except KeyError:
print('No Download permission for: '+str(response['mosaics'][0]['name']))
try:
while response['_links'].get('_next') is not None:
page_url = response['_links'].get('_next')
r = requests.get(page_url)
response = r.json()
try:
if response['mosaics'][0]['quad_download']:
final_list += handle_page(response, ul, lr, start, end)
except KeyError:
print('No Download permission for: '+str(response['mosaics'][0]['name']))
except Exception as e:
print(e)
return final_list
ul = (location['min_longitude'], location['max_latitude'])
lr = (location['max_longitude'], location['min_latitude'])
start = times['start']
end = times['end']
return metadata(ul, lr, start, end)
def load_data(**kwargs):
if kwargs.get('cloudless_scenes') is not None:
return kwargs
location = kwargs['LOCATION']
times = kwargs['TIMES']
output = kwargs['OUTPUT']
api_key = kwargs['PL_API_KEY']
import os
cloudless_scenes = []
output_directory = os.path.join(os.getcwd(), output['output_path'])
try:
os.mkdir(output_directory)
except:
pass
try:
os.mkdir('.tmp')
except:
pass
metadata = query_planet_mosaic(location, times, api_key)
kwargs['query_metadata'] = metadata
for mosaic in metadata:
tilenames = []
counter = 0
for tile in mosaic['quad_ids']:
url = "https://api.planet.com/basemaps/v1/mosaics/" + mosaic['mosaic_id'] + '/quads/' + tile \
+ '/full?api_key=' + api_key
r = requests.get(url)
tilename = os.path.join('.tmp', str(counter))
counter += 1
tilenames += [tilename]
with open(tilename, 'wb') as f:
f.write(r.content)
ul = (location['max_latitude'], location['min_longitude'])
lr = (location['min_latitude'], location['max_longitude'])
from landslide_pipeline.utils import get_projected_bounds
(ulp, lrp) = get_projected_bounds(ul, lr, 4326, mosaic['coordinate_system'])
output_name = os.path.join(output['output_path'], mosaic['name'] + '.tif')
cloudless_scenes += [{"filename": output_name,
"coordinate_system": mosaic['coordinate_system']}]
arg = ['gdal_merge.py', '-o', output_name, '-of', 'GTiff', '-co',
'COMPRESS=LZW', '-co', 'BIGTIFF=IF_SAFER', '-ul_lr', str(ulp[0]), str(ulp[1]), str(lrp[0]),
str(lrp[1])] + tilenames
import subprocess
subprocess.call(arg)
for tilename in tilenames:
os.remove(tilename)
kwargs['cloudless_scenes'] = cloudless_scenes
return kwargs
def reproject_assets(**kwargs):
if kwargs.get('reprojected') is not None:
return kwargs
import os
output = kwargs['OUTPUT']
cloudless_scenes = kwargs['cloudless_scenes']
output_projection = output['output_projection']
for cloudless_scene in cloudless_scenes:
if cloudless_scene['coordinate_system'] != output_projection:
import subprocess as sp
arg = ['gdalwarp', '-s_srs', 'EPSG:' + str(cloudless_scene['coordinate_system']),
'-t_srs', 'EPSG:' + str(output['output_projection']), '-of', 'GTiff', '-co', 'COMPRESS=LZW',
'-co', 'BIGTIFF=IF_SAFER', cloudless_scene['filename'], os.path.join('.tmp','tmpreproj.tif')]
sp.call(arg)
arg = ['rm', '-f', cloudless_scene['filename']]
sp.call(arg)
arg = ['mv', os.path.join('.tmp','tmpreproj.tif'), cloudless_scene['filename']]
sp.call(arg)
print('Reprojected: ', cloudless_scene['filename'])
else:
print('Did not reproject: ', cloudless_scene['filename'])
kwargs['reprojected'] = True
return kwargs
| 2.4375
| 2
|
httprider/widgets/key_value_widget.py
|
iSWORD/http-rider
| 27
|
12781658
|
<filename>httprider/widgets/key_value_widget.py
from PyQt5 import QtWidgets
from ..core import DynamicStringData
from ..generated.key_value_widget import Ui_KeyValueWidget
class KeyValueWidget(QtWidgets.QWidget, Ui_KeyValueWidget):
def __init__(self, parent=None, parent_widget_item=None, on_remove_callback=None):
super(KeyValueWidget, self).__init__(parent)
self.setupUi(self)
self.k = ""
self.v = DynamicStringData()
self.setLayout(self.horizontalLayout)
self.btn_remove_header.pressed.connect(
lambda: on_remove_callback(parent_widget_item)
)
def set_data(self, name, v: DynamicStringData):
self.k = name
self.v = v
self.txt_name.setText(self.k)
self.txt_value.setValue(self.v)
self.chk_field_enabled.setChecked(v.is_enabled)
def get_data(self):
self.k = self.txt_name.text().strip()
self.v = self.txt_value.getValue()
self.v.is_enabled = self.chk_field_enabled.isChecked()
return self.k, self.v
| 2.1875
| 2
|
examples/poll.py
|
bogdan-kulynych/trials
| 29
|
12781659
|
"""Sociological poll example."""
import sys
sys.path.append('..')
from trials import Trials
if __name__ == '__main__':
test = Trials(['Poroshenko', 'Tymoshenko'])
test.update({
'Poroshenko': (48, 52),
'Tymoshenko': (12, 88)
})
estimates = test.evaluate('posterior CI')
dominance = test.evaluate('dominance', control='Tymoshenko')
print('Poroshenko estimated vote share: {lower:.2%} - {upper:.2%} '
'(95% credibility)'
.format(lower=estimates['Poroshenko'][0],
upper=estimates['Poroshenko'][2]))
print('Tymoshenko estimated vote share: {lower:.2%} - {upper:.2%} '
'(95% credibility)'
.format(lower=estimates['Tymoshenko'][0],
upper=estimates['Tymoshenko'][2]))
print('Chance that Poroshenko beats Tymoshenko based on the poll data: '
'{chance:.2%}'.format(chance=dominance['Poroshenko']))
| 3.09375
| 3
|
start_esp/fetch_service_config.py
|
JLXIA/esp
| 0
|
12781660
|
<reponame>JLXIA/esp
# Copyright (C) Extensible Service Proxy Authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
###############################################################################
#
import certifi
import json
import logging
import urllib3
from oauth2client.service_account import ServiceAccountCredentials
# Service management service
SERVICE_MGMT_ROLLOUTS_URL_TEMPLATE = (
"{}/v1/services/{}/rollouts?filter=status=SUCCESS")
_GOOGLE_API_SCOPE = (
"https://www.googleapis.com/auth/service.management.readonly")
# Metadata service path
_METADATA_PATH = "/computeMetadata/v1/"
_INSTANCE_ATTRIBUTES = "instance/attributes/"
_METADATA_SERVICE_NAME = "endpoints-service-name"
_METADATA_SERVICE_CONFIG_ID = "endpoints-service-config-id"
_METADATA_ROLLOUT_STRATEGY = "endpoints-rollout-strategy"
class FetchError(Exception):
"""Error class for fetching and validation errors."""
def __init__(self, code, message):
self.code = code
self.message = message
def __str__(self):
return self.message
def fetch_metadata(metadata, attr_path, required):
"""Fetch an attribute from metadata URL."""
url = metadata + _METADATA_PATH + attr_path
headers = {"Metadata-Flavor": "Google"}
client = urllib3.PoolManager(ca_certs=certifi.where())
if required:
timeout = 1.0
retries = True
else:
timeout = 0.1
retries = False
try:
response = client.request("GET", url, headers=headers, timeout=timeout, retries=retries)
except:
if required:
raise FetchError(1,
"Failed fetching metadata attribute: " + url)
else:
return None
status_code = response.status
if status_code != 200:
if required:
message_template = "Failed fetching metadata attribute: {}, status code {}"
raise FetchError(1, message_template.format(url, status_code))
else:
return None
return response.data
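# Editor's note (hedged example, not in the original): on a GCE/GKE VM the
# metadata base URL is typically "http://169.254.169.254" (or
# "http://metadata.google.internal"), so a typical call looks like
#     zone = fetch_metadata("http://169.254.169.254", "instance/zone", True)
# which returns bytes such as b"projects/123456789/zones/us-central1-a".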
def fetch_service_config_rollout_strategy(metadata):
"""Fetch service config rollout strategy from metadata URL."""
strategy = fetch_metadata(
metadata, _INSTANCE_ATTRIBUTES + _METADATA_ROLLOUT_STRATEGY, False)
if strategy:
logging.info("Service config rollout strategy: " + strategy)
return strategy
def fetch_service_name(metadata):
"""Fetch service name from metadata URL."""
name = fetch_metadata(
metadata, _INSTANCE_ATTRIBUTES + _METADATA_SERVICE_NAME, True)
logging.info("Service name: " + name)
return name
def fetch_service_config_id(metadata):
"""Fetch service config ID from metadata URL."""
version = fetch_metadata(
metadata, _INSTANCE_ATTRIBUTES + _METADATA_SERVICE_CONFIG_ID, False)
if version:
logging.info("Service config ID:" + version)
return version
def fetch_metadata_attributes(metadata):
"""Fetch metadata attributes from metadata URL."""
attrs = [
("zone", "instance/zone"),
("project_id", "project/project-id"),
("gae_server_software", "instance/attributes/gae_server_software"),
("kube_env", "instance/attributes/kube-env"),
("access_token", "instance/service-accounts/default/token"),
]
out_str = ""
for key, attr in attrs:
value = fetch_metadata(metadata, attr, False)
if key == "zone":
# If no zone, just bail out
if not value:
return None
else:
# Get the last section
value = value.split("/")[-1]
if value:
if key == "access_token":
json_token = json.loads(value)
value = "{\n"
value += " access_token: \"{}\"\n".format(json_token["access_token"])
value += " token_type: \"{}\"\n".format(json_token["token_type"])
value += " expires_in: {}\n".format(json_token["expires_in"])
value += " }"
out_str += " {}: {}".format(key, value)
else:
# The kube_env value is too big; ESP only checks whether it is empty.
if key == "kube_env":
value = "KUBE_ENV"
out_str += " {}: \"{}\"".format(key, value) + "\n"
logging.info("Attribute {}: {}".format(key, value))
return out_str
def make_access_token(secret_token_json):
"""Construct an access token from service account token."""
logging.info("Constructing an access token with scope " + _GOOGLE_API_SCOPE)
credentials = ServiceAccountCredentials.from_json_keyfile_name(
secret_token_json,
scopes=[_GOOGLE_API_SCOPE])
logging.info("Service account email: " + credentials.service_account_email)
token = credentials.get_access_token().access_token
return token
def fetch_access_token(metadata):
"""Fetch access token from metadata URL."""
json_token = fetch_metadata(
metadata, "instance/service-accounts/default/token", True)
token = json.loads(json_token)["access_token"]
return token
def fetch_latest_rollout(management_service, service_name, access_token):
"""Fetch rollouts"""
if access_token is None:
headers = {}
else:
headers = {"Authorization": "Bearer {}".format(access_token)}
client = urllib3.PoolManager(ca_certs=certifi.where())
service_mgmt_url = SERVICE_MGMT_ROLLOUTS_URL_TEMPLATE.format(management_service,
service_name)
try:
response = client.request("GET", service_mgmt_url, headers=headers)
except:
raise FetchError(1, "Failed to fetch rollouts")
status_code = response.status
if status_code != 200:
message_template = ("Fetching rollouts failed "\
"(status code {}, reason {}, url {})")
raise FetchError(1, message_template.format(status_code,
response.reason,
service_mgmt_url))
rollouts = json.loads(response.data)
# No valid rollouts
if rollouts is None or \
'rollouts' not in rollouts or \
len(rollouts["rollouts"]) == 0 or \
"rolloutId" not in rollouts["rollouts"][0] or \
"trafficPercentStrategy" not in rollouts["rollouts"][0] or \
"percentages" not in rollouts["rollouts"][0]["trafficPercentStrategy"]:
message_template = ("Invalid rollouts response (url {}, data {})")
raise FetchError(1, message_template.format(service_mgmt_url,
response.data))
return rollouts["rollouts"][0]
def fetch_service_json(service_mgmt_url, access_token):
"""Fetch service config."""
if access_token is None:
headers = {}
else:
headers = {"Authorization": "Bearer {}".format(access_token)}
client = urllib3.PoolManager(ca_certs=certifi.where())
try:
response = client.request("GET", service_mgmt_url, headers=headers)
except:
raise FetchError(1, "Failed to fetch service config")
status_code = response.status
if status_code != 200:
message_template = "Fetching service config failed (status code {}, reason {}, url {})"
raise FetchError(1, message_template.format(status_code, response.reason, service_mgmt_url))
service_config = json.loads(response.data)
return service_config
def validate_service_config(service_config, expected_service_name,
expected_service_version):
"""Validate service config."""
service_name = service_config.get("name", None)
if not service_name:
raise FetchError(2, "No service name in the service config")
if service_name != expected_service_name:
message_template = "Unexpected service name in service config: {}"
raise FetchError(2, message_template.format(service_name))
service_version = service_config.get("id", None)
if not service_version:
raise FetchError(2, "No service config ID in the service config")
if service_version != expected_service_version:
message_template = "Unexpected service config ID in service config: {}"
raise FetchError(2, message_template.format(service_version))
# WARNING: sandbox migration workaround
control = service_config.get("control", None)
if not control:
raise FetchError(2, "No control section in the service config")
environment = control.get("environment", None)
if not environment:
raise FetchError(2, "Missing control environment")
if environment == "endpoints-servicecontrol.sandbox.googleapis.com":
logging.warning("Replacing sandbox control environment in the service config")
service_config["control"]["environment"] = (
"servicecontrol.googleapis.com")
| 0.929688
| 1
|
myApp/api/urls.py
|
Chrisfufu/freshworksTesting
| 0
|
12781661
|
<filename>myApp/api/urls.py<gh_stars>0
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls.static import static
from .views import *
from django.conf import settings
# the following is the controller APIs.
urlpatterns = [
# feed duck information controllers.
# it has create information, view all information and delete information by id.
url(r'^info/create/$', FeedDuckInfoCreateAPIView.as_view(), name='info-create'),
url(r'^info/all/$', FeedDuckInfoListAPIView.as_view(), name='info-view'),
url(r'^info/(?P<pk>[0-9]+)/$', FeedDuckInfoDeteleAPIView.as_view(), name='info-delete'),
# foods model controllers.
# it has create foods, view all foods and delete foods by id.
url(r'^foods/create/$', FoodCreateAPIView.as_view(), name='food-create'),
url(r'^foods/all/$', FoodListAPIView.as_view(), name='food-view'),
url(r'^foods/(?P<pk>[0-9]+)/$', FoodDeteleAPIView.as_view(), name='food-delete'),
]
| 2.015625
| 2
|
Python3/1302.py
|
rakhi2001/ecom7
| 854
|
12781662
|
<gh_stars>100-1000
__________________________________________________________________________________________________
sample 72 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def deepestLeavesSum(self, root: TreeNode) -> int:
pre = []
queue = [root]
while queue:
pre, queue = queue, [leaf for q in queue for leaf in [q.left, q.right] if leaf]
return sum([p.val for p in pre])
__________________________________________________________________________________________________
sample 76 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def deepestLeavesSum(self, root: TreeNode) -> int:
layer = [root]
res = root.val
while layer:
next_layer = []
next_res = 0
for node in layer:
next_res += node.val
if node.left:
next_layer.append(node.left)
if node.right:
next_layer.append(node.right)
layer = next_layer
res = next_res
return res
__________________________________________________________________________________________________
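# --- Editor's sketch (not part of either submission): a tiny worked example ---
# LeetCode normally supplies TreeNode; a minimal stand-in is defined here so
# the snippet is self-contained. The deepest leaves of the tree below are 4
# and 6, so deepestLeavesSum should return 10.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left, root.right.right = TreeNode(4), TreeNode(6)
print(Solution().deepestLeavesSum(root))  # expected: 10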
| 3.25
| 3
|
splitter.py
|
Kamrulhasan12345/particip-certs-gen-mail
| 0
|
12781663
|
import os
import sys
import PyPDF2
from io import BytesIO
from mailer import mailer
def split(pB):
print(pB)
pTS = PyPDF2.PdfFileReader(pB)
if pTS.flattenedPages is None:
pTS._flatten()
for n, pO in enumerate(pTS.flattenedPages):
sP = PyPDF2.PdfFileWriter()
sP.addPage(pO)
b = BytesIO()
sP.write(b)
mailer(b.getvalue(), n)
with open('index.pdf', 'rb') as file:
split(file)
| 2.984375
| 3
|
src/mmgroup/tests/test_axes/beautify_axes.py
|
Martin-Seysen/mmgroup
| 14
|
12781664
|
<filename>src/mmgroup/tests/test_axes/beautify_axes.py
r"""Investigate certain orbits of 2A axes of the monster group
The functions in this module are used to find the orbits of 2A axes
of the subgroup :math:`G_{x0}` (of structure
:math:`2^{1+24}.\mbox{Co}_1`) of the monster group. According to
|Nor98| there are 12 such orbits.
"""
import sys
import os
import time
from math import floor, ceil
from random import randint, shuffle, sample
from collections import defaultdict, OrderedDict
import numpy as np
from operator import __or__
from functools import reduce
sys.path.append(r"C:\Data\projects\MonsterGit\src")
from mmgroup import MM0, MMV, MMVector, Cocode, XLeech2, Parity, PLoop
from mmgroup.mm_crt_space import MMVectorCRT
from mmgroup import mat24, GcVector, AutPL, Parity, GCode
from mmgroup.clifford12 import bitmatrix64_solve_equation
#################################################################
# Transitive closure and identifying block of matrix A
#################################################################
def transitive_closure(m):
"""Return transitive closure of symmetric square matrix ``m``
The function returns a 0-1 matrix ``c`` of the same size as
matrix ``m`` with ``c[i,j] = 1`` iff there is a path of nonzero
entries
``m[k[0],k[1]], m[k[1],k[2]], ... , m[k[l-1],k[l]],``
with ``k[0] = i, k[l] = j``. Diagonal entries of ``c`` are set
to 1.
"""
m = np.array(m != 0, dtype = np.uint32)
assert len(m.shape) == 2 and m.shape[0] == m.shape[1]
m_old = np.eye(m.shape[0], dtype = np.uint32)
m = m | m_old
while (m != m_old).any():
m_old = m
m = (m @ m) != 0
return np.array(m, dtype = np.uint32)
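# Worked example (editor's note, not in the original): for the symmetric
# matrix
#     m = [[0, 1, 0],
#          [1, 0, 0],
#          [0, 0, 0]]
# transitive_closure(m) returns
#     [[1, 1, 0],
#      [1, 1, 0],
#      [0, 0, 1]]
# i.e. indices 0 and 1 form one connected block and index 2 stays isolated.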
def bit_count(x):
return bin(x).count("1")
def block_sort_key(x):
return -bit_count(x), x & -x, x
def blocks(m):
"""Return the list of blocks of a symmetric square matrix ``m``.
Here a block is a set of indices of matrix ``m`` which are in the
same equivalence class in the transitive closure of ``m``.
Each block in the returned list is encoded as an integer with
bit ``i`` of the integer set iff index ``i`` is in that block.
Blocks are ordered by size, with larger blocks occurring first.
"""
m = np.array(m != 0, dtype = np.uint32)
m = np.array(m | m.T, dtype = np.uint32)
m = transitive_closure(m)
s = set()
for row in m:
d = int(sum(x << i for i, x in enumerate(row)))
s.add(d)
data = list(s)
return sorted(data, key = block_sort_key)
def adjacent_blocks(a):
"""Return adjacent blocks of a symmetric square matrix ``m``.
If matrix ``m`` may be split into ``n`` adjacent blocks then
the function returns a list ``i`` of length ``n+1`` such that
all nonzero entries of ``m`` occur in one of the submatrices
``m[i[0]:i[1], i[0]:i[1]], ... , m[i[n-1]:i[n], i[n-1]:i[n]]``.
Then ``i[0] = 0`` and ``i[n]`` is equal to the size of the
square matrix ``m``.
"""
bl = blocks(a)
adjacent_blocks = []
while len(bl):
x = max(bl)
adjacent_blocks.append(x.bit_length())
xmin = x & -x
for i, y in enumerate(bl):
if y >= xmin:
del bl[i]
adjacent_blocks.append(0)
return list(reversed(adjacent_blocks))
#################################################################
# Eigenvalues of matrix A
#################################################################
def purge_diag(diag, power = 1):
EPS = 1.0e-8
data = {}
non_ints = []
for x0 in diag:
x = x0**power
if abs(x - round(x)) < EPS:
i = int(round(x))
if i in data:
data[i] += 1
else:
data[i] = 1
else:
done = False
for d in non_ints:
if abs(d - x) < EPS:
data[d] += 1
done = True
if not done:
data[x] = 1
non_ints.append(x)
return data
def format_eigen_values(a):
eigen = purge_diag(np.linalg.eigvals(a))
eigenv = sorted(eigen.items(), key = lambda x: x[0])
data = [("%d^%d" if isinstance(k, int) else "%.2f^%d") %
(k,v) for k, v in eigenv ]
return "(" + ", ".join(data) + ")"
def block_eigenvalues(a):
ad = adjacent_blocks(a)
if len(ad) > 24:
return format_eigen_values(a)
eigen = []
exp = []
for i in range(len(ad) - 1):
m = a[ad[i]:ad[i+1], ad[i]:ad[i+1]]
str_eigen = format_eigen_values(m)
if len(eigen) and eigen[-1] == str_eigen:
exp[-1] += 1
else:
eigen.append(str_eigen)
exp.append(1)
for i, s in enumerate(eigen):
if exp[i] > 1:
eigen[i] += "^" + str(exp[i])
return ", ".join(eigen)
#################################################################
# Dealing with one large block (of size >= 5)
#################################################################
def beautify_large_block(bl):
v = GcVector(bl)
if len(v) < 3:
return MM0()
if len(v) <= 5:
b = v.bit_list
pi = AutPL(0, zip(b, range(len(v))), 0)
return MM0('p', pi)
try:
syn = v.syndrome()
except:
return MM0()
single = (v & syn).bit_list
v &= ~syn
if len(v) > 8:
return MM0()
v_list = v.bit_list
if len(single):
r_list = [7-x for x in range(len(v_list))]
else:
r_list = list(range(len(v_list)))
if len(single):
v_list += single
r_list.append(8)
for i in range(2000):
shuffle(v_list)
try:
pi = AutPL(0, zip(v_list, r_list), 0)
return MM0('p', pi)
except:
pass
return MM0()
def solve_gcode_diag(l):
"""Solve cocode equation
Here ``l`` is a list of tuples ``(i0, i1, k)``. For an
unknown Golay code word ``x``, each tuple means an equation
``<x, Cocode([i0,i1])> = k``, where ``<.,.>`` is the scalar
product. If a solution ``x`` exists then the function
returns ``x`` as an instance of class |PLoop|.
"""
a = np.zeros(len(l), dtype = np.uint64)
for i, (i0, i1, k) in enumerate(l):
a[i] = Cocode([i0,i1]).ord + ((int(k) & 1) << 12)
v = bitmatrix64_solve_equation(a, len(l), 12)
if v < 0:
err = "Off-diagonal matrix equation has no solution"
raise ValueError(err)
result = PLoop(v)
for i0, i1, k in l:
c = Cocode([i0,i1])
check = hex(result.ord), hex(c.ord), k
assert result & c == Parity(int(k)), check
return result
def try_make_blocks_positive(A, all_blocks = True, first_row_only = False):
bl = 0xffffff if all_blocks else blocks(A)[0]
bl_list = [x for x in range(24) if (1 << x) & bl]
equations = []
for i in range(24):
for j in range(24):
if i == j or A[i,j] == 0:
continue
if i in bl_list and j in bl_list:
s = A[i,j] < 0
if not first_row_only or i == bl_list[0]:
equations.append((i, j, s))
#else:
# equations.append((i, j, 0))
y = solve_gcode_diag(equations)
return MM0('y', y)
def make_blocks_positive(A):
try:
return try_make_blocks_positive(A, True, False)
except:
try:
return try_make_blocks_positive(A, False, False)
except:
try:
return try_make_blocks_positive(A, False, True)
except:
return MM0(0)
#################################################################
# Dealing with one largest block of size 3
#################################################################
def sort_single_size3_block(A):
bl = blocks(A)
if mat24.bw24(bl[0]) != 3 or mat24.bw24(bl[1]) != 1:
return MM0()
blist = [x for x in range(24) if bl[0] & (1 << x)]
dlist = sorted([ (-A[x,x], x) for x in blist])
src = [x[1] for x in dlist]
return MM0('p', AutPL(0, zip(src, [0,1,2]), 0))
def beautify_signs_size3(A):
bl = blocks(A)
if mat24.bw24(bl[0]) != 3:
return MM0()
sign = A[0,1] * A[0,2] * A[1,2]
if sign > 0:
l = [(i, j, A[i,j] < 0) for i,j in [(0,1),(0,2),(0,3)]]
y = solve_gcode_diag(l)
if y:
return MM0('y', y)
return MM0()
def beautify_signs_size2_3(A):
bl = blocks(A)
if not 2 <= mat24.bw24(bl[0]) <= 3:
return MM0()
data2, data3 = [], []
for b in bl:
blist = [x for x in range(24) if b & (1 << x)]
if len(blist) == 2:
i0, i1 = blist
data2.append((i0, i1, A[i0,i1] < 0))
if len(blist) == 3:
i0, i1, i2 = blist
sign = A[i0,i1] * A[i0,i2] * A[i1,i2]
for j0, j1 in [(i0,i1), (i0,i2), (i1,i2)]:
s = sign * A[j0,j1] < 0
data3.append((j0, j1, s))
try:
y = solve_gcode_diag(data2 + data3)
return MM0('y', y)
except:
try:
data2n = [(i0,i1,1-x) for i0,i1,x in data2]
y = solve_gcode_diag(data2n + data3)
return MM0('y', y)
except:
raise ValueError("Solve")
pass
return MM0()
def beautify_block_size3(A):
bl = blocks(A)
if mat24.bw24(bl[0]) != 3:
return MM0()
blist = [x for x in range(24) if bl[0] & (1 << x)]
assert blist == [0,1,2], (blist, A)
d = defaultdict(list)
for b in bl:
if b & (b-1) == 0:
index = b.bit_length() - 1
d[A[index, index]].append(index)
for lst in d.values():
if len(lst) < 8:
try:
#print(lst)
gc = GCode(blist + lst)
#print("GC =", gc.bit_list)
except:
#print("WTF")
raise
len_isect = len(set(blist) & set(gc.bit_list))
#print("isect", len_isect)
if len_isect == 3:
src = gc.bit_list
dest = list(range(8))
return MM0(AutPL(0, zip(src, dest), 0))
if len_isect == 1:
src0 = gc.bit_list[1:]
src1 = gc.bit_list[:1]
src2 = [x for x in blist if x not in src1]
dest = list(range(10))
for i in range(1000):
shuffle(src0)
shuffle(src2)
try:
pi = AutPL(0, zip(src0+src1+src2, dest))
return MM0(pi)
except:
pass
return MM0()
#################################################################
# Dealing with one largest block of size 2
#################################################################
def beautify_block_size2(A):
bl = blocks(A)
if mat24.bw24(bl[0]) != 2:
return MM0()
blist = [x for x in range(24) if bl[0] & (1 << x)]
#assert blist == [0,1], blist
d = defaultdict(list)
for b in bl:
if b & (b-1) == 0:
index = b.bit_length() - 1
d[A[index, index]].append(index)
for lst in d.values():
if len(lst) <= 8:
try:
#print(lst)
gc = GCode(blist + lst).bit_list
#print("GC =", gc)
except:
return MM0()
if set(blist).issubset(gc) and len(gc) == 8:
src0 = [x for x in gc if x in blist]
src1 = [x for x in gc if x not in blist]
dest = list(range(8))
for i in range(1000):
shuffle(src1)
try:
pi = AutPL(0, zip(src0+src1, dest), 0)
return MM0(pi)
except:
pass
if len(set(blist) & set(gc)) == 0 and len(gc) == 8:
src0 = gc
src1 = blist
dest = list(range(10))
for i in range(10000):
shuffle(src0)
try:
pi = AutPL(0, zip(src0+src1, dest), 0)
return MM0(pi)
except:
pass
return MM0()
#################################################################
# Dealing with a diagonal matrix
#################################################################
def beautify_diagonal_matrix(A):
bl = blocks(A)
if mat24.bw24(bl[0]) != 1:
return MM0()
d = defaultdict(list)
for b in bl:
if b & (b-1) == 0:
index = b.bit_length() - 1
d[A[index, index]].append(index)
singleton = None
for lst in d.values():
if len(lst) == 1: singleton = lst
for lst in d.values():
dest = list(range(8))
if len(lst) == 8:
#print(lst)
for i in range(10000):
shuffle(lst)
try:
pi = AutPL(0, zip(lst, dest), 0)
#print("yeah")
return MM0(pi)
except:
continue
elif len(lst) == 11 and singleton:
print("dodecad")
DOD = 0xeee111
dest = [i for i in range(24) if (1 << i) & DOD]
src = singleton + lst
#print(dest)
#print(src)
#return MM0()
pi = mat24.perm_from_dodecads(src, dest)
return MM0('p', pi)
elif singleton:
pi = AutPL(0, zip(singleton, [0]), 0)
return MM0('p', pi)
return MM0()
#################################################################
# Beautify signs in the special case 10B
#################################################################
def find_five_cycle(A):
"""Try to find a cycle of positive off-diagonal elemnts in submatrix
Let ``M `` be the subatrix ``A[1:6, 1:6]`` of the symmetric matrix
``A``. The function entries to find a cycle of length 5 such that
``A[i0, i1] > 0`` for any adjacent entries ``i0, i1`` of the cycle.
If such a cycle exists then the function returns that cycle as a list
of integers between 1 and 6, correspondig to the indices of ``A``.
If no such cycle exist the the function returns the empty list.
The function checks the following condition (and returns ``[]`` if
any of them is not satisfied):
* ``A[:6, 6:]`` must be zero.
* The diagonal of ``A[:6]`` must be nonnegative.
* A[0,:] must be nonnegative.
* ``A[i, 1:6], 1 <= i < 6`` must have precisely two (off-diagonal)
negative entries.
"""
if min(A[0]) < 0:
return []
for i in range(1,6):
x = sum(x < 0 for x in A[i])
if (x != 2) or A[i,i] < 0:
return []
if max(abs(A[:6, 6:].ravel())) > 0:
return []
l = [1]
for i in range(5):
for j in range(1,6):
if A[j, l[-1]] > 0 and j not in l:
l.append(j)
break
if len(l) < 5 or A[l[0],l[-1]] < 0:
return []
return l
def beautify_case_10B(A):
bl = blocks(A)
if bl[:3] != [63, 192, 256]:
return MM0()
c = find_five_cycle(A)
if len(c) != 5:
return MM0()
for j in [0, 1]:
pi_src = [0] + c + [6+j, 7-j]
pi_dest = list(range(8))
try:
pi = AutPL(0, zip(pi_src, pi_dest), False)
except ValueError:
continue
return MM0('p', pi)
return MM0()
#################################################################
# Enumerating the 2A axes
#################################################################
def get_A(w):
Afloat = 128 * w["A"]
A = np.array(Afloat, dtype = np.int32)
assert (A == Afloat).all()
return A
def beautify(v, verbose = 0):
g = MM0()
A = get_A(v)
bl = blocks(A)
if verbose:
print("Blocksizes", [bit_count(x) for x in bl])
bl_size = bit_count(bl[0])
if max(bit_count(b) for b in bl) > 3:
if bl_size > 5 and verbose:
print("vector type:", mat24.vect_type(bl[0]))
g1 = make_blocks_positive(A)
v *= g1
g *= g1
A = get_A(v)
g2 = beautify_large_block(bl[0])
v *= g2
g *= g2
A = get_A(v)
g3 = beautify_case_10B(A)
v *= g3
g *= g3
return g.reduce(), get_A(v)
elif bl_size == 3:
g0 = beautify_large_block(bl[0])
v *= g0
g *= g0
A = get_A(v)
g0 = sort_single_size3_block(A)
v *= g0
g *= g0
A = get_A(v)
g1 = beautify_signs_size3(A)
v *= g1
g *= g1
A = get_A(v)
g2 = beautify_block_size3(A)
v *= g2
g *= g2
A = get_A(v)
g3 = beautify_signs_size2_3(A)
v *= g3
g *= g3
return g.reduce(), get_A(v)
elif bl_size == 2:
g1 = beautify_block_size2(A)
v *= g1
A = get_A(v)
g2 = beautify_signs_size2_3(A)
v *= g2
return (g1 * g2).reduce(), get_A(v)
elif bl_size == 1:
g1 = beautify_diagonal_matrix(A)
v *= g1
return (g1).reduce(), get_A(v)
else:
raise ValueError("Beautifying of matrix A failed")
def compute_beautifiers(g_strings, class_names = None, verbose = 0):
v = MMVectorCRT(20, "I", 3, 2)
g_data = []
for i, g in enumerate(g_strings):
if verbose:
print("")
if class_names: print("class", class_names[i])
else: print("case", i)
if MM0(g) != MM0():
g1, A = beautify(v * MM0(g), verbose)
else:
g1, A = MM0(), get_A(v)
g_data.append(g1.raw_str())
if verbose:
print("g =", g1)
print("adjacent blocks:", adjacent_blocks(A))
print(A)
return g_data
if __name__ == "__main__":
from mmgroup.tests.test_axes.get_sample_axes import import_sample_axes
axes = import_sample_axes()
data = compute_beautifiers(axes.g_strings, axes.g_classes, verbose = 1)
#print(data)
| 2.9375
| 3
|
constants.py
|
alejandrobernardis/heroku-tornado
| 0
|
12781665
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Asumi Kamikaze Inc.
# Licensed under the MIT License.
# Author: <NAME>
# Email: alejandro (dot) bernardis (at) asumikamikaze (dot) com
# Created: 29/Jan/2015 18:06
LOCALE = 'en_US'
DOMAIN = 'localhost'
SHORT_DOMAIN = 'short.local'
LOCAL_DOMAIN = 'localhost.local'
API_DOMAIN = 'api.local'
IP = '127.0.0.1'
SHORT_IP = IP
LOCAL_IP = IP
API_IP = IP
PORT = 8000
LOCAL_PORT = PORT
SHORT_PORT = PORT
API_PORT = 9000
LOGIN_URL = '/auth/login'
SECONDS_PER_MINUTE = 60
SECONDS_PER_HOUR = 60 * 60
SECONDS_PER_DAY = 60 * 60 * 24
SECONDS_PER_WEEK = SECONDS_PER_DAY * 7
SECONDS_PER_MONTH = SECONDS_PER_WEEK * 4
ASYNC_WORKERS = 8
ASYNC_HTTP_CLIENT = 'tornado.curl_httpclient.CurlAsyncHTTPClient'
PREFORK_PROCESS = -1
SESSION_DAYS = 30
SESSION_SECONDS = SECONDS_PER_DAY * SESSION_DAYS
SESSION_ID = 'sid'
SESSION_COOKIE_ID = 'sid'
SESSION_HEADER_ID = 'X-Session-ID'
DATABASES_KEY = 'db'
KEYVALUES_KEY = 'kv'
OBJECTS_KEY = 'ob'
DEFAULT_KEY = 'default'
ROOT_PATH = './app/data'
CA_PATH = './app/data/etc/CA'
SECRETS_PATH = './app/data/etc/secrets'
LOCALE_PATH = './app/data/var/locale'
FILEOBJECT_PATH = './app/data/var/objects'
PUBLIC_PATH = './app/data/var/public'
STATIC_PATH = './app/data/var/public/static'
TEMPLATE_PATH = './app/data/var/template'
| 1.453125
| 1
|
src/go_bot/nlg/nlg_manager.py
|
ai4eu/ai4eu-chatbot
| 0
|
12781666
|
<filename>src/go_bot/nlg/nlg_manager.py
import re
from logging import getLogger
from pathlib import Path
from typing import Union, List
from deeppavlov.core.commands.utils import expand_path
import deeppavlov.models.go_bot.nlg.templates.templates as go_bot_templates
from deeppavlov.core.common.registry import register
from ..dto.dataset_features import BatchDialoguesFeatures
from .nlg_manager_interface import NLGManagerInterface
from ..policy.dto.policy_prediction import PolicyPrediction
from ..search_api.dto.search_item_in_focus import SearchItemInFocus
from ..tracker.chatbot_mode import ChatMode
from ..tracker.dialogue_state_tracker import DialogueStateTracker
import numpy as np
from datetime import datetime
log = getLogger(__name__)
# todo add the ability to configure nlg loglevel in config (now the setting is shared across all the GO-bot)
# todo add each method input-output logging when proper loglevel level specified
@register("gobot_nlg_manager")
class NLGManager(NLGManagerInterface):
"""
NLGManager is a unit of the go-bot pipeline that handles the generation of text
when the pattern is chosen among the known patterns and the named-entities-values-like knowledge is provided.
(the whole go-bot pipeline is as follows: NLU, dialogue-state-tracking&policy-NN, NLG)
Parameters:
template_path: file with mapping between actions and text templates
for response generation.
template_type: type of used response templates in string format.
ai4eu_web_search_api_call_action: label of the action that corresponds to ai4eu search api call
(it must be present in your ``template_path`` file), during interaction
it will be used to get the appropriate results from the web resources of the search API
ai4eu_asset_search_api_call_action: label of the action that corresponds to ai4eu search api call
(it must be present in your ``template_path`` file), during interaction
it will be used to get the appropriate assets from the ai-catalogue of the search API
ai4eu_qa_api_call_action: label of the action that corresponds to ai4eu QA api call
(it must be present in your ``template_path`` file), during interaction
it will be used to get the relevant answer from the QA module (either a domain specific or open domain answer)
debug: whether to display debug output.
"""
# static members used for human readable dates
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
def __init__(self, template_path: Union[str, Path], template_type: str,
ai4eu_web_search_api_call_action: str,
ai4eu_asset_search_api_call_action: str,
ai4eu_qa_api_call_action: str,
debug=False):
self.debug = debug
if self.debug:
log.debug(f"BEFORE {self.__class__.__name__} init(): "
f"template_path={template_path}, template_type={template_type}, "
f"ai4eu_web_search_api_call_action={ai4eu_web_search_api_call_action}, debug={debug}, "
f"ai4eu_asset_search_api_call_action={ai4eu_asset_search_api_call_action}, debug={debug}, "
f"ai4eu_qa_api_call_action={ai4eu_qa_api_call_action}, debug={debug}")
template_path = expand_path(template_path)
template_type = getattr(go_bot_templates, template_type)
self.templates = go_bot_templates.Templates(template_type).load(template_path)
# These actions are API related and are given in the gobot config json file
self._ai4eu_web_search_api_call_id = -1
if ai4eu_web_search_api_call_action is not None:
self._ai4eu_web_search_api_call_id = self.templates.actions.index(ai4eu_web_search_api_call_action)
self._ai4eu_asset_search_api_call_id = -1
if ai4eu_asset_search_api_call_action is not None:
self._ai4eu_asset_search_api_call_id = self.templates.actions.index(ai4eu_asset_search_api_call_action)
self._ai4eu_qa_api_call_id = -1
if ai4eu_qa_api_call_action is not None:
self._ai4eu_qa_api_call_id = self.templates.actions.index(ai4eu_qa_api_call_action)
if self.debug:
log.debug(f"AFTER {self.__class__.__name__} init(): "
f"template_path={template_path}, template_type={template_type}, "
f"ai4eu_web_search_api_call_action={ai4eu_web_search_api_call_action}, debug={debug}, "
f"ai4eu_asset_search_api_call_action={ai4eu_asset_search_api_call_action}, debug={debug}, "
f"ai4eu_qa_api_call_action={ai4eu_qa_api_call_action}, debug={debug}")
def get_action_id(self, action_text: str) -> int:
"""
Looks up for an ID relevant to the passed action text in the list of known actions and their ids.
Args:
action_text: the text for which an ID needs to be returned.
Returns:
an ID corresponding to the passed action text
"""
return self.templates.actions.index(action_text) # todo unhandled exception when not found
def get_ai4eu_web_search_api_call_action_id(self) -> int:
"""
Returns:
an ID corresponding to the ai4eu web search api call action
"""
return self._ai4eu_web_search_api_call_id
def get_ai4eu_asset_search_api_call_action_id(self) -> int:
"""
Returns:
an ID corresponding to the ai4eu asset search api call action
"""
return self._ai4eu_asset_search_api_call_id
def get_ai4eu_qa_api_call_action_id(self) -> int:
"""
Returns:
an ID corresponding to the ai4eu faq api call action
"""
return self._ai4eu_qa_api_call_id
def decode_response(self,
utterance_batch_features: BatchDialoguesFeatures,
policy_prediction: PolicyPrediction,
dialogue_state_tracker,
training=False) -> str:
# todo: docstring
action_text = self._generate_slotfilled_text_for_action(policy_prediction,
dialogue_state_tracker,
training)
# in api calls replace unknown slots to "dontcare"
# This is only needed for the asset search call that uses the slots
# Hand-written actions and logic for APIs / reset / next object
# TODO: Probably no need for this (REMOVE IT)
#if policy_prediction.predicted_action_ix == self._ai4eu_asset_search_api_call_id:
# action_text = re.sub("#([A-Za-z]+)", "dontcare", action_text).lower()
return action_text
def _generate_slotfilled_text_for_action(self,
policy_prediction: PolicyPrediction,
dialogue_state_tracker: DialogueStateTracker,
training=False) -> str:
"""
Generate text for the predicted speech action using the pattern provided for the action.
We need the state tracker to get the slotfilled state, which provides the values to fill into the patterns,
and to get the current focus
Args:
policy_prediction: related info for policy prediction
dialogue_state_tracker: holds the current state including the slots and current search item
Returns:
the text generated for the passed action id and slot values.
"""
# current action id
action_id = policy_prediction.predicted_action_ix
# We have some templates that we create on the fly (e.g., API calls, focus info, date and time, etc.)
action = self.get_action(action_id)
# Update current searchAPI result slots / Just return the current state slots
# * calculate the slotfilled state:
# for each slot that is relevant to dialogue we fill this slot value if possible
# unfortunately we can not make an inverse query and get the slots for a specific result
# currently we are using AND semantics
slots = dialogue_state_tracker.fill_current_state_with_searchAPI_results_slots_values()
# We also need the current search item
item_in_focus = dialogue_state_tracker.get_current_search_item()
# Check the action and create responses appropriately
# These actions are specific for our chatbot
# If we are training we are just using the dummy template responses for things that are dynamic
# Else we create the corresponding responses
if not training:
# Respond with current debugging vectors
if action == 'debug':
text = self.tell_debug(policy_prediction, dialogue_state_tracker)
# tell the url of the resource
elif action == 'tell_resource_url':
text = self.tell_resource_url(item_in_focus)
# tell the title of the resource
elif action == 'tell_resource_title':
text = self.tell_resource_title(item_in_focus)
# tell the content of the resource
elif action == 'tell_resource_content':
text = self.tell_resource_content(item_in_focus)
# tell the score of the resource
elif action == 'tell_resource_score':
text = self.tell_resource_score(item_in_focus)
# tell the summary of the resource
elif action == 'tell_resource_summary':
text = self.tell_resource_summary(item_in_focus)
# tell the keywords of the resource
elif action == 'tell_resource_keywords':
text = self.tell_resource_keywords(item_in_focus)
# tell the number of objects in focus
elif action == 'tell_num_of_objects_in_focus':
# get the current items of the focus
items = dialogue_state_tracker.curr_search_items
text = self.tell_objects_in_focus(items)
# describe item in focus
elif action == 'tell_item_in_focus':
text = self.describe_item(item_in_focus)
# describe next item in focus
elif action == 'tell_next_in_focus':
# we change the item in focus in the state to the next one
item_in_focus = dialogue_state_tracker.get_next_search_item()
text = self.describe_item(item_in_focus)
# describe first item in focus when we have the tell_first_in_focus_action
elif action == 'tell_first_in_focus':
# we change the item in focus to the first one
item_in_focus = dialogue_state_tracker.get_first_search_item()
text = self.describe_item(item_in_focus)
# describe first item in focus when we have a search API call
# if first item is none then the current focus is empty
elif action == 'ai4eu_web_search_api_call'\
or action == 'ai4eu_asset_search_api_call':
# we change the item in focus to the first one
item_in_focus = dialogue_state_tracker.get_first_search_item()
if not item_in_focus:
text = 'There are no results. Please try to rephrase!'
else:
text = self.describe_item(item_in_focus)
# describe second item in focus
elif action == 'tell_second_in_focus':
# we change the item in focus to the second one
item_in_focus = dialogue_state_tracker.get_second_search_item()
text = self.describe_item(item_in_focus)
# describe previous item in focus
elif action == 'tell_previous_in_focus':
# we change the item in focus to the previous one
item_in_focus = dialogue_state_tracker.get_previous_search_item()
text = self.describe_item(item_in_focus)
elif action == 'rephrase':
text = self.templates.templates[action_id].generate_text(slots)
# Respond with current UTC time
elif action == 'tell_time':
return self.tell_time()
# Respond with current date
elif action == 'tell_date':
return self.tell_date()
elif action == 'clear_slots':
dialogue_state_tracker.clear_slots()
text = self.templates.templates[action_id].generate_text(slots)
else:
# General case - Just use the template
text = self.templates.templates[action_id].generate_text(slots)
else:
# General case - Just use the template
text = self.templates.templates[action_id].generate_text(slots)
print('==> AI4EU Predicted response: ', text)
return text
# Provide debugging state as response
# We have to report the intent, the slots, the current action and the previous action with their probabilities
# Along with the current focus state
def tell_debug(self, policy_prediction: PolicyPrediction, dialogue_state_tracker: DialogueStateTracker):
text = ''
mode = dialogue_state_tracker.mode
if mode is ChatMode.QA:
state = 'Mode: QA'
elif mode is ChatMode.WEB:
state = 'Mode: WEB'
elif mode is ChatMode.ASSET:
state = 'Mode: ASSET'
else:
state = 'Mode: DEFAULT'
text += state + '\n'
### NLU DATA - predicted intent, probability, and slots
nlu_response = policy_prediction.get_utterance_features()
intents = nlu_response.intents
# Get the max probability
max_prob = intents[np.argmax(intents)]
intent = nlu_response.intent
nlu = 'Predicted Intent: ' + str(intent) + ' with probability ' + str(max_prob) + '\n'
# Also add slot-values from NLU
slots = nlu_response.slots
nlu += 'Slots: ' + str(slots) + '\n'
nlu += '\n'
text += nlu
### CURRENT ACTION
# Print the predicted action
action = 'Predicted action: ' + self.get_action(policy_prediction.predicted_action_ix) + '\n'
action += 'Predicted action probability: ' + str(policy_prediction.probs[policy_prediction.predicted_action_ix]) + '\n'
action += '\n'
text += action
### REGARDING THE FOCUS
current_focus_len = 0
if dialogue_state_tracker.curr_search_items:
current_focus_len = len(dialogue_state_tracker.curr_search_items)
# Regarding the current item in focus
current_item_title = 'Empty'
if dialogue_state_tracker.curr_search_item:
# Get the title of the search item in focus
current_item_title = dialogue_state_tracker.curr_search_item.get_title()
focus = 'Current focus length: ' + str(current_focus_len) + '\n'
focus += 'Current item title: ' + current_item_title + '\n'
focus += 'Current item index: ' + str(dialogue_state_tracker.curr_search_item_index) + '\n'
focus += '\n'
text += focus
return text
'''
Tells the title of a resource
'''
def tell_resource_title(self, item: SearchItemInFocus) -> str:
response = None
if not item:
response = 'There is no item in the current focus!'
elif not item.get_title():
response = 'This resource has no title'
else:
response = 'The title of the resource is ' + item.get_title()
return response
'''
Tells the url of a resource
'''
def tell_resource_url(self, item: SearchItemInFocus) -> str:
response = None
if not item:
response = 'There is no item in the current focus!'
elif not item.get_url():
response = 'This resource has no url'
else:
# we need to offer clickable urls
response = item.get_url()
return response
'''
Tells the content of a resource
'''
def tell_resource_content(self, item: SearchItemInFocus) -> str:
response = None
if not item:
response = 'There is no item in the current focus'
elif not item.get_content():
response = 'This resource has empty content'
else:
# we need to offer clickable urls
response = item.get_content()
return response
'''
Tells the score of a resource
'''
def tell_resource_score(self, item: SearchItemInFocus) -> str:
response = None
if not item:
response = 'There is no item in the current focus'
elif not item.get_score():
response = 'The API returned no score for this resource'
else:
response = 'The score of this resource is ' + item.get_score()
return response
'''
Tells the summary of a resource
'''
def tell_resource_summary(self, item: SearchItemInFocus) -> str:
response = None
if not item:
response = 'There is no item in the current focus!'
elif not item.get_score():
response = 'There is no summary for this resource!'
else:
response = item.get_summary()
return response
'''
Tells the keywords of a resource
'''
def tell_resource_keywords(self, item: SearchItemInFocus) -> str:
response = None
if not item:
response = 'There is no item in the current focus!'
elif not item.get_keywords():
response = 'There are no keywords associated with this resource'
else:
keywords = item.get_keywords()
print(' The keywords are ', keywords)
keywords_str = ' '.join(map(str, keywords))
response = 'The relevant keywords, starting from the most important one are : ' + keywords_str
return response
'''
Tells the number of items in focus of a resource
'''
    def tell_objects_in_focus(self, items: List[SearchItemInFocus]) -> str:
response = None
if not items:
response = 'There are no items in the current focus!'
else:
            response = 'There are ' + str(len(items)) + ' items in the current focus'
return response
'''
Describes an item
'''
def describe_item(self, item: SearchItemInFocus) -> str:
response = None
if not item:
response = 'There is no item in the current focus!'
else:
item.print()
response = 'You might be interested in \"' + item.get_title() + '\". Check it at: ' + item.get_url()
return response
# Tell the time
def tell_time(self):
now = datetime.utcnow()
text = 'The time is ' + now.strftime('%H:%M:%S') + ' UTC'
return text
# Tell the date
def tell_date(self):
now = datetime.now()
text = 'Today is ' + self.days[now.weekday()] + now.strftime(', %d ') + self.months[
now.month - 1] + now.strftime(' %Y')
return text
def num_of_known_actions(self) -> int:
"""
Returns:
the number of actions known to the NLG module
"""
return len(self.templates)
def known_actions(self) -> List[str]:
"""
Returns:
the list of actions known to the NLG module
"""
return self.templates.actions
def get_action(self, action_id: int) -> str:
"""
Returns:
the action with id known to the NLG module
"""
return self.templates.actions[action_id]
| 2.046875
| 2
|
aoj/ALDS1_7_C_Tree_Walk.py
|
negiandleek/til
| 0
|
12781667
|
<reponame>negiandleek/til
# coding: utf-8
# Your code here!
n = int(input().rstrip())
nodes = [None] * n
for i in range(n):
index, left, right = list(map(int, input().rstrip().split()))
nodes[index] = [left, right, -1]
for i in range(n):
    left, right, _ = nodes[i]
    if left != -1:
        nodes[left][2] = i
    if right != -1:
        nodes[right][2] = i
root = 0
for i in range(n):
if nodes[i][2] == -1:
root = i
break
preNodes = []
inOrderNodes = []
postOrderNodes = []
def preOrder(index):
global preNodes
left, right, _ = nodes[index]
preNodes.append(str(index))
if left != -1:
preOrder(left)
if right != -1:
preOrder(right)
def InOrder(index):
global inOrderNodes
left, right, _ = nodes[index]
if left != -1:
InOrder(left)
inOrderNodes.append(str(index))
if right != -1:
InOrder(right)
def PostOrder(index):
global postOrderNodes
left, right, _ = nodes[index]
if left != -1:
PostOrder(left)
if right != -1:
PostOrder(right)
postOrderNodes.append(str(index))
preOrder(root)
InOrder(root)
PostOrder(root)
print("Preorder")
print(" " + " ".join(preNodes))
print("Inorder")
print(" " + " ".join(inOrderNodes))
print("Postorder")
print(" " + " ".join(postOrderNodes))
| 3.546875
| 4
|
examples/visualization.py
|
zStupan/NiaARM
| 0
|
12781668
|
from matplotlib import pyplot as plt
from niaarm import Dataset, RuleList, get_rules
from niaarm.visualize import hill_slopes
dataset = Dataset('datasets/Abalone.csv')
metrics = ('support', 'confidence')
rules, _ = get_rules(dataset, 'DifferentialEvolution', metrics, max_evals=1000, seed=1234)
some_rule = rules[150]
print(some_rule)
fig, ax = hill_slopes(some_rule, dataset.transactions)
plt.show()
| 2.671875
| 3
|
jp.atcoder/ddcc2020-qual/ddcc2020_qual_b/8576141.py
|
kagemeka/atcoder-submissions
| 1
|
12781669
|
<reponame>kagemeka/atcoder-submissions<filename>jp.atcoder/ddcc2020-qual/ddcc2020_qual_b/8576141.py<gh_stars>1-10
# 2019-11-23 21:00:35(JST)
import itertools
import sys
def main():
n, *A = map(int, sys.stdin.read().split())
S = sum(A)
l = 0
for i in range(n):
l += A[i]
if l >= S // 2:
if l == S / 2:
ans = 0
break
else:
if l == S // 2 - 1:
ans = 1
break
else:
if l - A[i] >= S - l:
ans = A[i] + (S - l) - (l - A[i])
else:
ans = l - (S - l)
break
print(ans)
if __name__ == '__main__':
main()
| 2.359375
| 2
|
jinja2_fsloader/__init__.py
|
althonos/jinja2-fsloader
| 8
|
12781670
|
# coding: utf-8
"""jinja2_fsloader - A Jinja2 template loader using PyFilesystem2.
"""
import sys
import fs
import fs.path
import fs.errors
import jinja2
import pkg_resources
__author__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
__version__ = pkg_resources.resource_string(__name__, "_version.txt").decode('utf-8').strip()
class FSLoader(jinja2.BaseLoader):
"""Loads template from a PyFilesystem2.
The loader is created with a :class:`~fs.base.FS` instance, or a FS URL
which is used to search for the templates::
>>> zip_loader = FSLoader("zip:///path/to/my/templates.zip")
>>> ftp_loader = FSLoader(fs.ftpfs.FTPFS("server.net"))
>>> dir_loader = FSLoader("./templates/", fs_filter=["*.html"])
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else. The `use_syspath`
    parameter can be opted in to provide Jinja2 with the system path to the
    template if it exists; otherwise only the internal filesystem path is returned.
The optional `fs_filter` parameter is a list of wildcard patterns like
``['*.html', '*.tpl']``. If present, only the matching files in the
filesystem will be loaded as templates.
.. seealso:: the `PyFilesystem docs <https://docs.pyfilesystem.org/>`_.
"""
def __init__(self, template_fs, encoding='utf-8', use_syspath=False, fs_filter=None):
self.filesystem = fs.open_fs(template_fs)
self.use_syspath = use_syspath
self.encoding = encoding
self.fs_filter = fs_filter
def get_source(self, environment, template):
template = _to_unicode(template)
if not self.filesystem.isfile(template):
raise jinja2.TemplateNotFound(template)
try:
mtime = self.filesystem.getdetails(template).modified
reload = lambda: self.filesystem.getdetails(template).modified > mtime
except fs.errors.MissingInfoNamespace:
reload = lambda: True
with self.filesystem.open(template, encoding=self.encoding) as input_file:
source = input_file.read()
if self.use_syspath:
if self.filesystem.hassyspath(template):
return source, self.filesystem.getsyspath(template), reload
elif self.filesystem.hasurl(template):
return source, self.filesystem.geturl(template), reload
return source, template, reload
def list_templates(self):
found = set()
for file in self.filesystem.walk.files(filter=self.fs_filter):
found.add(fs.path.relpath(file))
return sorted(found)
if sys.version_info[0] == 2:
def _to_unicode(path):
"""Convert str in Python 2 to unicode.
"""
return path.decode('utf-8') if type(path) is not unicode else path
else:
def _to_unicode(path):
return path
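# Illustrative usage sketch (not part of the original module): one way the loader
# above could be plugged into a Jinja2 environment. The "./templates" directory and
# "index.html" template name are assumptions made for the example.
#
#   env = jinja2.Environment(loader=FSLoader("./templates", fs_filter=["*.html"]))
#   template = env.get_template("index.html")
#   print(template.render())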
| 3
| 3
|
projects/01_fyyur/starter_code/app/main/views.py
|
oyasr/FSND
| 0
|
12781671
|
import sys
from datetime import datetime
from . import main
from flask import render_template, request, redirect, url_for, flash
from ..models import db, Artist, Venue, Show
from .forms import ShowForm, VenueForm, ArtistForm, DeleteArtist, DeleteVenue
@main.route('/')
def index():
return render_template('pages/home.html')
@main.route('/venues')
def venues():
data = []
date = datetime.now()
try:
# Get all the unique locations (city, state) of venues
locations = Venue.query.with_entities(
Venue.city.distinct(), Venue.state).all()
# Loop over all the locations & create a data_dict for each one
for location in locations:
data_dict = {}
city, state = location
data_dict['city'] = city
data_dict['state'] = state
# Get all venues in location
venue_list = []
venues = Venue.query.filter(Venue.city == city).all()
# Loop over all venues in that location & create a venue_dict for each one
for venue in venues:
venue_id, venue_dict = venue.id, venue.format_l()
# Get the number of upcoming shows for that venue
venue_dict['num_upcoming_shows'] = Show.query.filter(
Show.venue_id == venue_id,
Show.start_time > date).count()
venue_list.append(venue_dict)
data_dict['venues'] = venue_list
data.append(data_dict)
return render_template('pages/venues.html', areas=data)
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
        return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/venues/search', methods=['POST'])
def search_venues():
data = {}
date = datetime.now()
try:
# Get the search term and query the database using LIKE
search_term = request.form.get('search_term', '')
venues = Venue.query.filter(Venue.name.ilike(f'%{search_term}%')).all()
data['count'] = len(venues)
data['data'] = []
# Loop over the resulting venues
for venue in venues:
venue_id, venue_dict = venue.id, venue.format_s()
# Get the number of upcoming shows for that venue
venue_dict['num_upcoming_shows'] = Show.query.filter(
Show.venue_id == venue_id,
Show.start_time > date).count()
data['data'].append(venue_dict)
return render_template('pages/search_venues.html',
results=data, search_term=search_term)
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
        return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/venues/<int:venue_id>', methods=['GET', 'POST'])
def show_venue(venue_id):
date = datetime.now()
form = DeleteVenue()
try:
# If the user clicks the Delete Venue button
if request.method == 'POST':
            # Delete the venue from the database
venue = Venue.query.get(venue_id)
db.session.delete(venue)
db.session.commit()
# Flash a success message and redirect to homepage
flash(f'Venue {venue.name} was successfully deleted!')
return redirect(url_for('.index'))
# Get the venue with id = venue_id & create a data dict
venue_dict = Venue.query.get(venue_id).format_l()
venue_dict['upcoming_shows'] = []
venue_dict['past_shows'] = []
# Get the upcoming shows for that venue
upcoming_shows = Show.query.filter(
Show.venue_id == venue_id,
Show.start_time > date).all()
# Get the needed data from all upcoming shows
for show in upcoming_shows:
artist_id = show.artist_id
artist_dict = Artist.query.get(artist_id).format_m()
artist_dict['start_time'] = str(show.start_time)
venue_dict['upcoming_shows'].append(artist_dict)
venue_dict['upcoming_shows_count'] = len(upcoming_shows)
# Get the past shows for that venue
past_shows = Show.query.filter(
Show.venue_id == venue_id,
Show.start_time < date).all()
# Get the needed data from past shows
for show in past_shows:
artist_id = show.artist_id
artist_dict = Artist.query.get(artist_id).format_m()
artist_dict['start_time'] = str(show.start_time)
venue_dict['past_shows'].append(artist_dict)
venue_dict['past_shows_count'] = len(past_shows)
return render_template('pages/show_venue.html', venue=venue_dict, form=form)
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/venues/create', methods=['GET'])
def create_venue_form():
form = VenueForm()
return render_template('forms/new_venue.html', form=form)
@main.route('/venues/create', methods=['POST'])
def create_venue_submission():
try:
# Get the submitted form data
data = request.form
name = data.get('name', '')
city = data.get('city', '')
state = data.get('state', '')
address = data.get('address', '')
phone = data.get('phone', '')
genres = ','.join(data.getlist('genres'))
facebook_link = data.get('facebook_link', '')
# Create the venue and insert it into the DB
venue = Venue(name, city, state, address, phone, genres, facebook_link)
db.session.add(venue)
db.session.commit()
# On successful insert flash success
flash('Venue ' + request.form['name'] + ' was successfully listed!')
return redirect(url_for('.venues'))
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/artists')
def artists():
data = []
try:
# Get all the artists data
artists = Artist.query.all()
for artist in artists:
data.append(artist.format_s())
return render_template('pages/artists.html', artists=data)
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/artists/search', methods=['POST'])
def search_artists():
data = {}
date = datetime.now()
try:
# Get the search term and query the database using LIKE
search_term = request.form.get('search_term', '')
        artists = Artist.query.filter(
            Artist.name.ilike(f'%{search_term}%')).all()
        data['count'] = len(artists)
        data['data'] = []
        # Loop over the resulting artists
        for artist in artists:
            artist_id, artist_dict = artist.id, artist.format_s()
            # Get the number of upcoming shows for that artist
            artist_dict['num_upcoming_shows'] = Show.query.filter(
                Show.artist_id == artist_id,
                Show.start_time > date).count()
            data['data'].append(artist_dict)
        return render_template('pages/search_artists.html',
                               results=data, search_term=search_term)
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
        return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/artists/<int:artist_id>', methods=['GET', 'POST'])
def show_artist(artist_id):
date = datetime.now()
form = DeleteArtist()
try:
# If the user clicks the Delete Artist button
if request.method == 'POST':
artist = Artist.query.get(artist_id)
db.session.delete(artist)
db.session.commit()
# Flash a success message and redirect to homepage
flash(f'Artist {artist.name} was successfully deleted!')
return redirect(url_for('.index'))
# Get the artist with id = artist_id & create a data dict
artist_dict = Artist.query.get(artist_id).format_l()
artist_dict['upcoming_shows'] = []
artist_dict['past_shows'] = []
# Get the upcoming shows for that artist
upcoming_shows = Show.query.filter(
Show.artist_id == artist_id,
Show.start_time > date).all()
# Get the needed data from all upcoming shows
for show in upcoming_shows:
venue_id = show.venue_id
venue_dict = Venue.query.get(venue_id).format_m()
venue_dict['start_time'] = str(show.start_time)
artist_dict['upcoming_shows'].append(venue_dict)
artist_dict['upcoming_shows_count'] = len(upcoming_shows)
# Get the past shows for that artist
past_shows = Show.query.filter(
Show.artist_id == artist_id,
Show.start_time < date).all()
# Get the needed data from past shows
for show in past_shows:
venue_id = show.venue_id
venue_dict = Venue.query.get(venue_id).format_m()
venue_dict['start_time'] = str(show.start_time)
artist_dict['past_shows'].append(venue_dict)
artist_dict['past_shows_count'] = len(past_shows)
return render_template('pages/show_artist.html', artist=artist_dict, form=form)
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/artists/<int:artist_id>/edit', methods=['GET'])
def edit_artist(artist_id):
form = ArtistForm()
try:
# Get the artist's data
artist = Artist.query.get(artist_id).format_l()
return render_template('forms/edit_artist.html', form=form, artist=artist)
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/artists/<int:artist_id>/edit', methods=['POST'])
def edit_artist_submission(artist_id):
try:
# Get the submitted form data
data = request.form
name = data.get('name', '')
city = data.get('city', '')
state = data.get('state', '')
phone = data.get('phone', '')
genres = ','.join(data.getlist('genres'))
facebook_link = data.get('facebook_link', '')
# Get the artist and update its data
artist = Artist.query.get(artist_id)
artist.name = name
artist.city = city
artist.state = state
artist.phone = phone
artist.genres = genres
artist.facebook_link = facebook_link
db.session.add(artist)
db.session.commit()
# On successful insert flash success
flash('Artist ' + request.form['name'] + ' was successfully updated!')
return redirect(url_for('.show_artist', artist_id=artist_id))
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/venues/<int:venue_id>/edit', methods=['GET'])
def edit_venue(venue_id):
form = VenueForm()
try:
# Get the venue's data
venue = Venue.query.get(venue_id).format_l()
return render_template('forms/edit_venue.html', form=form, venue=venue)
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/venues/<int:venue_id>/edit', methods=['POST'])
def edit_venue_submission(venue_id):
try:
# Get the submitted form data
data = request.form
name = data.get('name', '')
city = data.get('city', '')
state = data.get('state', '')
address = data.get('address', '')
phone = data.get('phone', '')
genres = ','.join(data.getlist('genres'))
facebook_link = data.get('facebook_link', '')
# Get the venue and update its data
        venue = Venue.query.get(venue_id)
        venue.name = name
        venue.city = city
        venue.state = state
        venue.address = address
        venue.phone = phone
        venue.genres = genres
        venue.facebook_link = facebook_link
db.session.add(venue)
db.session.commit()
# On successful insert flash success
flash('Venue ' + request.form['name'] + ' was successfully updated!')
return redirect(url_for('.show_venue', venue_id=venue_id))
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/artists/create', methods=['GET'])
def create_artist_form():
form = ArtistForm()
return render_template('forms/new_artist.html', form=form)
@main.route('/artists/create', methods=['POST'])
def create_artist_submission():
try:
# Get the submitted form data
data = request.form
name = data.get('name', '')
city = data.get('city', '')
state = data.get('state', '')
phone = data.get('phone', '')
genres = ','.join(data.getlist('genres'))
facebook_link = data.get('facebook_link', '')
# Create the venue and insert it into the DB
artist = Artist(name, city, state, phone, genres, facebook_link)
db.session.add(artist)
db.session.commit()
# On successful insert flash success
flash('Artist ' + request.form['name'] + ' was successfully listed!')
return redirect(url_for('.artists'))
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/shows')
def shows():
data = []
try:
# Get all the shows
shows = Show.query.all()
# Loop over each show and generate its data
for show in shows:
show_dict = show.format_l()
show_dict['artist_name'] = show.artist.name
show_dict['artist_image_link'] = show.artist.image_link
show_dict['venue_name'] = show.venue.name
data.append(show_dict)
return render_template('pages/shows.html', shows=data)
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
return redirect(url_for('.index'))
finally:
db.session.close()
@main.route('/shows/create')
def create_shows():
form = ShowForm()
return render_template('forms/new_show.html', form=form)
@main.route('/shows/create', methods=['POST'])
def create_show_submission():
try:
# Get the submitted form data
data = request.form
artist_id = data.get('artist_id')
venue_id = data.get('venue_id')
start_time = data.get('start_time')
# Create the show and insert it to the DB
show = Show(artist_id, venue_id, start_time)
db.session.add(show)
db.session.commit()
# On successful insert flash success
flash('Show was successfully listed!')
return redirect(url_for('.shows'))
except Exception:
db.session.rollback()
print(sys.exc_info())
flash("Something went wrong. Please try again.")
return redirect(url_for('.index'))
finally:
db.session.close()
| 2.328125
| 2
|
app/source/routes.py
|
victorlomi/News-Catchup
| 0
|
12781672
|
import os
from flask import render_template, flash, redirect, url_for, request
import requests
from app.source import bp
from app.models import article as at
@bp.route('/source')
def search_source():
"""Show this view when a source is pressed and show articles."""
API_KEY = os.environ.get('API_KEY')
news_source = request.args.get('q')
# Make the request and change the response to a dict we can use.
url = f"https://newsapi.org/v2/top-headlines?sources={news_source}&apiKey={API_KEY}"
response = requests.get(url).json()
# create article objects and store them in a list.
articles = []
for article in response["articles"]:
articles.append(at.Article(
article["source"], article["author"], article["title"], article["description"],
article["url"], article["urlToImage"], article["publishedAt"], article["content"]))
return render_template('source.html', source=news_source, articles=articles, api_key=API_KEY)
| 2.875
| 3
|
src/Week1Bonus_PalindromePermutation_test.py
|
ruarfff/leetcode-jan-2021
| 0
|
12781673
|
import pytest
from .Week1Bonus_PalindromePermutation import Solution
s = Solution()
@pytest.mark.parametrize("test_input", ["code", "abc"])
def test_cannot_permute(test_input):
assert not s.canPermutePalindrome(test_input)
@pytest.mark.parametrize("test_input", ["aab", "carerac", "a", "aa"])
def test_can_permute(test_input):
assert s.canPermutePalindrome(test_input)
| 2.625
| 3
|
HOG-Features/scikithog.py
|
saneravi/ML_Stuff
| 209
|
12781674
|
#!/usr/bin/env python
"""Calculate HOG features for an image"""
import os
import matplotlib.pyplot as plt
from hog_features import image2pixelarray
from skimage import exposure
from skimage.feature import hog
def main(filename):
"""
Orchestrate the HOG feature calculation
Parameters
----------
filename : str
"""
image = image2pixelarray(filename)
fd, hog_image = hog(
image,
orientations=8,
pixels_per_cell=(16, 16),
cells_per_block=(1, 1),
visualise=True,
)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)
ax1.axis("off")
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title("Input image")
ax1.set_adjustable("box-forced")
# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
ax2.axis("off")
ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
ax2.set_title("Histogram of Oriented Gradients")
    ax2.set_adjustable("box-forced")
plt.show()
def is_valid_file(parser, arg):
"""
Check if arg is a valid file that already exists on the file system.
Parameters
----------
parser : argparse object
arg : str
Returns
-------
arg
"""
arg = os.path.abspath(arg)
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
def get_parser():
"""Get parser object for scikithog"""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(
description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"-f",
"--file",
dest="filename",
type=lambda x: is_valid_file(parser, x),
help="write report to FILE",
required=True,
metavar="FILE",
)
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.filename)
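# Example invocation (the image path is an assumption, added for illustration):
#   python scikithog.py -f path/to/image.png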
| 3.21875
| 3
|
django_mri/analysis/specifications/mrtrix3/estimate_response.py
|
ZviBaratz/django_mri
| 1
|
12781675
|
"""
Input and output specification dictionaries for MRtrix's *dwi2response* script.
See Also
--------
* `nipype.interfaces.mrtrix3.preprocess.ResponseSD`_
Notes
-----
For more information, see MRtrix3's `dwi2response reference`_.
.. _dwi2response reference:
https://mrtrix.readthedocs.io/en/latest/reference/commands/dwi2response.html
.. _nipype.interfaces.mrtrix3.preprocess.ResponseSD:
https://nipype.readthedocs.io/en/latest/api/generated/nipype.interfaces.mrtrix3.preprocess.html#responsesd
"""
from django_analyses.models.input.definitions import (
FileInputDefinition,
IntegerInputDefinition,
ListInputDefinition,
StringInputDefinition,
)
from django_analyses.models.output.definitions import FileOutputDefinition
DWI2RESPONSE_INPUT_SPECIFICATION = {
"algorithm": {
"type": StringInputDefinition,
"description": "Response estimation algorithm (multi-tissue)",
"required": True,
"choices": ["msmt_5tt", "dhollander", "tournier", "tax"],
},
"in_file": {
"type": FileInputDefinition,
"description": "Input DWI image.",
"required": True,
"is_configuration": False,
},
"bval_scale": {
"type": StringInputDefinition,
"description": "Specifies whether the b - values should be scaled by the square of the corresponding DW gradient norm, as often required for multishell or DSI DW acquisition schemes.", # noqa: E501
"choices": ["yes", "no"],
"default": "yes",
},
"wm_file": {
"type": StringInputDefinition,
"description": "Output WM response text file.",
"is_output_path": True,
"default": "wm_response.txt",
},
"csf_file": {
"type": StringInputDefinition,
"description": "Output CSF response text file.",
"is_output_path": True,
"default": "csf_response.txt",
},
"gm_file": {
"type": StringInputDefinition,
"description": "Output GM response text file.",
"is_output_path": True,
"default": "gm_response.txt",
},
"grad_file": {
"type": StringInputDefinition,
"description": "Dw gradient scheme (MRTrix format). Mutually exclusive with inputs: grad_fsl.", # noqa: E501
},
"grad_fsl": {
"type": StringInputDefinition,
"description": "dw gradient scheme (FSL format). Mutually exclusive with inputs: grad_file.", # noqa: E501
},
"in_bval": {
"type": StringInputDefinition,
"description": "Bvals file in FSL format.",
},
"in_bvec": {
"type": StringInputDefinition,
"description": "Bvecs file in FSL format.",
},
"in_mask": {"type": StringInputDefinition, "description": "Mask image."},
"max_sh": {
"type": ListInputDefinition,
"element_type": "INT",
"description": "Maximum harmonic degree of response function - single value for single-shell response, list for multi-shell response.", # noqa: E501
},
"nthreads": {
"type": IntegerInputDefinition,
"description": "Number of threads. if zero, the number of available cpus will be used.", # noqa: E501
},
"mtt_file": {
"type": FileInputDefinition,
"description": "Input 5tt image.",
},
}
DWI2RESPONSE_OUTPUT_SPECIFICATION = {
"csf_file": {
"type": FileOutputDefinition,
"description": "Output CSF response text file.",
},
"gm_file": {
"type": FileOutputDefinition,
"description": "Output WM response text file.",
},
"wm_file": {
"type": FileOutputDefinition,
"description": "Output WM response text file.",
},
}
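# Illustrative example (not part of the original module): a configuration dictionary
# that matches the input specification above. The values are assumptions picked from
# the declared choices and defaults, not a recommended setup.
EXAMPLE_DWI2RESPONSE_CONFIGURATION = {
    "algorithm": "dhollander",
    "bval_scale": "yes",
    "wm_file": "wm_response.txt",
    "gm_file": "gm_response.txt",
    "csf_file": "csf_response.txt",
    "nthreads": 4,
}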
| 2.203125
| 2
|
audio/views.py
|
cyaiox/dreamsgen
| 0
|
12781676
|
<gh_stars>0
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from .models import Group, Audio
from .serializers import GroupSerializer, AudioSerializer
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows group to be created, viewed, edited or deleted.
"""
permission_classes = (IsAuthenticated,)
queryset = Group.objects.all()
serializer_class = GroupSerializer
class AudioViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows audio to be created, viewed, edited or deleted.
"""
permission_classes = (IsAuthenticated,)
queryset = Audio.objects.all()
serializer_class = AudioSerializer
| 2.234375
| 2
|
queryTwitter.py
|
jeffmaddocks/queryTwitter
| 0
|
12781677
|
import pandas as pd
import tweepy
import json
import configparser
import re, string, random
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import twitter_samples, stopwords
from nltk.tag import pos_tag
from nltk import TweetTokenizer
from nltk import FreqDist, classify, NaiveBayesClassifier
def train_model(stop_words):
# https://www.digitalocean.com/community/tutorials/how-to-perform-sentiment-analysis-in-python-3-using-the-natural-language-toolkit-nltk
# https://github.com/sdaityari/sentiment.analysis.tutorial/blob/master/Sentiment%20Analysis%20in%20Python%203.ipynb
# <NAME>
positive_tweet_tokens = twitter_samples.tokenized('positive_tweets.json')
negative_tweet_tokens = twitter_samples.tokenized('negative_tweets.json')
positive_cleaned_tokens_list = []
negative_cleaned_tokens_list = []
for tokens in positive_tweet_tokens:
positive_cleaned_tokens_list.append(remove_noise(tokens, stop_words))
for tokens in negative_tweet_tokens:
negative_cleaned_tokens_list.append(remove_noise(tokens, stop_words))
all_pos_words = get_all_words(positive_cleaned_tokens_list)
freq_dist_pos = FreqDist(all_pos_words)
print(freq_dist_pos.most_common(10))
positive_tokens_for_model = get_tweets_for_model(positive_cleaned_tokens_list)
negative_tokens_for_model = get_tweets_for_model(negative_cleaned_tokens_list)
positive_dataset = [(tweet_dict, "Positive")
for tweet_dict in positive_tokens_for_model]
negative_dataset = [(tweet_dict, "Negative")
for tweet_dict in negative_tokens_for_model]
dataset = positive_dataset + negative_dataset
random.shuffle(dataset)
train_data = dataset[:7000]
test_data = dataset[7000:]
classifier = NaiveBayesClassifier.train(train_data)
print("Accuracy is:", classify.accuracy(classifier, test_data))
print(classifier.show_most_informative_features(10))
return classifier
def remove_noise(tweet_tokens, stop_words):
# print(f'noisy: {tweet_tokens}')
cleaned_tokens = []
for token, tag in pos_tag(tweet_tokens):
token = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*(),]|'
'(?:%[0-9a-fA-F][0-9a-fA-F]))+','', token)
token = re.sub("(@[A-Za-z0-9_]+)","", token)
token = re.sub(r'[^\x00-\x7F]+','', token) # this line takes out win-1252 encoded characters, to be fixed in a future release of tabpy at which time this line can be removed
if tag.startswith("NN"):
pos = 'n'
elif tag.startswith('VB'):
pos = 'v'
else:
pos = 'a'
lemmatizer = WordNetLemmatizer()
token = lemmatizer.lemmatize(token, pos)
if len(token) > 0 and token not in string.punctuation and token.lower() not in stop_words:
cleaned_tokens.append(token.lower())
# print(f'quiet: {cleaned_tokens}')
return cleaned_tokens
def get_all_words(cleaned_tokens_list):
for tokens in cleaned_tokens_list:
for token in tokens:
yield token
def get_tweets_for_model(cleaned_tokens_list):
for tweet_tokens in cleaned_tokens_list:
yield dict([token, True] for token in tweet_tokens)
def queryTwitter(df): # the initial df is the csv containing the twitter handles to query
stop_words = stopwords.words('english')
classifier = train_model(stop_words)
df_final = pd.DataFrame() # the final df will hold all tweets across all handles
for i in df.iterrows(): # iterate thru the handles
print('processing: '+ i[1][0])
df2 = get_tweets(i[1][0]+' -filter:retweets', i[1][1]) # create a new df to hold the tweets for each handle
df2.insert(1,'search_handle', i[1][0])
df2 = df2.astype({'created_at': str})
df2 = df2.assign(tokens = '[]') # using assign instead of insert
df2 = df2.assign(sentiment = '') # using assign instead of insert
df2 = clean_tweets(classifier, df2, stop_words)
df2 = df2.astype({'tokens': str})
df_final = df_final.append(df2, ignore_index=True)
print(df_final.columns)
return df_final
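# Note: prep_string() and prep_int() below are not defined in this script; they appear
# to be the schema helpers provided by the Tableau Prep/TabPy environment this file is
# meant to run in (an assumption based on the TabPy comment in remove_noise above).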
def get_output_schema():
return pd.DataFrame({
'id': prep_string(),
'search_handle': prep_string(),
'author_name': prep_string(),
'author_handle': prep_string(),
'created_at': prep_string(),
'tweet_text': prep_string(),
'retweet_count': prep_int(),
'favorite_count': prep_int(),
'tokens': prep_string(),
'sentiment': prep_string()
})
def get_tweets(string_search, int_returnrows):
# http://docs.tweepy.org/en/v3.9.0/getting_started.html
config = configparser.ConfigParser()
config.read('twitterkeys.ini')
# Consume:
consumer_key = config['Consume']['consumer_key']
consumer_secret = config['Consume']['consumer_secret']
# Access:
access_token = config['Access']['access_token']
access_secret = config['Access']['access_secret']
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
alltweets = []
    for tweet in tweepy.Cursor(api.search, q=string_search).items(int_returnrows):
# print(' ' + tweet.text)
outtweets = [tweet.id_str, tweet.author.name, '@'+tweet.author.screen_name, tweet.created_at, tweet.text, tweet.retweet_count, tweet.favorite_count]
alltweets.append(outtweets)
df = pd.DataFrame(data=alltweets, columns=['id','author_name', 'author_handle', 'created_at','tweet_text','retweet_count','favorite_count'])
return df
def clean_tweets(classifier, df, stop_words):
tknzr = TweetTokenizer()
for i in df.iterrows():
# print('tweet: '+df['tweet_text'][i[0]])
tokens = tknzr.tokenize(i[1]['tweet_text']) # using NLTK tweet tokenizer
custom_tokens = remove_noise(tokens, stop_words)
df['tokens'][i[0]] = custom_tokens # need to fix this warning later
# SettingWithCopyWarning:
# A value is trying to be set on a copy of a slice from a DataFrame
# See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
# grabs the current row: df.loc[i[0]]
# grabs the tokens column of the current row: df.loc[i[0]]['tokens']
# this is a python object of type array: df.loc[df.id == i[0], 'tokens']
# df.loc[df.id == i[0], 'tokens'] = remove_noise(tokens, stop_words)
score = classifier.classify(dict([token, True] for token in custom_tokens))
df['sentiment'][i[0]] = score
return df
if __name__ == "__main__":
import pandas as pd
df = pd.read_csv('twitter_query.csv')
df2 = queryTwitter(df)
df2.to_json('tweets.json', orient='table')
df2.to_excel('tweets.xlsx')
| 2.734375
| 3
|
posts/forms.py
|
pennyfea/404-project
| 0
|
12781678
|
from django import forms
from mimetypes import guess_type
import base64
import os
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = [
"content",
"image",
"privacy",
"content_type",
"accessible_users",
"unlisted",
"user",
"publish"
]
def __init__(self, *args, **kwargs):
super(PostForm, self).__init__(*args, **kwargs)
self.fields['user'].widget = forms.HiddenInput()
self.fields['publish'].widget = forms.HiddenInput()
self.set_placeholder('content', 'What\'s on your mind?')
self.set_form_class()
#add placeholder text to fields
def set_placeholder(self, field, text):
self.fields[field].widget.attrs['placeholder'] = text
#add class for css
def set_form_class(self):
self.fields['content'].widget.attrs['class'] = "create_post"
self.fields['unlisted'].widget.attrs['class'] = "create_post"
"""
    Creates the objects for the accessible users and then saves them to the form
"""
def save(self, commit=True):
accessible_users = self.cleaned_data.pop('accessible_users', [])
print(accessible_users)
post = super().save(commit)
username = post.user.username
timestamp = post.timestamp.strftime("%b %-d, %Y, at %H:%M %p")
post.title = username+" - "+timestamp
post.save()
post.accessible_users.add(*accessible_users)
post.accessible_users.add(post.user)
return post
class ImageForm(forms.ModelForm):
class Meta:
model = Post
fields = [
"image",
"privacy",
"accessible_users",
"user",
"publish"
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['user'].widget = forms.HiddenInput()
self.fields['publish'].widget = forms.HiddenInput()
"""
    Creates the objects for the accessible users and then saves them to the form
"""
def save(self, commit=True):
accessible_users = self.cleaned_data.pop('accessible_users', [])
print(accessible_users)
post = super().save(commit)
username = post.user.username
timestamp = post.timestamp.strftime("%b %-d, %Y, at %H:%M %p")
post.title = username+" - "+timestamp
post.save()
post.accessible_users.add(*accessible_users)
post.accessible_users.add(post.user)
return post
| 2.453125
| 2
|
pon3ark/list.py
|
Arzaroth/Pon3Ark
| 1
|
12781679
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File: list.py
# by <NAME>
# <EMAIL>
#
import os
from datetime import datetime as dt
def do_list(ark, opts):
total_size = sum(x.original_filesize for x in ark.metadatas)
len_size = max(len(str(total_size)), len('Length'))
if opts['-v']:
print("File: %s" % ark.filename)
print(' Flag %*s Date Time Name' % (len_size,
'Length'))
print(' ---- -%s ---------- ----- ----' % (len_size * '-'))
for meta in ark.metadatas:
if opts['-v']:
print(' %s %*d %s %s' % (meta.flag,
len_size,
meta.original_filesize,
dt.fromtimestamp(meta.timestamp)
.strftime('%Y-%m-%d %H:%M'),
meta.fullpath))
else:
print(meta.fullpath)
if opts['-v']:
print(' ---- -%s -------' % (len_size * '-'))
print(' %*d%s%d file%s' % (len_size, total_size,
' ' * 21, ark.file_count,
's' if ark.file_count > 1 else ''))
| 2.53125
| 3
|
Grundgeruest/migrations/0005_auto_20171211_1421.py
|
valuehack/scholarium.at
| 1
|
12781680
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-11 14:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Grundgeruest', '0004_auto_20171211_1418'),
]
operations = [
migrations.AlterField(
model_name='scholariumprofile',
name='alt_auslaufend',
field=models.SmallIntegerField(default=0, null=True, verbose_name='auslaufend'),
),
]
| 1.390625
| 1
|
getGoodsList.py
|
likebin16/1688
| 15
|
12781681
|
# -*- coding: utf-8 -*-
import urllib2
import urllib
from bs4 import BeautifulSoup
import json
import cookielib
import sqlite3
import time
import os
import sys
import socket
socket.setdefaulttimeout(30)
reload(sys)
sys.setdefaultencoding('utf-8')
def get_search_page_url(keyWord):
res = 1
pageURL = ''
try:
searchBaseURL = rootBaseURL + '/page/search.html?keywords='
searchKeyWordsURL = searchBaseURL + urllib2.quote(keyWord)
searchPageContent = getContentOfWebPage(searchKeyWordsURL)
searchPageSoup = BeautifulSoup(searchPageContent, 'html.parser')
pageURL = searchPageSoup.head.find('link', attrs={'rel':
'canonical'}).attrs['href']
except:
res = 0
return (res, pageURL)
def getContentOfWebPage(url):
user_agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B137 Safari/601.1'
headers = {
'User-Agent': user_agent
# 'Connection': 'Keep-Alive'
}
req = urllib2.Request(url=url, headers=headers)
response = urllib2.urlopen(req)
content = response.read().decode('utf-8', 'ignore')
return content
def get_goods_list(url, data, opener):
user_agent = 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B137 Safari/601.1'
url_encode_data = urllib.urlencode(data)
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
# 'Connection': 'Keep-Alive',
'User-Agent': user_agent
}
req = urllib2.Request(url=url, data=url_encode_data, headers=headers)
content = ''
res = 1
try:
content = opener.open(req).read()
except:
res = 0
finally:
opener.close()
return (res, content)
def create_url_opener():
cookie = cookielib.CookieJar()
handler = urllib2.HTTPCookieProcessor(cookie)
opener = urllib2.build_opener(handler)
return opener
def get_csrf(url):
res = 1
csrf = ''
try:
response = opener.open(url).read()
subStr = r'"csrf":"'
headIndex = response.rindex(subStr) + len(subStr)
tailIndex = response.index(r'"', headIndex)
csrf = response[headIndex:tailIndex]
except:
res = 0
return (res, csrf)
# 1688
rootBaseURL = 'http://m.1688.com'
# Connect to the search-history database
historyDBName = 'keyWordsHistory.db'
if not os.path.exists(historyDBName):
print('keyWordsHistory.db is not exist.please run initKeyWordsHistoryDB.py')
sys.exit(1)
historyDBConn = sqlite3.connect('keyWordsHistory.db')
historyDBCursor = historyDBConn.execute(
"SELECT KEYWORD FROM HISTORY WHERE COMPLETED='NO';"
)
# Connect to the goods database
goodsDBConn = sqlite3.connect('goods.db')
goodsDBCursor = goodsDBConn.cursor()
# Create the table
goodsDBCursor.execute('''CREATE TABLE IF NOT EXISTS GOODS
(ID TEXT PRIMARY KEY NOT NULL,
SIMPLE_SUBJECT TEXT NOT NULL,
COMPANY_NAME TEXT NOT NULL);''')
for row in historyDBCursor:
keyWord = row[0].encode('utf-8')
    print('Start searching for keyword: ' + keyWord)
opener = create_url_opener()
(res, searchPageURL) = get_search_page_url(keyWord)
if not res == 1:
        print('Something went wrong, waiting 10 seconds')
time.sleep(10)
continue
    # Get the CSRF token
(res, csrf) = get_csrf(searchPageURL)
if not res == 1:
        print('Something went wrong, waiting 10 seconds')
time.sleep(10)
continue
beginPage = 1
pageSize = 100
while True:
wing_navigate_options = {
"data": {
"type": "offer",
"keywords": keyWord,
"beginPage": beginPage,
"pageSize": pageSize,
"offset": 1,
"sortType": "pop" # 综合:pop 销量:booked 价格:price
}
}
        # Request parameters
requestParam = {
"_csrf": csrf,
"__wing_navigate_type": "action",
"__wing_navigate_url": "search:pages/search/offerresult",
"__wing_navigate_options": json.dumps(wing_navigate_options)
}
        # Get the JSON string containing the goods list
(res, goodsListJsonStr) = get_goods_list(
searchPageURL.encode('utf-8'),
requestParam,
opener
)
if not res == 1:
            print('Something went wrong, waiting 10 seconds')
time.sleep(10)
continue
        # Parse the JSON
goodsList = json.loads(goodsListJsonStr)
        # If the JSON has no 'offers', the goods list has been fully fetched
if not goodsList['data'].has_key('offers'):
            print('Finished searching keyword: ' + keyWord)
break
for good in goodsList['data']['offers']:
try:
goodsDBCursor.execute(
'''INSERT INTO GOODS (ID, SIMPLE_SUBJECT, COMPANY_NAME)
VALUES (?, ?, ?);''', (
good['id'],
good['simpleSubject'],
good['companyName']
)
)
except sqlite3.IntegrityError:
                pass  # print("This record ID already exists: " + good['id'])
        # Commit the transaction
goodsDBConn.commit()
        # Increment the page number
beginPage += 1
        print('Inserted ' + str(len(goodsList['data']['offers'])) + ' records')
    # After finishing a keyword successfully, update the history
historyDBCursor.execute('''UPDATE HISTORY SET COMPLETED='YES'
WHERE KEYWORD=?;''', (keyWord.decode(),))
historyDBConn.commit()
# Close the connections
goodsDBCursor.close()
goodsDBConn.close()
historyDBCursor.close()
historyDBConn.close()
| 2.75
| 3
|
src/cltl_service/backend/schema.py
|
leolani/cltl-backend
| 0
|
12781682
|
<reponame>leolani/cltl-backend
from dataclasses import dataclass
from typing import List
from emissor.representation.scenario import Modality
from cltl.backend.api.storage import AudioParameters
@dataclass
class SignalEvent:
type: str
signal_id: str
timestamp: float
modality: Modality
files: List[str]
@dataclass
class SignalStarted(SignalEvent):
pass
@dataclass
class SignalStopped(SignalEvent):
pass
@dataclass
class TextSignalEvent(SignalEvent):
text: str
@classmethod
def create(cls, signal_id: str, timestamp: float, text: str, files: List[str] = []):
return cls(cls.__name__, signal_id, timestamp, Modality.TEXT, files, text)
@dataclass
class AudioSignalStarted(SignalStarted):
parameters: AudioParameters
@classmethod
def create(cls, signal_id: str, timestamp: float, files: List[str], parameters: AudioParameters):
return cls(cls.__name__, signal_id, timestamp, Modality.AUDIO, files, parameters)
@dataclass
class AudioSignalStopped(SignalStopped):
length: int
@classmethod
def create(cls, signal_id: str, timestamp: float, length: int):
return cls(cls.__name__, signal_id, timestamp, Modality.AUDIO, None, length)
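# Illustrative example (not part of the original module): constructing a text signal
# event with the factory classmethod above; the signal id, timestamp and text are
# made-up values.
#
#   event = TextSignalEvent.create("text-1", 1612345679.0, "hello world")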
| 2.296875
| 2
|
tests/test_decorators.py
|
adamchainz/django-GNU-Terry-Pratchett
| 26
|
12781683
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.test import RequestFactory
from nose.tools import eq_
from gnu_terry_pratchett.decorators import clacks_overhead
@clacks_overhead
def view(request):
return HttpResponse("Death can't have him")
def test_view_decorator():
request = RequestFactory().get('/')
response = view(request)
eq_(response['x-clacks-overhead'], 'GNU Terry Pratchett')
| 2.34375
| 2
|
plugins/_Post_Process/_Utils/tsne.py
|
isabella232/nnc-plugin
| 7
|
12781684
|
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import numpy as np
import tqdm
from sklearn.manifold import TSNE
from nnabla import logger
from nnabla.utils.data_iterator import data_iterator_csv_dataset
def func(args):
# Load variable
data_iterator = (lambda: data_iterator_csv_dataset(
uri=args.input,
batch_size=64,
shuffle=False,
normalize=True,
with_memory_cache=False,
with_file_cache=False))
logger.log(99, 'Loading variable...')
dataset = []
with data_iterator() as di:
pbar = tqdm.tqdm(total=di.size)
while len(dataset) < di.size:
data = di.next()
variable = data[di.variables.index(args.variable)]
dataset.extend(variable)
pbar.update(len(variable))
pbar.close()
dataset = np.array(dataset)[:di.size].reshape(di.size, -1)
logger.log(99, 'variable={}, length={}, dim={}'.format(
args.variable, dataset.shape[0], dataset.shape[1]))
# t-SNE
logger.log(99, 'Processing t-SNE...')
dim = int(args.dim)
result = TSNE(n_components=dim, random_state=0).fit_transform(dataset)
# output
with open(args.input, newline='', encoding='utf-8-sig') as f:
rows = [row for row in csv.reader(f)]
row0 = rows.pop(0)
row0.extend([args.variable + '_tsne__{}'.format(i) for i in range(dim)])
for i, y in enumerate(result):
rows[i].extend(y)
with open(args.output, 'w', encoding='utf-8') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(row0)
writer.writerows(rows)
logger.log(99, 't-SNE completed successfully.')
def main():
parser = argparse.ArgumentParser(
description='t-SNE\n\n' +
'<NAME>, <NAME>. Visualizing Data using t-SNE\n' +
'http://jmlr.org/papers/volume9/vandermaaten08a/vandermaaten08a.pdf\n\n',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-i',
'--input',
help='path to input csv file (csv) default=output_result.csv',
required=True,
default='output_result.csv')
parser.add_argument(
'-v',
'--variable',
help="Variable to be processed (variable) default=x",
required=True,
default="x")
parser.add_argument(
'-d',
'--dim',
help='dimension of the embedded space (variable) default=2',
default=2)
parser.add_argument(
'-o',
'--output',
help='path to output csv file (csv) default=tsne.csv',
required=True,
default='tsne.csv')
parser.set_defaults(func=func)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
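# Example invocation (file names are the argparse defaults above, shown for illustration):
#   python tsne.py -i output_result.csv -v x -d 2 -o tsne.csv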
| 2.15625
| 2
|
accounts/admin.py
|
Ashwin-Pokharel/BudgetApp1
| 0
|
12781685
|
from django.contrib import admin
from .models import Category , Incomes , Expense
# Register your models here.
admin.site.register(Category)
admin.site.register(Incomes)
admin.site.register(Expense)
| 1.328125
| 1
|
reproduce_distance_measure.py
|
YHRen/NGFP
| 5
|
12781686
|
from torch.utils.data import DataLoader, Subset
from pathlib import Path
import torch
import torch.nn as nn
import itertools as its
import pandas as pd
import numpy as np
import json
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
import matplotlib.pyplot as plt
from NeuralGraph.dataset import MolData
from NeuralGraph.model import QSAR
from NeuralGraph.util import dev
def tanimoto_distance(x, y):
idx = x<=y
return 1 - (x[idx].sum() + y[~idx].sum()) / (x[~idx].sum() + y[idx].sum())
def get_circular_fp(smile, radius=6, fp_len=128):
mol = Chem.MolFromSmiles(smile)
fingerprint = Chem.AllChem.GetMorganFingerprintAsBitVect(mol, radius, fp_len)
arr = np.zeros((1,))
DataStructs.ConvertToNumpyArray(fingerprint, arr)
return arr
def get_neural_fp(X, net):
x0, x1, x2 = X
x0, x1, x2 = x0.to(dev), x1.to(dev), x2.to(dev)
x0, x1, x2 = (torch.unsqueeze(x, 0) for x in (x0, x1, x2))
res = net.nfp(x0, x1, x2)
res = res.detach().cpu().numpy()
return res
def mse(x, y):
return ((x-y)**2).mean()
def normalize_array(A):
mean, std = np.mean(A), np.std(A)
A_normed = (A - mean) / std
def restore_function(X):
return X * std + mean
return A_normed, restore_function
def change_net_to_weights(net, lo_bnd, hi_bnd):
for n,m in net.named_children():
if isinstance(m, torch.nn.Linear):
nn.init.uniform_(m.weight, lo_bnd, hi_bnd)
if m.bias is not None:
nn.init.uniform_(m.bias, lo_bnd, hi_bnd)
change_net_to_weights(m, lo_bnd, hi_bnd)
def calc_distance(net, data, smiles, FP_LEN,\
sample_sz=1000, SEED=None):
N, sample_sz = len(data), sample_sz
if SEED: np.random.seed(SEED)
res = [[],[]]
for _ in range(sample_sz):
i, j = np.random.choice(N, 2)
dst0 = tanimoto_distance(get_circular_fp(smiles[i], fp_len=FP_LEN),
get_circular_fp(smiles[j], fp_len=FP_LEN))
dst1 = tanimoto_distance(get_neural_fp(data[i][0], net),
get_neural_fp(data[j][0], net))
res[0].append(dst0)
res[1].append(dst1)
res = np.asarray(res)
return res
def calc_corr(res):
return (np.corrcoef(res[0], res[1])[0,1])
def plot_scatter(net, data, smiles, FP_LEN, filename,\
sample_sz = 1000, SEED=None):
res = calc_distance(net, data, smiles, FP_LEN, \
sample_sz, SEED)
plt.scatter(res[0], res[1], marker='o', facecolors='none', edgecolors='b', alpha=0.3)
plt.xlabel("circular fingerprint distance")
plt.ylabel("neural fingerprint distance")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.title("Correlation = {:.4f}".format(np.corrcoef(res[0], res[1])[0,1]))
plt.savefig(filename, dpi=300, bbox_inches='tight')
if __name__ == '__main__':
# Load Data
DATAFILE = Path('./dataset/solubility/delaney-processed.csv')
df = pd.read_csv(DATAFILE)
target = df['measured log solubility in mols per litre'].values
target, restore = normalize_array(target)
data = MolData(df['smiles'], target)
print(type(df['smiles'][0]), df['smiles'][0])
tmp = df['smiles'][0]
print(get_circular_fp(tmp))
exit()
# Plot with a random weight and 2048 length as in Figure3Left
gcn_act = ['sigmoid', 'relu', 'tanh']
gop_act = ['sigmoid', 'tanh', 'softmax']
large_weights = [(-1e7, 1e7), (0, 1e7), (-1e3, 1e3), (-10, 10)]
max_degs = [1, 6]
res = {}
for a1, a2, bnds, rd in its.product(gcn_act, gop_act, large_weights,
max_degs):
SEED, FP_LEN = 7, 1<<11
net = QSAR(hid_dim=FP_LEN, n_class=1, max_degree=rd,
gcn_activation=a1,
gop_activation=a2)
print("nbnds", bnds)
change_net_to_weights(net.nfp, *bnds)
tmp = calc_distance(net, data, df['smiles'], FP_LEN, sample_sz=500,
SEED=7)
tmp = calc_corr(tmp)
res[f"gcn-{a1}_gop-{a2}_weights-{bnds}_radius-{rd}"]=tmp
print(f"gcn-{a1}_gop-{a2}_weights-{bnds}_radius-{rd}", tmp)
with open('./output.json', 'w') as fp:
json.dump(res, fp)
exit()
plot_scatter(net,
data,
df['smiles'],
FP_LEN,
"./figs/scatter_nfp_vs_cfp_2048_random_weight.png")
exit()
# Plot with a trained model
OUTPUT = './output/best_delaney.pkl'
net = torch.load(OUTPUT+'.pkg')
SEED, FP_LEN = 7, 1<<11
plot_scatter(net,
data,
df['smiles'],
FP_LEN,
"./figs/scatter_nfp_vs_cfp_128_trained_weight.png")
| 2.265625
| 2
|
Forms/API/errors/tests.py
|
looking-for-a-job/django-examples
| 0
|
12781687
|
from django.test import TestCase
from .forms import MyForm
"""
https://docs.djangoproject.com/en/dev/ref/forms/api/#django.forms.Form.errors
"""
form = MyForm({'subject':'','sender':'invalid-email'})
print(form.errors)
print(form.errors.as_data())
# {'sender': ['Enter a valid email address.'], 'subject': ['This field is required.']}
| 2.53125
| 3
|
src/sdk/python/OsduClient/models/legal_tag_invalid_response_list.py
|
mstest123/self-managed-osdu_from_Daniel
| 3
|
12781688
|
# coding: utf-8
"""
self-managed-osdu
Rest API Documentation for Self Managed OSDU # noqa: E501
OpenAPI spec version: 0.11.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from OsduClient.configuration import Configuration
class LegalTagInvalidResponseList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'invalid_legal_tags': 'list[LegalTagInvalidResponse]'
}
attribute_map = {
'invalid_legal_tags': 'invalidLegalTags'
}
def __init__(self, invalid_legal_tags=None, _configuration=None): # noqa: E501
"""LegalTagInvalidResponseList - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._invalid_legal_tags = None
self.discriminator = None
if invalid_legal_tags is not None:
self.invalid_legal_tags = invalid_legal_tags
@property
def invalid_legal_tags(self):
"""Gets the invalid_legal_tags of this LegalTagInvalidResponseList. # noqa: E501
A collection of invalid LegalTags. # noqa: E501
:return: The invalid_legal_tags of this LegalTagInvalidResponseList. # noqa: E501
:rtype: list[LegalTagInvalidResponse]
"""
return self._invalid_legal_tags
@invalid_legal_tags.setter
def invalid_legal_tags(self, invalid_legal_tags):
"""Sets the invalid_legal_tags of this LegalTagInvalidResponseList.
A collection of invalid LegalTags. # noqa: E501
:param invalid_legal_tags: The invalid_legal_tags of this LegalTagInvalidResponseList. # noqa: E501
:type: list[LegalTagInvalidResponse]
"""
self._invalid_legal_tags = invalid_legal_tags
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(LegalTagInvalidResponseList, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LegalTagInvalidResponseList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, LegalTagInvalidResponseList):
return True
return self.to_dict() != other.to_dict()
| 1.90625
| 2
|
finalterm_Riley_Fitzgibbons/run2.py
|
rjfitzg/Python3030
| 1
|
12781689
|
<reponame>rjfitzg/Python3030
'''
Almost completed but couldn't get the file to load as a json object in time after completing the other scripts.
'''
def get_city_image_url(city):
import requests
import json
# Normalize request and make call to api
city = city.lower()
city = city.replace(" ", "-") # turn spaces into '-' as is wanted by the api
    # Load and filter the json file
    city_url_file = open("data.txt", "r")
    # Load json to a variable from the file
    file_json = json.load(city_url_file)
    city_url_file.close()
    for i in range(len(file_json['_links']['ua:item'])):
        if file_json['_links']['ua:item'][i]['name'].lower().replace(" ", "-") == city:
            # Follow the href and extract the photo url we want
req = requests.get(file_json['_links']['ua:item'][i]['href'])
text = json.loads(req.text)
photo_url = text['photos'][0]['attribution']['source']
return photo_url
###
cities = ['Barcelona', 'London', 'berlin',
'paris', 'seattle', 'dubai',
'new york', 'shanghai', 'Rome', 'Toronto']
for city in cities:
city_url = get_city_image_url(city)
| 3.34375
| 3
|
monster.py
|
Rydra/fighting-aws
| 0
|
12781690
|
from melange import DriverManager
from melange.messaging.exchange_message_publisher import ExchangeMessagePublisher
DriverManager.instance().use_driver(driver_name='aws')
publisher = ExchangeMessagePublisher('dev-superbattle')
publisher.publish({
'amount': 20
}, 'DamageDealtToHero')
print('Gñeeee, die you fool!')
| 1.984375
| 2
|
typed_jsonrpc/msg_types.py
|
thautwarm/typed-jsonrpc
| 5
|
12781691
|
<reponame>thautwarm/typed-jsonrpc
import typing
Json = list["Json"] | dict[str, "Json"] | int | float | str | None
CANCEL_METHOD = "$/cancelRequest"
JSONRPC_VERSION = "2.0"
class ResponseError(typing.TypedDict):
code: int
message: str
data: Json | None
class Response(typing.TypedDict):
id: str
result: Json | None
error: ResponseError | None
jsonrpc: str
class Request(typing.TypedDict):
id: str
method: str
params: Json | None
jsonrpc: str
class Notification(typing.TypedDict):
method: str
params: Json | None
jsonrpc: str
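# Illustrative example (not part of the original module): a request message shaped
# according to the TypedDicts above; the id and method are made-up values.
EXAMPLE_REQUEST: Request = {
    "id": "1",
    "method": "initialize",
    "params": None,
    "jsonrpc": JSONRPC_VERSION,
}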
| 2.375
| 2
|
chief/utils/__init__.py
|
Manny2014/vm2docker
| 11
|
12781692
|
<gh_stars>10-100
__author__ = 'elubin'
| 1.085938
| 1
|
cracking_the_coding_interview/chapter_09/staircase.py
|
yasserglez/programming-problems
| 2
|
12781693
|
<reponame>yasserglez/programming-problems
# Interview Question 9.1
import sys
def count_ways(steps_left, memory=None):
if memory is None:
memory = [None] * steps_left
if steps_left == 0:
return 1
else:
ways = 0
for k in {1, 2, 3}:
if steps_left - k >= 0:
if memory[steps_left - k] is None:
memory[steps_left - k] = count_ways(steps_left - k, memory)
ways += memory[steps_left - k]
return ways
if __name__ == '__main__':
n = int(sys.stdin.readline())
print(count_ways(n))
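# Worked example (added for illustration): with step sizes of 1, 2 or 3,
# count_ways(3) == 4, covering the sequences 1+1+1, 1+2, 2+1 and 3.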
| 3.625
| 4
|
solved_questions.py
|
Diego-Zulu/leetcode_answers
| 0
|
12781694
|
import argparse
import re
import subprocess
REGEX = r'\[\s*(\d+)\s*\]'
def solved_questions():
print('Getting list of solved questions.')
out = subprocess.check_output(
['leetcode', 'list', '-q', 'd'],
)
problems = []
for line in out.decode().split('\n'):
matches = re.search(REGEX, line)
if not matches:
continue
problems.append(matches.group(1))
return problems
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Get list of solved questions.')
    parser.parse_args()
question_numbers = solved_questions()
print(', '.join(question_numbers))
| 3.234375
| 3
|
imbalanceddata.py
|
sgulyano/ipst_AI_lvl3
| 0
|
12781695
|
<filename>imbalanceddata.py
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import pandas as pd
import numpy as np
from sklearn import tree
from sklearn.datasets import load_breast_cancer
np.random.seed(42)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
##
data = load_breast_cancer()
X = data['data'][:, :2]
y = data['target']
idx = np.random.choice(np.where(y == 0)[0], size=int(
np.sum(y == 1)*0.1), replace=False)
noise = np.random.normal(0, 0.1, (idx.size*10, 2))
noise[:idx.size, :] = 0
x_train = np.concatenate((X[y == 1], X[idx]))
y_train = np.concatenate((y[y == 1], y[idx]))
def get_fig(x_train, y_train, show_dec_bound=False):
clf = tree.DecisionTreeClassifier(
random_state=0, max_depth=4, min_samples_split=10)
clf = clf.fit(x_train, y_train)
tree.plot_tree(clf)
plot_step = 0.1
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
colorscale = [[0, 'peachpuff'], [1, 'lightcyan']]
if show_dec_bound:
fig = go.Figure(data=go.Heatmap(
z=Z,
x=np.arange(x_min, x_max, plot_step),
y=np.arange(y_min, y_max, plot_step),
colorscale=colorscale,
colorbar=dict(),
showscale=False
# colorbar=dict(nticks=10, ticks='outside',
# ticklen=5, tickwidth=1,
# showticklabels=True,
# tickangle=0, tickfont_size=12)
),
layout=go.Layout(
xaxis=dict(range=[x_min, x_max]),
xaxis_title=data.feature_names[0],
yaxis=dict(range=[y_min, y_max]),
yaxis_title=data.feature_names[1],
)
)
else:
fig = go.Figure(layout=go.Layout(
xaxis=dict(range=[x_min, x_max]),
xaxis_title=data.feature_names[0],
yaxis=dict(range=[y_min, y_max]),
yaxis_title=data.feature_names[1],
))
colors = ['red', 'blue']
for i, color in enumerate(colors):
idx = np.where(y_train == i)
fig.add_trace(go.Scatter(x=x_train[idx, 0].squeeze(), y=x_train[idx, 1].squeeze(),
mode='markers',
name=data.target_names[i],
marker_color=color))
return fig
fig = get_fig(x_train, y_train)
app.layout = html.Div([
html.H1(children='จำนวนข้อมูลที่ไม่สมดุลกัน (Imbalanced Data)'),
html.Div(children='''
ในแบบฝึกหัดนี้ ให้นักเรียนลองใช้เทคนิค 1) การสุ่มข้อมูลจากกลุ่มหลักให้มีน้อยลง (Under-Sampling) และ 2)
การสร้างข้อมูลของกลุ่มย่อยให้มีมากขึ้น (Over-Sampling) แล้วลองสังเกต Decision Tree ผลลัพธ์ ที่ได้
'''),
html.Div(children=[
# dcc.Markdown('### ชุดข้อมูล'),
# dcc.Dropdown(
# options=[
# {'label': 'มะเร็งเต้านม', 'value': 'breast_cancer'},
# ],
# value='breast_cancer'
# ),
dcc.Markdown('### Under-Sampling'),
dcc.Slider(
id='under-spl-slider-id',
min=10,
max=100,
marks={i: '{}%'.format(i) for i in range(10, 101, 10)},
value=100,
),
dcc.Markdown('### Over-Sampling'),
dcc.Slider(
id='over-spl-slider-id',
min=100,
max=1000,
marks={i: '{}%'.format(i) for i in range(100, 1001, 100)},
value=100,
),
dcc.Markdown('### Parameter'),
dcc.Checklist(
id='show-dec-bound-id',
options=[
{'label': 'แสดง Decision Boundary',
'value': 'show_decision_boundary'},
],
value=[]
)
],
style={'width': '30%', 'display': 'inline-block', 'vertical-align': 'top'}
),
html.Div(children=[
dcc.Graph(id='graph-id', figure=fig),
],
style={'width': '50%', 'display': 'inline-block'}
)
])
def under_sampling(x_train, y_train, ratio):
num = int(np.sum(y_train == 1)*ratio/100.0)
idx = np.where(y_train == 1)[0][:num]
x_new = np.concatenate((x_train[y_train == 0], x_train[idx]))
y_new = np.concatenate((y_train[y_train == 0], y_train[idx]))
return x_new, y_new
def over_sampling(x_train, y_train, ratio, noise=noise):
n = np.sum(y_train == 0)
num = int(n*ratio/100.0)
pos = np.arange(num) % n
idx = np.where(y_train == 0)[0][pos]
x_new = np.concatenate(
(x_train[idx]+noise[:pos.shape[0]], x_train[y_train == 1]))
y_new = np.concatenate((y_train[idx], y_train[y_train == 1]))
return x_new, y_new
@app.callback(
Output(component_id='graph-id', component_property='figure'),
[Input(component_id='under-spl-slider-id', component_property='value'),
Input(component_id='over-spl-slider-id', component_property='value'),
Input(component_id='show-dec-bound-id', component_property='value')]
)
def update_under_div(under_ratio, over_ratio, show_decision_boundary):
x_under, y_under = under_sampling(x_train, y_train, under_ratio)
x_new, y_new = over_sampling(x_under, y_under, over_ratio)
fig = get_fig(x_new, y_new, len(show_decision_boundary))
return fig
if __name__ == '__main__':
app.run_server(debug=True)
| 2.375
| 2
|
catan-services_1.0/examples/gps.py
|
mit-ll/CATAN
| 15
|
12781696
|
"""
This is a simple example script showing our GPS module
(c) 2015 Massachusetts Institute of Technology
"""
# CATAN
import catan.utils as utils
from catan.gps import GPSReceiver
if __name__ == "__main__":
gps = GPSReceiver(serial_interface="/dev/ttyUSB1")
while True:
coords = gps.get_coordinates()
print coords
time = gps.get_time()
if time is not None:
print time.timetuple()
utils.linux_set_time(time.timetuple())
| 2.546875
| 3
|
uroborosqlfmt/exceptions.py
|
rhyddereh/Sublime-uroboroSQL-formatter
| 26
|
12781697
|
# coding:utf-8
'''
Created on 2016/07/09
@author: ota
'''
import sys
import traceback
class SqlFormatterException(Exception):
'''
    Exception class used by SqlFormatter
'''
def __init__(self, tlist, ex, trace):
super(SqlFormatterException, self).__init__(ex.message if hasattr(ex, "message") else "")
self.tlist = self.__decode(tlist)
self.e = ex
self.trace = self.__decode(trace)
self.message = ex.message
def __decode(self, text):
text = str(text)
if sys.version_info[0] < 3:
return text.decode("utf-8")
else:
return text
def __encode(self, text):
if sys.version_info[0] < 3 and isinstance(text, unicode):
return text.encode("utf-8")
else:
return text
def __str__(self, *args):
return self.message \
+ "\ntoken:" + self.__encode(self.tlist) \
+ "\ntrace:" + self.__encode(self.trace) \
+ "\noriginal:" + str(self.e)
@staticmethod
def wrap_try_except(fnc, token, *args):
try:
if args:
return fnc(*args)
else:
return fnc(token)
except Exception as ex:
if not isinstance(ex, SqlFormatterException):
raise SqlFormatterException(token, ex, traceback.format_exc())
raise
@staticmethod
def to_wrap_try_except(fnc, token_arg_index):
def call(*args):
try:
return fnc(*args)
except Exception as ex:
if not isinstance(ex, SqlFormatterException):
raise SqlFormatterException(args[token_arg_index], ex, traceback.format_exc())
raise
return call
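# Illustrative usage sketch (assumed; `format_token` stands for any hypothetical
# token-processing function): wrapping it re-raises unexpected errors as a
# SqlFormatterException carrying the offending token and traceback.
#
#     result = SqlFormatterException.wrap_try_except(format_token, token)
#     safe_format = SqlFormatterException.to_wrap_try_except(format_token, 0)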
| 2.78125
| 3
|
src/hyp3r0pt.py
|
akshayc11/hyp3r0pt
| 0
|
12781698
|
<reponame>akshayc11/hyp3r0pt
#!/usr/bin/env python3
import argparse
import os
def _parse_args():
# type: () -> object
parser = argparse.ArgumentParser(description='''
This is the main argument parser for the hyp3r0pt toolkit
''')
return parser.parse_args()
def _main():
args = _parse_args()
print(args)
if __name__ == '__main__':
_main()
| 2.890625
| 3
|
GraphMatchingByConvexPolygon/src/all.py
|
Anonymous772066235/GraduationDesignProgram
| 0
|
12781699
|
# File :all.py
# Author :WJ
# Function :
# Time :2021/02/18
# Version :
# Amend :
import numpy as np
import ConvexPolygon as cp
import HierarchicalClustering as hc
import ConPolyProcess as cs
import LaplacianMatrice as lm
import time
from scipy.optimize import linear_sum_assignment
import Visualization as vs
import TransformationMatrix as tf
# <editor-fold desc="Method">
def conpoly_points(data, clusters, clusters_num=2):
P = []
for k in range(clusters_num):
        ## Build a True/False array from whether each value in the labels equals k
my_members = clusters == k
        ## X[my_members, 0] selects the x-coordinates where my_members is True
data_tem = data[my_members, :]
indexes = cp.ConvexPolygon(data_tem)
points = np.array(data_tem[indexes, :], dtype=np.float32)
while 1:
max, a0, b0 = cs.maxPoints(points=points)
if max > 2:
points = cs.delete_linepoints(points, a0, b0, 3)
else:
break
points = hc.mergeClosePoints(points, 3)
for i in range(len(points)):
P.append(points[i, :])
return np.array(P)
# </editor-fold>
start0 = time.time()
print('求建筑物凸多边形顶点------------------------------------------------------')
# Load the data
data_dlg = np.loadtxt('..\\data\\Polyline_PCB02_500.txt', delimiter=',')
data_dopp = np.loadtxt('..\\data\\PCB_c1_z5_t20.txt', delimiter='\t')
data_dlg0 = data_dlg[:, 0:2]
data_dopp0 = data_dopp[:, 0:2]
# Set the number of building clusters in the point cloud
clusters_num = 2
# Clustering
data_dlg, clusters_dlg = hc.HierarchicalClustering(data_dlg0, clusters_num, 'dlg')
data_dopp, clusters_dopp = hc.HierarchicalClustering(data_dopp0, clusters_num, 'dopp')
# Compute the convex polygon of each building (and post-process its vertices)
P_dlg = conpoly_points(data_dlg, clusters_dlg, clusters_num)
P_dopp = conpoly_points(data_dopp, clusters_dopp, clusters_num)
# Visualize the convex polygon vertices
vs.Visualize2PointClouds(data_dlg, P_dlg, 'ConPoly_dlg', feature1=['blue', 'dlg', '.'], feature2=['red', 'vertex', 'o'])
vs.Visualize2PointClouds(data_dopp, P_dopp, 'ConPoly_dopp', feature1=['blue', 'dopp', '.'],
feature2=['red', 'vertex', 'o'])
start1 = time.time()
TIME = start1 - start0
print('耗时:{:.0f} hours {:.0f} minutes {:.0f} seconds'.format(TIME // 3600, TIME % 3600 // 60, TIME % 3600 % 60))
print('图匹配------------------------------------------------------')
# Compute the Laplacian matrices
B_dlg = lm.LaplacianMatrice(P_dlg)
B_dopp = lm.LaplacianMatrice(P_dopp)
# Spectral decomposition of the Laplacian matrices
U_dlg, Lambda_dlg = lm.LaplacianMatrice_decomposed(B_dlg)
U_dopp, Lambda_dopp = lm.LaplacianMatrice_decomposed(B_dopp)
# Compute the dissimilarity matrix
k = min(len(P_dlg), len(P_dopp))
A = lm.corrlation(U_dopp, U_dlg, k)
# Bipartite matching on the similarity matrix (drop results whose dissimilarity is too large)
row_ind, col_ind = linear_sum_assignment(A)
row, col = lm.DeleteLargeValue(A, row_ind, col_ind, 0.9)
# Re-sort the point clouds according to the matching result
P_dlg_new=lm.resort_clouds(P_dlg,row)
P_dopp_new=lm.resort_clouds(P_dopp,col)
# Visualize the matching result of the convex polygon vertices
vs.VisualizeMatch(P_dopp, P_dlg, row, col,'凸多边形顶点')
# Compute the transformation matrix (and transform dopp)
R, T = tf.ca_rt(P_dopp_new, P_dlg_new, 'MatchingByConPolyPoints_result.txt')
data_dopp = tf.transformation(data_dopp0, R, T, 'dopp_transformed.txt')
# Visualize the registration result of the original point clouds
vs.Visualize2PointClouds(data_dopp, data_dlg0, 'Macth_dlg&dopp', feature1=['blue', 'dopp', '.'],
feature2=['red', 'dlg', '.'])
start2 = time.time()
TIME = start2 - start1
print('耗时:{:.0f} hours {:.0f} minutes {:.0f} seconds'.format(TIME // 3600, TIME % 3600 // 60, TIME % 3600 % 60))
TIME = time.time() - start0
print('\n总耗时:{:.0f} hours {:.0f} minutes {:.0f} seconds'.format(TIME // 3600, TIME % 3600 // 60, TIME % 3600 % 60))
| 2.1875
| 2
|
whirlpool/__init__.py
|
datashaman/whirlpool
| 1
|
12781700
|
<gh_stars>1-10
"""
Tuplespace implementation for python.
Copyright (c) 2010, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Data Shaman nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = '<NAME> <<EMAIL>>'
import time, redis
FETCH_INTERVAL = 0.0001
class Whirlpool(redis.Redis):
def __init__(self, opts={}):
self.opts = dict(host='localhost', port=6379, db=0)
self.opts.update(opts)
super(Whirlpool, self).__init__(**self.opts)
def take(self, template):
while True:
matches = self.keys(template)
if len(matches) > 0:
results = {}
for match in matches:
results[match] = self.get(match)
self.delete(match)
return results
time.sleep(FETCH_INTERVAL)
def read(self, template):
result = self.blpop(template)
print result
self.lpush(template)
return result
| 1.796875
| 2
|
lib/graphite.py
|
sglass68/chromite
| 0
|
12781701
|
<reponame>sglass68/chromite<filename>lib/graphite.py
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point to stats reporting objects for cbuildbot.
These factories setup the stats collection modules (es_utils, statsd) correctly
so that cbuildbot stats from different sources (official builders, trybots,
developer machines etc.) stay separate.
"""
from __future__ import print_function
from chromite.cbuildbot import constants
from chromite.cbuildbot import topology
from chromite.lib import factory
from chromite.lib.graphite_lib import es_utils
from chromite.lib.graphite_lib import stats
from chromite.lib.graphite_lib import stats_es_mock
CONNECTION_TYPE_DEBUG = 'debug'
CONNECTION_TYPE_MOCK = 'none'
CONNECTION_TYPE_PROD = 'prod'
CONNECTION_TYPE_READONLY = 'readonly'
# The types definitions below make linter unhappy. The 'right' way of using
# functools.partial makes functools.wraps (and hence our decorators) blow up.
# pylint: disable=unnecessary-lambda
class ESMetadataFactoryClass(factory.ObjectFactory):
"""Factory class for setting up an Elastic Search connection."""
_ELASTIC_SEARCH_TYPES = {
CONNECTION_TYPE_PROD: factory.CachedFunctionCall(
lambda: es_utils.ESMetadata(
use_http=constants.ELASTIC_SEARCH_USE_HTTP,
host=topology.topology.get(topology.ELASTIC_SEARCH_HOST_KEY),
port=topology.topology.get(topology.ELASTIC_SEARCH_PORT_KEY),
index=constants.ELASTIC_SEARCH_INDEX,
udp_port=topology.topology.get(
topology.ELASTIC_SEARCH_UDP_PORT_KEY))),
CONNECTION_TYPE_READONLY: factory.CachedFunctionCall(
lambda: es_utils.ESMetadataRO(
use_http=constants.ELASTIC_SEARCH_USE_HTTP,
host=topology.topology.get(topology.ELASTIC_SEARCH_HOST_KEY),
port=topology.topology.get(topology.ELASTIC_SEARCH_PORT_KEY),
index=constants.ELASTIC_SEARCH_INDEX,
udp_port=topology.topology.get(
topology.ELASTIC_SEARCH_UDP_PORT_KEY)))
}
def __init__(self):
super(ESMetadataFactoryClass, self).__init__(
'elastic search connection', self._ELASTIC_SEARCH_TYPES,
lambda from_setup, to_setup: from_setup == to_setup)
def SetupProd(self):
"""Set up this factory to connect to the production Elastic Search."""
self.Setup(CONNECTION_TYPE_PROD)
def SetupReadOnly(self):
"""Set up this factory to allow querying the production Elastic Search."""
self.Setup(CONNECTION_TYPE_READONLY)
ESMetadataFactory = ESMetadataFactoryClass()
class StatsFactoryClass(factory.ObjectFactory):
"""Factory class for setting up a Statsd connection."""
_STATSD_TYPES = {
CONNECTION_TYPE_PROD: factory.CachedFunctionCall(
lambda: stats.Statsd(
es=ESMetadataFactory.GetInstance(),
host=topology.topology.get(topology.STATSD_HOST_KEY),
port=topology.topology.get(topology.STATSD_PORT_KEY),
prefix=constants.STATSD_PROD_PREFIX)),
CONNECTION_TYPE_DEBUG: factory.CachedFunctionCall(
lambda: stats.Statsd(
es=ESMetadataFactory.GetInstance(),
host=topology.topology.get(topology.STATSD_HOST_KEY),
port=topology.topology.get(topology.STATSD_PORT_KEY),
prefix=constants.STATSD_DEBUG_PREFIX)),
CONNECTION_TYPE_MOCK: factory.CachedFunctionCall(
lambda: stats_es_mock.Stats())
}
def __init__(self):
super(StatsFactoryClass, self).__init__(
'statsd connection', self._STATSD_TYPES,
lambda from_setup, to_setup: from_setup == to_setup)
def SetupProd(self):
"""Set up this factory to connect to the production Statsd."""
self.Setup(CONNECTION_TYPE_PROD)
def SetupDebug(self):
"""Set up this factory to connect to the debug Statsd."""
self.Setup(CONNECTION_TYPE_DEBUG)
def SetupMock(self):
"""Set up this factory to return a mock statsd object."""
self.Setup(CONNECTION_TYPE_MOCK)
StatsFactory = StatsFactoryClass()
| 1.851563
| 2
|
lambdata_danielmartinalarcon/coding_challenges/cipher_tests.py
|
DanielMartinAlarcon/lambdata
| 0
|
12781702
|
import unittest
from cipher import sub_cipher
class CipherTests(unittest.TestCase):
"""
Run several error tests
"""
def test_one_to_one(self):
self.assertTrue(sub_cipher('toot', 'peep'))
def test_one_to_two_correspondence(self):
self.assertFalse(sub_cipher('lambda', 'school'))
def test_two_to_one_correspondence(self):
self.assertFalse(sub_cipher('school', 'lambda'))
def test_unequal_length(self):
self.assertFalse(sub_cipher('o', 'lambda'))
def test_empty_strings(self):
self.assertTrue(sub_cipher('', ''))
if __name__ == '__main__':
unittest.main()
| 3.515625
| 4
|
Recursion/palindrome_check.py
|
lakshyarawal/pythonPractice
| 0
|
12781703
|
<gh_stars>0
""" Palindrome Check: Given a string find if it is a palindrome using recursion """
"""Solution: Recursively make the string shorter until length is two or one, if two compare both and return value """
def palindrome_check(a) -> bool:
if len(a) == 1 or len(a) == 0:
return True
return a[0] == a[-1] and palindrome_check(a[1:-1])
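# For illustration: palindrome_check("racecar") recurses on "aceca", "cec" and
# "e" (length 1, so True), while palindrome_check("abca") returns False because
# the inner call compares 'b' against 'c'.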
def main():
val1 = input("Enter your string: ")
if palindrome_check(val1):
print("Yes")
else:
print("No")
# Using the special variable
# __name__
if __name__ == "__main__":
main()
| 4.0625
| 4
|
fastface/transforms/compose.py
|
ethanwharris/light-face-detection
| 0
|
12781704
|
<reponame>ethanwharris/light-face-detection
from typing import Dict, Tuple
import numpy as np
class Compose():
"""Compose given transforms"""
def __init__(self, *transforms):
self.transforms = transforms
def __call__(self, img: np.ndarray, targets: Dict = {}) -> Tuple[np.ndarray, Dict]:
# TODO add logger
for transform in self.transforms:
img, targets = transform(img, targets=targets)
return (img, targets)
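# A minimal, illustrative demo (not part of the original module). The two
# transforms below are assumed callables with the (img, targets) -> (img, targets)
# signature that Compose expects; they only run when the file is executed directly.
if __name__ == "__main__":
    def hflip(img: np.ndarray, targets: Dict = {}) -> Tuple[np.ndarray, Dict]:
        # Flip the image horizontally and pass targets through unchanged
        return img[:, ::-1], targets

    def identity(img: np.ndarray, targets: Dict = {}) -> Tuple[np.ndarray, Dict]:
        return img, targets

    pipeline = Compose(hflip, identity)
    out_img, out_targets = pipeline(np.arange(12).reshape(3, 4, 1))
    print(out_img.shape, out_targets)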
| 2.5625
| 3
|
tests/test_sessions.py
|
gdubicki/requests-extra
| 0
|
12781705
|
import logging
from requests_extra import get
logging.basicConfig(level=logging.DEBUG)
def test_sessions_automatically_reused_for_same_scheme_and_netloc(caplog):
# we will capture the debug logs that will print sth like "Got session from cache"
caplog.set_level(logging.DEBUG)
get("https://httpbin.org/ip")
get("https://httpbin.org/user-agent")
second_request_reused_session = False
for record in caplog.records:
if "Got session from cache!" in record.getMessage():
second_request_reused_session = True
break
assert second_request_reused_session
def test_automatic_session_cookies_working_on_first_request():
# on the 1st request that gets a response with cookies we SHOULD be able to read them
response1 = get("https://httpbin.org/cookies/set/foo/bar", allow_redirects=False)
assert response1.cookies["foo"] == "bar"
def test_automatic_session_cookies_not_getting_passed_on_subsequent_requests():
# on the 1st request that gets a response with cookies we SHOULD be able to read them
response1 = get("https://httpbin.org/cookies/set/foo2/bar2", allow_redirects=False)
assert response1.cookies["foo2"] == "bar2"
# ...but the 2nd request should NOT contain the cookie set above!
response2 = get("https://httpbin.org/cookies")
assert response2.json()["cookies"] == {}
| 2.4375
| 2
|
src/nn1.py
|
cxz/avito-demand-prediction
| 0
|
12781706
|
<gh_stars>0
import tensorflow as tf
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
from keras.layers import *
from keras.callbacks import *
from keras.models import Model
from keras.optimizers import Adam
from keras.models import Sequential
from keras.models import load_model
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as mse
import pandas as pd
import numpy as np
from tqdm import tqdm
from util import setup_logs
import data
import fasttext
logger = setup_logs("", "../tmp/tmp.log")
def build_model(E, numerical, sequence):
# numerical inputs:
# price, item_seq_number, ...
numerical = Input(shape=(numerical,), name="numerical")
n1 = Reshape((1, -1))(numerical)
# i0 = Input(shape=(1,), name='user_id')
# e0 = Embedding(1009906, 14)(i0)
i1 = Input(shape=(1,), name="region")
e1 = Embedding(27, 4)(i1)
i2 = Input(shape=(1,), name="city")
e2 = Embedding(1751, 8)(i2)
i3 = Input(shape=(1,), name="parent_category_name")
e3 = Embedding(8, 3)(i3)
i4 = Input(shape=(1,), name="category_name")
e4 = Embedding(46, 4)(i4)
# i5 = Input(shape=(1, ), name='item_seq_number')
# e5 = Embedding(33945, 11)(i5)
i6 = Input(shape=(1,), name="user_type")
e6 = Embedding(2, 2)(i6)
i7 = Input(shape=(1,), name="image_top_1")
e7 = Embedding(3063, 9)(i7)
i8 = Input(shape=(1,), name="param_1")
e8 = Embedding(371, 6)(i8)
i9 = Input(shape=(1,), name="param_2")
e9 = Embedding(277, 6)(i9)
i10 = Input(shape=(1,), name="param_3")
e10 = Embedding(1275, 8)(i10)
i11 = Input(shape=(1,), name="weekday")
e11 = Embedding(6, 2)(i11)
# i13 = Input(shape=(1,), name="has_image")
# e13 = Embedding(1, 1)(i13)
e_cnn = Embedding(1000, 10)
i14 = Input(shape=(1,), name="top_1_name_resnet50")
e14 = e_cnn(i14)
i15 = Input(shape=(1,), name="top_1_name_vgg16")
e15 = e_cnn(i15)
# sequence inputs
sequence = Input(shape=(sequence,), name="sequence")
embedding = Embedding(E.shape[0], E.shape[1], weights=[E], trainable=False)(
sequence
)
x = SpatialDropout1D(0.1)(embedding)
# x = Bidirectional(GRU(128, return_sequences=True,dropout=0.1,recurrent_dropout=0.1))(x)
x = Bidirectional(CuDNNGRU(128, return_sequences=True))(x)
x = Conv1D(32, kernel_size=3, padding="valid", kernel_initializer="glorot_uniform")(
x
)
avg_pool = GlobalAveragePooling1D()(x)
max_pool = GlobalMaxPooling1D()(x)
inputs = concatenate(
[
# e0,
e1,
e2,
e3,
e4,
# e5,
e6,
e7,
e8,
e9,
e10,
e11,
# e13,
e14,
e15,
n1,
Reshape((-1, 32))(avg_pool),
Reshape((-1, 32))(max_pool),
]
)
x = Dense(256, activation="relu")(inputs)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Dense(128, activation="relu")(x)
x = BatchNormalization()(x)
x = Dropout(0.1)(x)
x = Dense(32, activation="relu")(x)
x = BatchNormalization()(x)
x = Dropout(0.1)(x)
x = Flatten()(x)
predictions = Dense(1, activation="linear")(x)
keras_input = [ # i0,
i1,
i2,
i3,
i4,
i6,
i7,
i8,
i9,
i10,
i11,
# i13,
i14,
i15,
numerical,
sequence,
]
model = Model(inputs=keras_input, outputs=predictions)
adam = keras.optimizers.Adam(lr=0.001)
model.compile(optimizer=adam, loss="mse", metrics=[])
return model
def build_input(X, text, X_tfidf2, X_target_encoded, X_user_stats, X_image_cnn):
X_dict = dict([(c, X[c].values) for c in X.columns])
print(X_image_cnn.head())
for c in X_image_cnn.columns:
if c.startswith("top_1_name"):
print("adding ", c)
X_dict[c] = X_image_cnn[c]
X_dict["numerical"] = np.hstack(
[
X[["price"]].values,
# X[["item_seq_number"]].values,
X_tfidf2.values,
# X_target_encoded.values,
# X_user_stats.values
]
)
X_dict["sequence"] = text
return X_dict
def run(model, X_train, y_train, X_val, y_val):
ckpt = ModelCheckpoint(
"../tmp/weights.{epoch:02d}-{val_loss:.4f}.hdf5", verbose=1, save_best_only=True
)
reduce_lr = ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=5)
model.fit(
X_train,
y_train,
epochs=20,
batch_size=128,
validation_data=(X_val, y_val),
callbacks=[reduce_lr, ckpt],
)
y_val_pred = model.predict(X_val, verbose=1, batch_size=1024)
print(np.sqrt(mse(y_val, y_val_pred)))
def main(model=None):
X = data.load_traintestX_base()
y = np.load("../cache/20180601_trainy.npy")
ntrain = y.shape[0]
X = X.iloc[: y.shape[0]]
logger.info(f"{X.shape} columns:{' '.join(X.columns)}")
text_vec = np.load("../cache/title-description-seq100.npy")
text_vec = text_vec[: y.shape[0]]
logger.info(f"text shape: {text_vec.shape}")
# X_ridge1 = data.load_traintestX_tfidf1_ridge()
# X_ridge1 = X_ridge1[:ntrain]
X_tfidf2 = data.load_traintestX_tfidf2() # df
# X_mean_price = data.load_traintestX_mean_price()
X_target_encoded = data.load_traintestX_target_encoded()
X_user_stats = data.load_traintestX_user_stats2()
X_image_cnn = data.load_traintestX_image_cnn()
train_idx, val_idx = train_test_split(range(ntrain), test_size=0.2)
X_train_dict = build_input(
X.iloc[train_idx],
text_vec[train_idx],
X_tfidf2.iloc[train_idx],
# X_mean_price.iloc[train_idx]
X_target_encoded.iloc[train_idx],
X_user_stats.iloc[train_idx],
X_image_cnn.iloc[train_idx],
)
y_train = y[train_idx]
X_val_dict = build_input(
X.iloc[val_idx],
text_vec[val_idx],
X_tfidf2.iloc[val_idx],
# X_mean_price.iloc[val_idx]
X_target_encoded.iloc[val_idx],
X_user_stats.iloc[val_idx],
X_image_cnn.iloc[val_idx],
)
y_val = y[val_idx]
logger.info("train/val loaded.")
logger.info("loading fasttext weights..")
E = fasttext.load_cached()
if model is None:
model = build_model(
E, X_train_dict["numerical"].shape[1], X_train_dict["sequence"].shape[1]
)
run(model, X_train_dict, y_train, X_val_dict, y_val)
else:
y_val_pred = model.predict(X_val_dict, verbose=1, batch_size=1024)
print(np.sqrt(mse(y_val, y_val_pred)))
if __name__ == "__main__":
# model = load_model("../tmp/weights.10-0.0516.hdf5")
model = None
main(model)
| 2.171875
| 2
|
Code/Lecture_06/bitonic_sort.py
|
Alexhuyi/cme213-spring-2021
| 20
|
12781707
|
<filename>Code/Lecture_06/bitonic_sort.py
import numpy as np
import math as m
up = 1
down = 0
def pretty_print(a, clr):
print('[', end=' ')
for i in range(a.size):
if clr[i] == 1:
print('\033[91m', a[i], end=' ')
elif clr[i] == 2:
print('\033[92m', a[i], end=' ')
elif clr[i] == 3:
print('\033[93m', a[i], end=' ')
elif clr[i] == 4:
print('\033[34m', a[i], end=' ')
else:
print('\033[0m', a[i], end=' ')
print('\033[0m', ']')
def bitonic_sort(a, start, end, flag):
if end <= start+1:
return
length = end - start
if length % 2 != 0:
print("The length of a (sub)sequence is not divisible by 2")
        exit()
split_length = length >> 1
# Bitonic compare
for i in range(start, start+split_length):
if flag == up:
if a[i] > a[i+split_length]:
a[i], a[i+split_length] = a[i+split_length], a[i]
else:
if a[i] < a[i+split_length]:
a[i], a[i+split_length] = a[i+split_length], a[i]
# This piece of code is just for the pretty printing
color = np.zeros(n)
power_of_two = 1
while ((1 << power_of_two) != length):
power_of_two += 1
color[start:start+length] = 1+power_of_two % 4
pretty_print(a, color)
# Recursive calls
bitonic_sort(a, start, start+split_length, flag)
bitonic_sort(a, start+split_length, end, flag)
n = 1 << 4
a = np.random.randint(0, n**2, n)
b = a.copy()
i = 2
while i <= n:
flag = up
print('')
color = np.zeros(n)
pretty_print(a, color)
for j in range(0, n, i):
# Sorting sequences of increasing length
bitonic_sort(a, j, j+i, flag)
flag = 1-flag
i = i << 1
b = np.sort(b)
assert(np.array_equal(a, b))
| 4.15625
| 4
|
groups/models.py
|
eldarion-client/hedera
| 8
|
12781708
|
import uuid
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from lemmatized_text.models import LemmatizedText
from vocab_list.models import VocabularyList
# class Course(models.Model):
# pass
class Group(models.Model):
"""
    Represents a Class but, due to the reserved keyword class in Python, we
are using the more generic name "Group".
"""
class_key = models.UUIDField(default=uuid.uuid4, primary_key=True, editable=False)
title = models.CharField(max_length=100)
description = models.TextField()
teachers = models.ManyToManyField(User, related_name="taught_classes")
students = models.ManyToManyField(User, related_name="enrolled_classes")
texts = models.ManyToManyField(LemmatizedText, related_name="classes")
vocab_lists = models.ManyToManyField(VocabularyList, related_name="classes")
student_invite_key = models.UUIDField(default=uuid.uuid4)
teacher_invite_key = models.UUIDField(default=uuid.uuid4)
created_by = models.ForeignKey(User, related_name="created_classes", on_delete=models.CASCADE)
created_at = models.DateTimeField(default=timezone.now)
def get_absolute_url(self):
return reverse("groups_detail", args=[self.class_key])
def roll_student_invite(self):
        self.student_invite_key = uuid.uuid4()
self.save()
def roll_teacher_invite(self):
        self.teacher_invite_key = uuid.uuid4()
self.save()
def students_join_link(self):
domain = Site.objects.get_current().domain
url = reverse("groups_join", args=[self.student_invite_key])
return f"https://{domain}{url}"
def teachers_join_link(self):
domain = Site.objects.get_current().domain
url = reverse("groups_join", args=[self.teacher_invite_key])
return f"https://{domain}{url}"
| 2.390625
| 2
|
esi_leap/resource_objects/resource_object_factory.py
|
DanNiESh/esi-leap
| 0
|
12781709
|
<gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from esi_leap.common import exception
from esi_leap.resource_objects import dummy_node
from esi_leap.resource_objects import ironic_node
from esi_leap.resource_objects import test_node
RESOURCE_TYPES = ['ironic_node', 'dummy_node', 'test_node']
class ResourceObjectFactory(object):
@staticmethod
def get_resource_object(resource_type, resource_uuid):
if resource_type == 'ironic_node':
return ironic_node.IronicNode(resource_uuid)
elif resource_type == 'dummy_node':
return dummy_node.DummyNode(resource_uuid)
elif resource_type == 'test_node':
return test_node.TestNode(resource_uuid)
raise exception.ResourceTypeUnknown(resource_type=resource_type)
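# Illustrative usage (assumed; 'abc-123' is a placeholder UUID and the concrete
# node classes may need additional service configuration before use):
#
#     node = ResourceObjectFactory.get_resource_object('dummy_node', 'abc-123')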
| 1.828125
| 2
|
pyext/professor2/ml/histos.py
|
iamholger/professor
| 2
|
12781710
|
<gh_stars>1-10
# -*- python -*-
from professor2.histos import *
class MLHisto(Histo):
"Specialisation of Histo as a container of MLBins"
def __init__(self, nnbins=None, path=None):
Histo.__init__(self, nnbins, path)
def toDataHisto(self, *params):
"Convert this IpolBin to a DataBin with values and errors computed at params"
dbins = [ib.toDataBin(*params) for ib in self.bins]
dhist = DataHisto(dbins, self.path)
return dhist
class MLBin(Bin):
"""
    A bin whose value is predicted by a Machine Learning model
"""
__slots__ = ["mlval", "__dict__"]
def __init__(self, xmin, xmax, X, Y, pnames=None, pC=1.0, pEps=0.0):
Bin.__init__(self, xmin, xmax)
from numpy import array
X=array(X)
Y=array([[y,0] for y in Y]) # Manky hack to get rid of the deprecation warning
# Data scaling --- Standard scaler works much better than MinMaxScaler
from sklearn import preprocessing
self._xscaler = preprocessing.StandardScaler()
xscaled = self._xscaler.fit_transform(X)
self._yscaler = preprocessing.StandardScaler()
yscaled = self._yscaler.fit_transform(Y) # This produces the noisy deprecation warning
# Machine Learning magic
from sklearn import svm
ml = svm.SVR(kernel='rbf', C=pC, epsilon=pEps) # TODO --- explore parameters of SVR
        ml.fit(xscaled, yscaled[:,0]) # Part of the hack
self.mlval = ml
def val(self, *params):
"Get the ML prediction of this bin"
from numpy import array
p_raw =array(params[0][0]).reshape(1,-1) # The raw, unscaled param point
p_scaled = self._xscaler.transform(p_raw) # The scaled param point
ret_raw = self.mlval.predict(p_scaled) # The prediction in the scaled value world
ret = self._yscaler.inverse_transform([ret_raw,0]) # The prediction in the unscaled value world
return float(ret[0]) # part of the hack
def toDataBin(self, *params): #< needs Python3
"Convert this NNBin to a DataBin with values at params"
db = DataBin(self.xmin, self.xmax,
val=self.val(params),
)
return db
def mk_MLHisto(histos, runs, paramslist, paramnames=None, pC=1.0, pEps=0.0):
from numpy import array
nbins = len(histos.itervalues().next().bins)
mbins = []
for n in xrange(nbins):
xmins = set([histos[run].bins[n].xmin for run in runs])
xmaxs = set([histos[run].bins[n].xmax for run in runs])
xmin, xmax = xmins.pop(), xmaxs.pop()
vals = [histos[run].bins[n].val for run in runs]
mbins.append(MLBin(xmin, xmax, array(paramslist), array(vals), paramnames, pC, pEps))
return MLHisto(mbins, histos.values()[0].path)
| 2.75
| 3
|
innopoints/views/activity.py
|
Innopoints/backend
| 1
|
12781711
|
"""Views related to the Activity model.
Activity:
- POST /projects/{project_id}/activities
- PATCH /projects/{project_id}/activities/{activity_id}
- DELETE /projects/{project_id}/activities/{activity_id}
- PATCH /projects/{project_id}/activities/{activity_id}/publish
Competence:
- GET /competences
- POST /competences
- PATCH /competences/{competence_id}
- DELETE /competences/{competence_id}
"""
import logging
from flask import request
from flask.views import MethodView
from flask_login import login_required, current_user
from marshmallow import ValidationError
from sqlalchemy.exc import IntegrityError
from innopoints.extensions import db
from innopoints.blueprints import api
from innopoints.core.helpers import abort, allow_no_json, admin_required
from innopoints.core.notifications import remove_notifications
from innopoints.models import (
Activity,
ApplicationStatus,
Competence,
IPTS_PER_HOUR,
LifetimeStage,
Project,
)
from innopoints.schemas import ActivitySchema, CompetenceSchema
NO_PAYLOAD = ('', 204)
log = logging.getLogger(__name__)
@api.route('/projects/<int:project_id>/activities', methods=['POST'])
@login_required
def create_activity(project_id):
"""Create a new activity to an existing project."""
project = Project.query.get_or_404(project_id)
if not current_user.is_admin and current_user not in project.moderators:
abort(403)
if project.lifetime_stage not in (LifetimeStage.draft, LifetimeStage.ongoing):
abort(400, {'message': 'Activities may only be created on draft and ongoing projects.'})
in_schema = ActivitySchema(exclude=('id', 'project', 'applications', 'internal'))
try:
new_activity = in_schema.load(request.json)
except ValidationError as err:
abort(400, {'message': err.messages})
if new_activity.draft is None:
new_activity.draft = True
if not new_activity.draft and not new_activity.is_complete:
abort(400, {'message': 'Incomplete activities cannot be marked as non-draft.'})
new_activity.project = project
try:
db.session.add(new_activity)
db.session.commit()
except IntegrityError as err:
db.session.rollback()
log.exception(err)
abort(400, {'message': 'Data integrity violated.'})
out_schema = ActivitySchema(exclude=('existing_application',),
context={'user': current_user})
return out_schema.jsonify(new_activity)
class ActivityAPI(MethodView):
"""REST views for a particular instance of an Activity model."""
@login_required
def patch(self, project_id, activity_id):
"""Edit the activity."""
project = Project.query.get_or_404(project_id)
if not current_user.is_admin and current_user not in project.moderators:
abort(403)
if project.lifetime_stage not in (LifetimeStage.draft, LifetimeStage.ongoing):
abort(400, {'message': 'Activities may only be edited on draft and ongoing projects.'})
activity = Activity.query.get_or_404(activity_id)
if activity.internal:
abort(404)
if activity.project != project:
abort(400, {'message': 'The specified project and activity are unrelated.'})
in_schema = ActivitySchema(exclude=('id', 'project', 'applications', 'internal'))
try:
with db.session.no_autoflush:
updated_activity = in_schema.load(request.json, instance=activity, partial=True)
except ValidationError as err:
abort(400, {'message': err.messages})
if not updated_activity.draft and not updated_activity.is_complete:
abort(400, {'message': 'Incomplete activities cannot be marked as non-draft.'})
if activity.fixed_reward and activity.working_hours != 1:
abort(400, {'message': 'Cannot set working hours for fixed activities.'})
if not activity.fixed_reward and activity.reward_rate != IPTS_PER_HOUR:
abort(400, {'message': 'The reward rate for hourly activities may not be changed.'})
with db.session.no_autoflush:
if updated_activity.people_required is not None:
if updated_activity.accepted_applications > updated_activity.people_required:
abort(400, {'message': 'Cannot reduce the required people '
'beyond the amount of existing applications.'})
if updated_activity.draft and updated_activity.applications:
abort(400, {'message': 'Cannot mark as draft, applications exist.'})
for application in updated_activity.applications:
if (updated_activity.application_deadline is not None
and updated_activity.application_deadline < application.application_time):
abort(400, {'message': 'Cannot set the deadline earlier '
'than the existing application'})
if application.status != ApplicationStatus.rejected:
application.actual_hours = updated_activity.working_hours
try:
db.session.add(updated_activity)
db.session.commit()
except IntegrityError as err:
db.session.rollback()
log.exception(err)
abort(400, {'message': 'Data integrity violated.'})
out_schema = ActivitySchema(exclude=('existing_application',),
context={'user': current_user})
return out_schema.jsonify(updated_activity)
@login_required
def delete(self, project_id, activity_id):
"""Delete the activity."""
project = Project.query.get_or_404(project_id)
if not current_user.is_admin and current_user not in project.moderators:
abort(403)
if project.lifetime_stage not in (LifetimeStage.draft, LifetimeStage.ongoing):
abort(400, {'message': 'Activities may only be deleted on draft and ongoing projects.'})
activity = Activity.query.get_or_404(activity_id)
if activity.internal:
abort(404)
if activity.project != project:
abort(400, {'message': 'The specified project and activity are unrelated.'})
db.session.delete(activity)
try:
db.session.commit()
remove_notifications({
'activity_id': activity_id,
})
except IntegrityError as err:
db.session.rollback()
log.exception(err)
abort(400, {'message': 'Data integrity violated.'})
return NO_PAYLOAD
activity_api = ActivityAPI.as_view('activity_api')
api.add_url_rule('/projects/<int:project_id>/activities/<int:activity_id>',
view_func=activity_api,
methods=('PATCH', 'DELETE'))
@allow_no_json
@api.route('/projects/<int:project_id>/activities/<int:activity_id>/publish', methods=['PATCH'])
@login_required
def publish_activity(project_id, activity_id):
"""Publish the activity."""
project = Project.query.get_or_404(project_id)
if not current_user.is_admin and current_user not in project.moderators:
abort(403)
activity = Activity.query.get_or_404(activity_id)
if activity.internal:
abort(404)
if activity.project != project:
abort(400, {'message': 'The specified project and activity are unrelated.'})
if (activity.name is None
or activity.start_date is None
or activity.end_date is None
or activity.start_date > activity.end_date):
abort(400, {'message': 'The name or dates of the activity are invalid.'})
activity.draft = False
try:
db.session.commit()
except IntegrityError as err:
db.session.rollback()
log.exception(err)
abort(400, {'message': 'Data integrity violated.'})
return NO_PAYLOAD
# ----- Competence -----
@api.route('/competences')
def list_competences():
"""List all of the existing competences."""
schema = CompetenceSchema(many=True)
return schema.jsonify(Competence.query.all())
@api.route('/competences', methods=['POST'])
@admin_required
def create_competence():
"""Create a new competence."""
in_schema = CompetenceSchema(exclude=('id',))
try:
new_competence = in_schema.load(request.json)
except ValidationError as err:
abort(400, {'message': err.messages})
try:
db.session.add(new_competence)
db.session.commit()
except IntegrityError as err:
db.session.rollback()
log.exception(err)
abort(400, {'message': 'Data integrity violated.'})
out_schema = CompetenceSchema()
return out_schema.jsonify(new_competence)
class CompetenceAPI(MethodView):
"""REST views for a particular instance of a Competence model."""
@admin_required
def patch(self, compt_id):
"""Edit the competence."""
competence = Competence.query.get_or_404(compt_id)
in_schema = CompetenceSchema(exclude=('id',))
try:
updated_competence = in_schema.load(request.json, instance=competence, partial=True)
except ValidationError as err:
abort(400, {'message': err.messages})
try:
db.session.add(updated_competence)
db.session.commit()
except IntegrityError as err:
db.session.rollback()
log.exception(err)
abort(400, {'message': 'Data integrity violated.'})
out_schema = CompetenceSchema()
return out_schema.jsonify(updated_competence)
@admin_required
def delete(self, compt_id):
"""Delete the competence."""
competence = Competence.query.get_or_404(compt_id)
try:
db.session.delete(competence)
db.session.commit()
except IntegrityError as err:
db.session.rollback()
log.exception(err)
abort(400, {'message': 'Data integrity violated.'})
return NO_PAYLOAD
competence_api = CompetenceAPI.as_view('competence_api')
api.add_url_rule('/competences/<int:compt_id>',
view_func=competence_api,
methods=('PATCH', 'DELETE'))
| 2.171875
| 2
|
cogs/utils/DataBase/user.py
|
milindmadhukar/Martin-Garrix-Bot
| 2
|
12781712
|
from datetime import datetime
class User(object):
def __init__(self, bot,
id: int,
guild_id: int,
messages_sent: int = 0,
total_xp: int = 0,
last_xp_added: datetime = None,
garrix_coins: int = 0):
self.bot = bot
self.id = id
self.guild_id = guild_id
self.messages_sent = messages_sent
self.total_xp = total_xp
self.last_xp_added = last_xp_added
self.garrix_coins = garrix_coins
async def post(self) -> None:
query = """SELECT * FROM users WHERE id = $1 AND guild_id = $2"""
assure_exclusive = await self.bot.db.fetch(query, self.id, self.guild_id)
if len(assure_exclusive) == 0:
query = """INSERT INTO users (id, guild_id)
VALUES ( $1, $2 )
ON CONFLICT DO NOTHING"""
await self.bot.db.execute(query, self.id, self.guild_id)
async def update_garrix_coins(self) -> None:
query = """UPDATE users SET garrix_coins = $1 WHERE id = $2 AND guild_id = $3"""
await self.bot.db.execute(query, self.garrix_coins, self.id, self.guild_id)
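# Illustrative usage sketch (assumed; `bot` stands for any object exposing an
# asyncpg-style `db` pool, as the queries above imply, and the calls must be
# awaited inside a running event loop):
#
#     user = User(bot, id=1234, guild_id=5678)
#     await user.post()
#     user.garrix_coins += 10
#     await user.update_garrix_coins()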
| 2.65625
| 3
|
modules/voteban/consts.py
|
Kylmakalle/assistant-bot
| 4
|
12781713
|
<gh_stars>1-10
from aiogram.utils.callback_data import CallbackData
import random
voter = CallbackData('voter', 'chat_id', 'user_id')
from modules.captcha_button.consts import LogEvents
class LogEvents(LogEvents):
UNMEDIA = 'unmedia'
VOTEBAN = 'voteban'
BAN = 'BAN'
TEMPBAN = 'TEMPBAN'
KICK = 'KICK'
ADMIN_REPORT_RESPONSES = [
'Маленький шаг в сторону бана, анимешник.',
'Так-так, что тут у нас? Образованный, революционер...',
'Telegram не любит амдшников.',
'План по репортам за день выполнен, пора банить.',
'Пойди это Петровичу расскажи.',
'Вот ты и попался, анимешник!',
'Вскрывайся, амудешник!',
'Заслуженный бан за свиней!',
'W A S T E D'
]
def get_admin_report_response():
return random.sample(ADMIN_REPORT_RESPONSES, 1)[0]
| 2.421875
| 2
|
visualSHARK/migrations/0019_auto_20201227_0947.py
|
smartshark/visualSHARK
| 1
|
12781714
|
<gh_stars>1-10
# Generated by Django 2.2.13 on 2020-12-27 09:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('visualSHARK', '0018_auto_20201221_1923'),
]
operations = [
migrations.RemoveField(
model_name='technologylabel',
name='times_used',
),
migrations.AddField(
model_name='technologylabel',
name='created_at',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='technologylabel',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='technologylabelcommit',
name='technologies',
field=models.ManyToManyField(blank=True, null=True, to='visualSHARK.TechnologyLabel'),
),
migrations.AlterField(
model_name='changetypelabel',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='technologylabel',
name='ident',
field=models.CharField(max_length=255, unique=True),
),
migrations.AlterField(
model_name='technologylabel',
name='name',
field=models.CharField(max_length=255, unique=True),
),
migrations.AlterField(
model_name='technologylabelcommit',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
]
| 1.726563
| 2
|
test_mi.py
|
timsainb/automi
| 3
|
12781715
|
from matplotlib import mlab
import matplotlib.pyplot as plt
import numpy as np
import colorednoise as cn
from automutualinformation import sequential_mutual_information as smi
from automutualinformation import fit_model
beta = 0.5 # the exponent
samples = 10000 # number of samples to generate
y = cn.powerlaw_psd_gaussian(beta, samples)
nbins = 10 # how many bins to compute over
bins = np.linspace(np.min(y), np.max(y), nbins)
y_dig = np.digitize(y, bins, right=True)
range_ = np.arange(1, 10)
def test_compute_mi():
(MI, _), (shuff_MI, _) = smi([y_dig], distances=range_, n_jobs=1)
def test_compute_mi_fit_model():
(MI, _), (shuff_MI, _) = smi([y_dig], distances=range_, n_jobs=1)
decay_model, model_y = fit_model(
distances=range_,
sig=MI - shuff_MI,
)
| 2.203125
| 2
|
satisfiability.py
|
maxtuno/CNFExamples
| 2
|
12781716
|
<reponame>maxtuno/CNFExamples
import functools
import operator
import peqnp.cnf as cnf
n, m, instance = 10, 24, [[9, -5, 10, -6, 3],
[6, 8],
[8, 4],
[-10, 5],
[-9, 8],
[-9, -3],
[-2, 5],
[6, 4],
[-2, -1],
[7, -2],
[-9, 4],
[-1, -10],
[-3, 4],
[7, 5],
[6, -3],
[-10, 7],
[-1, 7],
[8, -3],
[-2, -10],
[-1, 5],
[-7, 1, 9, -6, 3],
[-9, 6],
[-8, 10, -5, -4, 2],
[-4, -7, 1, -8, 2]]
# Initialize the engine with 2 bits
cnf.begin(bits=2, key='satisfiability')
# Declare an integer of n-bits, each ith-bits is one ith-literal on the model.
x = cnf.integer(bits=n)
# For each clause, ensure that at least one of the literals is true.
for cls in instance:
assert functools.reduce(operator.or_, (cnf.switch(x, abs(lit) - 1, neg=lit > 0) for lit in cls)) > 0
cnf.end({'x': x})
if cnf.satisfy(solver='java -jar -Xmx4g blue.jar'):
print('SAT')
print(' '.join(map(str, [(i + 1) if b else -(i + 1) for i, b in enumerate(x.binary)])) + ' 0')
else:
print('UNSAT')
| 1.84375
| 2
|
fairy/fairy/lang/compile.py
|
thautwarm/lang.red
| 1
|
12781717
|
<filename>fairy/fairy/lang/compile.py
from Ruikowa.ErrorFamily import handle_error
from Ruikowa.ObjectRegex.ASTDef import Ast
from .parser import expression, MetaInfo
from .token import token
parser = handle_error(expression)
def generate_ast(filename: str) -> Ast:
"""
parse the codes in one file into Abstract Syntax Tree.
"""
meta = MetaInfo(fileName=filename)
try:
with open(filename, 'r', encoding='utf8') as to_read:
string = to_read.read()
except UnicodeDecodeError:
raise UnicodeDecodeError(
'The encoding recognize package `chardet` cannot be accurate enough,\n'
' you\'d better make sure that you saved your code source with `UTF-8` encoding.')
ast = parser(token(string), meta=meta, partial=False)
return ast
| 2.390625
| 2
|
env/CartPole.py
|
tphanson/tf-agent-labs
| 0
|
12781718
|
<gh_stars>0
import gym
import numpy as np
import cv2 as cv
from tf_agents.specs import array_spec
from tf_agents.environments import py_environment
from tf_agents.environments import tf_py_environment
from tf_agents.trajectories import time_step as ts
from env.display import virtual_display
class PyEnv(py_environment.PyEnvironment):
def __init__(self, image_shape=(96, 96)):
super(PyEnv, self).__init__()
# Create env
self.image_shape = image_shape
self.input_shape = self.image_shape+(3,)
self.destination = 10
self._env = gym.make('CartPole-v1')
# Env specs
self._action_spec = array_spec.BoundedArraySpec(
shape=(), dtype=np.int32,
minimum=0, maximum=1, name='action')
self._observation_spec = array_spec.BoundedArraySpec(
shape=self.input_shape, dtype=np.float32,
minimum=0, maximum=1, name='observation')
self._state = None
self._episode_ended = False
# Reset
self._reset()
    def __normalize(self, img):
img = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
(h, _) = img.shape
img = img[int(h*0.4): int(h*0.8), :]
img = cv.resize(img, self.image_shape)
img = np.reshape(img, self.image_shape+(1,))
img = np.array(img/255, dtype=np.float32)
return img
def action_spec(self):
return self._action_spec
def observation_spec(self):
return self._observation_spec
def get_state(self):
return self._state
@virtual_display
def set_state(self, _unused=None):
img = self._env.render(mode='rgb_array')
        observation = self.__normalize(img)
if self._state is None:
init_state = observation
(_, _, stack_channel) = self.input_shape
for _ in range(stack_channel-1):
init_state = np.append(init_state, observation, axis=2)
self._state = np.array(init_state, dtype=np.float32)
self._state = self._state[:, :, 1:]
self._state = np.append(self._state, observation, axis=2)
def get_info(self):
return {}
def _reset(self):
_ = self._env.reset()
self._episode_ended = False
self._state = None
self.set_state()
return ts.restart(self._state)
def _step(self, action):
if self._episode_ended:
return self.reset()
_, reward, done, _ = self._env.step(action)
self.set_state()
self._episode_ended = done
if self._episode_ended:
return ts.termination(self._state, reward)
else:
return ts.transition(self._state, reward)
def render(self, mode='rgb_array'):
img = self.get_state()
drawed_img = np.copy(img)
# (h, _) = self.image_shape
# start, end = (self.destination, 0), (self.destination, h)
# color = (0, 0, 255)
# thickness = 1
# drawed_img = cv.line(
# drawed_img, start, end, color, thickness)
drawed_img = cv.resize(drawed_img, (512, 512))
cv.imshow('CartPole-v1', drawed_img)
cv.waitKey(10)
return img
def env():
""" Convert pyenv to tfenv """
pyenv = PyEnv()
tfenv = tf_py_environment.TFPyEnvironment(pyenv)
return tfenv
| 2.328125
| 2
|
test/test_rgb_to_hsl.py
|
liamwb/colourise
| 0
|
12781719
|
import unittest
from colourise import rgb2hsl
class TestRGBtoHSL(unittest.TestCase):
def test_primary_colour_red(self):
r, g, b = 255, 0, 0
h, s, l = rgb2hsl(r, g, b)
self.assertEqual(h, 0.0)
self.assertEqual(s, 1.0)
self.assertEqual(l, 0.5)
def test_primary_colour_green(self):
r, g, b = 0, 255, 0
h, s, l = rgb2hsl(r, g, b)
self.assertEqual(h, 120.0)
self.assertEqual(s, 1.0)
self.assertEqual(l, 0.5)
def test_primary_colour_blue(self):
r, g, b = 0, 0, 255
h, s, l = rgb2hsl(r, g, b)
self.assertEqual(h, 240.0)
self.assertEqual(s, 1.0)
self.assertEqual(l, 0.5)
def test_secondary_colour_cyan(self):
r, g, b = 0, 255, 255
h, s, l = rgb2hsl(r, g, b)
self.assertAlmostEqual(h, 180.0, delta=0.15)
self.assertEqual(s, 1.0)
self.assertEqual(l, 0.5)
def test_secondary_colour_magenta(self):
r, g, b = 255, 0, 255
h, s, l = rgb2hsl(r, g, b)
self.assertAlmostEqual(h, 300.0, delta=0.15)
self.assertEqual(s, 1.0)
self.assertEqual(l, 0.5)
def test_secondary_colour_yellow(self):
r, g, b = 255, 255, 0
h, s, l = rgb2hsl(r, g, b)
self.assertAlmostEqual(h, 60.0, delta=0.15)
self.assertEqual(s, 1.0)
self.assertEqual(l, 0.5)
def test_black(self):
r, g, b = 0, 0, 0
h, s, l = rgb2hsl(r, g, b)
self.assertEqual(s, 0.0)
self.assertEqual(s, 0.0)
self.assertEqual(l, 0.0)
def test_white(self):
r, g, b = 255, 255, 255
h, s, l = rgb2hsl(r, g, b)
self.assertEqual(s, 0.0)
self.assertEqual(s, 0.0)
self.assertEqual(l, 1.0)
| 2.84375
| 3
|
Project/jni/make_jni.py
|
LiangYue1981816/AresEngine
| 3
|
12781720
|
<gh_stars>1-10
def ModuleFileList(moduleName, msvcProjectFileName, androidFileName):
from xml.dom.minidom import parse;
import xml.dom.minidom;
DOMTree = xml.dom.minidom.parse(msvcProjectFileName);
collection = DOMTree.documentElement;
file = open(androidFileName, "w");
file.write("%s := \\\n" % moduleName);
itemGroups = collection.getElementsByTagName("ItemGroup");
for itemGroup in itemGroups:
files = itemGroup.getElementsByTagName("ClCompile");
for index in range(files.length):
'''
isExcluded = False;
excluded = files[index].getElementsByTagName("ExcludedFromBuild");
for index in range(excluded.length):
condition = excluded[index].getAttribute("Condition");
if condition.find("Library") != "-1":
isExcluded = True;
break;
if isExcluded == True:
continue;
'''
fileName = files[index].getAttribute("Include");
fileName = fileName.replace("\\", "/");
if index < files.length - 1:
fileName += " \\";
file.write(fileName + "\n");
file.close();
return;
ModuleFileList("ENGINE_SRC_FILES", "../msvc/Engine.vcxproj", "./ENGINE_SRC_FILES.mk");
ModuleFileList("GFX_RENDERER_SRC_FILES", "../msvc/GfxRenderer.vcxproj", "./GFX_RENDERER_SRC_FILES.mk");
| 2.28125
| 2
|
strategies.py
|
HelgeS/mcap_with_rotation_diversity
| 1
|
12781721
|
<reponame>HelgeS/mcap_with_rotation_diversity<filename>strategies.py
import time
import numpy as np
import pymzn
from function import affinity_pressure, matrizes
class Strategy(object):
def profits(self, tasks, agents):
pass
def mode(self):
return ''
def exchange(self, agents, tasks, profits, initial_assignments, objective):
return initial_assignments, objective
@staticmethod
def profits_and_affs(tasks, agents):
profits = []
affinities = []
for t in tasks:
tprio, taffs = zip(
*[(t.profits[x.name], t.affinities[x.name]) for x in agents if x.name in t.profits.keys()])
profits.append(np.array(tprio))
affinities.append(np.array(taffs))
return profits, affinities
class ProfitStrategy(Strategy):
def profits(self, tasks, agents):
profits, affinities = self.profits_and_affs(tasks, agents)
return profits
def __str__(self):
return 'profit'
class AffinityStrategy(Strategy):
def profits(self, tasks, agents):
_, affinities = self.profits_and_affs(tasks, agents)
return affinities
def __str__(self):
return 'affinity'
class SwitchAtThresholdStrategy(Strategy):
def __init__(self, threshold):
self.threshold = threshold
self.pp = 1
def profits(self, tasks, agents):
profits, affinities = self.profits_and_affs(tasks, agents)
self.pp = affinity_pressure(tasks, agents)
if self.pp < self.threshold:
return profits
else:
return affinities
def mode(self):
if self.pp < self.threshold:
return 'profit'
else:
return 'affinity'
def __str__(self):
return 'switch%d' % self.threshold
class ProductCombinationStrategy(Strategy):
def profits(self, tasks, agents):
profits, affinities = self.profits_and_affs(tasks, agents)
profits = [prio * aff for (prio, aff) in zip(profits, affinities)]
return profits
def __str__(self):
return 'productcomb'
class WeightedPartialProfits(Strategy):
def __init__(self, individual_weights=False):
self.individual_weights = individual_weights
self.weights = []
def profits(self, tasks, agents):
profits, affinities = self.profits_and_affs(tasks, agents)
prio_max = np.max([p.max() for p in profits])
aff_max = np.max([a.max() for a in affinities])
self.weights = []
if not self.individual_weights:
ideal_sum = np.sum([t.ideal_affinity_sum(agents) for t in tasks])
actual_sum = np.sum([t.affinity_sum(agents) for t in tasks])
weight = ideal_sum / actual_sum
weight = np.minimum(weight, 1)
self.weights.append(weight)
values = []
for (t, prio, aff) in zip(tasks, profits, affinities):
aff *= np.min(aff[aff > 0])
if self.individual_weights:
weight = t.ideal_affinity_sum(agents) / np.sum(aff[aff > 0]) # t.affinity_sum(agents)
weight = min(weight, 1)
self.weights.append(weight)
p = (weight * prio / prio_max + (1 - weight) * aff / aff_max) * 1000
assert (0 <= weight <= 1)
assert (np.all(p > 0))
values.append(p.astype(int))
return values
def mode(self):
return np.mean(self.weights).round(decimals=3)
def __str__(self):
if self.individual_weights:
return 'wppind'
else:
return 'wppshared'
class LimitedAssignment(Strategy):
def __init__(self, core_strategy):
self.core_strategy = core_strategy
def profits(self, tasks, agents):
for t in tasks:
if len(t.poss_agents) <= 1 or len(t.history) == 0:
# We do not remove tasks by limited assignment
continue
possible_assignments = [(x.name, t.affinities[x.name]) for x in agents if x.name in t.poss_agents]
possible_assignments.sort(key=lambda x: x[1])
if len(possible_assignments) < 2:
# Must have at least one possible assignment left
continue
mean_aff = np.floor(np.mean([k[1] for k in possible_assignments]))
for name, aff in possible_assignments:
if aff < mean_aff:
t.restrict_agent(name)
# TODO Alternative formulations: < median(affinity), < mean(affinity)
# Mean probably better as it captures outliers, median likely to cut in half
# name_to_remove = min(possible_assignments, key=lambda k: k[1])[0]
# Fetch updated profits + affinities from core strategy
values = self.core_strategy.profits(tasks, agents)
return values
def __str__(self):
return str(self.core_strategy) + '-limit'
class Negotiation(ProfitStrategy):
def __init__(self, acceptance_ratio=0.6):
self.acceptance_ratio = acceptance_ratio
def assignment_matrix(self, agents, tasks, assignments):
all_assigned = []
task_pos = [t.name for t in tasks]
x = np.zeros((len(tasks), len(agents) + 1), dtype=bool)
for col_idx, agent_key in enumerate(sorted(assignments), start=1):
assigned_tasks = assignments[agent_key]
for t in assigned_tasks:
row_idx = task_pos.index(t)
x[row_idx, col_idx] = 1
all_assigned.extend(assigned_tasks)
unassigned = set([t.name for t in tasks]) - set(all_assigned)
for t in unassigned:
row_idx = task_pos.index(t)
x[row_idx, 0] = 1
return x
def assignment_mat_to_dict(self, agents, tasks, x):
new_assignments = {}
for agent_idx, column in enumerate(x[:, 1:].T):
assigned_rows = np.where(column == 1)[0]
assigned_tasks = [tasks[r].name for r in assigned_rows]
assert (all(agents[agent_idx].name in tasks[r].poss_agents for r in assigned_rows))
new_assignments[agents[agent_idx].name] = assigned_tasks
return new_assignments
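    # Layout note on the two helpers above: rows follow the order of `tasks`, column 0 is a dummy
    # "unassigned" slot, and columns 1..n correspond to the agents (sorted assignment keys on the
    # way in, the `agents` list order on the way out, which the callers appear to assume match).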
class OneSwapNegotiation(Negotiation):
def exchange(self, agents, tasks, profits, initial_assignments, objective):
min_objective = int(objective * self.acceptance_ratio)
print('Objective: %d / Bound: %d' % (objective, min_objective))
candidates = []
capacities = [0] + [a.capacity for a in agents]
profit_matrix, aff_mat, weight_matrix = matrizes(agents, tasks, pad_dummy_agent=True)
x = self.assignment_matrix(agents, tasks, initial_assignments)
initial_affinities = np.sum(aff_mat * x, axis=1, keepdims=True)
initial_profits = np.sum(profit_matrix * x, axis=1, keepdims=True)
aff_improv = (aff_mat - initial_affinities) * (aff_mat > 0)
aff_improv[:, [0]] -= initial_affinities
prof_diff = (profit_matrix - initial_profits) * (profit_matrix > 0)
prof_diff[:, [0]] -= initial_profits
# 1. Build a list of all potential, welfare-improving exchanges
for source_agent, affimp in enumerate(aff_improv.T):
if source_agent == 0:
continue # Don't initiate from non-assigned
pot_gains = affimp[affimp > 0]
task_ids = np.where(affimp > 0)[0] # row id
sorted_order = np.argsort(affimp[affimp > 0][::-1])
pot_gains = pot_gains[sorted_order]
task_ids = task_ids[sorted_order]
for dest_task, my_pot_gain in zip(task_ids, pot_gains):
dest_agent = np.where(x[dest_task, :])[0][0] # column id
source_offerings = x[:, source_agent]
dest_demand = aff_improv[:, dest_agent] > -my_pot_gain
dest_compatible = profit_matrix[:, dest_agent] > 0 # Could also be aff_mat or weight_matrix
potential_exchanges = np.logical_and(source_offerings, dest_demand)
potential_exchanges = np.logical_and(potential_exchanges, dest_compatible)
for source_task in np.where(potential_exchanges)[0]:
welfare_improv = my_pot_gain + aff_improv[source_task, dest_agent]
profit_change = prof_diff[dest_task, source_agent] + prof_diff[source_task, dest_agent]
assert (x[source_task, source_agent])
assert (x[dest_task, dest_agent])
assert (welfare_improv >= 0)
candidates.append((source_agent, source_task, dest_agent, dest_task, welfare_improv, profit_change))
print('Tasks: %d / Candidates: %d' % (len(tasks), len(candidates)))
# 2 Sort by 1) potential welfare improvement and 2) least profit decrease
candidates.sort(key=lambda x: (-x[4], x[5]))
exchanged_tasks = set()
applied_exchanges = []
already_exchanged = 0
objective_bound = 0
weight_problem = 0
# 3. Greedily apply exchanges (this could be solved as CP/SAT or simply as a multi-pass heuristic)
# But as long as the weight-barrier is the main failure reason, another heuristic will not help
for (source_agent, source_task, dest_agent, dest_task, welfare_improv, profit_change) in candidates:
if source_task in exchanged_tasks or dest_task in exchanged_tasks:
already_exchanged += 1
continue
cur_weights = np.sum(weight_matrix * x, axis=0)
new_source_weight = cur_weights[source_agent] - weight_matrix[source_task, source_agent] + weight_matrix[
dest_task, source_agent]
new_dest_weight = cur_weights[dest_agent] - weight_matrix[dest_task, dest_agent] + weight_matrix[
source_task, dest_agent]
if new_source_weight > capacities[source_agent] or new_dest_weight > capacities[dest_agent]:
weight_problem += 1
continue
if (objective + profit_change) < min_objective:
objective_bound += 1
continue
assert (x[source_task, source_agent])
assert (x[dest_task, dest_agent])
assert (not x[dest_task, source_agent])
assert (not x[source_task, dest_agent])
x[source_task, source_agent] = 0
x[source_task, dest_agent] = 1
x[dest_task, dest_agent] = 0
x[dest_task, source_agent] = 1
exchanged_tasks.add(source_task)
exchanged_tasks.add(dest_task)
applied_exchanges.append((source_agent, source_task, dest_agent, dest_task, welfare_improv, profit_change))
new_assignments = self.assignment_mat_to_dict(agents, tasks, x)
new_objective = np.sum(profit_matrix * x)
assert (np.all(np.count_nonzero(x, axis=1) == 1))
assert (np.all(np.sum(weight_matrix * x, axis=0) <= capacities))
assert (new_objective >= min_objective)
aff_improvement = np.sum(aff_mat * x) - np.sum(initial_affinities)
aff_imp_perc = np.sum(aff_mat * x) / np.sum(initial_affinities) - 1.0
objective_decrease = new_objective - objective
print('Failure reason: Already exchanged: %d / Objective: %d / Weight: %d' % (
already_exchanged, objective_bound, weight_problem))
print('Changes occurred: %d / Aff. Improved: %d (%.2f) / Objective decreased: %d' % (
len(exchanged_tasks) / 2, aff_improvement, aff_imp_perc, objective_decrease))
return new_assignments, new_objective
def __str__(self):
return 'oneswap%d' % int(self.acceptance_ratio * 100)
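# Usage sketch (illustrative; `profits`, `initial_assignments`, and `objective` are assumed to come
# from the surrounding assignment solver):
#
#     neg = OneSwapNegotiation(acceptance_ratio=0.6)
#     assignments, objective = neg.exchange(agents, tasks, profits, initial_assignments, objective)
#
# The exchange keeps the profit objective above acceptance_ratio * objective while greedily
# improving affinity through pairwise task swaps.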
class SolverNegotiation(Negotiation):
def exchange(self, agents, tasks, profits, initial_assignments, objective):
min_objective = int(objective * self.acceptance_ratio)
print('Objective: %d / Bound: %d' % (objective, min_objective))
candidates = set()
capacities = [0] + [a.capacity for a in agents]
profit_matrix, aff_mat, weight_matrix = matrizes(agents, tasks, pad_dummy_agent=True)
x = self.assignment_matrix(agents, tasks, initial_assignments)
initial_affinities = np.sum(aff_mat * x, axis=1, keepdims=True)
initial_profits = np.sum(profit_matrix * x, axis=1, keepdims=True)
aff_improv = (aff_mat - initial_affinities) * (aff_mat > 0)
aff_improv[:, [0]] -= initial_affinities
prof_diff = (profit_matrix - initial_profits) * (profit_matrix > 0)
prof_diff[:, [0]] -= initial_profits
delta_welfares = []
delta_profits = []
delta_weights = []
affected_agents = []
exchanged_tasks = []
# 1. Build a list of all potential, welfare-improving exchanges
for source_agent, affimp in enumerate(aff_improv.T):
# if source_agent == 0:
# continue # Don't initiate from non-assigned
pot_gains = affimp[affimp > 0]
task_ids = np.where(affimp > 0)[0] # row id
sorted_order = np.argsort(affimp[affimp > 0][::-1])
pot_gains = pot_gains[sorted_order]
task_ids = task_ids[sorted_order]
for dest_task, my_pot_gain in zip(task_ids, pot_gains):
dest_agent = np.where(x[dest_task, :])[0][0] # column id
source_offerings = x[:, source_agent]
dest_demand = aff_improv[:, dest_agent] > -my_pot_gain
dest_compatible = profit_matrix[:, dest_agent] > 0 # Could also be aff_mat or weight_matrix
potential_exchanges = np.logical_and(source_offerings, dest_demand)
potential_exchanges = np.logical_and(potential_exchanges, dest_compatible)
for source_task in np.where(potential_exchanges)[0]:
welfare_improv = my_pot_gain + aff_improv[source_task, dest_agent]
profit_change = prof_diff[dest_task, source_agent] + prof_diff[source_task, dest_agent]
assert (x[source_task, source_agent])
assert (x[dest_task, dest_agent])
assert (welfare_improv >= 0)
assert (welfare_improv == aff_improv[source_task, dest_agent] + aff_improv[dest_task, source_agent])
ex1 = (source_agent, source_task, dest_agent, aff_improv[source_task, dest_agent],
prof_diff[source_task, dest_agent],
(-weight_matrix[source_task, source_agent], weight_matrix[source_task, dest_agent]))
ex2 = (dest_agent, dest_task, source_agent, aff_improv[dest_task, source_agent],
prof_diff[dest_task, source_agent],
(-weight_matrix[dest_task, dest_agent], weight_matrix[dest_task, source_agent]))
candidates.add(((-welfare_improv, -profit_change), ex1, ex2))
for _, ex1, ex2 in sorted(candidates, key=lambda trans: trans[0]):
delta_welfares.append(ex1[3])
delta_welfares.append(ex2[3])
delta_profits.append(ex1[4])
delta_profits.append(ex2[4])
delta_weights.append(list(ex1[5]))
delta_weights.append(list(ex2[5]))
            # ex tuples are (from_agent, task, to_agent, d_welfare, d_profit, d_weights)
            affected_agents.append([ex1[0] + 1, ex1[2] + 1])
            affected_agents.append([ex2[0] + 1, ex2[2] + 1])
            exchanged_tasks.append(ex1[1])
            exchanged_tasks.append(ex2[1])
weight_budget = np.array(capacities) - np.sum(weight_matrix * x, axis=0)
assert (len(delta_welfares) == len(candidates) * 2)
assert (len(weight_budget) == len(agents) + 1)
data = {
'n_agents': len(agents) + 1,
'n_tasks': len(tasks),
'n_exchanges': len(delta_welfares),
'profit_budget': objective - min_objective,
'weight_budget': weight_budget,
'delta_welfares': delta_welfares,
'delta_profits': delta_profits,
'delta_weights': delta_weights,
'agents': affected_agents,
'task_ids': exchanged_tasks
}
print('Exchanges: %d' % len(delta_welfares))
if len(delta_welfares) > 0:
pymzn.dict2dzn(data, fout='neg1.dzn')
start = time.time()
output = pymzn.minizinc('negotiation.mzn', solver='gecode', data=data, timeout=30) # Not a MIP problem
duration = time.time() - start
sel_exchanges = output[0]['assignment']
affinity_improvement = output[0]['objective']
nb_exchanges = np.count_nonzero(sel_exchanges)
print('Applied Exchanges: %d / Improvement: %d / Time: %d' % (nb_exchanges, affinity_improvement, duration))
if nb_exchanges == 0:
return initial_assignments, objective
for ex_id in np.where(sel_exchanges)[0]:
task_id = exchanged_tasks[ex_id]
source_agent, dest_agent = affected_agents[ex_id]
source_agent, dest_agent = source_agent - 1, dest_agent - 1
assert (x[task_id, source_agent])
assert (not x[task_id, dest_agent])
x[task_id, source_agent] = 0
x[task_id, dest_agent] = 1
new_assignments = self.assignment_mat_to_dict(agents, tasks, x)
new_objective = np.sum(profit_matrix * x)
assert (np.sum(aff_mat * x) == (np.sum(initial_affinities) + affinity_improvement))
assert (np.all(np.count_nonzero(x, axis=1) == 1))
assert (np.all(np.sum(weight_matrix * x, axis=0) <= capacities))
assert (new_objective >= min_objective)
return new_assignments, new_objective
else:
return initial_assignments, objective
def __str__(self):
return 'exchange%d' % int(self.acceptance_ratio * 100)
STRATEGY_MAPPING = {
'profit': ProfitStrategy,
'affinity': AffinityStrategy,
'switch': SwitchAtThresholdStrategy,
'productcomb': ProductCombinationStrategy,
'wpp': WeightedPartialProfits,
'negotiation': OneSwapNegotiation,
'exchange': SolverNegotiation
}
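# Lookup sketch (illustrative): entries that need constructor arguments are instantiated from the
# mapping, e.g. STRATEGY_MAPPING['switch'](50) or STRATEGY_MAPPING['wpp'](individual_weights=True);
# plain strategies such as STRATEGY_MAPPING['profit']() take no arguments.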
| 2.421875
| 2
|
package.py
|
Impactstory/jump-api
| 0
|
12781722
|
<filename>package.py
# coding: utf-8
from cached_property import cached_property
import numpy as np
from collections import OrderedDict
import datetime
import shortuuid
from time import time
import os
import numpy as np
import pandas as pd
from app import db
from app import get_db_cursor
from app import DEMO_PACKAGE_ID
from app import s3_client
# from app import my_memcached # disable memcached
from apc_journal import ApcJournal
from saved_scenario import SavedScenario # used in relationship
from institution import Institution # used in relationship
from scenario import get_core_list_from_db
from scenario import get_apc_data_from_db
from util import get_sql_dict_rows
from util import safe_commit
from util import for_sorting
from util import elapsed
from openalex import JournalMetadata, MissingJournalMetadata
class Package(db.Model):
__tablename__ = "jump_account_package"
institution_id = db.Column(db.Text, db.ForeignKey("jump_institution.id"))
package_id = db.Column(db.Text, primary_key=True)
publisher = db.Column(db.Text)
package_name = db.Column(db.Text)
consortium_package_id = db.Column(db.Text)
created = db.Column(db.DateTime)
is_demo = db.Column(db.Boolean)
big_deal_cost = db.Column(db.Numeric(asdecimal=False))
big_deal_cost_increase = db.Column(db.Float)
is_deleted = db.Column(db.Boolean)
currency = db.Column(db.Text)
saved_scenarios = db.relationship("SavedScenario", lazy="subquery", backref=db.backref("package", lazy="subquery"))
institution = db.relationship("Institution", lazy="subquery", uselist=False, backref=db.backref("packages", lazy="subquery"))
def __init__(self, **kwargs):
self.created = datetime.datetime.utcnow().isoformat()
self.is_deleted = False
super(Package, self).__init__(**kwargs)
@cached_property
def is_consortial_package(self):
is_cons_pkg = False
if self.consortial_package_ids:
is_cons_pkg = True
return is_cons_pkg
@cached_property
def consortial_package_ids(self):
with get_db_cursor() as cursor:
qry = "select member_package_id from jump_consortium_members where consortium_package_id = %s"
cursor.execute(qry, (self.package_id,))
rows = cursor.fetchall()
return [w[0] for w in rows]
@cached_property
def unique_issns(self):
if self.is_consortial_package:
package_ids = tuple(self.consortial_package_ids)
with get_db_cursor() as cursor:
qry = "select distinct(issn_l) from jump_counter where package_id in %s"
cursor.execute(qry, (package_ids,))
rows = cursor.fetchall()
else:
with get_db_cursor() as cursor:
qry = "select distinct(issn_l) from jump_counter where package_id = %s"
cursor.execute(qry, (self.package_id,))
rows = cursor.fetchall()
return [w[0] for w in rows]
@cached_property
def journal_metadata(self):
pub_lookup = {
"SpringerNature": "Springer Nature",
"Springer": "Springer Nature",
"Sage": "SAGE",
"TaylorFrancis": "<NAME>"
}
publisher_normalized = pub_lookup.get(self.publisher, self.publisher)
meta_list = JournalMetadata.query.filter(
JournalMetadata.issn_l.in_(self.unique_issns),
JournalMetadata.publisher == publisher_normalized,
JournalMetadata.is_current_subscription_journal).all()
[db.session.expunge(my_meta) for my_meta in meta_list]
return dict(list(zip([j.issn_l for j in meta_list], meta_list)))
@cached_property
def journal_metadata_flat(self):
jmf = {}
for issn_l, x in self.journal_metadata.items():
for issn in x.issns:
jmf[issn] = x
return jmf
def get_journal_metadata(self, issn):
journal_meta = self.journal_metadata_flat.get(issn, None)
if not journal_meta:
journal_meta = MissingJournalMetadata(issn_l=issn)
return journal_meta
@property
def unique_saved_scenarios(self):
return self.saved_scenarios
@property
def scenario_ids(self):
return [s.scenario_id for s in self.saved_scenarios]
@property
def is_demo_account(self):
return self.package_id.startswith("demo")
@cached_property
def has_complete_counter_data(self):
if self.institution.is_consortium:
return True
if self.data_files_dict["counter"]["is_live"]:
return True
if self.data_files_dict["counter-trj2"]["is_live"] and \
self.data_files_dict["counter-trj3"]["is_live"] and \
self.data_files_dict["counter-trj4"]["is_live"]:
return True
return False
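    # Note: counter data counts as complete either from a single combined COUNTER upload or from
    # all three of the TR_J2 / TR_J3 / TR_J4 uploads; consortium institutions skip the check.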
@property
def has_custom_perpetual_access(self):
my_data_file_dict = self.data_files_dict["perpetual-access"]
if my_data_file_dict["is_live"]:
return True
return False
@property
def has_custom_prices(self):
my_data_file_dict = self.data_files_dict["price"]
if my_data_file_dict["is_live"]:
return True
return False
@property
def has_core_journal_list(self):
rows = get_core_list_from_db(self.package_id)
if rows:
return True
return False
def filter_by_core_list(self, my_list):
if not self.has_core_journal_list:
return my_list
core_rows = get_core_list_from_db(self.package_id)
core_issnls = list(core_rows.keys())
return [row for row in my_list if row["issn_l"] in core_issnls]
@cached_property
def get_core_journal_rows(self):
q = """
select
core.issn_l,
title as title
from jump_core_journals core
left outer join openalex_computed on core.issn_l = openalex_computed.issn_l
where package_id=%(package_id_for_db)s
order by title desc
"""
rows = get_sql_dict_rows(q, {'package_id_for_db': self.package_id_for_db})
return rows
@cached_property
def get_counter_rows(self):
return self.filter_by_core_list(self.get_unfiltered_counter_rows)
@cached_property
def get_unfiltered_counter_rows(self):
q = """
select
rj.issn_l,
listagg(rj.issn, ',') as issns,
listagg(title, ',') as title,
sum(total::int) as num_2018_downloads
from jump_counter counter
left outer join openalex_computed_flat rj on counter.issn_l = rj.issn
where package_id=%(package_id_for_db)s
group by rj.issn_l
order by num_2018_downloads desc
"""
return get_sql_dict_rows(q, {'package_id_for_db': self.package_id_for_db})
def get_base(self, and_where="", extrakv={}):
q = """
select
rj.issn_l,
listagg(rj.issn, ',') as issns,
listagg(title, ',') as title,
sum(total::int) as num_2018_downloads,
count(*) as num_journals_with_issn_l
from jump_counter counter
left outer join openalex_computed_flat rj on counter.issn_l = rj.issn
where package_id=%(package_id_for_db)s
{}
group by rj.issn_l
order by num_2018_downloads desc
""".format(and_where)
rows = get_sql_dict_rows(q, {'package_id_for_db': self.package_id_for_db} | extrakv)
return rows
@cached_property
def get_published_in_2019(self):
rows = self.get_base(and_where=""" and rj.issn_l in
(select rj.issn_l from unpaywall u
join openalex_computed_flat rj on u.journal_issn_l = rj.issn
where year=2019
group by rj.issn_l) """)
return self.filter_by_core_list(rows)
@cached_property
def get_published_toll_access_in_2019(self):
rows = self.get_base(and_where=""" and rj.issn_l in
(select rj.issn_l from unpaywall u
join openalex_computed_flat rj on u.journal_issn_l = rj.issn
where year=2019 and journal_is_oa='false'
group by rj.issn_l) """)
return self.filter_by_core_list(rows)
@cached_property
def publisher_name(self):
if self.publisher == "Elsevier":
return "Elsevier"
elif self.publisher == "Wiley":
return "Wiley"
elif self.publisher == "SpringerNature":
return "Springer Nature"
elif self.publisher == "Sage":
return "SAGE"
elif self.publisher == "TaylorFrancis":
return "Taylor & Francis"
else:
return "false"
@property
def publisher_name_snippets(self):
if self.publisher == "Elsevier":
return ["elsevier"]
elif self.publisher == "Wiley":
return ["wiley"]
elif self.publisher == "SpringerNature":
return ["springer", "nature"]
elif self.publisher == "Sage":
return ["sage"]
elif self.publisher == "TaylorFrancis":
return ["taylor", "informa"]
else:
return []
@cached_property
def get_published_toll_access_in_2019_with_publisher(self):
rows = self.get_base(and_where=""" and rj.issn_l in
(select distinct rj.issn_l
from unpaywall u
join openalex_computed_flat rj on u.journal_issn_l=rj.issn
where year=2019 and journal_is_oa='false'
and rj.publisher = %(publisher)s
) """, extrakv={'publisher': self.publisher_name})
return self.filter_by_core_list(rows)
@cached_property
def get_published_toll_access_in_2019_with_publisher_have_price(self):
rows = self.get_base(and_where=""" and rj.issn_l in
(select distinct rj.issn_l
from unpaywall u
join openalex_computed_flat rj on u.journal_issn_l=rj.issn
where year=2019 and journal_is_oa='false'
and rj.publisher = %(publisher)s
)
and rj.issn_l in
(select distinct issn_l from jump_journal_prices
where price > 0 and package_id in('658349d9', %(package_id)s)
) """, extrakv={'package_id': self.package_id, 'publisher': self.publisher_name})
return self.filter_by_core_list(rows)
@cached_property
def get_in_scenario(self):
my_saved_scenario = None
if self.unique_saved_scenarios:
first_scenario = self.unique_saved_scenarios[0]
my_saved_scenario = SavedScenario.query.get(first_scenario.scenario_id)
if not my_saved_scenario:
my_saved_scenario = SavedScenario.query.get("demo")
my_saved_scenario.set_live_scenario(None)
response = my_saved_scenario.to_dict_journals()
rows = response["journals"]
return self.filter_by_core_list(rows)
@cached_property
def get_counter_unique_rows(self):
rows = self.get_base()
return self.filter_by_core_list(rows)
@cached_property
def get_diff_not_in_counter(self):
if not self.has_core_journal_list:
return []
response_dict = {}
remove = [row["issn_l"] for row in self.get_counter_rows]
for row in self.get_core_journal_rows:
if row["issn_l"] not in remove:
response_dict[row["issn_l"]] = row
response = sorted(list(response_dict.values()), key=lambda x: x["issn_l"], reverse=True)
return response
@cached_property
def get_diff_non_unique(self):
response = []
for row in self.get_counter_unique_rows:
if not row["issn_l"]:
response += [row]
if row["num_journals_with_issn_l"] > 1:
response += [row]
response = sorted(response, key=lambda x: x["num_2018_downloads"], reverse=True)
return response
@cached_property
def get_diff_not_published_in_2019(self):
response_dict = {}
remove = [row["issn_l"] for row in self.get_published_in_2019]
for row in self.get_counter_unique_rows:
if row["issn_l"] not in remove:
response_dict[row["issn_l"]] = row
response = sorted(list(response_dict.values()), key=lambda x: x["num_2018_downloads"], reverse=True)
return response
@cached_property
def get_diff_open_access_journals(self):
response_dict = {}
remove = [row["issn_l"] for row in self.get_published_toll_access_in_2019]
for row in self.get_published_in_2019:
if row["issn_l"] not in remove:
response_dict[row["issn_l"]] = row
response = sorted(list(response_dict.values()), key=lambda x: x["num_2018_downloads"], reverse=True)
return response
@cached_property
def get_diff_changed_publisher(self):
response_dict = {}
remove = [row["issn_l"] for row in self.get_published_toll_access_in_2019_with_publisher]
for row in self.get_published_toll_access_in_2019:
if row["issn_l"] not in remove:
response_dict[row["issn_l"]] = row
response = sorted(list(response_dict.values()), key=lambda x: x["num_2018_downloads"], reverse=True)
return response
@cached_property
def get_diff_no_price(self):
response_dict = {}
remove = [row["issn_l"] for row in self.get_published_toll_access_in_2019_with_publisher_have_price]
for row in self.get_published_toll_access_in_2019_with_publisher:
if row["issn_l"] not in remove:
response_dict[row["issn_l"]] = row
response = sorted(list(response_dict.values()), key=lambda x: x["num_2018_downloads"], reverse=True)
return response
@cached_property
def get_diff_missing_from_scenario(self):
response_dict = {}
remove = [row["issn_l"] for row in self.get_in_scenario]
for row in self.get_published_toll_access_in_2019_with_publisher_have_price:
if row["issn_l"] not in remove:
response_dict[row["issn_l"]] = row
response = sorted(list(response_dict.values()), key=lambda x: x["num_2018_downloads"], reverse=True)
return response
@cached_property
def get_diff_extra_in_scenario(self):
response_dict = {}
remove = [row["issn_l"] for row in self.get_published_toll_access_in_2019_with_publisher_have_price]
for row in self.get_in_scenario:
if row["issn_l"] not in remove:
response_dict[row["issn_l"]] = row
response = list(response_dict.values())
# response = sorted(response_dict.values(), key=lambda x: x["num_2018_downloads"], reverse=True)
return response
@cached_property
def package_id_for_db(self):
package_id = self.package_id
if not package_id or package_id.startswith("demo") or package_id==DEMO_PACKAGE_ID:
package_id = DEMO_PACKAGE_ID
return package_id
@property
def feedback_scenario_dicts(self):
feedback_scenarios = [s for s in self.saved_scenarios if s.is_feedback_scenario]
feedback_scenario_dicts = [s.to_dict_minimal() for s in feedback_scenarios]
return feedback_scenario_dicts
@cached_property
def feedback_set_id(self):
return self.package_id.replace("package-", "feedback-")
@cached_property
def feedback_set_name(self):
return "Feedback on {} scenarios".format(self.publisher)
@cached_property
def feedback_rows(self):
if not self.is_feeder_package:
return []
command = 'select * from jump_consortium_feedback_requests where member_package_id=%s'
with get_db_cursor() as cursor:
cursor.execute(command, (self.package_id,))
rows_for_feedback = cursor.fetchall()
return rows_for_feedback
@cached_property
def is_feedback_package(self):
if not self.is_feeder_package:
return False
return (len(self.feedback_rows) > 0)
@cached_property
def is_feeder_package(self):
return self.is_owned_by_consortium
@cached_property
def is_owned_by_consortium(self):
if self.consortia_scenario_ids_who_own_this_package:
return True
return False
@cached_property
def consortia_scenario_ids_who_own_this_package(self):
q = """
select consortium_package_id, scenario_id as consortium_scenario_id
from jump_consortium_members cm
join jump_package_scenario ps on cm.consortium_package_id=ps.package_id
where member_package_id=%s
"""
with get_db_cursor() as cursor:
cursor.execute(q, (self.package_id,))
rows = cursor.fetchall()
return [row["consortium_scenario_id"] for row in rows]
@cached_property
def counter_totals_from_db(self):
from scenario import get_counter_totals_from_db
return get_counter_totals_from_db(self.package_id)
@cached_property
def counter_journals_by_report_name(self):
from scenario import get_counter_journals_by_report_name_from_db
return get_counter_journals_by_report_name_from_db(self.package_id)
@cached_property
def journals_missing_prices(self):
counter_rows = self.counter_totals_from_db
prices_uploaded_raw = get_custom_prices(self.package_id)
journals_missing_prices = []
for my_journal_metadata in list(self.journal_metadata.values()):
if my_journal_metadata.publisher_code == self.publisher:
if my_journal_metadata.is_current_subscription_journal:
issn_l = my_journal_metadata.issn_l
if not issn_l in counter_rows:
pass
elif counter_rows[issn_l] == 0:
pass
elif prices_uploaded_raw.get(issn_l, None) != None:
pass
elif my_journal_metadata.get_subscription_price(self.currency, use_high_price_if_unknown=False) != None:
pass
else:
my_dict = OrderedDict([
("issn_l_prefixed", my_journal_metadata.display_issn_l),
("issn_l", my_journal_metadata.issn_l),
("name", my_journal_metadata.title),
("issns", my_journal_metadata.display_issns),
("currency", self.currency),
("counter_total", counter_rows[issn_l]),
])
journals_missing_prices.append(my_dict)
journals_missing_prices = sorted(journals_missing_prices, key=lambda x: 0 if x["counter_total"]==None else x["counter_total"], reverse=True)
return journals_missing_prices
@cached_property
def returned_big_deal_cost(self):
if self.institution.is_consortium:
return 42
return self.big_deal_cost
@cached_property
def returned_big_deal_cost_increase(self):
if self.institution.is_consortium:
return 42
return self.big_deal_cost_increase
@cached_property
def warnings(self):
from scenario import get_package_specific_scenario_data_from_db
if self.institution.is_consortium:
return []
if any([w in self.package_id for w in ['jiscels', 'jiscsage', 'jiscwiley', 'jiscspringer', 'jisctf']]):
# don't show warnings for those packages
# maybe best thing is don't show warnings for any feedback packages?
# Update on 2022-03-07: Scott added ignore warnings for sage, wiley, springer, and tf pkgs
# This removes warnings for not just feeder pkgs but standalone packages for each institution
return []
response = []
if not self.has_custom_perpetual_access:
response += [OrderedDict([
("id", "missing_perpetual_access"),
("journals", None)
])]
if (not self.has_complete_counter_data) or (len(self.journals_missing_prices) > 0):
response += [OrderedDict([
("id", "missing_prices"),
("journals", self.journals_missing_prices)
])]
return response
def public_price_rows(self):
prices_rows = []
for my_journal_metadata in list(self.journal_metadata.values()):
if my_journal_metadata.publisher_code == self.publisher:
if my_journal_metadata.is_current_subscription_journal:
my_price = my_journal_metadata.get_subscription_price(self.currency, use_high_price_if_unknown=False)
if my_price != None:
my_dict = OrderedDict()
my_dict["issn_l_prefixed"] = my_journal_metadata.display_issn_l
my_dict["issn_l"] = my_journal_metadata.issn_l
my_dict["issns"] = my_journal_metadata.display_issns
my_dict["title"] = my_journal_metadata.title
my_dict["publisher"] = my_journal_metadata.publisher
my_dict["currency"] = self.currency
my_dict["price"] = my_price
prices_rows += [my_dict]
prices_rows = sorted(prices_rows, key=lambda x: 0 if x["price"]==None else x["price"], reverse=True)
return prices_rows
def get_fresh_apc_journal_list(self, issn_ls, apc_df_dict):
apc_journals = []
if not hasattr(self, "apc_data"):
self.apc_data = get_apc_data_from_db(self.package_id)
for issn_l in issn_ls:
meta = self.get_journal_metadata(issn_l)
if meta:
if meta.get_apc_price(self.currency):
apc_journal = ApcJournal(issn_l, self.apc_data, apc_df_dict, self.currency, self)
apc_journals.append(apc_journal)
return apc_journals
@cached_property
def apc_journals(self):
if not hasattr(self, "apc_data"):
self.apc_data = get_apc_data_from_db(self.package_id)
if not self.apc_data:
return []
df = pd.DataFrame(self.apc_data)
df["year"] = df["year"].astype(int)
df["authorship_fraction"] = df.num_authors_from_uni/df.num_authors_total
df_by_issn_l_and_year = df.groupby(["issn_l", "year"]).authorship_fraction.agg([np.size, np.sum]).reset_index().rename(columns={'size': 'num_papers', "sum": "authorship_fraction"})
my_dict = {"df": df, "df_by_issn_l_and_year": df_by_issn_l_and_year}
return self.get_fresh_apc_journal_list(my_dict["df"].issn_l.unique(), my_dict)
@cached_property
def apc_journals_sorted_spend(self):
self.apc_journals.sort(key=lambda k: for_sorting(k.cost_apc_historical), reverse=True)
return self.apc_journals
@cached_property
def num_apc_papers_historical(self):
return round(np.sum([j.num_apc_papers_historical for j in self.apc_journals]))
@cached_property
def years(self):
return list(range(0, 5))
@cached_property
def cost_apc_historical_by_year(self):
return [round(np.sum([j.cost_apc_historical_by_year[year] for j in self.apc_journals])) for year in self.years]
@cached_property
def cost_apc_historical(self):
return round(np.mean(self.cost_apc_historical_by_year))
@cached_property
def cost_apc_historical_hybrid_by_year(self):
return [round(np.sum([j.cost_apc_historical_by_year[year] for j in self.apc_journals if j.oa_status=="hybrid"]), 4) for year in self.years]
@cached_property
def cost_apc_historical_hybrid(self):
return round(np.mean(self.cost_apc_historical_hybrid_by_year))
@cached_property
def cost_apc_historical_gold_by_year(self):
return [round(np.sum([j.cost_apc_historical_by_year[year] for j in self.apc_journals if j.oa_status=="gold"]), 4) for year in self.years]
@cached_property
def cost_apc_historical_gold(self):
return round(np.mean(self.cost_apc_historical_gold_by_year))
@cached_property
def fractional_authorships_total_by_year(self):
return [round(np.sum([j.fractional_authorships_total_by_year[year] for j in self.apc_journals]), 4) for year in self.years]
@cached_property
def fractional_authorships_total(self):
return round(np.mean(self.fractional_authorships_total_by_year), 2)
@cached_property
def apc_journals_sorted_fractional_authorship(self):
self.apc_journals.sort(key=lambda k: for_sorting(k.fractional_authorships_total), reverse=True)
return self.apc_journals
@cached_property
def apc_price(self):
if self.apc_journals:
return np.max([j.apc_price for j in self.apc_journals])
else:
return 0
def update_apc_authorships(self):
delete_q = 'delete from jump_apc_authorships where package_id = %s'
insert_q = """
insert into jump_apc_authorships (
select * from jump_apc_authorships_view
where package_id = %s and issn_l in
(select issn_l from openalex_computed rj where rj.publisher = %s))
"""
with get_db_cursor() as cursor:
cursor.execute(delete_q, (self.package_id,))
cursor.execute(insert_q, (self.package_id, self.publisher_name,))
def to_dict_apc(self):
response = {
"headers": [
{"text": "OA type", "value": "oa_status", "percent": None, "raw": None, "display": "text"},
{"text": "APC price", "value": "apc_price", "percent": None, "raw": self.apc_price, "display": "currency_int"},
{"text": "Number APC papers", "value": "num_apc_papers", "percent": None, "raw": self.num_apc_papers_historical, "display": "float1"},
{"text": "Total fractional authorship", "value": "fractional_authorship", "percent": None, "raw": self.fractional_authorships_total, "display": "float1"},
{"text": "APC Dollars Spent", "value": "cost_apc", "percent": None, "raw": self.cost_apc_historical, "display": "currency_int"},
]
}
response["journals"] = [j.to_dict() for j in self.apc_journals_sorted_spend]
return response
def to_dict_summary(self):
return {
"id": self.package_id,
"name": self.package_name,
"currency": self.currency,
"hasCounterData": self.has_complete_counter_data,
"hasCustomPrices": self.has_custom_prices,
"hasCoreJournalList": self.has_core_journal_list,
"hasCustomPerpetualAccess": self.has_custom_perpetual_access,
}
@cached_property
def data_files_dict(self):
command = "select * from jump_raw_file_upload_object where package_id=%s"
with get_db_cursor() as cursor:
cursor.execute(command, (self.package_id,))
raw_file_upload_rows = cursor.fetchall()
data_files_dict = {}
data_file_types = ["counter", "counter-trj2", "counter-trj3", "counter-trj4", "price-public", "price", "perpetual-access"]
for data_file_type in data_file_types:
my_dict = OrderedDict()
my_dict["name"] = data_file_type
my_dict["is_live"] = False
my_dict["is_parsed"] = False
my_dict["is_uploaded"] = False
my_dict["rows_count"] = None
my_dict["error"] = None
my_dict["error_details"] = None
my_dict["created_date"] = None
data_files_dict[data_file_type] = my_dict
data_files_dict["price-public"]["is_uploaded"] = True
data_files_dict["price-public"]["is_parsed"] = True
data_files_dict["price-public"]["is_live"] = True
data_files_dict["price-public"]["rows_count"] = len(self.public_price_rows())
# go through all the upload rows
for raw_file_upload_row in raw_file_upload_rows:
my_dict = data_files_dict[raw_file_upload_row["file"]]
if (my_dict["name"] == raw_file_upload_row["file"]):
if raw_file_upload_row["to_delete_date"] != None:
# handle the ones that have been marked for delete but not deleted yet
my_dict["rows_count"] = 0
else:
my_dict["is_uploaded"] = True
my_dict["is_parsed"] = True
my_dict["is_live"] = True
my_dict["created_date"] = raw_file_upload_row["created"]
if raw_file_upload_row["num_rows"]:
my_dict["rows_count"] = raw_file_upload_row["num_rows"]
if raw_file_upload_row["error"]:
my_dict["error"] = raw_file_upload_row["error"]
my_dict["error_details"] = raw_file_upload_row["error_details"]
my_dict["is_live"] = False
# handle the ones that have been uploaded but not processed yet
upload_preprocess_bucket = "unsub-file-uploads-preprocess-testing" if os.getenv("TESTING_DB") else "unsub-file-uploads-preprocess"
preprocess_file_list = s3_client.list_objects(Bucket=upload_preprocess_bucket)
for preprocess_file in preprocess_file_list.get("Contents", []):
filename = preprocess_file["Key"]
filename_base = filename.split(".")[0]
try:
preprocess_package_id, preprocess_filetype = filename_base.split("_")
except ValueError:
# not a valid file, skip it
continue
# size = preprocess_file["Size"]
# age_seconds = (datetime.datetime.utcnow() - preprocess_file["LastModified"].replace(tzinfo=None)).total_seconds()
if preprocess_package_id == self.package_id:
my_dict = data_files_dict[preprocess_filetype]
my_dict["is_uploaded"] = True
my_dict["is_parsed"] = False
my_dict["is_live"] = False
return data_files_dict
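    # Note on the lifecycle encoded above: a successfully processed upload is uploaded + parsed +
    # live; a row with an error stays uploaded/parsed but not live; a file still sitting in the
    # preprocess S3 bucket shows as uploaded only; "price-public" is always treated as live because
    # it is derived from journal metadata rather than an upload.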
def to_package_dict(self):
data_files_list = sorted(list(self.data_files_dict.values()), key=lambda x: 0 if x["rows_count"]==None else x["rows_count"], reverse=True)
response = OrderedDict([
("id", self.package_id),
("name", self.package_name),
("publisher", self.publisher),
("currency", self.currency),
("cost_bigdeal", self.returned_big_deal_cost),
("cost_bigdeal_increase", self.returned_big_deal_cost_increase),
("is_consortium", self.institution.is_consortium),
("is_deleted", self.is_deleted is not None and self.is_deleted),
("is_demo", self.is_demo),
("has_complete_counter_data", self.has_complete_counter_data),
("data_files", data_files_list),
# @todo for testing, show all scenarios even with owned by consortium
# ("is_owned_by_consortium", self.is_owned_by_consortium),
("is_owned_by_consortium", False),
("is_consortial_proposal_set", self.is_feedback_package),
# ("scenarios", [s.to_dict_minimal() for s in self.saved_scenarios if not s.is_feedback_scenario]),
("scenarios", [s.to_dict_minimal() for s in self.saved_scenarios]),
("warnings", self.warnings),
])
return response
def to_package_dict_feedback_set(self):
response = self.to_package_dict()
response["id"] = self.feedback_set_id
response["name"] = self.feedback_set_name
response["scenarios"] = self.feedback_scenario_dicts
return response
def to_dict_minimal(self):
return self.to_dict_minimal_base()
def to_dict_minimal_base(self):
response = OrderedDict([
("id", self.package_id),
("name", self.package_name),
("currency", self.currency),
("publisher", self.publisher),
("is_deleted", self.is_deleted is not None and self.is_deleted),
("is_consortium", self.institution.is_consortium),
("is_owned_by_consortium", self.is_owned_by_consortium),
("is_feeder_package", self.is_feeder_package),
("is_consortial_proposal_set", False),
])
return response
def to_dict_minimal_feedback_set(self):
response = self.to_dict_minimal_base()
response["id"] = self.feedback_set_id
response["name"] = self.feedback_set_name
response["is_feeder_package"] = False
response["is_owned_by_consortium"] = False
response["is_consortial_proposal_set"] = True
return response
def __repr__(self):
return "<{} ({}) {}>".format(self.__class__.__name__, self.package_id, self.package_name)
def clone_demo_package(institution):
demo_package = Package.query.filter(Package.package_id == DEMO_PACKAGE_ID).first()
    now = datetime.datetime.utcnow().isoformat()
# jump_account_package
new_package = Package(
package_id="package-{}".format(shortuuid.uuid()[0:12]),
publisher=demo_package.publisher,
package_name=demo_package.publisher,
created=now,
institution_id=institution.id,
is_demo=True
)
db.session.add(new_package)
# jump_package_scenario
demo_scenarios = SavedScenario.query.filter(SavedScenario.package_id == DEMO_PACKAGE_ID).all()
for scenario in demo_scenarios:
new_scenario = SavedScenario(False, "scenario-{}".format(shortuuid.uuid()[0:12]), None)
new_scenario.package_id = new_package.package_id
new_scenario.created = now
new_scenario.is_base_scenario = scenario.is_base_scenario
db.session.add(new_scenario)
safe_commit(db)
with get_db_cursor() as cursor:
# jump_counter
cursor.execute(
"""
insert into jump_counter (issn_l, package_id, journal_name, total, report_year, report_name, report_version, metric_type, yop, access_type, created) (
select issn_l, '{}', journal_name, total, report_year, report_name, report_version, metric_type, yop, access_type, created
from jump_counter
where package_id = '{}'
)
""".format(new_package.package_id, DEMO_PACKAGE_ID)
)
# 'jump_counter_input',
cursor.execute(
"""
insert into jump_counter_input (issn, journal_name, total, package_id, report_year, report_name, report_version, metric_type, yop, access_type) (
select issn, journal_name, total, '{}', report_year, report_name, report_version, metric_type, yop, access_type
from jump_counter_input
where package_id = '{}'
)
""".format(new_package.package_id, DEMO_PACKAGE_ID)
)
# jump_core_journals
cursor.execute(
"""
insert into jump_core_journals (package_id, issn_l, baseline_access) (
select '{}', issn_l, baseline_access from jump_core_journals where package_id = '{}'
)
""".format(new_package.package_id, DEMO_PACKAGE_ID)
)
# 'jump_perpetual_access'
cursor.execute(
"""
insert into jump_perpetual_access (package_id, issn_l, start_date, end_date, created) (
select '{}', issn_l, start_date, end_date, created
from jump_perpetual_access
where package_id = '{}'
)
""".format(new_package.package_id, DEMO_PACKAGE_ID)
)
# 'jump_perpetual_access_input'
cursor.execute(
"""
insert into jump_perpetual_access_input (package_id, issn, start_date, end_date) (
select '{}', issn, start_date, end_date
from jump_perpetual_access_input
where package_id = '{}'
)
""".format(new_package.package_id, DEMO_PACKAGE_ID)
)
# 'jump_apc_authorships'
cursor.execute(
"""
insert into jump_apc_authorships (
package_id, doi, publisher, num_authors_total, num_authors_from_uni, journal_name, issn_l, year, oa_status, apc
) (
select '{}', doi, publisher, num_authors_total, num_authors_from_uni, journal_name, issn_l, year, oa_status, apc
from jump_apc_authorships
where package_id = '{}'
)
""".format(new_package.package_id, DEMO_PACKAGE_ID)
)
return new_package
def check_if_to_delete(package_id, file):
command = "select * from jump_raw_file_upload_object where package_id=%s and to_delete_date is not null"
with get_db_cursor() as cursor:
cursor.execute(command, (package_id,))
rows_to_delete = cursor.fetchall()
for row in rows_to_delete:
if (row["package_id"] == package_id) and (row["file"] == file):
return True
return False
def get_custom_prices(package_id):
package_dict = {}
if check_if_to_delete(package_id, "price"):
return package_dict
command = "select issn_l, price from jump_journal_prices where (package_id=%s)"
with get_db_cursor() as cursor:
cursor.execute(command, (package_id,))
rows = cursor.fetchall()
for row in rows:
package_dict[row["issn_l"]] = row["price"]
return package_dict
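# Usage sketch (illustrative; assumes an application context with the database session configured,
# and the package id shown is hypothetical):
#
#     pkg = Package.query.get("package-abc123xyz")
#     prices = get_custom_prices(pkg.package_id)   # {issn_l: price} from an uploaded price file
#     warnings = pkg.warnings                      # e.g. missing perpetual access / missing prices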
| 1.960938
| 2
|
scripts/reactor/autogen_aquaItem3.py
|
hsienjan/SideQuest-Server
| 0
|
12781723
|
<reponame>hsienjan/SideQuest-Server
# ParentID: 2302006
# Character field ID when accessed: 230010400
# ObjectID: 1000016
# Object Position Y: 559
# Object Position X: -1813
| 0.863281
| 1
|
year_2021/day04/test_giant_squid.py
|
mjalkio/advent-of-code
| 0
|
12781724
|
from util import read_puzzle_input
from year_2021.day04.giant_squid import (
get_losing_board_score,
get_winning_board_score,
)
def test_get_winning_board_score():
assert get_winning_board_score(read_puzzle_input("test_input.txt")) == 4512
def test_get_losing_board_score():
assert get_losing_board_score(read_puzzle_input("test_input.txt")) == 1924
| 2.4375
| 2
|
src/model/__init__.py
|
521xueweihan/fpage
| 0
|
12781725
|
<reponame>521xueweihan/fpage<filename>src/model/__init__.py<gh_stars>0
# coding:utf-8
import config
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
db = create_engine(config.DATABASE_URI)
BaseModel = declarative_base()
DBSession = sessionmaker(bind=db)
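# Usage sketch (illustrative; assumes config.DATABASE_URI points at a reachable database):
#
#     session = DBSession()
#     try:
#         session.add(some_model)   # `some_model` is a hypothetical BaseModel subclass instance
#         session.commit()
#     finally:
#         session.close()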
| 1.742188
| 2
|
src/python/Algorithms/scc.py
|
rmallermartins/graphun
| 1
|
12781726
|
import sys
sys.path.append("../Structures/")
import graph
from dfs import Dfs
from collections import deque
class Scc:
def execute(self, G):
dfs = Dfs()
dfs.executeNormal(G)
G.buildTranspGraph()
dfs.executeTransp(G)
self.printScc(G, dfs.getSccList())
def printScc(self, G, sccList):
for v in sccList:
if v.getPi() == None:
                print(v.getName())
            else:
                print(v.getName(), end=' ')
| 2.328125
| 2
|
pyxtal/constants.py
|
ubikpt/PyXtal
| 1
|
12781727
|
<reponame>ubikpt/PyXtal
"""
Module to store the constants
"""
import numpy as np
from pyxtal.version import __version__
# Constants
# ------------------------------
tol_m = 0.3 # separation tolerance in Angstroms
rad = np.pi / 180.0 # converting degrees to radians
deg = 180.0 / np.pi # converting radians to degrees
pyxtal_verbosity = 1 # constant for printx function
# Max number of atoms per molecule before using fast distance check
max_fast_mol_size = 30
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
ltype_keywords = [
"triclinic", "Triclinic",
"monoclinic", "Monoclinic",
"orthorhombic", "Orthorhombic",
"tetragonal", "Tetragonal",
"trigonal", "Trigonal",
"hexagonal", "Hexagonal",
"cubic", "Cubic",
"spherical", "Spherical",
"ellipsoidal", "Ellipsoidal",
]
logo = """#############################################################
# ______ _ _ _ #
# (_____ \ \ \ / / | | #
# _____) ) _ \ \/ / |_ ____| | #
# | ____/ | | | ) (| _)/ _ | | #
# | | | |_| |/ /\ \ |_( (_| | |___ #
# |_| \__ /_/ \_\___)__|_|_____) #
# (____/ #
#---------------------(version {:>8s})--------------------#
# A Python package for random crystal generation #
# url: https://github.com/qzhu2017/pyxtal #
# @Zhu's group at University of Nevada Las Vegas #
#############################################################
""".format(__version__)
| 2.203125
| 2
|
recover.py
|
Aziz-Mo/QR-Shamir-Secret-Sharing
| 2
|
12781728
|
from secretsharing import PlaintextToHexSecretSharer
def main():
# Enter shares
shares = [input('Enter your share: ')]
while True:
        more_shares = input("Still have more?\tYes\tNo\n").strip().upper()
        if more_shares in ("Y", "YES"):
            shares.append(input('Enter your share: '))
        elif more_shares in ("N", "NO"):
            break
        else:
            print("You haven't answered correctly, try again\n")
# Recover
message = PlaintextToHexSecretSharer.recover_secret(shares)
print('Original message:\n'+message)
if __name__ == '__main__':
main()
| 3.390625
| 3
|
src/faces-train.py
|
ParthSareen/HackTheNorthThreatAssess
| 0
|
12781729
|
<reponame>ParthSareen/HackTheNorthThreatAssess
import os
import cv2
from PIL import Image
import numpy as np
import pickle
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(BASE_DIR, "images")
face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
current_id = 0
label_ids = {}
y_labels = []
x_train =[]
for root, dirs, files in os.walk(image_dir):
for file in files:
if file.endswith("png") or file.endswith("jpg"):
path = os.path.join(root, file)
label = os.path.basename(root).replace(" ", "-").lower()
print(label, path)
if not label in label_ids:
label_ids[label] = current_id
current_id += 1
id = label_ids[label]
print (label_ids)
#y_labels.append(labels) #some number
#x_train.append(path) #verify this image, turn to numpy array, gray images
pil_image = Image.open(path).convert("L") #grayscale
image_array = np.array(pil_image, "uint8")#numpy array
#print(image_array)
faces = face_cascade.detectMultiScale(image_array, scaleFactor = 1.5, minNeighbors = 5) #gray, scaleFactor = 1.5, minNeighbors = 5
for (x,y,w,h) in faces:
roi = image_array[y:y+h, x:x+w]
x_train.append(roi)
y_labels.append(id)#make sure only one person in each folder
#print(y_labels)
#print(x_train)
with open("labels.pickle", "wb") as f:
pickle.dump(label_ids,f)
recognizer.train(x_train, np.array(y_labels))
recognizer.save("trainer.yml")
| 2.46875
| 2
|
main.py
|
codingaudrey/HuginAutomator
| 0
|
12781730
|
from HuginAutomator import HuginAutomator
from flask import Flask
import time
import datetime
import os
CONTEXTS = ('run', 'compute')
def get_env():
return {'credentials': os.getenv('DROPBOX_TOKEN'),
'min_s': os.getenv('MIN_STITCH'),
'max_s': os.getenv('MAX_STITCH'),
'min_a': os.getenv('MIN_ALIGN'),
'max_a': os.getenv('MAX_ALIGN')}
def main_loop_compute():
"""
periodically check dropbox folders to see if there are new projects
if a new project is found, download + align/build + upload it and continue with the loop
"""
env = get_env()
hugin = HuginAutomator(env['credentials'], env['min_s'], env['max_s'], env['min_a'], env['max_a'])
now = datetime.datetime.now
start_time = now()
most_recent_job = start_time
while now() - most_recent_job < datetime.timedelta(minutes=10):
if hugin.check_for_stitch():
hugin.build()
most_recent_job = now()
if hugin.check_for_align():
hugin.align()
most_recent_job = now()
time.sleep(5)
# go to some url to execute cloud function that turns off the instance
app = Flask(__name__)
@app.route('/')
def main():
env = get_env()
hugin = HuginAutomator(env['credentials'], env['min_s'], env['max_s'], env['min_a'], env['max_a'])
if hugin.check_for_stitch():
return hugin.build()
elif hugin.check_for_align():
return hugin.align()
return "asdf"
if __name__ == "__main__":
context = os.getenv('CONTEXT')
if context == CONTEXTS[0]:
app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
if context == CONTEXTS[1]:
main_loop_compute()
| 2.515625
| 3
|
lists.py
|
ralymuhif/Python_Course
| 0
|
12781731
|
<filename>lists.py<gh_stars>0
names = ["Ralph", "Aury", "Gabin", "Miguel", "Ketia"]
print(names[-1])
# Exercise
intList = [1, 34, 22, 34, 60, 12, 26, 100, 234, 67, 99, 45]
answer = 0
for i in intList:
if (i>answer):
answer = i
print (answer)
| 3.421875
| 3
|
example/setup.py
|
smok-serwis/cython-multibuild
| 6
|
12781732
|
import os
from setuptools import setup
from snakehouse import Multibuild, build, monkey_patch_parallel_compilation, find_pyx_and_c, \
find_all
from setuptools import Extension
monkey_patch_parallel_compilation()
dont_snakehouse = False
if 'DEBUG' in os.environ:
print('Debug is enabled!')
dont_snakehouse = True
# note that you can include standard Extension classes in this list, those won't be touched
# and will be directed directly to Cython.Build.cythonize()
cython_multibuilds = [
    # note that Windows-style paths are supported in a Linux build environment,
# the reverse not necessarily being true (issue #5)
Multibuild('example_module', find_all('example_module', True),
define_macros=[("CYTHON_TRACE_NOGIL", "1")],
dont_snakehouse=dont_snakehouse),
Extension('example2.example', ['example2/example.pyx']),
Multibuild('example3.example3.example3', ['example3/example3/example3/test.pyx'],
dont_snakehouse=dont_snakehouse)
]
# first argument is used directly by snakehouse, the rest and **kwargs are passed to
# Cython.Build.cythonize()
ext_modules = build(cython_multibuilds,
compiler_directives={
'language_level': '3',
})
setup(name='example_module',
version='0.1',
packages=['example_module', 'example2'],
install_requires=[
'Cython', 'snakehouse'
],
zip_safe=False,
tests_require=[
"nose2"
],
test_suite='nose2.collector.collector',
python_requires='!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
ext_modules=ext_modules
)
| 1.664063
| 2
|
imageRecognition/testCode/resizeImage.py
|
Emilurenius/Pool-Table-RGB
| 0
|
12781733
|
<filename>imageRecognition/testCode/resizeImage.py
import cv2
img = cv2.imread("resources/lambo.png")
print(img.shape)
imgResize = cv2.resize(img,(300,200))
print(imgResize.shape)
imgCropped = img[0:200,200:500]
cv2.imshow("Image", img)
cv2.imshow("Image resize", imgResize)
cv2.imshow("Image cropped", imgCropped)
cv2.waitKey(0)
| 2.9375
| 3
|
2019/15/py/run.py
|
Bigsby/aoc
| 1
|
12781734
|
#! /usr/bin/python3
import sys
import os
import time
from typing import Dict, List, Tuple
from collections import defaultdict
Position = complex
DIRECTIONS: Dict[int, Position] = {
1: -1j,
2: 1j,
3: -1,
4: 1
}
class IntCodeComputer():
def __init__(self, memory: List[int], inputs: List[int] = []):
self.memory = defaultdict(int, [(index, value)
for index, value in enumerate(memory)])
self.pointer = 0
self.inputs = inputs
self.outputs: List[int] = []
self.base = 0
self.running = True
self.polling = False
self.outputing = False
def set_input(self, value: int):
self.inputs.insert(0, value)
def run(self) -> List[int]:
while self.running:
self.tick()
return self.outputs
def get_parameter(self, offset: int, mode: int) -> int:
value = self.memory[self.pointer + offset]
if mode == 0: # POSITION
return self.memory[value]
if mode == 1: # IMMEDIATE
return value
elif mode == 2: # RELATIVE
return self.memory[self.base + value]
raise Exception("Unrecognized parameter mode", mode)
def get_address(self, offset: int, mode: int) -> int:
value = self.memory[self.pointer + offset]
if mode == 0: # POSITION
return value
if mode == 2: # RELATIVE
return self.base + value
raise Exception("Unrecognized address mode", mode)
def get_output(self) -> int:
self.outputing = False
return self.outputs.pop()
def add_input(self, value: int):
self.inputs.append(value)
def tick(self):
instruction = self.memory[self.pointer]
opcode, p1_mode, p2_mode, p3_mode = instruction % 100, (
instruction // 100) % 10, (instruction // 1000) % 10, (instruction // 10000) % 10
if not self.running:
return
if opcode == 1: # ADD
self.memory[self.get_address(3, p3_mode)] = self.get_parameter(
1, p1_mode) + self.get_parameter(2, p2_mode)
self.pointer += 4
elif opcode == 2: # MUL
self.memory[self.get_address(3, p3_mode)] = self.get_parameter(
1, p1_mode) * self.get_parameter(2, p2_mode)
self.pointer += 4
elif opcode == 3: # INPUT
if self.inputs:
self.polling = False
self.memory[self.get_address(1, p1_mode)] = self.inputs.pop(0)
self.pointer += 2
else:
self.polling = True
elif opcode == 4: # OUTPUT
self.outputing = True
self.outputs.append(self.get_parameter(1, p1_mode))
self.pointer += 2
elif opcode == 5: # JMP_TRUE
if self.get_parameter(1, p1_mode):
self.pointer = self.get_parameter(2, p2_mode)
else:
self.pointer += 3
elif opcode == 6: # JMP_FALSE
if not self.get_parameter(1, p1_mode):
self.pointer = self.get_parameter(2, p2_mode)
else:
self.pointer += 3
elif opcode == 7: # LESS_THAN
self.memory[self.get_address(3, p3_mode)] = 1 if self.get_parameter(
1, p1_mode) < self.get_parameter(2, p2_mode) else 0
self.pointer += 4
elif opcode == 8: # EQUALS
self.memory[self.get_address(3, p3_mode)] = 1 if self.get_parameter(
1, p1_mode) == self.get_parameter(2, p2_mode) else 0
self.pointer += 4
elif opcode == 9: # SET_BASE
self.base += self.get_parameter(1, p1_mode)
self.pointer += 2
elif opcode == 99: # HALT
self.running = False
else:
raise Exception(f"Unknown instruction", self.pointer,
instruction, opcode, p1_mode, p2_mode, p3_mode)
def clone(self):
clone_computer = IntCodeComputer([])
clone_computer.memory = dict(self.memory)
clone_computer.pointer = self.pointer
clone_computer.base = self.base
return clone_computer
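# Note: IntCodeComputer.clone() copies the full interpreter state (memory, pointer, relative base)
# so the breadth-first search below can branch the droid at every junction without replaying the
# program from the beginning.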
def draw_area(oxygen: List[Position], walls: List[Position], open_spaces: List[Position]):
    all_positions = walls + oxygen
    min_x = int(min(map(lambda p: p.real, all_positions)))
    max_x = int(max(map(lambda p: p.real, all_positions)))
    min_y = int(min(map(lambda p: p.imag, all_positions)))
    max_y = int(max(map(lambda p: p.imag, all_positions)))
for y in range(max_y, min_y - 1, - 1):
for x in range(min_x, max_x + 1):
position = x + y * 1j
c = " "
if position in walls:
c = "#"
if position in open_spaces:
c = "."
if position in oxygen:
c = "O"
print(c, end="")
print()
print()
def run_until_oxygen_system(memory: List[int]) -> Tuple[int, Position, List[Position]]:
start_position = 0j
open_spaces: List[Position] = []
oxygen_position = 0j
queue = [(start_position, [start_position], IntCodeComputer(memory))]
visited = [start_position]
steps_to_oxygen_system = 0
while queue:
position, path, droid = queue.pop(0)
for command, direction in DIRECTIONS.items():
new_position = position + direction
if new_position not in visited:
visited.append(new_position)
new_droid = droid.clone()
new_droid.inputs.append(command)
while not new_droid.outputing:
new_droid.tick()
status = new_droid.get_output()
if status == 2: # Oxygen system
if steps_to_oxygen_system == 0:
steps_to_oxygen_system = len(path)
oxygen_position = new_position
elif status == 1: # Open space
open_spaces.append(new_position)
while not new_droid.polling:
new_droid.tick()
new_path = list(path)
new_path.append(new_position)
queue.append((new_position, new_path, new_droid))
return steps_to_oxygen_system, oxygen_position, open_spaces
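# Note: because the queue above is explored breadth-first, the first time status 2 is seen
# len(path) is already the minimum number of movement commands to the oxygen system (part 1).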
def solve(memory: List[int]) -> Tuple[int, int]:
steps_to_oxygen_system, oxygen_system_position, open_spaces = run_until_oxygen_system(
memory)
filled = [oxygen_system_position]
minutes = 0
while open_spaces:
minutes += 1
for oxygen in list(filled):
for direction in DIRECTIONS.values():
position = oxygen + direction
if position in open_spaces:
filled.append(position)
open_spaces.remove(position)
return steps_to_oxygen_system, minutes
def get_input(file_path: str) -> List[int]:
if not os.path.isfile(file_path):
raise FileNotFoundError(file_path)
with open(file_path, "r") as file:
return [int(i) for i in file.read().split(",")]
def main():
if len(sys.argv) != 2:
raise Exception("Please, add input file path as parameter")
start = time.perf_counter()
part1_result, part2_result = solve(get_input(sys.argv[1]))
end = time.perf_counter()
print("P1:", part1_result)
print("P2:", part2_result)
print()
print(f"Time: {end - start:.7f}")
if __name__ == "__main__":
main()
| 3.265625
| 3
|
programs/b_vdm.py
|
yamamon75/PmagPy
| 2
|
12781735
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import input
import sys
import pmagpy.pmag as pmag
def spitout(line):
if '\t' in line:
dat=line.split('\t') # split the data on a space into columns
else:
dat=line.split() # split the data on a space into columns
b,lat=float(dat[0])*1e-6,float(dat[1])
vdm= pmag.b_vdm(b,lat) #
return vdm
def main():
"""
NAME
b_vdm.py
DESCRIPTION
converts B (in microT) and (magnetic) latitude to V(A)DM
INPUT (COMMAND LINE ENTRY)
B (microtesla), latitude (positive north)
OUTPUT
V[A]DM
SYNTAX
b_vdm.py [command line options] [< filename]
OPTIONS
-h prints help and quits
-i for interactive data entry
-f FILE input file
-F FILE output
"""
inp,out="",""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
inp=f.readlines()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
o=sys.argv[ind+1]
out=open(o,'w')
if '-i' in sys.argv:
cont=1
while cont==1:
try:
b=1e-6*float(input('B (in microtesla): <cntl-D to quit '))
lat=float(input('Latitude: '))
except:
print("\nGood bye\n")
sys.exit()
vdm= pmag.b_vdm(b,lat)
print('%10.3e '%(vdm))
if inp=="":
inp = sys.stdin.readlines() # read from standard input
for line in inp:
vdm=spitout(line)
if out=="":
print('%10.3e'%(vdm))
else:
out.write('%10.3e \n'%(vdm))
if __name__ == "__main__":
main()
| 3.109375
| 3
|
tdapi/person.py
|
borwick/tdapi
| 4
|
12781736
|
import copy
import random
import string
import tdapi
import tdapi.obj
class TDPersonManager(tdapi.obj.TDObjectManager):
def _copy_or_create(self, data, data_to_merge=None):
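        """Return a deep copy of data (or a fresh dict when data is None) with
        data_to_merge merged on top."""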
if data is None:
new_data = {}
else:
new_data = copy.deepcopy(data)
new_data.update(data_to_merge)
return new_data
def search(self, data):
return [TDPerson(td_struct)
for td_struct
in tdapi.TD_CONNECTION.json_request_roller(
method='post',
url_stem='people/search',
data=data,
)]
def all(self, data=None):
all_records = []
all_records += self.active(data)
all_records += self.inactive(data)
return all_records
def active(self, data=None):
# hard coded 1,000,000 as the max results
data = self._copy_or_create(data,
{'IsActive': True,
'MaxResults': 1000000,
})
return self.search(data)
def inactive(self, data=None):
data = self._copy_or_create(data,
{'IsActive': False,
'MaxResults': 1000000,
})
return self.search(data)
def get(self, uid):
user_url_stem = 'people/{}'.format(uid)
td_struct = tdapi.TD_CONNECTION.json_request_roller(
method='get',
url_stem=user_url_stem)
assert len(td_struct) == 1
return self.object_class(td_struct[0])
def userlist(self, active=None, employee=None, user_type=None):
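        """Call the people/userlist endpoint, optionally filtering on the active,
        employee and user type flags, and return the matching TDPerson objects."""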
userlist_url = 'people/userlist?'
# build the variables to pass to the GET
userlist_vars = []
if active is True:
userlist_vars.append('isActive=True')
elif active is False:
userlist_vars.append('isActive=False')
if employee is True:
userlist_vars.append('isEmployee=True')
elif employee is False:
userlist_vars.append('isEmployee=False')
if user_type is not None:
userlist_vars.append('userType={}'.format(user_type))
userlist_url += '&'.join(userlist_vars)
return [TDPerson(td_struct)
for td_struct
in tdapi.TD_CONNECTION.json_request_roller(
method='get',
url_stem=userlist_url)]
class TDPerson(tdapi.obj.TDObject):
def __init__(self, *args, **kwargs):
super(TDPerson, self).__init__(*args, **kwargs)
self._single_queried = False
def __eq__(self, otro):
return self.person_id() == otro.person_id()
def __ne__(self, otro):
return not self == otro
def __hash__(self):
# Needed for set operations
return hash(self.person_id())
def __str__(self):
return self.get('FullName')
def person_id(self):
return self.get('UID')
def person_url(self):
return 'people/{}'.format(self.person_id())
def _ensure_single_query(self):
if self._single_queried is False:
self.td_struct = tdapi.TD_CONNECTION.json_request(
method='get',
url_stem=self.person_url()
)
self._single_queried = True
def import_string(self):
return '{} <{}>'.format(self.get('FullName').encode('utf-8'), self.get('AlertEmail'))
def add_group_by_id(self,
group_id,
isPrimary=False,
isNotified=True,
isManager=False,
):
# does not currently support the optional arguments
add_group_uri = self.person_url() + '/groups/{}'.format(group_id) + \
'?isPrimary={}'.format(isPrimary) + \
'&isNotified={}'.format(isNotified) + \
'&isManager={}'.format(isManager)
tdapi.TD_CONNECTION.request(method='put',
url_stem=add_group_uri)
def del_group_by_id(self, group_id):
del_group_uri = self.person_url() + '/groups/{}'.format(group_id)
tdapi.TD_CONNECTION.request(method='delete',
url_stem=del_group_uri)
def set_active(self, active):
activate_uri = self.person_url() + '/isactive?status={}'.format(active)
tdapi.TD_CONNECTION.request(method='put',
url_stem=activate_uri)
def activate(self):
return self.set_active(True)
def deactivate(self):
return self.set_active(False)
def is_active(self):
return self.get('IsActive') == True
def update(self, update_data):
# don't mess with the original data. copy into the update all
# existing data. TODO consider purging cache and re-calling
# query before doing this update.
update_data = copy.deepcopy(update_data)
self._ensure_single_query() # Make sure we have all attributes populated
# short circuit to make sure update_data is not already set
seen_all = True
for (update_key, update_val) in update_data.items():
if self.get(update_key) != update_val:
seen_all = False
break
if seen_all == True:
return
for orig_attr in self.td_struct.keys():
if orig_attr not in update_data:
update_data[orig_attr] = self.td_struct[orig_attr]
tdapi.TD_CONNECTION.request(method='post',
url_stem=self.person_url(),
data=update_data)
def add_applications(self, app_list):
all_apps = list(set(self.td_struct['Applications'] + app_list))
return self.update({'Applications': all_apps})
def del_applications(self, app_list):
all_apps = [x for x in self.td_struct['Applications']
if x not in app_list]
return self.update({'Applications': all_apps})
@classmethod
def new(cls, update_data):
update_data = copy.deepcopy(update_data)
if 'TypeID' not in update_data:
update_data['TypeID'] = 1 # User
if 'UserName' not in update_data:
update_data['UserName'] = update_data['AuthenticationUserName']
if 'Password' not in update_data:
random_password = ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(20))
            update_data['Password'] = random_password
if 'AlertEmail' not in update_data:
update_data['AlertEmail'] = update_data['PrimaryEmail']
tdapi.TD_CONNECTION.request(method='post',
url_stem='people',
data=update_data)
tdapi.obj.relate_cls_to_manager(TDPerson, TDPersonManager)
| 2.4375
| 2
|
text/_elisp/buffer/_op/_text/insert.py
|
jedhsu/text
| 0
|
12781737
|
<filename>text/_elisp/buffer/_op/_text/insert.py
class BufferInsert(
BufferOperator,
):
pass
| 1.109375
| 1
|
tests/test_connection.py
|
matthewhampton/PyRFC
| 0
|
12781738
|
<reponame>matthewhampton/PyRFC
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime, pyrfc, unittest, socket, timeit
from configparser import ConfigParser
config = ConfigParser()
config.read('pyrfc.cfg')
params = config._sections['connection']
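# A minimal pyrfc.cfg sketch (illustrative placeholder values only; the file must
# point at a reachable SAP system, and these are the usual direct-logon parameter
# names referenced elsewhere in this test module):
#
# [connection]
# ashost = sap.example.com
# sysnr  = 00
# client = 100
# user   = DEMO_USER
# passwd = changeme
# lang   = EN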
class ConnectionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.conn = pyrfc.Connection(**params)
# Assure english as connection language
connection_info = cls.conn.get_connection_attributes()
if connection_info['isoLanguage'] != u'EN':
raise pyrfc.RFCError("Testing must be done with English as language.")
@classmethod
def tearDownClass(cls):
pass
# TODO: test correct status after error -> or to the error tests?
def test_incomplete_params(self):
incomplete_params = params.copy()
for p in ['ashost', 'gwhost', 'mshost']:
if p in incomplete_params:
del incomplete_params[p]
with self.assertRaises(pyrfc.ExternalRuntimeError) as run:
pyrfc.Connection(**incomplete_params)
self.assertEqual(run.exception.code, 20)
self.assertEqual(run.exception.key, 'RFC_INVALID_PARAMETER')
self.assertEqual(run.exception.message, 'Parameter ASHOST, GWHOST or MSHOST is missing.')
def test_denied_users(self):
denied_params = params.copy()
denied_params['user'] = 'BLAFASEL'
with self.assertRaises(pyrfc.LogonError) as run:
pyrfc.Connection(**denied_params)
self.assertEqual(run.exception.code, 2)
self.assertEqual(run.exception.key, 'RFC_LOGON_FAILURE')
self.assertEqual(run.exception.message, 'Name or password is incorrect (repeat logon)')
def test_config_parameter(self):
# rstrip test
conn2 = pyrfc.Connection(config={'rstrip': False}, **config._sections['connection'])
hello = u'Hällo SAP!' + u' ' * 245
result = conn2.call('STFC_CONNECTION', REQUTEXT=hello)
self.assertEqual(result['ECHOTEXT'], hello, "Test with rstrip=False (input length=255 char)")
result = conn2.call('STFC_CONNECTION', REQUTEXT=hello.rstrip())
self.assertEqual(result['ECHOTEXT'], hello, "Test with rstrip=False (input length=10 char)")
conn2.close()
# return_import_params
result = self.conn.call('STFC_CONNECTION', REQUTEXT=hello)
with self.assertRaises(KeyError):
imp_var = result['REQUTEXT']
conn3 = pyrfc.Connection(config={'return_import_params': True}, **config._sections['connection'])
result = conn3.call('STFC_CONNECTION', REQUTEXT=hello.rstrip())
imp_var = result['REQUTEXT']
conn3.close()
@unittest.skip("time consuming; may block other tests")
def test_many_connections(self):
# If too many connections are established, the following error will occur (on interactive python shell)
#
#CommunicationError: Error 1: [RFC_COMMUNICATION_FAILURE]
#LOCATION CPIC (TCP/IP) on local host with Unicode
#ERROR max no of 100 conversations exceeded
#TIME Tue Sep 18 11:09:35 2012
#RELEASE 720
#COMPONENT CPIC (TCP/IP) with Unicode
#VERSION 3
#RC 466
#MODULE r3cpic_mt.c
#LINE 14345
#COUNTER 1
# ABAP:
for i in range(150):
conn2 = pyrfc.Connection(**params)
            conn2.close() # Use explicit close() here. If omitted, the server may block an open connection attempt
# _and refuse further connections_, resulting in RFC_INVALID_HANDLE errors for the other
# test!
def test_ping(self):
self.conn.ping()
def test_call_undefined(self):
with self.assertRaises(pyrfc.ABAPApplicationError) as run:
self.conn.call('undefined')
self.assertEqual(run.exception.code, 5)
self.assertEqual(run.exception.key, 'FU_NOT_FOUND')
self.assertEqual(run.exception.message, 'ID:FL Type:E Number:046 undefined')
with self.assertRaises(pyrfc.ExternalRuntimeError) as run:
self.conn.call('STFC_CONNECTION', undefined=0)
self.assertEqual(run.exception.code, 20)
self.assertEqual(run.exception.key, 'RFC_INVALID_PARAMETER')
self.assertEqual(run.exception.message, "field 'undefined' not found")
def test_date_output(self):
self.conn.call('BAPI_USER_GET_DETAIL', USERNAME='mc_test')
def test_connection_attributes(self):
data = self.conn.get_connection_attributes()
self.assertEqual(data['client'], str(params['client']))
self.assertEqual(data['host'], str(socket.gethostname()))
self.assertEqual(data['isoLanguage'], str(params['lang'].upper()))
# Only valid for direct logon systems:
# self.assertEqual(data['sysNumber'], str(params['sysnr']))
self.assertEqual(data['user'], str(params['user'].upper()))
self.assertEqual(data['rfcRole'], u'C')
# old tests, referring to non static z-functions
# def test_invalid_input(self):
# self.conn.call('Z_PBR_EMPLOYEE_GET', IV_EMPLOYEE='100190', IV_USER_ID='')
# self.conn.call('Z_PBR_EMPLOYEE_GET', IV_EMPLOYEE='', IV_USER_ID='HRPB_MNG01')
# self.assertRaises(TypeError, self.conn.call, 'Z_PBR_EMPLOYEE_GET', IV_EMPLOYEE=100190, IV_USER_ID='HRPB_MNG01')
#
# def test_xstring_output(self):
# self.conn.call('Z_PBR_EMPLOYEE_GET_XSTRING', IV_EMPLOYEE='100190')
#
# def test_xstring_input_output(self):
# for i in 1, 2, 3, 1023, 1024, 1025:
# s = 'X' * i
# out = self.conn.call('Z_PBR_TEST_2', IV_INPUT_XSTRING=s)
# self.assertEqual(s, out['EV_EXPORT_XSTRING'])
if __name__ == '__main__':
unittest.main()
| 2.34375
| 2
|
csvjson/__init__.py
|
gecBurton/csv-json
| 0
|
12781739
|
"""Top-level package for csvjson."""
__author__ = """<NAME>"""
__email__ = "<EMAIL>"
__version__ = "0.1.0"
__all__ = ["load"]
from csvjson.csvjson import load
| 1.375
| 1
|
tests/glm/test_benchmark_golden_master.py
|
readthedocs-assistant/glum
| 68
|
12781740
|
<filename>tests/glm/test_benchmark_golden_master.py
import json
import warnings
import click
import numpy as np
import pytest
from git_root import git_root
from glum_benchmarks.cli_run import execute_problem_library
from glum_benchmarks.problems import Problem, get_all_problems
from glum_benchmarks.util import BenchmarkParams, get_obj_val
bench_cfg = dict(num_rows=10000, regularization_strength=0.1, diagnostics_level="none")
all_test_problems = get_all_problems()
def is_weights_problem_with_offset_match(problem_name):
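    """Weighted gamma/poisson/tweedie problems reuse the golden-master entry of the
    matching 'offset' problem, so they are not stored separately."""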
return (
"no-weights" not in problem_name
and "weights" in problem_name
and (
"gamma" in problem_name
or "poisson" in problem_name
or "tweedie" in problem_name
)
)
@pytest.fixture(scope="module")
def expected_all():
with open(git_root("tests/glm/golden_master/benchmark_gm.json"), "r") as fh:
return json.load(fh)
@pytest.mark.parametrize(
["Pn", "P"],
[
x if "wide" not in x[0] else pytest.param(x[0], x[1], marks=pytest.mark.slow)
for x in all_test_problems.items()
], # mark the "wide" problems as "slow" so that we can call pytest -m "not slow"
ids=all_test_problems.keys(),
)
def test_gm_benchmarks(Pn: str, P: Problem, expected_all: dict):
    result, params = run_problem(Pn)
if is_weights_problem_with_offset_match(Pn):
expected = expected_all["offset".join(Pn.split("weights"))]
else:
expected = expected_all[Pn]
all_result = np.concatenate(([result["intercept"]], result["coef"]))
all_expected = np.concatenate(([expected["intercept"]], expected["coef"]))
try:
np.testing.assert_allclose(all_result, all_expected, rtol=2e-4, atol=2e-4)
except AssertionError as e:
dat = P.data_loader(
num_rows=params.num_rows,
)
obj_result = get_obj_val(
dat,
P.distribution,
P.regularization_strength,
P.l1_ratio,
all_result[0],
all_result[1:],
)
expected_result = get_obj_val(
dat,
P.distribution,
P.regularization_strength,
P.l1_ratio,
all_expected[0],
all_expected[1:],
)
raise AssertionError(
f"""Failed with error {e} on problem {Pn}.
New objective function value is higher by {obj_result - expected_result}."""
)
@click.command()
@click.option("--overwrite", is_flag=True, help="overwrite existing golden master")
@click.option(
"--problem_name", default=None, help="Only run and store a specific problem."
)
def run_and_store_golden_master(overwrite, problem_name):
try:
with open(git_root("tests/glm/golden_master/benchmark_gm.json"), "r") as fh:
gm_dict = json.load(fh)
except FileNotFoundError:
gm_dict = {}
for Pn in get_all_problems().keys():
if is_weights_problem_with_offset_match(Pn):
continue
if problem_name is not None:
if Pn != problem_name:
continue
        res, params = run_problem(Pn)
if Pn in gm_dict.keys():
if overwrite:
warnings.warn("Overwriting existing result")
else:
warnings.warn("Result exists and cannot overwrite. Skipping")
continue
gm_dict[Pn] = dict(
coef=res["coef"].tolist(),
intercept=res["intercept"],
)
with open(git_root("tests/glm/golden_master/benchmark_gm.json"), "w") as fh:
json.dump(gm_dict, fh, indent=2)
def run_problem(Pn):
execute_args = ["diagnostics_level"]
params = BenchmarkParams(
problem_name=Pn,
library_name="glum",
**{k: v for k, v in bench_cfg.items() if k not in execute_args},
)
if bench_cfg["diagnostics_level"] != "none":
print("Running", Pn)
result, _ = execute_problem_library(
params, **{k: v for k, v in bench_cfg.items() if k in execute_args}
)
return result, params
if __name__ == "__main__":
run_and_store_golden_master()
| 2.046875
| 2
|
ymir/backend/src/ymir_controller/controller/label_model/label_runner.py
|
Zhang-SJ930104/ymir
| 64
|
12781741
|
<filename>ymir/backend/src/ymir_controller/controller/label_model/label_runner.py
import logging
import os
from typing import Tuple, List
from controller.invoker.invoker_task_exporting import TaskExportingInvoker
from controller.utils import utils
from proto import backend_pb2
def prepare_label_dir(working_dir: str, task_id: str) -> Tuple[str, str, str, str, str]:
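    """Create the per-task directory layout (input images, label studio output,
    export/import work dirs) and return those paths plus the monitor file path."""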
task_data_dir = f"{working_dir}/label_{task_id}"
input_asset_dir = os.path.join(task_data_dir, "Images")
os.makedirs(input_asset_dir, exist_ok=True)
export_path = os.path.join(task_data_dir, "label_studio_output")
os.makedirs(export_path, exist_ok=True)
# keep same name as other task
monitor_file_path = os.path.join(working_dir, "out", "monitor.txt")
export_work_dir = os.path.join(working_dir, "export_work_dir")
os.makedirs(export_work_dir, exist_ok=True)
import_work_dir = os.path.join(working_dir, "import_work_dir")
os.makedirs(import_work_dir, exist_ok=True)
return input_asset_dir, export_path, monitor_file_path, export_work_dir, import_work_dir
def trigger_ymir_export(repo_root: str, dataset_id: str, input_asset_dir: str, media_location: str,
export_work_dir: str, keywords: List[str]) -> None:
# trigger ymir export, so that we can get pictures from ymir
format_str = utils.annotation_format_str(backend_pb2.LabelFormat.LABEL_STUDIO_JSON)
TaskExportingInvoker.exporting_cmd(repo_root=repo_root,
dataset_id=dataset_id,
annotation_format=format_str,
asset_dir=input_asset_dir,
annotation_dir=input_asset_dir,
media_location=media_location,
work_dir=export_work_dir,
keywords=keywords)
def start_label_task(
repo_root: str,
working_dir: str,
media_location: str,
task_id: str,
project_name: str,
dataset_id: str,
keywords: List,
collaborators: List,
expert_instruction: str,
export_annotation: bool,
) -> None:
logging.info("start label task!!!")
label_instance = utils.create_label_instance()
input_asset_dir, export_path, monitor_file_path, export_work_dir, import_work_dir = prepare_label_dir(
working_dir, task_id)
trigger_ymir_export(repo_root=repo_root,
dataset_id=dataset_id,
input_asset_dir=input_asset_dir,
media_location=media_location,
export_work_dir=export_work_dir,
keywords=keywords)
label_instance.run(task_id=task_id,
project_name=project_name,
keywords=keywords,
collaborators=collaborators,
expert_instruction=expert_instruction,
input_asset_dir=input_asset_dir,
export_path=export_path,
monitor_file_path=monitor_file_path,
repo_root=repo_root,
media_location=media_location,
import_work_dir=import_work_dir,
use_pre_annotation=export_annotation)
logging.info("finish label task!!!")
| 2.1875
| 2
|
service/backup.py
|
ruslan-ok/ruslan
| 1
|
12781742
|
<reponame>ruslan-ok/ruslan
import os, zipfile, math, shutil, fnmatch, subprocess, smtplib, time
from datetime import datetime
from email.message import EmailMessage
from secret import *
YEAR_DURATION = 365
MONTH_DURATION = 30
WEEK_DURATION = 7
class BackupError(Exception):
def __init__(self, stage, info):
self.stage = stage
self.info = info
def __str__(self):
return 'Ошибка на этапе {0}. {1}'.format(self.stage, self.info)
def sizeof_fmt(num, suffix = 'B'):
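    """Format a byte count with a binary (1024-based) unit prefix, e.g. 1536 -> '1.5 KB'."""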
magnitude = int(math.floor(math.log(num, 1024)))
val = num / math.pow(1024, magnitude)
if magnitude > 7:
return '{:.1f}{}{}'.format(val, 'Y', suffix)
return '{:3.1f}{}{}'.format(val, [' ', ' K', ' M', ' G', ' T', ' P', ' E', ' Z'][magnitude], suffix)
class Backup:
full_mode = False
content = []
case = 0
def read_last(self, name):
try:
with open(work_dir + 'last_' + name + '.txt', 'r') as f:
return f.read()
except FileNotFoundError:
            return '' # Acceptable situation - assume a backup has never been made yet
return ''
    # Select the archiving mode
def count_mode(self):
self.full_mode = False
last = self.read_last('full')
d = 0
if (last == ''):
self.full_mode = True
mode = 'Полный архив (ранее не формировался)'
else:
l = datetime.strptime(last, '%Y-%m-%d %H:%M:%S.%f')
n = datetime.now()
d = (n - l).days
self.full_mode = (d >= MONTH_DURATION)
if self.full_mode:
mode = 'Полный архив'
else:
mode = 'Краткий архив. Дней до полной архивации: ' + str(MONTH_DURATION - d) + '.'
print(mode)
self.content.append(mode)
def backup_db(self, zf):
file = work_dir + 'mysql_backup.sql'
command = '"' + sql_dump + '" --user=' + sql_user + ' --password=' + sql_pass + ' --result-file=' + file + ' rusel'
return_code = subprocess.call(command, shell = True)
if (return_code != 0):
raise BackupError('backup_db', 'Ошибка создания бэкапа MySQL. Код ошибки: ' + return_code.__str__())
sz = os.path.getsize(file)
self.content.append(' ' + file + ' ' + sizeof_fmt(sz))
zf.write(file)
os.remove(file)
def backup_mail(self, zf):
return_code = subprocess.call(work_dir + 'MailBackup.vbs', shell = True)
if (return_code != 0):
raise BackupError('backup_mail', 'Вызов subprocess.call вернул код ошибки ' + return_code.__str__())
time.sleep(mail_wait)
total = 0
for file in fnmatch.filter(os.listdir('.'), work_dir + 'HMBackup*.7z'):
total += 1
sz = os.path.getsize(file)
self.content.append(' ' + file + ' ' + sizeof_fmt(sz))
zf.write(file)
os.remove(file)
if (total == 0):
raise BackupError('backup_mail', 'За назначенный таймаут файл архива не был получен.')
    # Archiving
def archivate(self):
if self.full_mode:
dirs = full_dirs
else:
dirs = short_dirs
try:
os.mkdir(work_dir + 'temp')
except FileExistsError:
pass
fn = device + '-' + datetime.now().strftime('%Y.%m.%d-%H.%M.%S')
zf = zipfile.ZipFile(work_dir + 'temp/' + fn + '.zip', 'w')
for dir in dirs:
print('Archiving:', dir, '...')
if (dir == 'mysql'):
self.backup_db(zf)
elif (dir == 'email'):
self.backup_mail(zf)
else:
for dirname, subdirs, files in os.walk(dir):
zf.write(dirname)
for filename in files:
zf.write(os.path.join(dirname, filename))
zf.close()
sz = os.path.getsize(work_dir + 'temp/' + fn + '.zip')
if (sz > 1000):
self.content.append(' ' + fn + '.zip ' + sizeof_fmt(sz))
elif (sz == 0):
raise BackupError('archivate', 'Не удалось создать архив ' + work_dir + 'temp/' + fn + '.zip')
else:
raise BackupError('archivate', 'Пустой архив ' + work_dir + 'temp/' + fn + '.zip')
    # If the contents of some directory were deleted, it can be marked here so that its mirror is cleared during synchronization
def mark_for_clear(self, dir):
with open(dir + '/__clear__.txt', 'a') as f:
f.write('clear me')
    # Determine the age of the archive in the given folder
def arch_age(self, dir, max_duration):
n = datetime.now()
        for file in fnmatch.filter(os.listdir(dir), device + '-????.??.??-??.??.??.zip'): # look in the given folder for any archive matching our name pattern
ss = file[len(device)+1:-4]
mt = datetime.strptime(ss, '%Y.%m.%d-%H.%M.%S')
            return (n - mt).days # return the number of days since its modification date
return max_duration
    # Rotation
def rotate(self):
if self.full_mode:
max_duration = YEAR_DURATION
max_dir = work_dir + 'year2'
med_dir = work_dir + 'year1'
min_dir = work_dir + 'month'
else:
max_duration = WEEK_DURATION
max_dir = work_dir + 'week2'
med_dir = work_dir + 'week1'
min_dir = work_dir + 'day'
tmp_dir = work_dir + 'temp'
if not os.path.exists(max_dir):
shutil.copytree(tmp_dir, max_dir, dirs_exist_ok=True)
self.mark_for_clear(max_dir)
self.content.append(' Копия сохранена в ' + max_dir)
if not os.path.exists(med_dir):
shutil.copytree(tmp_dir, med_dir, dirs_exist_ok=True)
self.mark_for_clear(med_dir)
self.content.append(' Архив сохранен в ' + med_dir)
else:
age = self.arch_age(med_dir, max_duration)
if (age >= max_duration):
print('Выполняется ротация')
shutil.rmtree(max_dir, ignore_errors = True)
os.rename(med_dir, max_dir)
os.rename(min_dir, med_dir)
self.mark_for_clear(max_dir)
self.mark_for_clear(med_dir)
self.content.append(' Ротация: ' + tmp_dir + ' -> ' + min_dir + ' -> ' + med_dir + ' -> ' + max_dir)
else:
self.content.append(' Архив сохранен в ' + min_dir + '. Дней до ротации: ' + str(max_duration - age))
shutil.rmtree(min_dir, ignore_errors = True)
os.rename(tmp_dir, min_dir)
self.mark_for_clear(min_dir)
def add_info(self, src, dst):
if (self.case == 0):
self.case = 1
self.content.append(' ' + src + ':')
self.content.append(' ' + dst)
def clear_dir(self, dir):
for filename in os.listdir(dir):
file_path = os.path.join(dir, filename)
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
def synch_dir(self, _src, _dst, _folder):
if (_src == 'nuc'):
src = nuc_drive + _folder
dst = backup_folder + _folder
else:
src = backup_folder + _folder
dst = nuc_drive + _folder
self.case = 0
for dirname, subdirs, files in os.walk(src):
part = dirname[len(src)+1:]
if (part != ''):
part = '\\' + part
test = dst + part
zzz = False
if not os.path.exists(test):
os.mkdir(test)
zzz = True
else:
if os.path.isfile(src + part + '/__clear__.txt'):
self.clear_dir(test)
zzz = True
for f in files:
if (f == '__clear__.txt'):
os.remove(src + part + '\\' + f)
continue
fsrc = src + part + '\\' + f
fdst = test + '\\' + f
sat = os.path.getatime(fsrc)
smt = os.path.getmtime(fsrc)
if zzz or not os.path.exists(fdst):
print(_dst + '\\' + _folder + part + '\\' + f, 'copied...')
shutil.copyfile(fsrc, fdst)
os.utime(fdst, (sat, smt))
self.add_info(_src + '\\' + _folder + ' -> ' + _dst + '\\' + _folder, 'Файл скопирован в ' + _dst + '\\' + _folder + part + '\\' + f)
else:
dmt = os.path.getmtime(fdst)
if (smt != dmt):
print(_dst + '\\' + _folder + part + '\\' + f, 'copied...')
shutil.copyfile(fsrc, fdst)
os.utime(fdst, (sat, smt))
self.add_info(_src + '\\' + _folder + ' -> ' + _dst + '\\' + _folder, 'Файл обновлен в ' + _dst + '\\' + _folder + part + '\\' + f)
if (self.case == 0):
self.content.append(' ' + _src + '\\' + _folder + ' -> ' + _dst + '\\' + _folder + ' - без изменений.')
    # Synchronization
def synch(self):
if (not syncs or (len(syncs) == 0)):
return
self.content.append('')
self.content.append('Синхронизация:')
for s in syncs:
self.synch_dir(s[0], s[1], s[2])
    # Send the notification e-mail
def send_mail(self, status, info):
s = smtplib.SMTP(host='rusel.by', port=25)
s.starttls()
s.login(mail_login, mail_pass)
msg = EmailMessage()
msg['From'] = mail_from
msg['To'] = mail_to
msg['Subject']='Архивация на ' + device + ' - ' + status
body = ''
for str in self.content:
body = body + str + '\n'
if (info != ''):
body = body + '\n' + status + ' ' + info
msg.set_content(body)
s.send_message(msg)
del msg
s.quit()
    # Record the time of the last backup
def fix_time(self):
if self.full_mode:
tail = 'full'
else:
tail = 'short'
with open(work_dir + 'last_' + tail + '.txt', 'w') as f:
f.write(datetime.now().__str__())
def save_log(self, input_wait, status, info):
with open(work_dir + 'backup.log', 'a') as f:
f.write(str(datetime.now()) + ' ')
f.write(status + ' ' + info + '\n')
for ct in self.content:
print(ct)
self.send_mail(status, info)
if input_wait:
if info:
print(info)
input(status + ' >')
def run(self, input_wait):
try:
self.content.clear()
            self.count_mode() # Select the archiving mode
            self.archivate() # Archiving
            self.rotate() # Rotation
            self.synch() # Synchronization
            self.fix_time() # Record the archiving time
self.save_log(input_wait, 'ok', '')
except BackupError as be:
self.save_log(input_wait, '[x]', str(be))
except Exception as ex:
self.save_log(input_wait, '[x]', str(ex))
if __name__ == '__main__':
x = Backup()
x.run(False)
| 2.15625
| 2
|
tests/PyTapDEMON_Test.py
|
ericchou-python/PyTapDEMON
| 5
|
12781743
|
#!/usr/bin/env python
#
# This test uses out-of-band ovs-ofctl calls to query the
# switches and compares the result against a known state to
# check that the flows are installed correctly in the
# PyTapDEMON topology.
#
import unittest
import subprocess
def parseFlows(flows):
"""
Parse out the string representation of flows passed in.
Example:
NXST_FLOW reply (xid=0x4):
cookie=0x0, duration=4.329s, table=0, n_packets=0, n_bytes=0, idle_timeout=120,hard_timeout=120,in_port=3 actions=output:4
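    The example flow line above parses to {'3': 'output:4'} (input port mapped to its actions).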
"""
switchFlows = {}
for flow in flows.split('\n'):
line = flow.split()
if len(line) > 3: #get rid of first line in flow output
inputPort = line[5].split(',')[2].split('=')[1]
outputPorts = line[6].split('actions=')[1]
switchFlows[inputPort] = outputPorts
return switchFlows
globalFlows = {}
for i in range(1, 4):
"""Query switches s1, s2, s3 and dump flows, add to global flow dictionary"""
switch = 's'+str(i)
flows = subprocess.check_output(['sudo', 'ovs-ofctl', 'dump-flows', switch])
switchFlows = parseFlows(flows)
globalFlows[switch] = switchFlows
class PyTapDEMON_Test(unittest.TestCase):
def test_s1_port1(self):
self.assertEqual('output:2,output:6,output:8', globalFlows['s1']['1'])
def test_s2_port1(self):
self.assertEqual('output:2,output:6,output:8', globalFlows['s2']['1'])
def test_s3_port10(self):
self.assertEqual('output:11', globalFlows['s3']['10'])
if __name__ == '__main__':
unittest.main()
| 2.578125
| 3
|
deep3dmap/core/renderer/renderer_pt3d.py
|
achao2013/DeepRecon
| 30
|
12781744
|
import torch
import numpy as np
# Util function for loading meshes
from pytorch3d.io import load_objs_as_meshes
from pytorch3d.transforms import euler_angles_to_matrix, matrix_to_euler_angles,Rotate
# Data structures and functions for rendering
from pytorch3d.structures import Pointclouds, Meshes
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
OpenGLOrthographicCameras,
#SfMPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
PointsRasterizationSettings,
MeshRenderer,
PointsRenderer,
MeshRasterizer,
PointsRasterizer,
SoftPhongShader,
NormWeightedCompositor,
BlendParams,
AlphaCompositor,
TexturesVertex,
TexturesUV,
TexturesAtlas
)
class Pt3dRenderer():
def __init__(self, device, texture_size, lookview):
self.raster_settings = RasterizationSettings(
image_size=texture_size,
blur_radius=0.0,
faces_per_pixel=1,
bin_size = None, # this setting controls whether naive or coarse-to-fine rasterization is used
max_faces_per_bin = None # this setting is for coarse rasterization
)
self.lights = PointLights(device=device,ambient_color=((0, 0, 0),),diffuse_color=((1, 1, 1),),specular_color=((0, 0, 0),), location=[[0.0, 0.0, 10.0]])
self.materials = Materials(device=device,ambient_color=((0, 0, 0),),diffuse_color=((1, 1, 1),),specular_color=((0, 0, 0),))
self.lookview=lookview.view(1,3)
self.device=device
def sample(self,normals,angles,triangles,imgs,template_uvs3d,face_project):
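        """Rasterize the batch of input images (and an all-ones mask) into UV texture
        space, keeping only the faces selected by a per-vertex visibility test based
        on the dot product of the rotated normals with the look direction.
        Returns (uv_images, uv_mask)."""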
#rot=Rotate(R, device=device)
#normals_transformed = rot.transform_normals(normals.repeat(batchsize,1,1))
batchsize=angles.shape[0]
vertexsize=normals.shape[0]
trisize=triangles.shape[0]
RR = euler_angles_to_matrix(angles, "XYZ")
rot=Rotate(RR)
normals_transformed = rot.transform_normals(normals)
coefs = torch.sum(torch.mul(normals_transformed, self.lookview.repeat(batchsize,vertexsize,1)), 2)
ver_visibility = torch.ones(batchsize,vertexsize).cuda()
ver_visibility[coefs < 0] = 0
used_faces=[]
for b in range(batchsize):
visible_veridx = (ver_visibility[b]<=0).nonzero().view(-1)
#print('triangles visible_veridx:',triangles.unsqueeze(-1).shape, unvisible_veridx.shape)
#part trinum x vertexnum for gpu memory
part_num=8
part_size=int(visible_veridx.shape[0]//part_num)
tri_visibility=(~(triangles.unsqueeze(-1) == visible_veridx[:part_size])).any(-1)
for j in range(1,part_num):
if j < part_num-1:
tri_visibility |= (~(triangles.unsqueeze(-1) == visible_veridx[j*part_size:(j+1)*part_size])).any(-1)
else:
tri_visibility |= (~(triangles.unsqueeze(-1) == visible_veridx[j*part_size:])).any(-1)
visible_triidx = (torch.sum(tri_visibility, 1)>0).nonzero().view(-1)
used_faces.append(triangles[visible_triidx])
tex = TexturesUV(verts_uvs=face_project, faces_uvs=used_faces, maps=imgs.permute(0,2,3,1))
mesh = Meshes(
verts=[template_uvs3d]*batchsize, faces=used_faces, textures=tex)
R_, T_ = look_at_view_transform(2.7, torch.zeros(batchsize).cuda(), torch.zeros(batchsize).cuda())
camera = OpenGLOrthographicCameras(device=self.device, R=R_.float(), T=T_.float())
#camera = OpenGLOrthographicCameras(R=R_, T=T_)
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera,
raster_settings=self.raster_settings
),
shader=SoftPhongShader(
device=self.device,
cameras=camera,
blend_params=BlendParams(background_color=(0,0,0))
)
)
uv_images = renderer(mesh)
mask = TexturesUV(verts_uvs=face_project, faces_uvs=used_faces, maps=torch.ones_like(imgs.permute(0,2,3,1)))
mesh_mask = Meshes(
verts=[template_uvs3d]*batchsize, faces=used_faces, textures=mask)
uv_mask = renderer(mesh_mask)
return uv_images,uv_mask
| 2.328125
| 2
|
tectosaur/constraints.py
|
jlmaurer/tectosaur
| 17
|
12781745
|
import scipy.sparse
import numpy as np
from tectosaur.util.cpp import imp
fast_constraints = imp('tectosaur.fast_constraints')
for k in dir(fast_constraints):
locals()[k] = getattr(fast_constraints, k)
def build_constraint_matrix(cs, n_total_dofs):
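    """Assemble the constraint matrix via the fast C++ extension.

    Returns a csr matrix of shape (n_total_dofs, n_total_dofs - n_unique_constraints),
    the right-hand-side vector, and the rhs matrix used to build it.
    """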
rows, cols, vals, rhs_rows, rhs_cols, rhs_vals, rhs_in, n_unique_cs = \
fast_constraints.build_constraint_matrix(cs, n_total_dofs)
n_rows = n_total_dofs
n_cols = n_total_dofs - n_unique_cs
cm = scipy.sparse.csr_matrix((vals, (rows, cols)), shape = (n_rows, n_cols))
rhs_mat = scipy.sparse.csr_matrix((rhs_vals, (rhs_rows, rhs_cols)), shape = (n_rows, len(cs)))
return cm, rhs_mat.dot(rhs_in), rhs_mat
def simple_constraint_matrix(cs, n_cols):
rows = []
cols = []
data = []
rhs = np.zeros((len(cs)))
for i in range(len(cs)):
c = cs[i]
for j in range(len(c.terms)):
rows.append(i)
cols.append(c.terms[j].dof)
data.append(c.terms[j].val)
rhs[i] = c.rhs
return (
scipy.sparse.csr_matrix((data, (rows, cols)), shape = (len(cs), n_cols)),
rhs
)
| 2.21875
| 2
|
examples/03-remote-system/nautobot_models.py
|
chadell/diffsync
| 67
|
12781746
|
"""Extension of the Base model for the Nautobot DiffSync Adapter to manage the CRUD operations."""
import pynautobot # pylint: disable=import-error
from models import Region, Country # pylint: disable=no-name-in-module
from diffsync import DiffSync
# pylint: disable=no-member,too-few-public-methods
class NautobotRegion(Region):
"""Extend the Region object to store Nautobot specific information.
    Regions are represented in Nautobot as dcim.region objects without a parent.
"""
remote_id: str
"""Store the nautobot uuid in the object to allow update and delete of existing object."""
class NautobotCountry(Country):
"""Extend the Country to manage Country in Nautobot. CREATE/UPDATE/DELETE.
    Countries are also represented in Nautobot as dcim.region objects, but a country must have a parent.
    Subregion information is stored in the description of the object in Nautobot.
"""
remote_id: str
"""Store the nautobot uuid in the object to allow update and delete of existing object."""
@classmethod
def create(cls, diffsync: DiffSync, ids: dict, attrs: dict):
"""Create a country object in Nautobot.
Args:
diffsync: The master data store for other DiffSyncModel instances that we might need to reference
ids: Dictionary of unique-identifiers needed to create the new object
attrs: Dictionary of additional attributes to set on the new object
Returns:
NautobotCountry: DiffSync object newly created
"""
# Retrieve the parent region in internal cache to access its UUID
# because the UUID is required to associate the object to its parent region in Nautobot
region = diffsync.get(diffsync.region, attrs.get("region"))
# Create the new country in Nautobot and attach it to its parent
try:
country = diffsync.nautobot.dcim.regions.create(
slug=ids.get("slug"),
name=attrs.get("name"),
custom_fields=dict(population=attrs.get("population")),
parent=region.remote_id,
)
print(f"Created country : {ids} | {attrs} | {country.id}")
except pynautobot.core.query.RequestError as exc:
print(f"Unable to create country {ids} | {attrs} | {exc}")
return None
# Add the newly created remote_id and create the internal object for this resource.
attrs["remote_id"] = country.id
item = super().create(ids=ids, diffsync=diffsync, attrs=attrs)
return item
def update(self, attrs: dict):
"""Update a country object in Nautobot.
Args:
attrs: Dictionary of attributes to update on the object
Returns:
DiffSyncModel: this instance, if all data was successfully updated.
None: if data updates failed in such a way that child objects of this model should not be modified.
Raises:
ObjectNotUpdated: if an error occurred.
"""
        # Retrieve the pynautobot object from Nautobot since we only have the UUID internally
remote = self.diffsync.nautobot.dcim.regions.get(self.remote_id)
# Convert the internal attrs to Nautobot format
if "population" in attrs:
remote.custom_fields["country_population"] = attrs.get("population")
if "name" in attrs:
remote.name = attrs.get("name")
remote.save()
print(f"Updated Country {self.slug} | {attrs}")
return super().update(attrs)
def delete(self):
"""Delete a country object in Nautobot.
Returns:
NautobotCountry: DiffSync object
"""
# Retrieve the pynautobot object and delete the object in Nautobot
remote = self.diffsync.nautobot.dcim.regions.get(self.remote_id)
remote.delete()
super().delete()
return self
| 2.65625
| 3
|
a01_PySpark/e01_Resources/pyspark-tutorials/indep_vars/setup.py
|
mindis/Big_Data_Analysis
| 0
|
12781747
|
from distutils.core import setup
setup(name='build_indep_vars.py',
version='1.0',
      url='https://github.com/UrbanInstitute/pyspark-tutorials/tree/master/indep_vars',
      author='<NAME>',
      author_email='<EMAIL>',
py_modules=['build_indep_vars'])
#pip install -e git+https://github.com/UrbanInstitute/pyspark-tutorials/tree/master/indep_vars
| 1.398438
| 1
|
2/lab2_2_d.py
|
JelteF/statistics
| 0
|
12781748
|
from scipy.special import comb  # comb now lives in scipy.special; the signature is unchanged
def exp(p, n):
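    """Sum the Binomial(n, p) probability mass function over k = 0..n.

    By the binomial theorem this should equal 1 for every valid p and n.
    """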
total = 0.0
for k in range(n+1):
total += comb(n, k, exact=False) * p**k * (1-p) ** (n-k)
return total
def main():
for p in [0.3, 0.75, 0.8, 1.0, 0.0, 0.5]:
for n in range(1, 20):
print('Checking n=%d, p=%f' % (n, p))
print('Result: %f' % (exp(p, n)))
if __name__ == '__main__':
main()
| 2.984375
| 3
|
src/loadgenerator/locust_tasks/__init__.py
|
CertifiedWebMaster/cloud-ops-sandbox
| 0
|
12781749
|
<filename>src/loadgenerator/locust_tasks/__init__.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import basic_locustfile as basic_locust_tasks
from . import step_locustfile as step_locust_tasks
USER_FACING_LOCUST_USER_CLASSES = {
"basic": [
basic_locust_tasks.PurchasingUser,
basic_locust_tasks.WishlistUser,
basic_locust_tasks.BrowsingUser,
],
"step": [
step_locust_tasks.PurchasingUser,
step_locust_tasks.WishlistUser,
step_locust_tasks.BrowsingUser,
]
}
USER_FACING_LOCUST_LOAD_SHAPE = {
"basic": None, # use default locust shape class
"step": step_locust_tasks.StepLoadShape()
}
def get_user_classes(task_type):
return USER_FACING_LOCUST_USER_CLASSES.get(task_type, [])
def get_load_shape(task_type):
return USER_FACING_LOCUST_LOAD_SHAPE.get(task_type, None)
| 2.109375
| 2
|
common/xrd-ui-tests-python/tests/xroad_trust_view_details_cs_settings/view_management.py
|
ria-ee/XTM
| 3
|
12781750
|
# coding=utf-8
from selenium.webdriver.common.by import By
from view_models import certification_services, sidebar, ss_system_parameters
import re
import time
def test_ca_cs_details_view_cert(case, profile_class=None):
'''
:param case: MainController object
:param profile_class: string The fully qualified name of the Java class
:return:
'''
self = case
def view_cert():
'''Open "Certification services"'''
self.wait_until_visible(self.by_css(sidebar.CERTIFICATION_SERVICES_CSS)).click()
self.wait_jquery()
view_cert_data(self, profile_class=profile_class)
return view_cert
def view_cert_data(self, profile_class=None):
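    '''Open the details of the last added certification service, switch to the CA
    settings tab and verify the auth-only checkbox, the certificate profile class
    and the Save button (UC TRUST_04).'''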
'''Get approved CA row'''
service_row = self.wait_until_visible(type=By.XPATH, element=certification_services.LAST_ADDED_CERT_XPATH)
'''Double click on approved CA row'''
self.double_click(service_row)
'''Click on "Edit button"'''
self.by_id(certification_services.DETAILS_BTN_ID).click()
self.log('UC TRUST_04 1.CS administrator selects to view the settings of a certification service.')
self.wait_until_visible(type=By.XPATH, element=certification_services.CA_SETTINGS_TAB_XPATH).click()
self.wait_jquery()
self.log(
'UC TRUST_04: 2.System displays the following settings. Usage restrictions for the certificates issued by the certification service.')
auth_checkbox = self.wait_until_visible(certification_services.EDIT_CA_AUTH_ONLY_CHECKBOX_XPATH,
By.XPATH).is_enabled()
    self.is_true(auth_checkbox, msg='Authentication checkbox not found')
'''Click on authentication checkbox'''
self.wait_until_visible(certification_services.EDIT_CA_AUTH_ONLY_CHECKBOX_XPATH, By.XPATH).click()
self.log(
'UC TRUST_04: 2.System displays the following settings. The fully qualified name of the Java class that describes the certificate profile for certificates issued by the certification service.')
'''Get profile info'''
profile_info_area = self.wait_until_visible(type=By.XPATH,
element=certification_services.EDIT_CERTIFICATE_PROFILE_INFO_AREA_XPATH)
profile_info = profile_info_area.get_attribute("value")
'''Verify profile info'''
self.is_equal(profile_info, profile_class,
msg='The name of the Java class that describes the certificate profile is wrong')
self.log(
'UC TRUST_04: 2. The following user action options are displayed:edit the settings of the certification service')
'''Verify "Save" button'''
save_button_id = self.wait_until_visible(type=By.ID,
element=certification_services.SAVE_CA_SETTINGS_BTN_ID).is_enabled()
self.is_true(save_button_id, msg='"Save" button not found')
| 2.4375
| 2
|