hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3d297440ee04e65ec5e37779068515dbbf97d948 | 1,407 | py | Python | CellProfiler/tests/modules/test_opening.py | aidotse/Team-rahma.ai | 66857731e1ca2472e0783e37ba472b55a7ac9cd4 | [
"MIT"
] | null | null | null | CellProfiler/tests/modules/test_opening.py | aidotse/Team-rahma.ai | 66857731e1ca2472e0783e37ba472b55a7ac9cd4 | [
"MIT"
] | null | null | null | CellProfiler/tests/modules/test_opening.py | aidotse/Team-rahma.ai | 66857731e1ca2472e0783e37ba472b55a7ac9cd4 | [
"MIT"
] | null | null | null | import numpy
import numpy.testing
import skimage.morphology
import cellprofiler.modules.opening
instance = cellprofiler.modules.opening.Opening()
| 25.581818 | 69 | 0.68941 |
3d29b2ee51f536c799b3a2e3518fab0b83469961 | 26 | py | Python | pug-bot/apitoken.py | stevenktruong/pug-bot | 315c21363eebb51d67d5b5c9fa9326cd8bcb2b54 | [
"MIT"
] | 17 | 2018-06-27T03:49:03.000Z | 2021-04-13T07:32:43.000Z | pug-bot/apitoken.py | stevenktruong/pug-bot | 315c21363eebb51d67d5b5c9fa9326cd8bcb2b54 | [
"MIT"
] | 3 | 2020-03-26T06:49:10.000Z | 2020-04-23T07:20:41.000Z | pug-bot/apitoken.py | stevenktruong/pug-bot | 315c21363eebb51d67d5b5c9fa9326cd8bcb2b54 | [
"MIT"
] | 14 | 2018-06-27T03:49:06.000Z | 2021-10-07T23:28:44.000Z | TOKEN = "YOUR_TOKEN_HERE"
| 13 | 25 | 0.769231 |
3d2a32296fc0285fa514d89f51675b89a2c96e0a | 52,972 | py | Python | proxy/web/app_web.py | 5GCity/5GCity-infrastructure-abstraction | a743666cdd760bbbf511825600f313b2b88477d8 | [
"Apache-2.0"
] | null | null | null | proxy/web/app_web.py | 5GCity/5GCity-infrastructure-abstraction | a743666cdd760bbbf511825600f313b2b88477d8 | [
"Apache-2.0"
] | null | null | null | proxy/web/app_web.py | 5GCity/5GCity-infrastructure-abstraction | a743666cdd760bbbf511825600f313b2b88477d8 | [
"Apache-2.0"
] | 1 | 2021-11-27T11:16:04.000Z | 2021-11-27T11:16:04.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017-2022 Univertity of Bristol - High Performance Networks Group
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, ForeignKey, Integer, String
from datetime import datetime
from werkzeug.middleware.proxy_fix import ProxyFix
from flask import Flask, Response, jsonify, render_template, request
import logging
import os
import sys
import json
import uuid
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.adapters.ruckus import RuckusWiFi
from lib.adapters.i2cat import I2catController
from conf.config import CONTROLLERS, RUCKUS_ID_MAPPING, RUCKUS_INIT_TOPOLOGY
# Logger configuration
log_filename = "logs/output.log"
os.makedirs(os.path.dirname(log_filename), exist_ok=True)
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(funcName)s %(message)s",
datefmt='%Y-%m-%d %H:%M:%S', filename=log_filename, level=logging.INFO)
logging.getLogger('requests').setLevel(logging.ERROR)
logger = logging.getLogger()
log_base = "{}:{}:{}" # INTERFACE,endpoint,REQ/RESP,content
# Flask app
app = Flask(__name__)
app.config.from_object(__name__)
# Define database
Base = declarative_base()
engine = create_engine('sqlite:///file.db', echo=False)
# helpers to translate dabatase type class objects into dictionaries
def _dictService(service):
vlan = session.query(Vlan).filter(Vlan.service_id == service.id).one()
if service.wirelessConfigSSID:
wirelessConfig = {
"ssid": service.wirelessConfigSSID,
"encryption": service.wirelessConfigEncryption,
"password": service.wirelessConfigPassword
}
else:
wirelessConfig = None
if service.lteConfigPLMNId:
lteConfig = {
"plmnId": service.lteConfigPLMNId,
"cellReserved": service.lteConfigCellReserved,
"mmeAddress": service.lteConfigMMEAddress,
"mmePort": service.lteConfigMMEPort
}
else:
lteConfig = None
response_data = {
"id": service.id,
"serviceType": "SWAM_SERVICE",
"selectedRoot": 0,
"vlanId": {
"id": vlan.id,
"vlanId": vlan.tag
},
"selectedVifs": [{"id": x} for x in eval(service.selectedVifs)],
"wirelessConfig": wirelessConfig,
"lteConfig": lteConfig
}
return response_data
def _dictChunk(chunk):
services = session.query(Service).filter(
Service.id.in_(eval(chunk.serviceList))).all()
phys = session.query(Phy).filter(Phy.id.in_(eval(chunk.phyList))).all()
response_data = {
"id": chunk.id,
"name": chunk.name,
"assignedQuota": 0,
"serviceList": [_dictService(service) for service in services],
"physicalInterfaceList": [_dictPhy(phy) for phy in phys],
"linkList": []
}
return response_data
def _dictPhy(phy):
vifs = session.query(Vif).filter(
Vif.id.in_(eval(phy.virtualInterfaceList))).all()
if phy.config:
config = eval(phy.config)
else:
config = phy.config
response_data = {
"id": phy.id,
"name": phy.name,
"type": phy.type,
"virtualInterfaceList": [_dictVif(vif) for vif in vifs],
"config": config
}
return response_data
def _dictVif(vif):
response_data = {
"id": vif.id,
"name": vif.name,
"toRootVlan": 0,
"toAccessVlan": 0,
"toAccessPort": 0,
"toRootPort": 0,
"openFlowPortList": []
}
return response_data
# Create database session
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Initialize controller list
controllers = []
# controllers = {}
# formatter for the returned errors
API_RESPONSE = {
"OK": {
"content": '',
"code": 200
},
"CREATED": {
"content": '',
"code": 201
},
"CONTROLLER": {
"content": 'Controller Error',
"code": 503
},
"NOTFOUND": {
"content": 'Not Found',
"code": 404
},
"DB_INTEGRITY": {
"content": 'DB Integrity',
"code": 401
},
"VERIFICATION_ERROR": {
"content": 'Verification Error',
"code": 401
}
}
NORTHBOUND = "NORTHBOUND"
SOUTHBOUND = "SOUTHBOUND"
INTERNAL = "INTERNAL"
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
REQRESP = "REQ/RESP"
ROLLBACK = "ROLLBACK"
# Load controllers info from config.py and register topologies
# Look for first phy_id free in database
db_id_phy_id_list = session.query(Phy.id, Phy.phy_id_controller).all()
# db_id_list = [r for (r, a) in db_id_phy_id_list]
# db_id_list.sort()
# if len(db_id_list) == 0:
# new_phy_id = 1
# else:
# new_phy_id = db_id_list[len(db_id_list)-1]+1
# # Look for first box_id free in database
db_id_box_id_list = session.query(Box.id, Box.box_id_controller).all()
# db_id_list = [r for (r, a) in db_id_box_id_list]
# db_id_list.sort()
# if len(db_id_list) == 0:
# new_box_id = 1
# else:
# new_box_id = db_id_list[len(db_id_list)-1]+1
new_box_id = str(uuid.uuid4())
# *******************************
# Initialize proxy runtime status
# *******************************
#
# INITIAL TOPOLOGY RECOVERY (Boxes, Phys):
# =========================
# -RUCKUS type controller initial topology recovered from config.py
# -I2CAT type controller initial topology recovered from live
# SOUTHBOUND REQUEST to controller
#
# CURRENT STATE (Chunks, Services, VirtualInterfaces):
# ==============
# -RUCKUS type controller current state recovered from database and
# controllers runtime status
# -I2CAT type controller current state kept on controller
#
for item in CONTROLLERS:
if item['type'] == 'ruckus':
# Recover the list of chunks from the database
db_chunks = session.query(Chunk).all()
chunks = []
for db_chunk in db_chunks:
if eval(db_chunk.controllers_chunk)[len(controllers)]:
chunk = _dictChunk(db_chunk)
phys_to_pop = []
services_to_pop = []
for service in chunk["serviceList"]:
db_service = session.query(Service).filter(
Service.id == service["id"]).one()
if len(controllers) in \
eval(db_service.controllers_services).keys():
service["id"] = eval(db_service.controllers_services)[
len(controllers)]
else:
services_to_pop.append(service)
[chunk["serviceList"].remove(service)
for service in services_to_pop]
for phy in chunk["physicalInterfaceList"]:
try:
db_phy = session.query(Phy).filter(
Phy.id == phy["id"],
Phy.controller_id == len(controllers)).one()
phy = db_phy.phy_id_controller
except NoResultFound:
phys_to_pop.append(phy)
[chunk["physicalInterfaceList"].remove(
phy) for phy in phys_to_pop]
chunk["id"] = eval(db_chunk.controllers_chunk)[
len(controllers)]
chunks.append(chunk)
phy_id_mapping = RUCKUS_ID_MAPPING
controller = RuckusWiFi(
controller_id=item['id'],
ip=item['ip'],
port=item['port'],
url=item['url'],
topology=item['topology'],
chunks=chunks,
phy_id_mapping=phy_id_mapping,
username=item['username'],
password=item['password']
)
controllers.append(controller)
# controllers[controller.controller_id] = controller
elif item['type'] == 'i2cat':
controller = I2catController(
controller_id=item['id'],
ip=item['ip'],
port=item['port'],
url=item['url']
)
controllers.append(controller)
# controllers[controller.controller_id] = controller
for box in controller.getChunketeTopology()[0]["boxes"]:
if box['id'] not in [r for (a, r) in db_id_box_id_list]:
try:
# initial_topology["boxes"].append(box)
new_box = Box(
name=box["name"],
location=json.dumps(box["location"]),
controller_id=item['id'],
box_id_controller=box['id'],
phys=json.dumps(box["phys"]),
box_json=json.dumps(box))
session.add(new_box)
# count_phys = 0
for phy in box["phys"]:
if phy['id'] not in [r for (a, r) in db_id_phy_id_list]:
new_phy = Phy(
name=phy["name"], type=phy["type"],
controller_id=item['id'],
phy_id_controller=phy['id'],
config=str(phy["config"]),
virtualInterfaceList=json.dumps([]),
phy_json=json.dumps(phy))
session.add(new_phy)
# count_phys += 1
session.commit()
# new_phy_id += count_phys
# new_box_id += 1
except IntegrityError as ex:
session.rollback()
session.close()
# Topology API implementation
# Chunk API implementation
# Service API implementation
app.wsgi_app = ProxyFix(app.wsgi_app)
if __name__ == '__main__':
"""main function
Default host: 0.0.0.0
Default port: 8080
Default debug: False
"""
try:
app.run(
host='0.0.0.0',
port=8008,
debug=False)
except Exception:
logging.critical(
'server: CRASHED: Got exception on main handler')
raise
| 36.633472 | 79 | 0.564883 |
3d2a3406b2c7fae09635aa25e074ee185903e975 | 6,179 | py | Python | openstates/importers/tests/test_base_importer.py | washabstract/openstates-core | ea69564f1f56fe4a80181b0aa715731bbc47e3f5 | [
"MIT"
] | null | null | null | openstates/importers/tests/test_base_importer.py | washabstract/openstates-core | ea69564f1f56fe4a80181b0aa715731bbc47e3f5 | [
"MIT"
] | null | null | null | openstates/importers/tests/test_base_importer.py | washabstract/openstates-core | ea69564f1f56fe4a80181b0aa715731bbc47e3f5 | [
"MIT"
] | null | null | null | import os
import json
import shutil
import tempfile
import datetime
import pytest
from unittest import mock
from openstates.data.models import (
Bill,
Jurisdiction,
Division,
LegislativeSession,
Organization,
Person,
)
from openstates.scrape import Bill as ScrapeBill
from openstates.importers.base import omnihash, BaseImporter
from openstates.importers import BillImporter
from openstates.exceptions import UnresolvedIdError, DataImportError
# doing these next few tests just on a Bill because it is the same code that handles it
# but for completeness maybe it is better to do these on each type?
| 31.365482 | 93 | 0.674057 |
3d2ab40e18ce8de7c837398746d70bdad833cca8 | 3,777 | py | Python | cloudml-template/template/trainer/metadata.py | VanessaDo/cloudml-samples | ae6cd718e583944beef9d8a90db12091ac399432 | [
"Apache-2.0"
] | 3 | 2019-03-29T08:06:35.000Z | 2019-04-12T13:19:18.000Z | cloudml-template/template/trainer/metadata.py | VanessaDo/cloudml-samples | ae6cd718e583944beef9d8a90db12091ac399432 | [
"Apache-2.0"
] | 23 | 2020-09-25T22:44:06.000Z | 2022-02-10T02:58:47.000Z | cloudml-template/template/trainer/metadata.py | VanessaDo/cloudml-samples | ae6cd718e583944beef9d8a90db12091ac399432 | [
"Apache-2.0"
] | 2 | 2019-10-12T19:21:06.000Z | 2019-10-13T17:38:30.000Z | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ************************************************************************************
# YOU NEED TO MODIFY THE FOLLOWING METADATA TO ADAPT THE TRAINER TEMPLATE TO YOUR DATA
# ************************************************************************************
# Task type can be either 'classification', 'regression', or 'custom'
# This is based on the target feature in the dataset, and whether you use a canned or a custom estimator
TASK_TYPE = '' # classification | regression | custom
# A List of all the columns (header) present in the input data file(s) in order to parse it.
# Note that, not all the columns present here will be input features to your model.
HEADER = []
# List of the default values of all the columns present in the input data.
# This helps decoding the data types of the columns.
HEADER_DEFAULTS = []
# List of the feature names of type int or float.
INPUT_NUMERIC_FEATURE_NAMES = []
# Numeric features constructed, if any, in process_features function in input.py module,
# as part of reading data.
CONSTRUCTED_NUMERIC_FEATURE_NAMES = []
# Dictionary of feature names with int values, but to be treated as categorical features.
# In the dictionary, the key is the feature name, and the value is the num_buckets (count of distinct values).
INPUT_CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY = {}
# Categorical features with identity constructed, if any, in process_features function in input.py module,
# as part of reading data. Usually include constructed boolean flags.
CONSTRUCTED_CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY = {}
# Dictionary of categorical features with few nominal values (to be encoded as one-hot indicators).
# In the dictionary, the key is the feature name, and the value is the list of feature vocabulary.
INPUT_CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY = {}
# Dictionary of categorical features with many values (sparse features).
# In the dictionary, the key is the feature name, and the value is the bucket size.
INPUT_CATEGORICAL_FEATURE_NAMES_WITH_HASH_BUCKET = {}
# List of all the categorical feature names.
# This is programmatically created based on the previous inputs.
INPUT_CATEGORICAL_FEATURE_NAMES = list(INPUT_CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY.keys()) \
+ list(INPUT_CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.keys()) \
+ list(INPUT_CATEGORICAL_FEATURE_NAMES_WITH_HASH_BUCKET.keys())
# List of all the input feature names to be used in the model.
# This is programmatically created based on the previous inputs.
INPUT_FEATURE_NAMES = INPUT_NUMERIC_FEATURE_NAMES + INPUT_CATEGORICAL_FEATURE_NAMES
# Column includes the relative weight of each record.
WEIGHT_COLUMN_NAME = None
# Target feature name (response or class variable).
TARGET_NAME = ''
# List of the class values (labels) in a classification dataset.
TARGET_LABELS = []
# List of the columns expected during serving (which is probably different to the header of the training data).
SERVING_COLUMNS = []
# List of the default values of all the columns of the serving data.
# This helps decoding the data types of the columns.
SERVING_DEFAULTS = []
| 46.62963 | 111 | 0.734975 |
3d2b2116bab967ee3e89a4236cdda8c96cc22676 | 14,678 | py | Python | tests/models/test_models_base.py | harmsm/epistasis | 741b25b3e28015aeeba8d4efc94af1e1d811cd63 | [
"Unlicense"
] | null | null | null | tests/models/test_models_base.py | harmsm/epistasis | 741b25b3e28015aeeba8d4efc94af1e1d811cd63 | [
"Unlicense"
] | null | null | null | tests/models/test_models_base.py | harmsm/epistasis | 741b25b3e28015aeeba8d4efc94af1e1d811cd63 | [
"Unlicense"
] | 2 | 2020-04-02T00:58:24.000Z | 2021-11-16T13:30:30.000Z |
import pytest
import gpmap
from epistasis import models
import numpy as np
import pandas as pd
import os
### Tests for AbstractModel:
# AbstractModel cannot be instantiated on its own, as it is designed to be a
# mixin with sklearn classes. Many methods have to be defined in subclass
# (.fit, .predict, etc.) These will not be tested here, but instead in the
# subclass tests. For methods defined here that are never redefined in subclass
# (._X, .add_gpm, etc.) we test using the simplest mixed/subclass
# (EpistasisLinearRegression).
def test_abstractmodel_predict_to_df(test_data):
"""
Test basic functionality. Real test of values will be done on .predict
for subclasses.
"""
m = models.linear.EpistasisLinearRegression()
d = test_data[0]
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
m.add_gpm(gpm)
# This should fail -- no fit run
with pytest.raises(Exception):
df = m.predict_to_df()
m.fit()
# This should work
df = m.predict_to_df()
assert type(df) is type(pd.DataFrame())
assert len(df) == len(d["genotype"])
# Create and fit a new model.
m = models.linear.EpistasisLinearRegression()
gpm = gpmap.GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"])
# No gpm added -- should fail
with pytest.raises(RuntimeError):
m.predict_to_df()
m.add_gpm(gpm)
m.fit()
df = m.predict_to_df(genotypes=d["genotype"][0])
assert len(df) == 1
bad_stuff = [1,{},[1,2],"STUPID",["STUPID","IS","REAL"]]
for b in bad_stuff:
with pytest.raises(ValueError):
print(f"Trying bad genotypes {b}")
m.predict_to_df(genotypes=b)
df = m.predict_to_df(genotypes=d["genotype"][:3])
assert len(df) == 3
| 30.579167 | 79 | 0.60417 |
3d2cc12e10450aab89581a6101a64a041375bd58 | 871 | py | Python | examples/write_spyview_meta.py | sourav-majumder/qtlab | 96b2a127b1df7b45622c90229bd5ef8a4083614e | [
"MIT"
] | null | null | null | examples/write_spyview_meta.py | sourav-majumder/qtlab | 96b2a127b1df7b45622c90229bd5ef8a4083614e | [
"MIT"
] | null | null | null | examples/write_spyview_meta.py | sourav-majumder/qtlab | 96b2a127b1df7b45622c90229bd5ef8a4083614e | [
"MIT"
] | null | null | null | # File name: spyview.py
#
# This example should be run with "execfile('spyview.py')"
from numpy import pi, linspace, sinc, sqrt
from lib.file_support.spyview import SpyView
x_vec = linspace(-2 * pi, 2 * pi, 100)
y_vec = linspace(-2 * pi, 2 * pi, 100)
qt.mstart()
data = qt.Data(name='testmeasurement')
# to make the spyview meta.txt file dimension info is required:
data.add_coordinate('X',
size=len(x_vec),
start=x_vec[0],
end=x_vec[-1])
data.add_coordinate('Y',
size=len(y_vec),
start=y_vec[0],
end=y_vec[-1])
data.add_value('Z')
data.create_file()
for y in y_vec:
for x in x_vec:
result = sinc(sqrt(x**2 + y**2))
data.add_data_point(x, y, result)
qt.msleep(0.001)
data.new_block()
data.close_file()
qt.mend()
# create the spyview meta.txt file:
SpyView(data).write_meta_file()
| 20.738095 | 63 | 0.640643 |
3d2ce2c966a31e97ee5b7a66b2aeabb6f1778574 | 35 | py | Python | arcpyext/mapping/_cim/__init__.py | PeterReyne/arcpyext | 9307115da8f0b6a30e2ca741fb6a7d09e54fd0f3 | [
"BSD-3-Clause"
] | 11 | 2015-05-01T04:08:30.000Z | 2019-09-21T05:00:58.000Z | arcpyext/mapping/_cim/__init__.py | PeterReyne/arcpyext | 9307115da8f0b6a30e2ca741fb6a7d09e54fd0f3 | [
"BSD-3-Clause"
] | 14 | 2015-06-23T02:46:44.000Z | 2019-10-11T00:46:11.000Z | arcpyext/mapping/_cim/__init__.py | PeterReyne/arcpyext | 9307115da8f0b6a30e2ca741fb6a7d09e54fd0f3 | [
"BSD-3-Clause"
] | 9 | 2015-02-27T05:25:42.000Z | 2020-01-19T05:43:14.000Z | from .pro_project import ProProject | 35 | 35 | 0.885714 |
3d2d9019566fcc96f253a9e2a983330775a08ac2 | 3,474 | py | Python | o3/operators/filter_logs_to_percentage_operator.py | carlba/o3 | 999ff1b06ef9c7a5bf220a3e840c4a42dc81956a | [
"Unlicense"
] | null | null | null | o3/operators/filter_logs_to_percentage_operator.py | carlba/o3 | 999ff1b06ef9c7a5bf220a3e840c4a42dc81956a | [
"Unlicense"
] | 1 | 2019-01-27T11:04:56.000Z | 2019-01-27T11:04:56.000Z | o3/operators/filter_logs_to_percentage_operator.py | carlba/o3 | 999ff1b06ef9c7a5bf220a3e840c4a42dc81956a | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""Custom operator for filtering out a percentage of input log files."""
import os
import glob
from airflow.exceptions import AirflowException, AirflowSkipException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from o3.utils import filter_to_percentage
| 39.033708 | 80 | 0.61399 |
3d2f723ddb0882b15b4375b0ad2b7ffa05e4cedb | 17,541 | py | Python | alibExp.py | wicknec/WalArt | b23488b4e421699155976d5e726d1c7a906c3243 | [
"MIT"
] | 2 | 2016-02-02T11:33:27.000Z | 2020-07-28T13:28:25.000Z | alibExp.py | wicknec/WalArt | b23488b4e421699155976d5e726d1c7a906c3243 | [
"MIT"
] | null | null | null | alibExp.py | wicknec/WalArt | b23488b4e421699155976d5e726d1c7a906c3243 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
"""
alibExp
=======================
Qt4 interface for alib explorer
To browse alib in a more user-friendly way than simple text
Item.data(1,-1) stores its data, i.e. a str or another alib
"""
# NOTE: the actual command documentation is collected from docstrings of the
# commands and is appended to __doc__ after the class has been defined.
"""
Revisions
=================
151125 completed reading functionality
151209 wordless gui, remove node
151210 edit text, add icon to tree, btAdd function
151214 added btRoot, explore root
151219 added *GetCurrent*, modified *RemoveDataSync* to suited with alib.Pop
151229 added *GetSelectedText*
160112 change data display to waText.Brief
160113 change non-editing to read only to allow scroll
160309 fixed save failure by explore root after lock.
171204 updated alibExp.GetSelectedText to return the path of selected node
fixed bug in reeWidget.ItemToPath
180102 migrate to be compatible with PyQt5
"""
try:
from PyQt4 import QtCore
from PyQt4.QtCore import QTimer
from PyQt4.QtGui import QApplication, QWidget
except ImportError or ModuleNotFoundError:
print('PyQt4 module not found, try using PyQt5')
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtCore import QTimer
from WalArt.gui.QtGui4or5 import QtGuiFinder
QtGui=QtGuiFinder()
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'alibExp.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
from WalArt import waFile,waText
iconPath=waFile.GetFolderName(waFile.Find('add.png'))
from WalArt import alib
def New(d):
'''Make a new alib explorer in the dialog, and return the object
'''
a=alibExp()
a.setupUi(d)
return a
import sys
import time
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = alibExp()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
time.sleep(5)
| 35.580122 | 104 | 0.595462 |
3d2f8979ac8231da6f04ccba44cc761dc5cb64c8 | 2,445 | py | Python | test/test_batch.py | ASemakov/ob-pipelines | ea475cd2c34ae2eccbf59563fe7caea06266c450 | [
"Apache-2.0"
] | 11 | 2017-01-22T22:08:45.000Z | 2020-03-10T20:17:14.000Z | test/test_batch.py | BeKitzur/ob-pipelines | 8ee4ebd5803d72d0babce25b13399c9cdd0f686e | [
"Apache-2.0"
] | null | null | null | test/test_batch.py | BeKitzur/ob-pipelines | 8ee4ebd5803d72d0babce25b13399c9cdd0f686e | [
"Apache-2.0"
] | 6 | 2017-01-23T01:24:33.000Z | 2018-07-18T13:30:06.000Z | """
Integration test for the Luigi wrapper of AWS Batch
Requires:
- boto3 package
- Amazon AWS credentials discoverable by boto3 (e.g., by using ``aws configure``
from awscli_)
- An enabled AWS Batch job queue configured to run on a compute environment.
Written and maintained by Jake Feala (@jfeala) for Outlier Bio (@outlierbio)
"""
import unittest
try:
from ob_pipelines.batch import BatchTask, BatchJobException, client, _get_job_status
except ImportError:
raise unittest.SkipTest('boto3 is not installed. BatchTasks require boto3')
TEST_JOB_DEF = {
'jobDefinitionName': 'hello-world',
'type': 'container',
'parameters': {
'message': 'hll wrld'
},
'containerProperties': {
'image': 'centos',
'command': ['/bin/echo', 'Ref::message'],
'vcpus': 2,
'memory': 4,
}
}
| 25.46875 | 91 | 0.67771 |
3d30c11f1ede17efd698bce52b1da5e9569d559a | 456 | py | Python | reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/conftest.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 5 | 2019-01-19T23:53:35.000Z | 2022-01-29T14:04:31.000Z | reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/conftest.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 4 | 2020-09-26T01:30:01.000Z | 2022-02-10T02:20:35.000Z | reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/tests/conftest.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 7 | 2020-03-04T22:23:51.000Z | 2021-07-13T14:05:46.000Z | import pytest
from markov.tests import test_constant
| 19.826087 | 50 | 0.809211 |
3d319597951dce7996b3f7f4aeae76d89320c801 | 2,716 | py | Python | ROS/my_initials.py | Vishwajeetiitb/Autumn-of-Automation | bd8c78662734f867b6aa6fd9179a12913387a01c | [
"MIT"
] | null | null | null | ROS/my_initials.py | Vishwajeetiitb/Autumn-of-Automation | bd8c78662734f867b6aa6fd9179a12913387a01c | [
"MIT"
] | null | null | null | ROS/my_initials.py | Vishwajeetiitb/Autumn-of-Automation | bd8c78662734f867b6aa6fd9179a12913387a01c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
import math
import os
from turtlesim.msg import Pose
import time
os.system("rosrun")
if __name__ == '__main__':
try:
#Testing our function
move()
except rospy.ROSInterruptException: pass | 28 | 82 | 0.645066 |
3d321cb4dea8943fb087339fe2547eeaba4b5805 | 2,144 | py | Python | Assignment-04/Question-03/mpi_ping_pong.py | gnu-user/mcsc-6030-assignments | 42825cdbc4532d9da6ebdba549b65fb1e36456a0 | [
"MIT"
] | null | null | null | Assignment-04/Question-03/mpi_ping_pong.py | gnu-user/mcsc-6030-assignments | 42825cdbc4532d9da6ebdba549b65fb1e36456a0 | [
"MIT"
] | null | null | null | Assignment-04/Question-03/mpi_ping_pong.py | gnu-user/mcsc-6030-assignments | 42825cdbc4532d9da6ebdba549b65fb1e36456a0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
###############################################################################
#
# Assignment 4, Question 3 solution for MPI ping-pong timings to calcualate
# alpha and beta, implemented in Python using MPI.
#
# Copyright (C) 2015, Jonathan Gillett (100437638)
# All rights reserved.
#
###############################################################################
import numpy as np
import sys
from mpi4py import MPI
from time import sleep
from random import random
# Define process 0 as PING, process 1 as PONG
PING = 0
PONG = 1
# Number of trials for getting the average time
TRIALS = 100
if __name__ == '__main__':
if len(sys.argv) < 2:
print "ERROR: You must provide the number of bytes to send!."
sys.exit()
N = int(sys.argv[1]) # The number of bytes to generate
comm = MPI.COMM_WORLD
proc_id = comm.Get_rank()
n_proc = comm.Get_size()
status = MPI.Status()
# Error checking only 2 processes can be used
if n_proc > 2:
if proc_id == PING:
print "ERROR: Only two proceses (ping and pong)."
MPI.Finalize()
sys.exit()
if N < 1:
if proc_id == PING:
print "ERROR: You must specify the data size in bytes."
MPI.Finalize()
sys.exit()
# The data to send back and forth, in bytes
A = np.empty(N, dtype=np.int8)
comm.Barrier()
# Send the data back and forth 100 times to get the average time
timings = []
for i in range(0, 100):
if proc_id == PING:
local_time = -MPI.Wtime()
comm.Send(A, PONG, tag=PING)
comm.Recv(A, source=MPI.ANY_SOURCE, tag=PONG, status=status)
timings.append(local_time + MPI.Wtime())
# Simulate random sleeps to account for different scheduling
sleep(random() / 100)
else:
comm.Recv(A, source=MPI.ANY_SOURCE, tag=PING, status=status)
comm.Send(A, PING, tag=PONG)
if proc_id == PING:
print "N bytes sent: %d, trials: %d, average time: %0.8f seconds" \
% (N, TRIALS, sum(timings) / float(len(timings)) / 2.0)
| 32 | 79 | 0.567631 |
3d33211ca1584c7787f1e93ba17778c1a7d518eb | 2,286 | py | Python | app/monitoring/logging_config.py | robmarkcole/python-fastapi-aws-lambda-container | 56a676f4c0bccce10fd2533daba3ace0201a1bb3 | [
"Apache-2.0"
] | 15 | 2020-12-29T23:14:33.000Z | 2022-03-24T03:56:34.000Z | app/monitoring/logging_config.py | robmarkcole/python-fastapi-aws-lambda-container | 56a676f4c0bccce10fd2533daba3ace0201a1bb3 | [
"Apache-2.0"
] | 3 | 2021-09-11T00:41:55.000Z | 2022-03-24T05:51:17.000Z | app/monitoring/logging_config.py | robmarkcole/python-fastapi-aws-lambda-container | 56a676f4c0bccce10fd2533daba3ace0201a1bb3 | [
"Apache-2.0"
] | 5 | 2021-09-10T23:53:41.000Z | 2022-03-25T11:31:24.000Z | import os
import uuid
import logging
import json
from json import JSONEncoder
from pythonjsonlogger import jsonlogger
from datetime import datetime
from logging.config import dictConfig
# Custom JSON encoder which enforce standard ISO 8601 format, UUID format
# Configure Logging
def configure_logging(level='DEBUG', service=None, instance=None):
dictConfig({
'version': 1,
'formatters': {'default': {
'()': JsonLogFormatter,
'format': '%(timestamp)s %(level)s %(service)s %(instance)s %(type)s %(message)s',
'json_encoder': ModelJsonEncoder
}},
'filters': {'default': {
'()': LogFilter,
'service': service,
'instance': instance
}},
'handlers': {'default_handler': {
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
'filters': ['default'],
'formatter': 'default'
}},
'root': {
'level': level,
'handlers': ['default_handler']
}
}) | 30.078947 | 94 | 0.587927 |
3d332e20398ab4a054c4523a1136617bf5854f9a | 1,459 | py | Python | FPAIT/lib/logger/utils.py | D-X-Y/MSPLD-2018 | 71a6a75830ac84c7a861e63367ad3ace991fae77 | [
"MIT"
] | 63 | 2018-07-12T10:36:25.000Z | 2019-04-26T11:30:09.000Z | FPAIT/lib/logger/utils.py | D-X-Y/MSPLD-2018 | 71a6a75830ac84c7a861e63367ad3ace991fae77 | [
"MIT"
] | null | null | null | FPAIT/lib/logger/utils.py | D-X-Y/MSPLD-2018 | 71a6a75830ac84c7a861e63367ad3ace991fae77 | [
"MIT"
] | 8 | 2018-07-14T02:47:12.000Z | 2019-06-03T07:39:13.000Z | import time, sys
import numpy as np
| 28.057692 | 112 | 0.666895 |
3d34dd340fc3d7607de14667552ba62b48a6ce54 | 1,888 | py | Python | hoomd/hpmc/test-py/test_ghost_layer.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | null | null | null | hoomd/hpmc/test-py/test_ghost_layer.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | null | null | null | hoomd/hpmc/test-py/test_ghost_layer.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
from __future__ import division
from hoomd import *
from hoomd import hpmc
import math
import unittest
context.initialize()
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| 33.714286 | 121 | 0.613877 |
3d34e6acbf5b6084146e881a817272a730156e45 | 525 | py | Python | performanceplatform/collector/ga/plugins/load_plugin.py | alphagov/performanceplatform-collector | de68ab4aa500c31e436e050fa1268fa928c522a5 | [
"MIT"
] | 3 | 2015-05-01T14:57:28.000Z | 2016-04-08T12:53:59.000Z | performanceplatform/collector/ga/plugins/load_plugin.py | alphagov/performanceplatform-collector | de68ab4aa500c31e436e050fa1268fa928c522a5 | [
"MIT"
] | 15 | 2015-02-11T11:43:02.000Z | 2021-03-24T10:54:35.000Z | performanceplatform/collector/ga/plugins/load_plugin.py | alphagov/performanceplatform-collector | de68ab4aa500c31e436e050fa1268fa928c522a5 | [
"MIT"
] | 7 | 2015-05-04T16:56:02.000Z | 2021-04-10T19:42:35.000Z | """
load_plugin.py
--------------
Responsible for taking plugin strings and returning plugin callables.
"""
# For the linter
import __builtin__
import performanceplatform.collector.ga.plugins
| 21 | 79 | 0.744762 |
3d36068bd29dc63f314be2d8a4d427fb6770b25d | 26,243 | py | Python | src/Python/Visualization/FrogReconstruction.py | ajpmaclean/vtk-examples | 1a55fc8c6af67a3c07791807c7d1ec0ab97607a2 | [
"Apache-2.0"
] | 81 | 2020-08-10T01:44:30.000Z | 2022-03-23T06:46:36.000Z | src/Python/Visualization/FrogReconstruction.py | ajpmaclean/vtk-examples | 1a55fc8c6af67a3c07791807c7d1ec0ab97607a2 | [
"Apache-2.0"
] | 2 | 2020-09-12T17:33:52.000Z | 2021-04-15T17:33:09.000Z | src/Python/Visualization/FrogReconstruction.py | ajpmaclean/vtk-examples | 1a55fc8c6af67a3c07791807c7d1ec0ab97607a2 | [
"Apache-2.0"
] | 27 | 2020-08-17T07:09:30.000Z | 2022-02-15T03:44:58.000Z | #!/usr/bin/env python
import collections
from pathlib import Path
# noinspection PyUnresolvedReferences
import vtkmodules.vtkInteractionStyle
# noinspection PyUnresolvedReferences
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.vtkCommonColor import vtkNamedColors
from vtkmodules.vtkCommonCore import vtkLookupTable
from vtkmodules.vtkCommonMath import vtkMatrix4x4
from vtkmodules.vtkCommonTransforms import vtkTransform
from vtkmodules.vtkFiltersCore import (
vtkContourFilter,
vtkDecimatePro,
vtkExecutionTimer,
vtkFlyingEdges3D,
vtkMarchingCubes,
vtkPolyDataNormals,
vtkStripper,
vtkWindowedSincPolyDataFilter
)
from vtkmodules.vtkFiltersGeneral import vtkTransformPolyDataFilter
from vtkmodules.vtkIOImage import vtkMetaImageReader
from vtkmodules.vtkImagingCore import (
vtkImageShrink3D,
vtkImageThreshold
)
from vtkmodules.vtkImagingGeneral import vtkImageGaussianSmooth
from vtkmodules.vtkImagingMorphological import vtkImageIslandRemoval2D
from vtkmodules.vtkInteractionWidgets import vtkOrientationMarkerWidget
from vtkmodules.vtkRenderingAnnotation import vtkAxesActor
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer
)
def get(self, order):
    """
    Returns the vtkTransform corresponding to the slice order.
    :param order: The slice order
    :return: The vtkTransform to use
    """
    # Dispatch table: slice-order code -> name of the method producing it.
    # Using method names (not bound methods) keeps attribute lookup lazy,
    # exactly like the original if/elif ladder.
    method_names = {
        'si': 's_i', 'is': 'i_s',
        'ap': 'a_p', 'pa': 'p_a',
        'lr': 'l_r', 'rl': 'r_l',
        'hf': 'h_f',
        'hfsi': 'hf_si', 'hfis': 'hf_is',
        'hfap': 'hf_ap', 'hfpa': 'hf_pa',
        'hflr': 'hf_lr', 'hfrl': 'hf_rl',
    }
    if order not in method_names:
        raise Exception('No such transform "{:s}" exists.'.format(order))
    return getattr(self, method_names[order])()
def default_parameters():
    """Build the baseline parameter dict shared by every tissue definition.

    Fix: the original assigned START_SLICE/END_SLICE twice — first as the
    strings '0'/'255', then as the ints 1/138.  Only the final numeric
    assignments ever took effect (last writer wins), so the dead string
    assignments are removed.  Every resulting key/value pair and the dict's
    insertion order are unchanged.

    :return: dict of default reconstruction parameters
    """
    p = dict()
    p['NAME'] = ''
    p['TISSUE'] = '1'
    p['START_SLICE'] = 1
    p['END_SLICE'] = 138
    p['STUDY'] = 'frogtissue'
    p['VALUE'] = 127.5
    p['ROWS'] = 470
    p['COLUMNS'] = 500
    p['HEADER_SIZE'] = 0
    p['PIXEL_SIZE'] = 1
    p['SPACING'] = 1.5
    p['REDUCTION'] = 1
    p['FEATURE_ANGLE'] = 60
    p['DECIMATE_ANGLE'] = 60
    p['SMOOTH_ANGLE'] = 60
    p['SMOOTH_ITERATIONS'] = 10
    p['SMOOTH_FACTOR'] = 0.1
    p['DECIMATE_ITERATIONS'] = 1
    p['DECIMATE_REDUCTION'] = 1
    p['DECIMATE_ERROR'] = 0.0002
    p['DECIMATE_ERROR_INCREMENT'] = 0.0002
    p['ISLAND_AREA'] = 4
    p['ISLAND_REPLACE'] = -1
    p['GAUSSIAN_STANDARD_DEVIATION'] = [2, 2, 2]
    p['GAUSSIAN_RADIUS_FACTORS'] = [2, 2, 2]
    # VOI sees the final (numeric) END_SLICE, exactly as before.
    p['VOI'] = [0, p['COLUMNS'] - 1, 0, p['ROWS'] - 1, 0, p['END_SLICE']]
    p['SAMPLE_RATE'] = [1, 1, 1]
    p['OPACITY'] = 1.0
    return p
def blood():
    """Parameter set for the blood tissue (tissue id 1)."""
    p = frog()
    p.update(NAME='blood', TISSUE=1, START_SLICE=14, END_SLICE=131, VALUE=4)
    p['VOI'] = [33, 406, 62, 425, p['START_SLICE'], p['END_SLICE']]
    return p
def brain():
    """Parameter set for the brain tissue (tissue id 2)."""
    p = frog()
    p.update(NAME='brain', TISSUE=2, START_SLICE=1, END_SLICE=33)
    p['VOI'] = [349, 436, 211, 252, p['START_SLICE'], p['END_SLICE']]
    return p
def brainbin():
    """Parameter set for the binary brain mask (tissue id 2), unsmoothed."""
    p = frog()
    p.update(NAME='brainbin', TISSUE=2, START_SLICE=1, END_SLICE=33)
    # Note: the VOI slice bounds are END then START here, as in the original.
    p['VOI'] = [349, 436, 211, 252, p['END_SLICE'], p['START_SLICE']]
    p.update(GAUSSIAN_STANDARD_DEVIATION=[0, 0, 0], DECIMATE_ITERATIONS=0)
    return p
def duodenum():
    """Parameter set for the duodenum (tissue id 3)."""
    p = frog()
    p.update(NAME='duodenum', TISSUE=3, START_SLICE=35, END_SLICE=105)
    p['VOI'] = [189, 248, 191, 284, p['START_SLICE'], p['END_SLICE']]
    return p
def eye_retna():
    """Parameter set for the eye retina (tissue id 4)."""
    p = frog()
    p.update(NAME='eye_retna', TISSUE=4, START_SLICE=1, END_SLICE=41)
    p['VOI'] = [342, 438, 180, 285, p['START_SLICE'], p['END_SLICE']]
    return p
def eye_white():
    """Parameter set for the eye white (tissue id 5)."""
    p = frog()
    p.update(NAME='eye_white', TISSUE=5, START_SLICE=1, END_SLICE=37)
    p['VOI'] = [389, 433, 183, 282, p['START_SLICE'], p['END_SLICE']]
    return p
def frog():
    """Baseline parameter set shared by the frog tissue reconstructions."""
    p = default_parameters()
    p.update(ROWS=470, COLUMNS=500, STUDY='frogtissue', SLICE_ORDER='si',
             PIXEL_SIZE=1, SPACING=1.5, VALUE=127.5,
             SAMPLE_RATE=[1, 1, 1],
             GAUSSIAN_STANDARD_DEVIATION=[2, 2, 2],
             DECIMATE_REDUCTION=0.95, DECIMATE_ITERATIONS=5,
             DECIMATE_ERROR=0.0002, DECIMATE_ERROR_INCREMENT=0.0002,
             SMOOTH_FACTOR=0.1)
    return p
def heart():
    """Parameter set for the heart (tissue id 6)."""
    p = frog()
    p.update(NAME='heart', TISSUE=6, START_SLICE=49, END_SLICE=93)
    p['VOI'] = [217, 299, 186, 266, p['START_SLICE'], p['END_SLICE']]
    return p
def ileum():
    """Parameter set for the ileum (tissue id 7)."""
    p = frog()
    p.update(NAME='ileum', TISSUE=7, START_SLICE=25, END_SLICE=93)
    p['VOI'] = [172, 243, 201, 290, p['START_SLICE'], p['END_SLICE']]
    return p
def kidney():
    """Parameter set for the kidney (tissue id 8)."""
    p = frog()
    p.update(NAME='kidney', TISSUE=8, START_SLICE=24, END_SLICE=78)
    p['VOI'] = [116, 238, 193, 263, p['START_SLICE'], p['END_SLICE']]
    return p
def l_intestine():
    """Parameter set for the large intestine (tissue id 9)."""
    p = frog()
    p.update(NAME='l_intestine', TISSUE=9, START_SLICE=56, END_SLICE=106)
    p['VOI'] = [115, 224, 209, 284, p['START_SLICE'], p['END_SLICE']]
    return p
def liver():
    """Parameter set for the liver (tissue id 10)."""
    p = frog()
    p.update(NAME='liver', TISSUE=10, START_SLICE=25, END_SLICE=126)
    p['VOI'] = [167, 297, 154, 304, p['START_SLICE'], p['END_SLICE']]
    return p
def lung():
    """Parameter set for the lung (tissue id 11)."""
    p = frog()
    p.update(NAME='lung', TISSUE=11, START_SLICE=24, END_SLICE=59)
    p['VOI'] = [222, 324, 157, 291, p['START_SLICE'], p['END_SLICE']]
    return p
def nerve():
    """Parameter set for the nerve tissue (tissue id 12)."""
    p = frog()
    p.update(NAME='nerve', TISSUE=12, START_SLICE=7, END_SLICE=113)
    p['VOI'] = [79, 403, 63, 394, p['START_SLICE'], p['END_SLICE']]
    return p
def skin():
    """Parameter set for the skin surface (extracted from the 'frog' study)."""
    p = default_parameters()
    p.update(NAME='skin', TISSUE=0, ROWS=470, COLUMNS=500, STUDY='frog',
             SLICE_ORDER='si', PIXEL_SIZE=1, SPACING=1.5,
             START_SLICE=1, END_SLICE=138)
    p['VOI'] = [0, 499, 0, 469, p['START_SLICE'], p['END_SLICE']]
    p.update(VALUE=10.5, SAMPLE_RATE=[2, 2, 1],
             DECIMATE_REDUCTION=0.95, DECIMATE_ITERATIONS=10,
             DECIMATE_ERROR=0.0002, DECIMATE_ERROR_INCREMENT=0.0002,
             FEATURE_ANGLE=60, OPACITY=0.4)
    return p
def skeleton():
    """Parameter set for the skeleton (tissue id 13)."""
    p = frog()
    p.update(STUDY='frogtissue', NAME='skeleton', TISSUE=13, VALUE=64.5,
             START_SLICE=1, END_SLICE=136)
    p['VOI'] = [23, 479, 8, 469, p['START_SLICE'], p['END_SLICE']]
    p['GAUSSIAN_STANDARD_DEVIATION'] = [1.5, 1.5, 1]
    return p
def spleen():
    """Parameter set for the spleen (tissue id 14)."""
    p = frog()
    p.update(NAME='spleen', TISSUE=14, START_SLICE=45, END_SLICE=68)
    p['VOI'] = [166, 219, 195, 231, p['START_SLICE'], p['END_SLICE']]
    return p
def stomach():
    """Parameter set for the stomach (tissue id 15)."""
    p = frog()
    p.update(NAME='stomach', TISSUE=15, START_SLICE=26, END_SLICE=119)
    p['VOI'] = [143, 365, 158, 297, p['START_SLICE'], p['END_SLICE']]
    return p
def tissue_parameters():
    """Map each tissue name to its parameter dict.

    Every key equals the name of the builder function that produces it, so
    the mapping is generated from the builders themselves.
    """
    makers = (blood, brain, brainbin, duodenum, eye_retna, eye_white, frog,
              heart, ileum, kidney, l_intestine, liver, lung, nerve, skin,
              skeleton, spleen, stomach)
    return {maker.__name__: maker() for maker in makers}
def create_frog_lut(colors):
    """Build the 16-entry lookup table mapping tissue index -> display color.

    :param colors: a vtkNamedColors instance used to resolve color names
    :return: a built vtkLookupTable with range [0, 15]
    """
    lut = vtkLookupTable()
    lut.SetNumberOfColors(16)
    lut.SetTableRange(0, 15)
    lut.Build()
    # Index order matches the tissue ids used throughout this script.
    tissue_color_names = [
        'LimeGreen',    # skin
        'salmon',       # blood
        'beige',        # brain
        'orange',       # duodenum
        'misty_rose',   # eye_retina
        'white',        # eye_white
        'tomato',       # heart
        'raspberry',    # ileum
        'banana',       # kidney
        'peru',         # l_intestine
        'pink',         # liver
        'powder_blue',  # lung
        'carrot',       # nerve
        'wheat',        # skeleton
        'violet',       # spleen
        'plum',         # stomach
    ]
    for tissue_index, color_name in enumerate(tissue_color_names):
        lut.SetTableValue(tissue_index, colors.GetColor4d(color_name))
    return lut
def check_for_required_parameters(tissue, parameters):
    """Return an error message naming the required keys missing from
    `parameters`, or None when nothing is missing.

    An empty `parameters` dict reports every required key (which is exactly
    what the set difference yields, so no special case is needed).
    """
    required = {'NAME', 'END_SLICE', 'TISSUE', 'STUDY', 'ROWS',
                'COLUMNS', 'VALUE', 'SPACING',
                'GAUSSIAN_STANDARD_DEVIATION', 'VOI',
                'DECIMATE_ITERATIONS'}
    missing = required.difference(parameters.keys())
    if missing:
        return 'Missing parameters for {:11s}: {:s}'.format(tissue, ', '.join(map(str, missing)))
    return None
def format_timings(ict):
    """Format a nested {group: {step: seconds}} timing dict into aligned
    report lines with a subtotal per group and a grand total at the end.

    :param ict: dict mapping group name -> dict of step name -> seconds
    :return: list of formatted report lines
    """
    lines = []
    grand_total = 0
    for group in sorted(ict):
        group_total = 0
        lines.append('{:11s}'.format(group))
        for step in sorted(ict[group]):
            group_total += ict[group][step]
            lines.append('{:11s}{:13s} {:5.2f}s'.format(' ', step, ict[group][step]))
        grand_total += group_total
        lines.append('Subtotal: {:5.2f}s'.format(group_total))
    lines.append('   Total: {:5.2f}s'.format(grand_total))
    return lines
if __name__ == '__main__':
import sys
data_folder, tissue, view, flying_edges, decimate = get_program_parameters(sys.argv)
main(data_folder, tissue, view, flying_edges, decimate)
| 31.093602 | 121 | 0.622185 |
3d3611984ad47f38b9bcaf5c70b8693991e55438 | 3,202 | py | Python | mfr/extensions/tabular/libs/stdlib_tools.py | yacchin1205/RDM-modular-file-renderer | 5bd18175a681d21e7be7fe0238132335a1cd8ded | [
"Apache-2.0"
] | 36 | 2015-08-31T20:24:22.000Z | 2021-12-17T17:02:44.000Z | mfr/extensions/tabular/libs/stdlib_tools.py | yacchin1205/RDM-modular-file-renderer | 5bd18175a681d21e7be7fe0238132335a1cd8ded | [
"Apache-2.0"
] | 190 | 2015-01-02T06:22:01.000Z | 2022-01-19T11:27:03.000Z | mfr/extensions/tabular/libs/stdlib_tools.py | yacchin1205/RDM-modular-file-renderer | 5bd18175a681d21e7be7fe0238132335a1cd8ded | [
"Apache-2.0"
] | 47 | 2015-01-27T15:45:22.000Z | 2021-01-27T22:43:03.000Z | import re
import csv
from mfr.extensions.tabular.exceptions import EmptyTableError, TabularRendererError
from mfr.extensions.tabular import utilities
def csv_stdlib(fp):
    """Read a csv file and convert it to the tabular JSON structure using
    only the python standard library.

    :param fp: File pointer object
    :return: dict mapping sheet name to a (columns, rows) tuple
    """
    # Sniff the dialect from a small sample, then rewind for the real read.
    sample = fp.read(2048)
    fp.seek(0)
    try:
        dialect = csv.Sniffer().sniff(sample)
    except csv.Error:
        dialect = csv.excel
    else:
        _set_dialect_quote_attrs(dialect, sample)
    del sample

    reader = csv.DictReader(fp, dialect=dialect)
    columns = []
    # Rename duplicate column names in place (before reading rows) so that
    # row extraction never collapses two columns onto one dict key.
    for idx, fieldname in enumerate(reader.fieldnames or []):
        n_seen = sum(1 for column in columns if fieldname == column['name'])
        if n_seen:
            unique_fieldname = '{}-{}'.format(fieldname, n_seen + 1)
            reader.fieldnames[idx] = unique_fieldname
        else:
            unique_fieldname = fieldname
        columns.append({
            'id': unique_fieldname,
            'field': unique_fieldname,
            'name': fieldname,
            'sortable': True,
        })

    try:
        rows = list(reader)
    except csv.Error as e:
        if any("field larger than field limit" in errorMsg for errorMsg in e.args):
            raise TabularRendererError(
                'This file contains a field too large to render. '
                'Please download and view it locally.',
                code=400,
                extension='csv',
            ) from e
        raise TabularRendererError('csv.Error: {}'.format(e), extension='csv') from e

    if not columns and not rows:
        raise EmptyTableError('Table empty or corrupt.', extension='csv')
    del reader
    return {'Sheet 1': (columns, rows)}
def sav_stdlib(fp):
    """Convert a .sav file to .csv with pspp, then parse the csv using the
    python standard library.

    :param fp: File pointer object to a .sav file
    :return: tuple of table headers and data
    """
    converted = utilities.sav_to_csv(fp)
    with open(converted.name, 'r') as csv_fp:
        converted.close()
        return csv_stdlib(csv_fp)
def _set_dialect_quote_attrs(dialect, data):
"""Set quote-related dialect attributes based on up to 2kb of csv data.
The regular expressions search for things that look like the beginning of
a list, wrapped in a quotation mark that is not dialect.quotechar, with
list items wrapped in dialect.quotechar and seperated by commas.
Example matches include:
"['1', '2', '3' for quotechar == '
'{"a", "b", "c" for quotechar == "
"""
if dialect.quotechar == '"':
if re.search('\'[[({]".+",', data):
dialect.quotechar = "'"
if re.search("'''[[({]\".+\",", data):
dialect.doublequote = True
elif dialect.quotechar == "'":
if re.search("\"[[({]'.+',", data):
dialect.quotechar = '"'
if re.search('"""[[({]\'.+\',', data):
dialect.doublequote = True
| 33.705263 | 98 | 0.605559 |
3d36845f210b13d26d7504e09092d4846041c87f | 4,191 | py | Python | code/master_web/app/template_support.py | glenn-edgar/lacima_ranch_cloud | 0827bdd497295c931cf1a06e97880009773e77be | [
"MIT"
] | null | null | null | code/master_web/app/template_support.py | glenn-edgar/lacima_ranch_cloud | 0827bdd497295c931cf1a06e97880009773e77be | [
"MIT"
] | null | null | null | code/master_web/app/template_support.py | glenn-edgar/lacima_ranch_cloud | 0827bdd497295c931cf1a06e97880009773e77be | [
"MIT"
] | null | null | null | #
#
# This is Support for Drawing Bullet Charts
#
#
#
#
#
#
#
'''
This is the return json value to the javascript front end
{ "canvasName":"canvas1","featuredColor":"Green", "featuredMeasure":14.5,
"qualScale1":14.5, "qualScale1Color":"Black","titleText":"Step 1" },
{ "canvasName":"canvas2","featuredColor":"Blue", "featuredMeasure":14.5,
"qualScale1":14.5, "qualScale1Color":"Black","titleText":"Step 2" },
{ "canvasName":"canvas3","featuredColor":"Red", "featuredMeasure":14.5,
"qualScale1":14.5, "qualScale1Color":"Black","titleText":"Step 3" },
'''
| 37.756757 | 124 | 0.504653 |
3d373999e9b389d4982c3184efb41a30e1a5425d | 1,108 | py | Python | datapack/data/scripts/custom/8871_gve/__init__.py | DigitalCoin1/L2SPERO | f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94 | [
"Unlicense"
] | null | null | null | datapack/data/scripts/custom/8871_gve/__init__.py | DigitalCoin1/L2SPERO | f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94 | [
"Unlicense"
] | null | null | null | datapack/data/scripts/custom/8871_gve/__init__.py | DigitalCoin1/L2SPERO | f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94 | [
"Unlicense"
] | null | null | null | # Author ProGramMoS, Scoria Dev
# Version 0.2b
import sys
from com.l2jfrozen.gameserver.model.actor.instance import L2PcInstance
from com.l2jfrozen.util.database import L2DatabaseFactory
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "8871_gve"
QUEST = Quest(8871,qn,"custom")
CREATED = State('Start',QUEST)
STARTED = State('Started',QUEST)
COMPLETED = State('Completed',QUEST)
QUEST.setInitialState(CREATED) | 25.767442 | 77 | 0.731949 |
3d37fed769b12cfb4e9da6c616fc01c8b6b51490 | 3,740 | py | Python | src/hypergol/base_data.py | hypergol/hypergol | 0beee71c8f72d517ef376030baff9c840a2f7eeb | [
"MIT"
] | 49 | 2020-07-09T10:22:25.000Z | 2022-02-21T16:55:34.000Z | src/hypergol/base_data.py | hypergol/hypergol | 0beee71c8f72d517ef376030baff9c840a2f7eeb | [
"MIT"
] | 16 | 2020-08-18T17:06:05.000Z | 2022-02-19T16:30:04.000Z | src/hypergol/base_data.py | hypergol/hypergol | 0beee71c8f72d517ef376030baff9c840a2f7eeb | [
"MIT"
] | 3 | 2020-07-16T08:42:09.000Z | 2021-03-06T15:09:13.000Z | import json
import base64
import pickle
from hypergol.repr import Repr
def test_to_data(self):
"""Tests if the output of the derived class's to_data() function can be converted to a string by ``json.dumps()``"""
originalData = self.__dict__.copy()
data = self.to_data()
for k, v in self.__dict__.items():
if v != originalData[k]:
raise AssertionError(f'{self.__class__.__name__}.to_data() changes the instance itself: {k}: {v} != {originalData[k]}')
try:
_ = json.dumps(data)
except TypeError as ex:
raise TypeError(f'{self.__class__.__name__} JSON serde test failed: {ex}')
return True
def test_from_data(self):
"""Tests if a roundtrip of ``self.from_data(self.to_data())`` modifies the class"""
selfCopy = self.from_data(self.to_data())
if not isinstance(self, type(selfCopy)):
raise AssertionError(f'{self.__class__.__name__}.from_data() does not return the correct type: {self.__class__.__name__} vs {selfCopy.__class__.__name__}, from_data() return value should be "cls(**data)"')
for k, v in selfCopy.__dict__.items():
if v != self.__dict__[k]:
if str(k) == str(v):
raise AssertionError(f'{self.__class__.__name__}.from_data() returns keys as values: {k}: {v} != {self.__dict__[k]}, from_data() return value should be "cls(**data)"')
raise AssertionError(f'{self.__class__.__name__}.from_data() does not deserialise: {k}: {v} != {self.__dict__[k]}')
return True
| 37.029703 | 217 | 0.625936 |
3d392bdfd33f424fff8045fe8d11d2926903d55e | 829 | py | Python | examples/spark-function.py | Hedingber/mlrun | e2269718fcc7caa7e1aa379ac28495830b45f9da | [
"Apache-2.0"
] | 1 | 2021-02-17T08:12:33.000Z | 2021-02-17T08:12:33.000Z | examples/spark-function.py | Hedingber/mlrun | e2269718fcc7caa7e1aa379ac28495830b45f9da | [
"Apache-2.0"
] | 1 | 2020-12-31T14:36:29.000Z | 2020-12-31T14:36:29.000Z | examples/spark-function.py | Hedingber/mlrun | e2269718fcc7caa7e1aa379ac28495830b45f9da | [
"Apache-2.0"
] | 1 | 2021-08-30T21:43:38.000Z | 2021-08-30T21:43:38.000Z | # Pyspark example called by mlrun_spark_k8s.ipynb
from pyspark.sql import SparkSession
from mlrun import get_or_create_ctx
# Acquire MLRun context
mlctx = get_or_create_ctx("spark-function")
# Get MLRun parameters
mlctx.logger.info("!@!@!@!@!@ Getting env variables")
READ_OPTIONS = mlctx.get_param("data_sources")
QUERY = mlctx.get_param("query")
WRITE_OPTIONS = mlctx.get_param("write_options")
# Create spark session
spark = SparkSession.builder.appName("Spark function").getOrCreate()
# Loading data from a JDBC source
for data_source in READ_OPTIONS:
spark.read.load(**READ_OPTIONS[data_source]).createOrReplaceTempView(data_source)
# Transform the data using SQL query
spark.sql(QUERY).write.save(**WRITE_OPTIONS)
# write the result datadrame to destination
mlctx.logger.info("!@!@!@!@!@ Saved")
spark.stop()
| 26.741935 | 85 | 0.772014 |
3d39a4a34099b547fd394be7429e0efce238f402 | 3,654 | py | Python | scripts/create_dataset.py | maxrousseau/dl-anesthesia | e5de2ecfc9d9e954f3ee36eedb13332589dfc27e | [
"MIT"
] | null | null | null | scripts/create_dataset.py | maxrousseau/dl-anesthesia | e5de2ecfc9d9e954f3ee36eedb13332589dfc27e | [
"MIT"
] | null | null | null | scripts/create_dataset.py | maxrousseau/dl-anesthesia | e5de2ecfc9d9e954f3ee36eedb13332589dfc27e | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os, glob
import datetime
import xmltodict as xd
import numpy as np
import pandas as pd
import h5py
import matplotlib
import matplotlib.pyplot as plt
from sklearn import preprocessing
# lets make a little data set for fun...
mh_dir = os.path.abspath('./db/mh_data/')
mh_cases = glob.glob(os.path.join(mh_dir, '*'))
# sample = os.path.abspath('./db/asac_data/case10.xml') >> TODO: we will need
# to make modifications for this dataset
db = [] # list of all input structs
# a sample will be 6 entries (=60 seconds) of every datapoint to determine if
# there will be a change in spo2 in the next 60 seconds
# spo2, hr,
# parse every xml file and save each to a separate h5 file for future use
# spo2.SpO2, co2.et, ecg.hr, nibp.sys, nibp.dia
mk_npy()
# boom load it...
#X = np.load("x.npy", X) # (3740, 306)
#Y = np.load("y.npy", Y) # (3740,)
| 24.36 | 84 | 0.570881 |
3d39d78b8b90f5a0e60b1cd9c3435a778082fd09 | 636 | py | Python | ossdbtoolsservice/admin/contracts/__init__.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 33 | 2019-05-27T13:04:35.000Z | 2022-03-17T13:33:05.000Z | ossdbtoolsservice/admin/contracts/__init__.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 31 | 2019-06-10T01:55:47.000Z | 2022-03-09T07:27:49.000Z | ossdbtoolsservice/admin/contracts/__init__.py | DaeunYim/pgtoolsservice | b7e548718d797883027b2caee2d4722810b33c0f | [
"MIT"
] | 25 | 2019-05-13T18:39:24.000Z | 2021-11-16T03:07:33.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from ossdbtoolsservice.admin.contracts.get_database_info_request import (
DatabaseInfo, GetDatabaseInfoParameters, GetDatabaseInfoResponse, GET_DATABASE_INFO_REQUEST)
__all__ = [
'DatabaseInfo', 'GetDatabaseInfoParameters', 'GetDatabaseInfoResponse', 'GET_DATABASE_INFO_REQUEST'
]
| 53 | 103 | 0.575472 |
3d3a5919d0773f6fa55679eeb76000e332ce88f7 | 38,534 | py | Python | whacc/image_tools.py | hireslab/whacc | e0ccfe4ee784609cacd4cf62a17192687a5dff51 | [
"MIT"
] | 1 | 2021-05-27T00:34:46.000Z | 2021-05-27T00:34:46.000Z | whacc/image_tools.py | hireslab/whacc | e0ccfe4ee784609cacd4cf62a17192687a5dff51 | [
"MIT"
] | null | null | null | whacc/image_tools.py | hireslab/whacc | e0ccfe4ee784609cacd4cf62a17192687a5dff51 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import h5py
import copy
import time
import os
from whacc import utils
if isnotebook():
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
def get_h5_key_and_concatenate(h5_list, key_name='labels'):
    """Extract the dataset "key_name" from every H5 file in the list and
    concatenate all of the values into one array.  Useful e.g. for balancing
    touch / no-touch frame counts across several training files.

    Parameters
    ----------
    h5_list : list or str
        full path(s) to H5 file(s); a single path is wrapped into a list
    key_name : str
        default 'labels', the key to read from each H5 file

    Returns
    -------
    numpy.ndarray
        the per-file arrays concatenated along axis 0
    """
    h5_list = utils.make_list(h5_list, suppress_warning=True)
    collected = []
    for h5_file in h5_list:
        with h5py.File(h5_file, 'r') as h:
            collected.append(np.asarray(h[key_name][:]))
    return np.concatenate(collected)
def get_h5_key_and_dont_concatenate(h5_list, key_name='labels'):
    """Extract the dataset "key_name" from every H5 file in the list WITHOUT
    concatenating: each file's values are returned as their own list.

    Fixes: the previous docstring was copied from get_h5_key_and_concatenate
    and wrongly described concatenation; the unused enumerate index is gone;
    and a single file path is now accepted (wrapped via utils.make_list),
    matching the sibling function's behavior.

    Parameters
    ----------
    h5_list : list or str
        full path(s) to H5 file(s)
    key_name : str
        default 'labels', the key to read from each H5 file

    Returns
    -------
    list
        one list of values per input file, in input order
    """
    h5_list = utils.make_list(h5_list, suppress_warning=True)
    out = []
    for h5_file in h5_list:
        with h5py.File(h5_file, 'r') as h:
            out.append(list(h[key_name][:]))
    return out
def clone_h5_basic_info(H5_list, fold_name=None, file_end='_QUICK_SAVE.h5'):
    """Copy all metadata from each H5 file into a new H5 file, EXCLUDING the
    images and labels: any key equal to 'images' or containing 'MODEL__' or
    'labels' is skipped.  Everything else (file names, pole locations, etc.)
    is copied verbatim.

    Fixes: the bare ``except: pass`` wrappers around ``os.mkdir``/``os.remove``
    (which could hide real I/O errors) are replaced with
    ``os.makedirs(..., exist_ok=True)`` and an explicit existence check, and
    the redundant ``close()`` calls inside the ``with`` blocks are removed.

    Parameters
    ----------
    H5_list : list
        list of H5 files to clone
    fold_name : str
        default None, directory for the cloned files; when None each clone is
        written next to its original
    file_end : str
        default '_QUICK_SAVE.h5', suffix replacing the original '.h5' so the
        clone's name differs from the original

    Returns
    -------
    all_new_h5s : list
        list of new H5 full file names
    """
    if fold_name is not None:
        os.makedirs(fold_name, exist_ok=True)
    all_new_h5s = []
    for h5 in H5_list:
        if fold_name is not None:
            new_fn = fold_name + os.path.sep + os.path.basename(h5)[:-3] + file_end
        else:
            new_fn = os.path.dirname(h5) + os.path.sep + os.path.basename(h5)[:-3] + file_end
        all_new_h5s.append(new_fn)
        if os.path.exists(new_fn):  # overwrite any previous clone
            os.remove(new_fn)
        with h5py.File(new_fn, 'w') as f1, h5py.File(h5, 'r') as f2:
            for k in f2.keys():
                if 'images' != k and 'MODEL__' not in k and 'labels' not in k:
                    f1.create_dataset(k, data=f2[k][:])
    return all_new_h5s
def del_h5_with_term(h5_list, str_2_cmp):
    """Delete every key whose name contains `str_2_cmp` from each H5 file.

    Fix: the original deleted members while iterating the live h5py KeysView,
    which mutates the group during iteration; the key list is now snapshotted
    first so every matching key is reliably removed.

    Parameters
    ----------
    h5_list : list
        list of H5 file paths (full path)
    str_2_cmp : str
        keys containing this substring are deleted, e.g. '__RETRAIN'
    """
    for h5_file in h5_list:
        with h5py.File(h5_file, 'a') as h5_source:
            # snapshot the keys: never delete from a group while iterating it
            for key in list(h5_source.keys()):
                if str_2_cmp in key:
                    print('del--> ' + key)
                    del h5_source[key]
            print('_______')
def split_h5_loop_segments(h5_to_split_list, split_percentages, temp_base_name, chunk_size=10000,
                           add_numbers_to_name=True,
                           disable_TQDM=False, set_seed=None, color_channel=True):
    """Randomly split the images of a list of H5 file(s) into
    len(split_percentages) new H5 files, one whole loop segment at a time.

    Unlike a per-frame split, entire segments (as listed in each file's
    'frame_nums' key) are assigned to a single output, so the frames of one
    video segment are never scattered across outputs.

    Parameters
    ----------
    h5_to_split_list : list
        list of strings with full file names to the H5 file(s) to be split
    split_percentages : list
        list of numbers, can be ints [20, 1, 1] and or floats [.8, .2]; only
        the relative proportions matter (values are normalized by their sum)
    temp_base_name : str or list
        full path base for the new h5 files, e.g. "/Users/phil/tempH5_"; the
        index and ".h5" are appended (tempH5_0.h5, tempH5_1.h5, ...).  If a
        list, it must match split_percentages in length and each output file
        is named from that list.
    chunk_size : int
        default 10000, max amount of frames to hold in memory at a time
        before flushing to the output H5 file; lower it if memory is tight
    add_numbers_to_name : bool
        default True; set False to omit the index suffix on the file names
    disable_TQDM : bool
        default False, silences the progress bar
    set_seed : int or None
        default None; seeds numpy's RNG for a reproducible split
    color_channel : bool
        passed through to h5_iterative_creator

    Returns
    -------
    final_names : list
        full file names of the newly created H5 files

    Examples
    --------
    from whacc import image_tools, utils
    h5_to_split_list = ["/Users/phil/Downloads/AH0000x000000_small_tester.h5"]
    bd = '/Users/phil/Downloads/'
    image_tools.split_h5_loop_segments(h5_to_split_list, [1, 3],
                                       [bd + 'TRASH', bd + 'TRASH2'])
    """
    if isinstance(temp_base_name, str):
        temp_base_name = [temp_base_name] * len(split_percentages)
    else:
        assert len(temp_base_name) == len(
            split_percentages), """if 'temp_base_name' is a list of strings, it must be equal in length to 'split_percentages'"""
    for i, k in enumerate(temp_base_name):  # strip any trailing '.h5'; it is re-added below
        if k[-3:] == '.h5':
            temp_base_name[i] = temp_base_name[i][:-3]
    frame_num_array_list = get_h5_key_and_dont_concatenate(h5_to_split_list, 'frame_nums')
    total_frames = len(get_h5_key_and_concatenate(h5_to_split_list, key_name='labels'))
    cnt1 = 0  # running frame count; only used to position the progress bar
    h5_creators = dict()
    split_percentages = split_percentages / np.sum(split_percentages)  # normalize to fractions summing to 1
    # assert(sum(split_percentages)==1)
    final_names = []
    for iii, h5_to_split in enumerate(h5_to_split_list):
        with h5py.File(h5_to_split, 'r') as h:
            tmp_frame_list = frame_num_array_list[iii]
            L = len(tmp_frame_list)  # number of loop segments in this file
            if set_seed is not None:
                np.random.seed(set_seed)
            # shuffle the segment indices, then cut the shuffled order at the
            # cumulative split points to assign whole segments to each output
            mixed_inds = np.random.choice(L, L, replace=False)
            random_segment_inds = np.split(mixed_inds, np.ceil(L * np.cumsum(split_percentages[:-1])).astype('int'))
            random_segment_inds = [sorted(tmpk) for tmpk in random_segment_inds]
            random_frame_inds = [[None]] * len(random_segment_inds)
            list_of_new_frame_nums = [[None]] * len(random_segment_inds)
            loop_seg_list = list(utils.loop_segments(tmp_frame_list))
            for pi, p in enumerate(random_segment_inds):
                tmp1 = []
                tmp2 = []
                for pp in p:  # expand each chosen segment into its frame indices
                    x = list(loop_seg_list[pp])
                    tmp1 += list(range(x[0], x[1]))
                    tmp2.append(tmp_frame_list[pp])
                random_frame_inds[pi] = tmp1
                list_of_new_frame_nums[pi] = tmp2
            for i, k in enumerate(split_percentages):  # for each new h5 created
                if iii == 0:  # create the H5 creators
                    if add_numbers_to_name:
                        final_names.append(temp_base_name[i] + '_' + str(i) + '.h5')
                    else:
                        final_names.append(temp_base_name[i] + '.h5')
                    h5_creators[i] = h5_iterative_creator(final_names[-1],
                                                          overwrite_if_file_exists=True,
                                                          close_and_open_on_each_iteration=True,
                                                          color_channel=color_channel)
                ims = []
                labels = []
                for ii in tqdm(sorted(random_frame_inds[i]), disable=disable_TQDM, total=total_frames, initial=cnt1):
                    cnt1 += 1
                    ims.append(h['images'][ii])
                    labels.append(h['labels'][ii])
                    if ii > 0 and ii % chunk_size == 0:  # flush to disk to bound memory use
                        h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
                        ims = []
                        labels = []
                h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))  # flush the remainder
                with h5py.File(h5_creators[i].h5_full_file_name,
                               'r+') as h2:  # append this file's frame_nums to the output via a resizable dataset
                    frame_nums = np.asarray(list_of_new_frame_nums[i])
                    if 'frame_nums' not in h2.keys():
                        h2.create_dataset('frame_nums', shape=np.shape(frame_nums), maxshape=(None,), chunks=True,
                                          data=frame_nums)
                    else:
                        h2['frame_nums'].resize(h2['frame_nums'].shape[0] + frame_nums.shape[0], axis=0)
                        h2['frame_nums'][-frame_nums.shape[0]:] = frame_nums
    # # add the frame info to each
    # for i, frame_nums in enumerate(list_of_new_frame_nums):
    #     with h5py.File(h5_creators[i].h5_full_file_name, 'r+') as h:
    #         h.create_dataset('frame_nums', shape=np.shape(frame_nums), data=frame_nums)
    return final_names
def split_h5(h5_to_split_list, split_percentages, temp_base_name, chunk_size=10000, add_numbers_to_name=True,
disable_TQDM=False, skip_if_label_is_neg_1=False, set_seed=None, color_channel=True):
"""Randomly splits images from a list of H5 file(s) into len(split_percentages) different H5 files.
Parameters
----------
h5_to_split_list : list
list of strings with full file names to the H5 file(s) to be split
split_percentages : list
list of numbers, can be ints [20, 1, 1] and or floats [.8, .2], it simply takes the sum and creates a percentage
temp_base_name : str or list
full path to new h5 file e.g "'/Users/phil/tempH5_" and the program will add the number and the ".h5"
in this case tempH5_0.h5, tempH5_1.h5, tempH5_2.h5 etc. or if it is a list it must be equal in length to
'split_percentages' and each file will be named based on that list
chunk_size = int
default 10000, max amount of frames to hold in memory at a time before storing in H5 file. Should almost never
be an issue but just in case you can set to a lower value if you experience memory issues.
add_numbers_to_name = bool
default true, just in case you don't want the numbers on the end of your h5 file.
Returns
-------
"""
if isinstance(temp_base_name, str):
temp_base_name = [temp_base_name] * len(split_percentages)
else:
assert len(temp_base_name) == len(
split_percentages), """if 'temp_base_name' is a list of strings, it must be equal in length to 'split_percentages'"""
total_frames = len(get_h5_key_and_concatenate(h5_to_split_list, key_name='labels'))
cnt1 = 0
h5_creators = dict()
split_percentages = split_percentages / np.sum(split_percentages)
# assert(sum(split_percentages)==1)
final_names = []
for iii, h5_to_split in enumerate(h5_to_split_list):
with h5py.File(h5_to_split, 'r') as h:
L = len(h['labels'][:])
if set_seed is not None:
np.random.seed(set_seed)
mixed_inds = np.random.choice(L, L, replace=False)
if skip_if_label_is_neg_1: # remove -1s
mixed_inds = mixed_inds[mixed_inds != -1]
random_frame_inds = np.split(mixed_inds, np.ceil(L * np.cumsum(split_percentages[:-1])).astype('int'))
for i, k in enumerate(split_percentages):
if iii == 0: # create the H5 creators
if add_numbers_to_name:
final_names.append(temp_base_name[i] + '_' + str(i) + '.h5')
else:
final_names.append(temp_base_name[i] + '.h5')
h5_creators[i] = h5_iterative_creator(final_names[-1],
overwrite_if_file_exists=True,
close_and_open_on_each_iteration=True,
color_channel=color_channel)
ims = []
labels = []
# print('starting ' + str(iii*i + 1) + ' of ' + str(len(split_percentages)*len(h5_to_split_list)))
for ii in tqdm(sorted(random_frame_inds[i]), disable=disable_TQDM, total=total_frames, initial=cnt1):
cnt1 += 1
ims.append(h['images'][ii])
labels.append(h['labels'][ii])
if ii > 0 and ii % chunk_size == 0:
h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
ims = []
labels = []
h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))
return final_names
#
def augment_helper(keras_datagen, num_aug_ims, num_reg_ims, in_img, in_label):
"""
Parameters
----------
keras_datagen : keras_datagen: keras_datagen: keras.preprocessing.image.ImageDataGenerator
from keras.preprocessing.image import ImageDataGenerator-- keras_datagen = ImageDataGenerator(...)
num_aug_ims : int
number of augmented images to generate from single input image
num_reg_ims : int
number of copies of in_img to produce. will be stacked at the beginning of all_augment variable.
Use dot see augmentation when testing and can be useful if splitting into many H5s if you want an original in each.
in_img : numpy array
numpy array either 3D with color channel for the last dim ot 2D
in_label : int
the label associate with in_img. simply repeats it creating 'out_labels' the be size of 'all_augment'
Returns
-------
"""
if len(in_img.shape) == 2: # or not np.any(np.asarray(in_img.shape)==3)
in_img = np.repeat(in_img[..., np.newaxis], 3, -1) # for 2D arrays without color channels
set_zoom = keras_datagen.zoom_range
in_img = np.expand_dims(in_img, 0)
it = keras_datagen.flow(in_img, batch_size=1)
all_augment = np.tile(in_img, [num_reg_ims, 1, 1, 1])
for i in range(num_aug_ims): ##
if set_zoom != [0, 0]: # if zoom is being used...
# keras 'zoom' is annoying. it zooms x and y differently randomly
# in order to get an equal zoom I use the following workaround.
z_val = np.random.uniform(low=set_zoom[0], high=set_zoom[1])
keras_datagen.zoom_range = [z_val, z_val]
it = keras_datagen.flow(in_img, batch_size=1)
batch = it.next()
image = batch[0].astype('uint8')
all_augment = np.append(all_augment, np.expand_dims(image, 0), 0)
out_labels = np.repeat(in_label, sum([num_aug_ims, num_reg_ims]))
keras_datagen.zoom_range = set_zoom
return all_augment, out_labels
def img_unstacker(img_array, num_frames_wide=8, color_channel=True):
"""unstacks image stack and combines them into one large image for easy display. reads left to right and then top to bottom.
Parameters
----------
img_array : numpy array
stacked image array
num_frames_wide : int
width of destacked image. if = 8 with input 20 images it will be 8 wide 3 long and 4 blank images (Default value = 8)
Returns
-------
"""
im_stack = None
for i, k in enumerate(img_array):
if i % num_frames_wide == 0:
if i != 0: # stack it
if im_stack is None:
im_stack = im_stack_tmp
else:
im_stack = np.vstack((im_stack, im_stack_tmp))
im_stack_tmp = k # must be at the end
else:
im_stack_tmp = np.hstack((im_stack_tmp, k))
x = num_frames_wide - len(img_array) % num_frames_wide
if x != 0:
if x != num_frames_wide:
for i in range(x):
im_stack_tmp = np.hstack((im_stack_tmp, np.ones_like(k)))
if im_stack is None:
return im_stack_tmp
else:
im_stack = np.vstack((im_stack, im_stack_tmp))
return im_stack
def original_image(x):
"""This is used to transform batch generated images [-1 1] to the original image [0,255] for plotting
Parameters
----------
x :
Returns
-------
"""
image = tf.cast((x + 1) * 127.5, tf.uint8)
return image
def predict_multiple_H5_files(H5_file_list, model_2_load, append_model_and_labels_to_name_string=False,
batch_size=1000, model_2_load_is_model=False, save_on=False,
label_save_name=None, disable_TQDM=False,
save_labels_to_this_h5_file_instead=None) -> object:
"""
Parameters
----------
H5_file_list : list: list
list of string(s) of H5 file full paths
model_2_load : param append_model_and_labels_to_name_string: if True label_save_name = 'MODEL__' + label_save_name + '__labels',
it is a simple way to keep track of labels form many models in a single H5 file. also make sit easier to find :
those labels for later processing. :
either full path to model folder ending with ".ckpt" OR the loaded model itself. if the later,
the user MUST set "model_2_load_is_model" is True and "label_save_name" must be explicitly defined (when using model
path we use the model name to name the labels).
append_model_and_labels_to_name_string : bool
if True label_save_name = 'MODEL__' + label_save_name + '__labels',it is a simple way to keep track of labels
form many models in a single H5 file. also make sit easier to find those labels for later processing. (Default value = False)
batch_size : int
number of images to process per batch, -- slower prediction speeds << ideal predictionsspeed <<
memory issues and crashes -- 1000 is normally pretty good on Google CoLab (Default value = 1000)
model_2_load_is_model : bool
lets the program know if you are directly inserting a model (instead of a path to model folder) (Default value = False)
save_on : bool
saves to H5 file. either the original H5 (image source) or new H5 if a path to "save_labels_to_this_h5_file_instead"
is given (Default value = False)
label_save_name : string
h5 file key used to save the labels to, default is 'MODEL__' + **model_name** + '__labels'
disable_TQDM : bool
if True, turns off loading progress bar. (Default value = False)
save_labels_to_this_h5_file_instead : string
full path to H5 file to insert labels into instead of the H5 used as the image source (Default value = None)
Returns
-------
"""
for i, H5_file in enumerate(H5_file_list):
# save_what_is_left_of_your_h5_file(H5_file, do_del_and_rename = 1) # only matters if file is corrupt otherwise doesnt touch it
gen = ImageBatchGenerator(batch_size, [H5_file])
if model_2_load_is_model:
if label_save_name is None and save_on == True:
assert 1 == 0, 'label_save_name must be assigned if you are loading a model in directly and saveon == True.'
model = model_2_load
else:
if label_save_name is None:
label_save_name = model_2_load.split(os.path.sep)[-1].split('.')[0]
label_save_name = 'MODEL__' + label_save_name + '__labels'
append_model_and_labels_to_name_string = False # turn off because defaults to this naming scheme if user doesnt put in name
model = tf.keras.models.load_model(model_2_load)
if append_model_and_labels_to_name_string:
label_save_name = 'MODEL__' + label_save_name + '__labels'
start = time.time()
labels_2_save = np.asarray([])
for k in tqdm(range(gen.__len__()), disable=disable_TQDM):
TMP_X, tmp_y = gen.getXandY(k)
outY = model.predict(TMP_X)
labels_2_save = np.append(labels_2_save, outY)
total_seconds = time.time() - start
time_per_mil = np.round(1000000 * total_seconds / len(labels_2_save))
print(str(time_per_mil) + ' seconds per 1 million images predicted')
if save_on:
if save_labels_to_this_h5_file_instead is not None: # add to differnt H5 file
H5_file = save_labels_to_this_h5_file_instead # otherwise it will add to the current H5 file
# based on the loop through "H5_file_list" above
try:
hf.close()
except:
pass
with h5py.File(H5_file, 'r+') as hf:
try:
del hf[label_save_name]
time.sleep(10) # give time to process the deleted file... maybe???
hf.create_dataset(label_save_name, data=np.float64(labels_2_save))
except:
hf.create_dataset(label_save_name, data=np.float64(labels_2_save))
hf.close()
return labels_2_save
def get_total_frame_count(h5_file_list):
"""
Parameters
----------
h5_file_list :
Returns
-------
"""
total_frame_count = []
for H5_file in h5_file_list:
H5 = h5py.File(H5_file, 'r')
images = H5['images']
total_frame_count.append(images.shape[0])
return total_frame_count
def batch_size_file_ind_selector(num_in_each, batch_size):
"""batch_size_file_ind_selector - needed for ImageBatchGenerator to know which H5 file index
to use depending on the iteration number used in __getitem__ in the generator.
this all depends on the variable batch size.
Example: the output of the following...
batch_size_file_ind_selector([4000, 4001, 3999], [2000])
would be [0, 0, 1, 1, 1, 2, 2] which means that there are 2 chunks in the first
H5 file, 3 in the second and 2 in the third based on chunk size of 2000
Parameters
----------
num_in_each :
param batch_size:
batch_size :
Returns
-------
"""
break_into = np.ceil(np.array(num_in_each) / batch_size)
extract_inds = np.array([])
for k, elem in enumerate(break_into):
tmp1 = np.array(np.ones(np.int(elem)) * k)
extract_inds = np.concatenate((extract_inds, tmp1), axis=0)
return extract_inds
# file_inds_for_H5_extraction is the same as extract_inds output from the above function
def reset_to_first_frame_for_each_file_ind(file_inds_for_H5_extraction):
"""reset_to_first_frame_for_each_file_ind - uses the output of batch_size_file_ind_selector
to determine when to reset the index for each individual H5 file. using the above example
the out put would be [0, 0, 2, 2, 2, 5, 5], each would be subtracted from the indexing to
set the position of the index to 0 for each new H5 file.
Parameters
----------
file_inds_for_H5_extraction :
Returns
-------
"""
subtract_for_index = []
for k, elem in enumerate(file_inds_for_H5_extraction):
tmp1 = np.diff(file_inds_for_H5_extraction)
tmp1 = np.where(tmp1 != 0)
tmp1 = np.append(-1, tmp1[0]) + 1
subtract_for_index.append(tmp1[np.int(file_inds_for_H5_extraction[k])])
return subtract_for_index
def image_transform_(IMG_SIZE, raw_X):
"""
input num_of_images x H x W, image input must be grayscale
MobileNetV2 requires certain image dimensions
We use N x 61 x 61 formated images
self.IMG_SIZE is a single number to change the images into, images must be square
Parameters
----------
raw_X :
Returns
-------
"""
if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:
rgb_batch = copy.deepcopy(raw_X)
else:
rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)
rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes
rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2
rgb_tensor = tf.image.resize(rgb_tensor, (IMG_SIZE, IMG_SIZE)) # resizing
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
return rgb_tensor
| 40.223382 | 173 | 0.602896 |
3d3c48e30dea59b0f2566984a39668435562eafb | 10,007 | py | Python | tests/texts/declerations.py | Intsights/flake8-intsights | b3785a3be855e05090641696e0648486107dba72 | [
"MIT"
] | 12 | 2020-02-18T17:47:57.000Z | 2021-07-13T10:23:40.000Z | tests/texts/declerations.py | Intsights/flake8-intsights | b3785a3be855e05090641696e0648486107dba72 | [
"MIT"
] | 7 | 2020-02-25T12:14:11.000Z | 2020-12-01T08:14:58.000Z | tests/texts/declerations.py | Intsights/flake8-intsights | b3785a3be855e05090641696e0648486107dba72 | [
"MIT"
] | 1 | 2020-07-01T15:49:28.000Z | 2020-07-01T15:49:28.000Z | declerations_test_text_001 = '''
list1 = [
1,
]
'''
declerations_test_text_002 = '''
list1 = [
1,
2,
]
'''
declerations_test_text_003 = '''
tuple1 = (
1,
)
'''
declerations_test_text_004 = '''
tuple1 = (
1,
2,
)
'''
declerations_test_text_005 = '''
set1 = {
1,
}
'''
declerations_test_text_006 = '''
set1 = {
1,
2,
}
'''
declerations_test_text_007 = '''
dict1 = {
'key': 1,
}
'''
declerations_test_text_008 = '''
dict1 = {
'key1': 1,
'key2': 2,
}
'''
declerations_test_text_009 = '''
return [
1,
]
'''
declerations_test_text_010 = '''
return [
1,
2,
]
'''
declerations_test_text_011 = '''
return (
1,
)
'''
declerations_test_text_012 = '''
return (
1,
2,
)
'''
declerations_test_text_013 = '''
return {
1,
}
'''
declerations_test_text_014 = '''
return {
1,
2,
}
'''
declerations_test_text_015 = '''
return {
'key': 1,
}
'''
declerations_test_text_016 = '''
return {
'key1': 1,
'key2': 2,
}
'''
declerations_test_text_017 = '''
yield [
1,
]
'''
declerations_test_text_018 = '''
yield [
1,
2,
]
'''
declerations_test_text_019 = '''
yield (
1,
)
'''
declerations_test_text_020 = '''
yield (
1,
2,
)
'''
declerations_test_text_021 = '''
yield {
1,
}
'''
declerations_test_text_022 = '''
yield {
1,
2,
}
'''
declerations_test_text_023 = '''
yield {
'key': 1,
}
'''
declerations_test_text_024 = '''
yield {
'key1': 1,
'key2': 2,
}
'''
declerations_test_text_025 = '''
list1 = [
[
1,
],
]
'''
declerations_test_text_026 = '''
list1 = [
[
1,
2,
],
]
'''
declerations_test_text_027 = '''
tuple1 = (
(
1,
),
)
'''
declerations_test_text_028 = '''
tuple1 = (
(
1,
2,
),
)
'''
declerations_test_text_029 = '''
set1 = {
{
1,
},
}
'''
declerations_test_text_030 = '''
set1 = {
{
1,
2,
},
}
'''
declerations_test_text_031 = '''
dict1 = {
'key': {
'key': 1,
},
}
'''
declerations_test_text_032 = '''
dict1 = {
'key1': {
'key1': 1,
'key2': 2,
},
'key2': {
'key1': 1,
'key2': 2,
},
}
'''
declerations_test_text_033 = '''
return [
[
1,
],
]
'''
declerations_test_text_034 = '''
return [
[
1,
2,
],
]
'''
declerations_test_text_035 = '''
return (
(
1,
),
)
'''
declerations_test_text_036 = '''
return (
(
1,
2,
),
)
'''
declerations_test_text_037 = '''
return {
{
1,
},
}
'''
declerations_test_text_038 = '''
return {
{
1,
2,
},
}
'''
declerations_test_text_039 = '''
return {
'key': {
'key': 1,
},
}
'''
declerations_test_text_040 = '''
return {
'key1': {
'key1': 1,
'key2': 2,
},
'key2': {
'key1': 1,
'key2': 2,
},
}
'''
declerations_test_text_041 = '''
yield [
[
1,
],
]
'''
declerations_test_text_042 = '''
yield [
[
1,
2,
],
]
'''
declerations_test_text_043 = '''
yield (
(
1,
),
)
'''
declerations_test_text_044 = '''
yield (
(
1,
2,
),
)
'''
declerations_test_text_045 = '''
yield {
{
1,
},
}
'''
declerations_test_text_046 = '''
yield {
{
1,
2,
},
}
'''
declerations_test_text_047 = '''
yield {
'key': {
'key': 1,
},
}
'''
declerations_test_text_048 = '''
yield {
'key1': {
'key1': 1,
'key2': 2,
},
'key2': {
'key1': 1,
'key2': 2,
},
}
'''
declerations_test_text_049 = '''
list1 = [
[
2,
],
]
'''
declerations_test_text_050 = '''
list_1 = [
[
[
2,
],
],
]
'''
declerations_test_text_051 = '''
list_1 = [
(
2,
),
]
'''
declerations_test_text_052 = '''
list_1 = [
{
'key1': 'value1',
},
]
'''
declerations_test_text_053 = '''
list_1 = [
call(
param1,
),
]
'''
declerations_test_text_054 = '''
entry_1, entry_2 = call()
'''
declerations_test_text_055 = '''
(
entry_1,
entry_2,
) = call()
'''
declerations_test_text_056 = '''
[
1
for a, b in call()
]
'''
declerations_test_text_057 = '''
{
'key': [
'entry_1',
'entry_2',
]
}
'''
declerations_test_text_058 = '''
list_1 = [instance.attribute]
'''
declerations_test_text_059 = '''
list_1 = [1]
'''
declerations_test_text_060 = '''
list_1 = [test]
'''
declerations_test_text_061 = '''
dict_1 = {}
'''
declerations_test_text_062 = '''
list_1 = [term[1]]
'''
declerations_test_text_063 = '''
test = {
'list_of_lists': [
[],
],
}
'''
declerations_test_text_064 = '''
class ClassName:
pass
'''
declerations_test_text_065 = '''
class ClassName(
Class1,
Class2,
):
pass
'''
declerations_test_text_066 = '''
class ClassName():
pass
'''
declerations_test_text_067 = '''
class ClassName(Class1, Class2):
pass
'''
declerations_test_text_068 = '''
class ClassName(
Class1,
Class2
):
pass
'''
declerations_test_text_069 = '''
def function_name():
pass
'''
declerations_test_text_070 = '''
def function_name( ):
pass
'''
declerations_test_text_071 = '''
def function_name(
):
pass
'''
declerations_test_text_072 = '''
def function_name(
):
pass
'''
declerations_test_text_073 = '''
def function_name(
arg1,
arg2,
):
pass
'''
declerations_test_text_074 = '''
def function_name(
arg1,
arg2
):
pass
'''
declerations_test_text_075 = '''
def function_name(arg1):
pass
'''
declerations_test_text_076 = '''
def function_name(
arg1, arg2,
):
pass
'''
declerations_test_text_077 = '''
def function_name(
arg1,
arg2,
):
pass
'''
declerations_test_text_078 = '''
def function_name(
arg1,
**kwargs
):
pass
'''
declerations_test_text_079 = '''
class Class:
def function_name_two(
self,
arg1,
arg2,
):
pass
'''
declerations_test_text_080 = '''
class Class:
@property
def function_name_one(
self,
):
pass
'''
declerations_test_text_081 = '''
def function_name(
*args,
**kwargs
):
pass
'''
declerations_test_text_082 = '''
class A:
def b():
class B:
pass
'''
declerations_test_text_083 = '''
@decorator(
param=1,
)
def function_name(
param_one,
param_two,
):
pass
'''
declerations_test_text_084 = '''
class ClassA:
def function_a():
pass
class TestServerHandler(
http.server.BaseHTTPRequestHandler,
):
pass
'''
declerations_test_text_085 = '''
def function(
param_a,
param_b=[
'test',
],
):
pass
'''
declerations_test_text_086 = '''
@decorator
class DecoratedClass(
ClassBase,
):
pass
'''
declerations_test_text_087 = '''
class ClassName(
object,
):
pass
'''
declerations_test_text_088 = '''
pixel[x,y] = 10
'''
declerations_test_text_089 = '''
@decorator.one
@decorator.two()
class DecoratedClass:
pass
'''
declerations_test_text_090 = '''
@staticmethod
def static_method():
pass
'''
declerations_test_text_091 = '''
@decorator1
@decorator2
def static_method(
param1,
param2,
):
pass
'''
declerations_test_text_092 = '''
@decorator1(
param=1,
)
def method():
pass
'''
declerations_test_text_093 = '''
try:
pass
except Exception:
pass
'''
declerations_test_text_094 = '''
try:
pass
except (
Exception1,
Exception2,
):
pass
'''
declerations_test_text_095 = '''
try:
pass
except Exception as exception:
pass
'''
declerations_test_text_096 = '''
try:
pass
except (
Exception1,
Exception2,
) as exception:
pass
'''
declerations_test_text_097 = '''
try:
pass
except Exception as e:
pass
'''
declerations_test_text_098 = '''
try:
pass
except (
Exception1,
Exception2,
) as e:
pass
'''
declerations_test_text_099 = '''
dict1 = {
'key_one': 1, 'key_two': 2,
}
'''
declerations_test_text_100 = '''
dict1 = {
'key_one': 1,
'key_two': 2,
}
'''
declerations_test_text_101 = '''
dict1 = {
'key_one': 1,
'key_two': 2,
}
'''
declerations_test_text_102 = '''
dict1 = {
'key_one':
1,
}
'''
declerations_test_text_103 = '''
dict_one = {
'list_comp': [
{
'key_one': 'value',
}
for i in range(5)
],
'dict_comp': {
'key_one': i
for i in range(5)
},
'set_comp': {
i
for i in range(5)
},
'generator_comp': (
i
for i in range(5)
),
}
'''
declerations_test_text_104 = '''
dict_one = {
'text_key': 'value',
f'formatted_text_key': 'value',
name_key: 'value',
1: 'value',
dictionary['name']: 'value',
object.attribute: 'value',
}
dict_two = {
'key_text_multiline': \'\'\'
text
\'\'\',
1: 'text',
function(
param=1,
): 'text',
'text'.format(
param=1,
): 'text',
'long_text': (
'first line'
'second line'
),
**other_dict,
}
'''
declerations_test_text_105 = '''
async def function(
param1,
):
pass
'''
declerations_test_text_106 = '''
def no_args_function():
pass
def no_args_function() :
pass
def no_args_function ():
pass
def no_args_function( ):
pass
def no_args_function():
pass
def no_args_function() -> None:
pass
def no_args_function() -> None :
pass
def no_args_function () -> None:
pass
def no_args_function( ) -> None:
pass
def no_args_function() -> None:
pass
'''
declerations_test_text_107 = '''
class Class:
@decorator(
param=1,
)
async def function():
pass
'''
declerations_test_text_108 = '''
list_a = [
\'\'\'
multiline
string
\'\'\',
\'\'\'
multiline
string
\'\'\',
]
'''
declerations_test_text_109 = '''
list_with_empty_tuple = [
(),
]
'''
| 13.098168 | 47 | 0.540122 |
3d3d066b8c43e8060d3eeba6ff779ba80c45bf11 | 1,437 | py | Python | data/preprocess_original.py | Nstats/pytorch_senti_analysis_ch | bb01cc508c37638670b26259a6ee35c4e857f2b6 | [
"Apache-2.0"
] | 1 | 2019-09-29T02:26:14.000Z | 2019-09-29T02:26:14.000Z | data/preprocess_original.py | Nstats/pytorch_senti_analysis_ch | bb01cc508c37638670b26259a6ee35c4e857f2b6 | [
"Apache-2.0"
] | 1 | 2021-06-02T00:24:55.000Z | 2021-06-02T00:24:55.000Z | data/preprocess_original.py | Nstats/pytorch_senti_analysis_ch | bb01cc508c37638670b26259a6ee35c4e857f2b6 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import os
import random
train_df = pd.read_csv("./data/Train_DataSet.csv")
train_label_df = pd.read_csv("./data/Train_DataSet_Label.csv")
test_df = pd.read_csv("./data/Test_DataSet.csv")
train_df = train_df.merge(train_label_df, on='id', how='left')
train_df['label'] = train_df['label'].fillna(-1)
train_df = train_df[train_df['label'] != -1]
train_df['label'] = train_df['label'].astype(int)
test_df['label'] = 0
test_df['content'] = test_df['content'].fillna('')
train_df['content'] = train_df['content'].fillna('')
test_df['title'] = test_df['title'].fillna('')
train_df['title'] = train_df['title'].fillna('')
index = set(range(train_df.shape[0]))
K_fold = []
for i in range(5):
if i == 4:
tmp = index
else:
tmp = random.sample(index, int(1.0 / 5 * train_df.shape[0]))
index = index - set(tmp)
print("Number:", len(tmp))
K_fold.append(tmp)
for i in range(5):
print("Fold", i)
if os.path.exists('./data/data_{}'.format(i)):
os.system("rm -rf ./data/data_{}".format(i))
os.system("mkdir ./data/data_{}".format(i))
dev_index = list(K_fold[i])
train_index = []
for j in range(5):
if j != i:
train_index += K_fold[j]
train_df.iloc[train_index].to_csv("./data/data_{}/train.csv".format(i))
train_df.iloc[dev_index].to_csv("./data/data_{}/dev.csv".format(i))
test_df.to_csv("./data/data_{}/test.csv".format(i))
| 33.418605 | 75 | 0.636047 |
3d3d56ea2024a56958685b39631e50240545177c | 304 | py | Python | tools/load_save.py | zs-liu/Pytorch-AS | 4e41f96522cce7a35f6625bdbe3863c0b74ee0ca | [
"MIT"
] | null | null | null | tools/load_save.py | zs-liu/Pytorch-AS | 4e41f96522cce7a35f6625bdbe3863c0b74ee0ca | [
"MIT"
] | null | null | null | tools/load_save.py | zs-liu/Pytorch-AS | 4e41f96522cce7a35f6625bdbe3863c0b74ee0ca | [
"MIT"
] | null | null | null | import torch
| 23.384615 | 74 | 0.713816 |
3d3ee67b67a8537dbe3c66ff4a5cb8e8c72ee707 | 706 | py | Python | support/send_broadcast_message.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | 1 | 2015-11-02T09:11:12.000Z | 2015-11-02T09:11:12.000Z | support/send_broadcast_message.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | support/send_broadcast_message.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | from xlrd import open_workbook
from scheduler.smsclient import SMSClient
filename = "/Users/twer/Downloads/SchoolsSMSGhana.xlsx"
workbook = open_workbook(filename)
organization_number = "1902"
area_code = "233"
sheets_ = workbook.sheets()[0]
sms_client = SMSClient()
print 'Start'
for row_num in range(1, sheets_.nrows):
row = sheets_.row_values(row_num)
_, _, data_sender_phone_number, message = tuple(row)
phone_number = area_code + str(int(data_sender_phone_number))[1:]
print ("Sending broadcast message to %s from %s.") % (phone_number, organization_number)
sms_sent = sms_client.send_sms(organization_number, phone_number, message)
print 'Response:', sms_sent
print 'End'
| 32.090909 | 92 | 0.756374 |
3d41aeb36fe4c0327c92ba2fb851e5ac557d9a0b | 960 | py | Python | typhon/oem/error.py | jmollard/typhon | 68d5ae999c340b60aa69e095b336d438632ad55c | [
"MIT"
] | null | null | null | typhon/oem/error.py | jmollard/typhon | 68d5ae999c340b60aa69e095b336d438632ad55c | [
"MIT"
] | null | null | null | typhon/oem/error.py | jmollard/typhon | 68d5ae999c340b60aa69e095b336d438632ad55c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Functions to estimate the different sources of retrieval error.
"""
from typhon.oem import common
__all__ = [
'smoothing_error',
'retrieval_noise',
]
def smoothing_error(x, x_a, A):
"""Return the smoothing error through the averaging kernel.
Parameters:
x (ndarray): Atmospherice profile.
x_a (ndarray): A priori profile.
A (ndarray): Averaging kernel matrix.
Returns:
ndarray: Smoothing error due to correlation between layers.
"""
return A @ (x - x_a)
def retrieval_noise(K, S_a, S_y, e_y):
"""Return the retrieval noise.
Parameters:
K (np.array): Simulated Jacobians.
S_a (np.array): A priori error covariance matrix.
S_y (np.array): Measurement covariance matrix.
e_y (ndarray): Total measurement error.
Returns:
ndarray: Retrieval noise.
"""
return common.retrieval_gain_matrix(K, S_a, S_y) @ e_y
| 23.414634 | 67 | 0.644792 |
3d41b25f4537cebd266bfc51daa90f8c3d503433 | 16,155 | py | Python | nicos/core/spm.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos/core/spm.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 4 | 2019-11-08T10:18:16.000Z | 2021-01-13T13:07:29.000Z | nicos/core/spm.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <georg.brandl@frm2.tum.de>
#
# *****************************************************************************
"""
SPM (Simple Parameter Mode) is an alternate command input mode for NICOS where
entering Python code is not required.
The syntax is very simple and allows no variables, loops or conditionals: a
command line consists of a command and optional arguments, separated by spaces.
Arguments can be numbers, device names, strings and symbols (words that signify
a command option). Strings can be quoted or unquoted as long as they start
with a nondigit character.
Examples::
read
move a1 180
scan sth 10.4 0.4 25 t 2
"""
# XXX SPM todos:
# * figure out how to convert code examples in docstrings
# * add a way to make commands unavailable (e.g. manualscan)
import re
from itertools import chain, cycle, islice
from nicos.core.device import Device
from nicos.core.errors import SPMError
id_re = re.compile('[a-zA-Z_][a-zA-Z0-9_]*$')
string1_re = re.compile(r"'(\\\\|\\'|[^'])*'")
string2_re = re.compile(r'"(\\\\|\\"|[^"])*"')
spaces_re = re.compile(r'\s+')
nospace_re = re.compile(r'[^ \t;]+')
def spmsyntax(*arguments, **options):
"""Decorator to give a function specific SPM syntax advice, for parameter
checking and completion.
"""
return deco
String = String()
Bare = Bare()
Num = Num()
Int = Int()
Bool = Bool()
AnyDev = Dev()
DevParam = DevParam()
DeviceName = DeviceName()
| 32.50503 | 79 | 0.532281 |
3d42299242b673c35a88a568c3b956825f9d2deb | 514 | py | Python | 2_Regression/ARX_Regression/empirical_id.py | abe-mart/arduino | 1bbd88b6bcc3bb9092c259a071c8f3237c391c6a | [
"Apache-2.0"
] | 1 | 2020-06-23T16:28:34.000Z | 2020-06-23T16:28:34.000Z | 2_Regression/ARX_Regression/empirical_id.py | abe-mart/arduino | 1bbd88b6bcc3bb9092c259a071c8f3237c391c6a | [
"Apache-2.0"
] | null | null | null | 2_Regression/ARX_Regression/empirical_id.py | abe-mart/arduino | 1bbd88b6bcc3bb9092c259a071c8f3237c391c6a | [
"Apache-2.0"
] | 1 | 2020-07-22T17:43:30.000Z | 2020-07-22T17:43:30.000Z | import numpy as np
import apm_id as arx
######################################################
# Configuration
######################################################
# number of terms
ny = 2 # output coefficients
nu = 1 # input coefficients
# number of inputs
ni = 1
# number of outputs
no = 1
# load data and parse into columns
data = np.loadtxt('data_step_test.csv',delimiter=',')
######################################################
# generate time-series model
arx.apm_id(data,ni,nu,ny)
| 25.7 | 55 | 0.470817 |
3d42e0a9f4a4977092186d96df6c6ef12958272d | 75,635 | py | Python | setup.py | Alexhuszagh/toolchains | 6428c889dd0def79ddf8498f9af7a9d3ddc0423e | [
"Unlicense"
] | 22 | 2021-06-16T08:33:22.000Z | 2022-01-31T05:17:54.000Z | setup.py | Alexhuszagh/toolchains | 6428c889dd0def79ddf8498f9af7a9d3ddc0423e | [
"Unlicense"
] | 1 | 2022-03-21T16:09:20.000Z | 2022-03-21T16:09:20.000Z | setup.py | Alexhuszagh/xcross | 6428c889dd0def79ddf8498f9af7a9d3ddc0423e | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
'''
setup
=====
This is a relatively complicated setup script, since
it does a few things to simplify version control
and configuration files.
There's a simple script that overrides the `build_py`
command to ensure there's proper version control set
for the library.
There's also a more complex `configure` command
that configures all images from template files,
and also configures the `cmake` wrapper and the
shell version information.
'''
# IMPORTS
# -------
import ast
import enum
import glob
import itertools
import json
import re
import os
import setuptools
import shutil
import stat
import subprocess
import sys
import textwrap
try:
from setuptools import setup, Command
from setuptools.command.build_py import build_py
from setuptools.command.install import install
has_setuptools = True
except ImportError:
from distutils.core import setup, Command
from distutils.command.build_py import build_py
from distutils.command.install import install
has_setuptools = False
try:
import py2exe
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print('Cannot import py2exe', file=sys.stderr)
exit(1)
# CONFIG
# ------
def load_json(path):
    '''Load a JSON file, ignoring lines that are C++-style comments.

    Only whole lines whose first non-whitespace characters are ``//``
    are dropped before parsing.  This keeps the "parser" trivial (no
    inline-comment support) while letting the config files carry
    rationale comments for maintainability.
    '''
    with open(path) as handle:
        raw_lines = handle.read().splitlines()
    kept = (line for line in raw_lines if not line.strip().startswith('//'))
    return json.loads('\n'.join(kept))
HOME = os.path.dirname(os.path.realpath(__file__))
config = load_json(f'{HOME}/config/config.json')
# A lot of logic depends on being on the proper directory:
# this allows us to do out-of-source builds.
os.chdir(HOME)
def get_version(key):
    '''Return the version fields for *key* from the global JSON config.

    Produces a 6-tuple of strings:
    ``(major, minor, patch, release, number, build)``.
    ``major`` and ``minor`` are required; every other field defaults to
    the empty string when absent from the config.
    '''
    info = config[key]['version']
    return (
        info['major'],
        info['minor'],
        info.get('patch', ''),
        info.get('release', ''),
        info.get('number', ''),
        info.get('build', ''),
    )
# Read the xcross version information.
major, minor, patch, release, number, build = get_version('xcross')
version = f'{major}.{minor}'
if patch != '0':
version = f'{version}.{patch}'
release_type = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc', 'post': '.post'}
if release and not number:
raise ValueError('Must provide a release number with a non-final build.')
elif release:
version = f'{version}{release_type[release]}{number}'
# py2exe version is valid one of the following:
# [0-255].[0-255].[0-65535]
# [0-255].[0-255].[0-255].[0-255]
# Therefore, we can never provide release candidate
# values or omit the patch field.
py2exe_version = f'{major}.{minor}.{patch}'
docker_major, docker_minor, docker_patch, docker_build, *_ = get_version('docker')
docker_version = f'{docker_major}.{docker_minor}'
if docker_patch != '0':
docker_version = f'{docker_version}.{docker_patch}'
# Read the dependency version information.
# This is the GCC and other utilities version from crosstool-NG.
ubuntu_major, ubuntu_minor, *_ = get_version('ubuntu')
ubuntu_version = f'{ubuntu_major}.{ubuntu_minor}'
emsdk_major, emsdk_minor, emsdk_patch, *_ = get_version('emsdk')
emsdk_version = f'{emsdk_major}.{emsdk_minor}.{emsdk_patch}'
gcc_major, gcc_minor, gcc_patch, *_ = get_version('gcc')
gcc_version = f'{gcc_major}.{gcc_minor}.{gcc_patch}'
binutils_major, binutils_minor, *_ = get_version('binutils')
binutils_version = f'{binutils_major}.{binutils_minor}'
mingw_major, mingw_minor, mingw_patch, *_ = get_version('mingw')
mingw_version = f'{mingw_major}.{mingw_minor}.{mingw_patch}'
glibc_major, glibc_minor, *_ = get_version('glibc')
glibc_version = f'{glibc_major}.{glibc_minor}'
musl_major, musl_minor, musl_patch, *_ = get_version('musl')
musl_version = f'{musl_major}.{musl_minor}.{musl_patch}'
musl_cross_major, musl_cross_minor, musl_cross_patch, *_ = get_version('musl-cross')
musl_cross_version = f'{musl_cross_major}.{musl_cross_minor}.{musl_cross_patch}'
avr_major, avr_minor, avr_patch, *_ = get_version('avr')
avr_version = f'{avr_major}.{avr_minor}.{avr_patch}'
uclibc_major, uclibc_minor, uclibc_patch, *_ = get_version('uclibc')
uclibc_version = f'{uclibc_major}.{uclibc_minor}.{uclibc_patch}'
expat_major, expat_minor, expat_patch, *_ = get_version('expat')
expat_version = f'{expat_major}.{expat_minor}.{expat_patch}'
isl_major, isl_minor, *_ = get_version('isl')
isl_version = f'{isl_major}.{isl_minor}'
linux_major, linux_minor, linux_patch, *_ = get_version('linux')
linux_version = f'{linux_major}.{linux_minor}.{linux_patch}'
linux_headers_major, linux_headers_minor, linux_headers_patch, *_ = get_version('linux-headers')
linux_headers_version = f'{linux_headers_major}.{linux_headers_minor}.{linux_headers_patch}'
gmp_major, gmp_minor, gmp_patch, *_ = get_version('gmp')
gmp_version = f'{gmp_major}.{gmp_minor}.{gmp_patch}'
mpc_major, mpc_minor, mpc_patch, *_ = get_version('mpc')
mpc_version = f'{mpc_major}.{mpc_minor}.{mpc_patch}'
mpfr_major, mpfr_minor, mpfr_patch, *_ = get_version('mpfr')
mpfr_version = f'{mpfr_major}.{mpfr_minor}.{mpfr_patch}'
buildroot_major, buildroot_minor, buildroot_patch, *_ = get_version('buildroot')
buildroot_version = f'{buildroot_major}.{buildroot_minor}.{buildroot_patch}'
ct_major, ct_minor, ct_patch, *_ = get_version('crosstool-ng')
ct_version = f'{ct_major}.{ct_minor}.{ct_patch}'
qemu_major, qemu_minor, qemu_patch, *_ = get_version('qemu')
qemu_version = f'{qemu_major}.{qemu_minor}.{qemu_patch}'
riscv_toolchain_version = config['riscv-gnu-toolchain']['riscv-version']
riscv_binutils_version = config['riscv-gnu-toolchain']['binutils-version']
riscv_gdb_version = config['riscv-gnu-toolchain']['gdb-version']
riscv_glibc_version = config['riscv-gnu-toolchain']['glibc-version']
riscv_newlib_version = config['riscv-gnu-toolchain']['newlib-version']
# Other config options.
bin_directory = f'{config["options"]["sysroot"]}/bin/'
# Read the long description.
description = 'Zero-setup cross compilation.'
with open(f'{HOME}/README.md') as file:
long_description = file.read()
# COMMANDS
# --------
# Literal boolean type for command arguments.
bool_type = (type(None), bool, int)
def parse_literal(inst, key, default, valid_types=None):
    '''Evaluate a user-supplied option on *inst* as a Python literal.

    The attribute *key* is left untouched when it equals *default*;
    otherwise it is replaced in place by its ``ast.literal_eval`` result.
    When *valid_types* is given, the evaluated value must be an instance
    of one of those types (enforced via ``assert``).
    '''
    raw = getattr(inst, key)
    if raw == default:
        return
    evaluated = ast.literal_eval(raw)
    if valid_types is not None:
        assert isinstance(evaluated, valid_types)
    setattr(inst, key, evaluated)
def check_call(code):
    '''Exit the process when a subprocess exit *code* is non-zero.

    Mirrors the spirit of ``subprocess.check_call``, but propagates the
    child's exit status through ``sys.exit`` instead of raising
    ``CalledProcessError``.
    '''
    if code:
        sys.exit(code)
def has_module(module):
    '''Return True when *module* can be run via ``python -m``.

    Probes availability by invoking ``<python> -m <module> --version``
    with all output suppressed; a zero exit status means the module is
    installed and runnable.
    '''
    result = subprocess.call(
        [sys.executable, '-m', module, '--version'],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return result == 0
def semver():
    '''Return the docker image tags implied by semantic versioning.

    Always includes ``major.minor`` and ``major.minor.patch``; the bare
    ``major`` tag is added only once the project has left the 0.x
    series (a plain "0" tag would be meaningless).
    '''
    tags = [
        f'{docker_major}.{docker_minor}',
        f'{docker_major}.{docker_minor}.{docker_patch}',
    ]
    if docker_major != '0':
        tags.append(docker_major)
    return tags
def image_from_target(target, with_pkg=False):
    '''Build the fully-qualified Docker image name for *target*.

    The username/repository pair comes from the JSON config metadata;
    when *with_pkg* is set the repository name gains a ``pkg`` prefix
    (the package-image variant of the toolchain image).
    '''
    meta = config['metadata']
    repo = meta['repository']
    if with_pkg:
        repo = f'pkg{repo}'
    return f'{meta["username"]}/{repo}:{target}'
def sorted_image_targets():
    '''Return every image target, grouped then alphabetized.

    Ordering is: OS-layer images first, bare-metal images second,
    everything else last, with each group sorted alphabetically.
    '''
    groups = {'os': [], 'metal': [], 'other': []}
    for image in images:
        if image.os.is_os():
            bucket = 'os'
        elif image.os.is_baremetal():
            bucket = 'metal'
        else:
            bucket = 'other'
        groups[bucket].append(image.target)
    return (
        sorted(groups['os'])
        + sorted(groups['metal'])
        + sorted(groups['other'])
    )
def subslice_targets(start=None, stop=None):
    '''Return the ordered targets between *start* and *stop*, inclusive.

    Either bound may be None to leave that side open.  A bound that
    does not name a target in the (remaining) list raises ValueError
    via ``list.index``.
    '''
    targets = sorted_image_targets()
    if start is not None:
        # Drop everything before the start target.
        del targets[:targets.index(start)]
    if stop is not None:
        # Drop everything after the stop target (inclusive slice).
        del targets[targets.index(stop) + 1:]
    return targets
def build_image(docker, target, with_pkg=False):
    '''Run ``docker build`` for a single *target* and return its exit code.

    *docker* is the docker executable to invoke.  The Dockerfile is
    looked up under ``docker/images`` (or ``docker/pkgimages`` for the
    package-image variant) relative to the project root.
    '''
    tag = image_from_target(target, with_pkg)
    directory = 'pkgimages' if with_pkg else 'images'
    dockerfile = f'{HOME}/docker/{directory}/Dockerfile.{target}'
    command = [docker, 'build', '-t', tag, HOME, '--file', dockerfile]
    return subprocess.call(command)
# IMAGES
# ------
# There are two types of images:
# 1). Images with an OS layer.
# 2). Bare-metal machines.
# Bare-metal machines don't use newlib's nano malloc, so these do not
# support system allocators.
cmake_string = {
OperatingSystem.Android: 'Android',
OperatingSystem.BareMetal: 'Generic',
# This gets ignored anyway.
OperatingSystem.Emscripten: 'Emscripten',
OperatingSystem.Linux: 'Linux',
OperatingSystem.Windows: 'Windows',
OperatingSystem.Unknown: 'Generic',
}
conan_string = {
# Conan uses CMake's feature detection for Android,
# which is famously broken. We have our custom toolchains
# to pass the proper build arguments. Just say Linux,
# and run with it.
OperatingSystem.Android: 'Linux',
OperatingSystem.Linux: 'Linux',
OperatingSystem.Windows: 'Windows',
}
meson_string = {
# The default use is just to use 'linux' for Android.
OperatingSystem.Android: 'linux',
OperatingSystem.BareMetal: 'bare metal',
OperatingSystem.Linux: 'linux',
OperatingSystem.Windows: 'windows',
}
triple_string = {
OperatingSystem.Android: 'linux',
OperatingSystem.BareMetal: None,
OperatingSystem.Emscripten: 'emscripten',
OperatingSystem.Linux: 'linux',
OperatingSystem.Windows: 'w64',
}
vcpkg_string = {
**cmake_string,
    # Uses MinGW to differentiate between legacy Windows apps and the
    # Universal Windows Platform. Since we only support MinGW, use it.
OperatingSystem.Windows: 'MinGW',
}
triple_os = {v: k for k, v in triple_string.items()}
oses = {
'linux': OperatingSystem.Linux,
'none': OperatingSystem.BareMetal,
}
def extract_triple(triple):
    '''Extract components from the LLVM triple.

    Returns a 4-tuple ``(arch, vendor, os, system)`` where ``vendor``
    and ``system`` may be None and ``os`` is always an
    ``OperatingSystem`` member.  Only the component layouts enumerated
    below are accepted; anything else raises ``ValueError``.
    '''
    # Due to how we designed this, we can only
    # 1. Omit the vendor, os and system.
    # 2. Omit the vendor.
    # 3. Omit the os.
    # 4. Have all 4 components.
    split = triple.split('-')
    arch = split[0]
    if len(split) == 1:
        # ('arch',)
        vendor = None
        os = OperatingSystem.BareMetal
        system = None
    elif len(split) == 2 and split[1] in oses:
        # ('arch', 'os')
        vendor = None
        os = oses[split[1]]
        system = None
    elif len(split) == 3 and split[2] == 'mingw32':
        # ('arch', 'vendor', 'system')
        # MinGW triples (e.g. x86_64-w64-mingw32): the middle component
        # is not treated as a real vendor, so it is dropped.
        vendor = None
        os = OperatingSystem.Windows
        system = split[2]
    elif len(split) == 3:
        # ('arch', 'vendor', 'system')
        # Must come after the mingw32 check: a 3-part non-MinGW triple
        # is assumed to be a bare-metal target.
        vendor = split[1]
        os = OperatingSystem.BareMetal
        system = split[2]
    elif len(split) == 4:
        # ('arch', 'vendor', 'os', 'system')
        vendor = split[1]
        os = OperatingSystem.from_triple(split[2])
        system = split[3]
    else:
        raise ValueError(f'Invalid LLVM triple, got {triple}')
    return (arch, vendor, os, system)
image_types = {
'android': AndroidImage,
'buildroot': BuildRootImage,
'crosstool': CrosstoolImage,
'debian': DebianImage,
'musl-cross': MuslCrossImage,
'riscv': RiscvImage,
'other': OtherImage,
}
# Get all images.
images = [Image.from_json(i) for i in load_json(f'{HOME}/config/images.json')]
# Add extensions
# Per-toolchain extension hooks.  Each toolchain family gets a uniform
# entry point so `add_extensions` can call them unconditionally; most
# are placeholders today, and only the RISC-V hook (defined below)
# actually generates additional images.
def add_android_extensions():
    '''Add Android extensions (null-op).'''
def add_buildroot_extensions():
    '''Add buildroot extensions (null-op).'''
def add_crosstool_extensions():
    '''Add crosstool-NG toolchain extensions (null-op).'''
def add_debian_extensions():
    '''Add Debian toolchain extensions (null-op).'''
def add_musl_cross_extensions():
    '''Add musl-cross toolchain extensions (null-op).'''
# Add our RISC-V images with extensions.
def create_riscv_image(os, bits, arch, abi):
    '''Build a RiscvImage for one (os, word size, extensions, abi) combo.

    Only Linux and bare-metal targets are supported; anything else is a
    configuration error and raises ValueError.
    '''
    prefix = f'riscv{bits}-{arch}-{abi}'
    if os == OperatingSystem.Linux:
        settings = (f'{prefix}-multilib-linux-gnu',
                    'riscv64-unknown-linux-gnu',
                    True)
    elif os == OperatingSystem.BareMetal:
        settings = (f'{prefix}-unknown-elf',
                    'riscv64-unknown-elf',
                    False)
    else:
        raise ValueError(f'Unknown operating system {os.to_triple()}')
    target, triple, qemu = settings
    return RiscvImage.from_dict({
        'target': target,
        'triple': triple,
        'qemu': qemu,
        'extensions': arch,
        'abi': abi
    })
def add_riscv_extensions():
    '''Append RISC-V images for every supported ISA extension combo.

    For each extension profile in the config, generates one image per
    combination of optional extensions layered on the required set, for
    every configured word size.  Targets whose extension string includes
    'd' (double-precision float) also get a double-float ABI variant
    (``<abi>d``).  Appends to the module-level ``images`` list.
    '''
    riscv = config['riscv-gnu-toolchain']
    # Note: the original code assigned `bits = riscv['bits']` here, but
    # that value was dead -- immediately shadowed by the loop variable
    # below -- so the assignment has been removed.
    extensions = riscv['extensions']
    for key in extensions:
        profile = extensions[key]
        os = OperatingSystem.from_triple(profile['type'])
        required_ext = profile['required']
        all_ext = profile['all']
        # Optional extensions: allowed but not mandatory.
        optional = ''.join([i for i in all_ext if i not in required_ext])
        for bits in riscv['bits']:
            abi = riscv['abi'][bits]
            # Every subset of the optional extensions, including none.
            for count in range(len(optional) + 1):
                for combo in itertools.combinations(optional, count):
                    arch = f'{required_ext}{"".join(combo)}'
                    images.append(create_riscv_image(os, bits, arch, abi))
                    if 'd' in arch:
                        images.append(create_riscv_image(os, bits, arch, f'{abi}d'))
def add_extensions():
    '''Run every per-toolchain extension hook in a fixed order.'''
    hooks = (
        add_android_extensions,
        add_buildroot_extensions,
        add_crosstool_extensions,
        add_debian_extensions,
        add_musl_cross_extensions,
        add_riscv_extensions,
    )
    for hook in hooks:
        hook()
add_extensions()
# Filter images by types.
android_images = [i for i in images if isinstance(i, AndroidImage)]
buildroot_images = [i for i in images if isinstance(i, BuildRootImage)]
crosstool_images = [i for i in images if isinstance(i, CrosstoolImage)]
debian_images = [i for i in images if isinstance(i, DebianImage)]
musl_cross_images = [i for i in images if isinstance(i, MuslCrossImage)]
riscv_images = [i for i in images if isinstance(i, RiscvImage)]
other_images = [i for i in images if isinstance(i, OtherImage)]
def create_array(values):
    '''Create a bash array from a list of values.

    Each value is emitted double-quoted on its own indented line, i.e.
    the result looks like::

        (
            "value1"
            "value2"
        )
    '''
    start = "(\n    \""
    joiner = "\"\n    \""
    end = "\"\n)"
    return start + joiner.join(values) + end
script = f'{HOME}/bin/xcross'
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
params = {
'console': [{
'script': f'{HOME}/xcross/__main__.py',
'dest_base': 'xcross',
'description': description,
'comments': long_description,
'product_name': 'xcross',
}],
'options': {
'py2exe': {
'bundle_files': 1,
'compressed': 1,
'optimize': 2,
'dist_dir': f'{HOME}',
'dll_excludes': [],
}
},
'zipfile': None
}
elif has_setuptools:
params = {
'entry_points': {
'console_scripts': ['xcross = xcross:main']
}
}
else:
params = {
'scripts': [f'{HOME}/bin/xcross']
}
setuptools.setup(
name="xcross",
author="Alex Huszagh",
author_email="ahuszagh@gmail.com",
version=version,
packages=['xcross'],
**params,
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
python_requires='>3.6.0',
license='Unlicense',
keywords='compilers cross-compilation embedded',
url='https://github.com/Alexhuszagh/xcross',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'License :: OSI Approved :: The Unlicense (Unlicense)',
'Topic :: Software Development :: Compilers',
'Topic :: Software Development :: Embedded Systems',
],
cmdclass={
'build_all': BuildAllCommand,
'build_image': BuildImageCommand,
'build_images': BuildImagesCommand,
'build_py': BuildCommand,
'clean': CleanCommand,
'clean_dist': CleanDistCommand,
'configure': ConfigureCommand,
'install': InstallCommand,
'lint': LintCommand,
'publish': PublishCommand,
'push': PushCommand,
'tag': TagCommand,
'test_images': TestImagesCommand,
'test': TestCommand,
'test_all': TestAllCommand,
'version': VersionCommand,
},
)
| 33.96273 | 97 | 0.598202 |
3d4379916a421e4f16400672da640d246b4981ac | 27,082 | py | Python | src/sgfsdriver/plugins/ftp/ftp_client.py | syndicate-storage/syndicate-fs-driver-plugins | 8e455d6bb4838c2313bb6cd72ed5fa6bbbc871d2 | [
"Apache-2.0"
] | null | null | null | src/sgfsdriver/plugins/ftp/ftp_client.py | syndicate-storage/syndicate-fs-driver-plugins | 8e455d6bb4838c2313bb6cd72ed5fa6bbbc871d2 | [
"Apache-2.0"
] | 3 | 2016-11-18T21:31:00.000Z | 2017-08-16T15:35:52.000Z | src/sgfsdriver/plugins/ftp/ftp_client.py | syndicate-storage/syndicate-fs-driver-plugins | 8e455d6bb4838c2313bb6cd72ed5fa6bbbc871d2 | [
"Apache-2.0"
] | 2 | 2016-03-31T18:55:58.000Z | 2017-08-02T19:57:12.000Z | #!/usr/bin/env python
"""
Copyright 2016 The Trustees of University of Arizona
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import traceback
import os
import logging
import time
import ftplib
import threading
from datetime import datetime
from expiringdict import ExpiringDict
from io import BytesIO
# Module-level logger: everything at DEBUG and above is appended to
# 'ftp_client.log' in the current working directory.  Note the side
# effect: the log file handler is created as soon as this module is
# imported.
logger = logging.getLogger('ftp_client')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('ftp_client.log')
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
# Tuning knobs for the FTP client below.
METADATA_CACHE_SIZE = 10000  # max entries kept in the metadata cache
METADATA_CACHE_TTL = 60 * 60 # 1 hour
FTP_TIMEOUT = 5 * 60 # 5 min
FTP_OPERATION_TIMEOUT = 30 # 30 sec
BYTES_MAX_SKIP = 1024 * 1024 * 2 # 2MB
# presumably the upper bound on concurrent FTP connections -- TODO confirm
CONNECTIONS_MAX_NUM = 5
"""
Interface class to FTP
"""
| 30.259218 | 196 | 0.535633 |
3d440ce993f7a5cda0551a5a0f0c5294985fb68c | 2,338 | py | Python | py/ops/ops/mob/keys.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | 3 | 2016-01-04T06:28:52.000Z | 2020-09-20T13:18:40.000Z | py/ops/ops/mob/keys.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | py/ops/ops/mob/keys.py | clchiou/garage | 446ff34f86cdbd114b09b643da44988cf5d027a3 | [
"MIT"
] | null | null | null | __all__ = [
'keys',
]
from pathlib import Path
import logging
from garage import apps
from garage import scripts
LOG = logging.getLogger(__name__)
# SSH host key types to generate, as (algorithm, key size) pairs.
# `None` means no explicit key size is requested for that algorithm.
# NOTE(review): 1024-bit ssh-dss is weak and disabled by default since
# OpenSSH 7.0 -- confirm it is still required before regenerating keys.
HOST_KEYS = [
    ('dsa', 1024),
    ('ecdsa', 521),
    ('ed25519', None),
    ('rsa', 4096),
]
# ECDSA requires less bits than RSA at same level of strength and
# thus seems to be the best choice
USER_KEY_ALGORITHM = 'ecdsa'
USER_KEY_SIZE = 521
| 23.38 | 74 | 0.630881 |
3d4433d949aa6f4076c88076dfa660972581d142 | 28,882 | py | Python | reconcile/test/test_saasherder.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | reconcile/test/test_saasherder.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | reconcile/test/test_saasherder.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | from typing import Any
from unittest import TestCase
from unittest.mock import patch, MagicMock
import yaml
from github import GithubException
from reconcile.utils.openshift_resource import ResourceInventory
from reconcile.utils.saasherder import SaasHerder
from reconcile.utils.jjb_client import JJB
from reconcile.utils.saasherder import TARGET_CONFIG_HASH
from .fixtures import Fixtures
| 33.544715 | 88 | 0.522263 |
3d452a7b2a000511d4c3041100856759bae15e44 | 8,235 | py | Python | configs/example/garnet_synth_traffic.py | georgia-tech-synergy-lab/gem5_astra | 41695878a2b60c5a28fa104465558cd1acb8a695 | [
"BSD-3-Clause"
] | 5 | 2020-11-15T12:27:28.000Z | 2021-09-20T03:50:54.000Z | configs/example/garnet_synth_traffic.py | georgia-tech-synergy-lab/gem5_astra | 41695878a2b60c5a28fa104465558cd1acb8a695 | [
"BSD-3-Clause"
] | null | null | null | configs/example/garnet_synth_traffic.py | georgia-tech-synergy-lab/gem5_astra | 41695878a2b60c5a28fa104465558cd1acb8a695 | [
"BSD-3-Clause"
] | 2 | 2020-10-27T01:15:41.000Z | 2020-11-16T02:30:32.000Z | # Copyright (c) 2016 Georgia Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Tushar Krishna
from __future__ import print_function
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
import os, optparse, sys
addToPath('../')
from common import Options
from ruby import Ruby
# Get paths we might need. It's expected this file is in m5/configs/example.
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
m5_root = os.path.dirname(config_root)
parser = optparse.OptionParser()
Options.addNoISAOptions(parser)
parser.add_option("--synthetic", type="choice", default="uniform_random",
choices=['uniform_random', 'tornado', 'bit_complement', \
'bit_reverse', 'bit_rotation', 'neighbor', \
'shuffle', 'transpose','training'])
parser.add_option("-i", "--injectionrate", type="float", default=0.1,
metavar="I",
help="Injection rate in packets per cycle per node. \
Takes decimal value between 0 to 1 (eg. 0.225). \
Number of digits after 0 depends upon --precision.")
parser.add_option("--precision", type="int", default=3,
help="Number of digits of precision after decimal point\
for injection rate")
parser.add_option("--sim-cycles", type="int", default=1000,
help="Number of simulation cycles")
parser.add_option("--num-packets-max", type="int", default=-1,
help="Stop injecting after --num-packets-max.\
Set to -1 to disable.")
parser.add_option("--single-sender-id", type="int", default=-1,
help="Only inject from this sender.\
Set to -1 to disable.")
parser.add_option("--single-dest-id", type="int", default=-1,
help="Only send to this destination.\
Set to -1 to disable.")
parser.add_option("--inj-vnet", type="int", default=-1,
help="Only inject in this vnet (0, 1 or 2).\
0 and 1 are 1-flit, 2 is 5-flit.\
Set to -1 to inject randomly in all vnets.")
#
# Add the ruby specific and protocol specific options
#
Ruby.define_options(parser)
execfile(os.path.join(config_root, "common", "Options.py"))
(options, args) = parser.parse_args()
if args:
print("Error: script doesn't take any positional arguments")
sys.exit(1)
if options.inj_vnet > 2:
print("Error: Injection vnet %d should be 0 (1-flit), 1 (1-flit) "
"or 2 (5-flit) or -1 (random)" % (options.inj_vnet))
sys.exit(1)
# Optional per-network configuration: read "network_inputs/<net>.txt",
# which holds whitespace-separated (key, value) pairs, and apply each
# pair to the parsed options.
# NOTE(review): `parse_network_input_options` and the `options.net`
# attribute are not defined in this file -- presumably provided by the
# execfile'd common/Options.py above; confirm.
try:
    netInput = open("network_inputs/"+options.net+".txt", "r")
    print("Success in opening net file!")
    index=0  # running word count; even index = key slot, odd = value slot
    inps=["",""]  # scratch pair: inps[0] = key, inps[1] = value
    with netInput as f:
        for line in f:
            for word in line.split():
                inps[index%2]=word
                index+=1
                # A complete (key, value) pair has now been read.
                if index%2==0:
                    parse_network_input_options(options,inps[0],inps[1])
except IOError:
    # A missing net file is non-fatal: command-line defaults are kept.
    print("Could not open net file!")
cpus = [ GarnetSyntheticTraffic(
num_packets_max=options.num_packets_max,
single_sender=options.single_sender_id,
single_dest=options.single_dest_id,
sim_cycles=options.sim_cycles,
traffic_type=options.synthetic,
inj_rate=options.injectionrate,
inj_vnet=options.inj_vnet,
precision=options.precision,
burst_length=options.local_burst_length,
burst_interval=options.burst_interval,
num_packages=options.num_packages,
num_dest=options.num_dirs) \
for i in xrange(options.num_cpus) ]
# create the desired simulated system
system = System(cpu = cpus, mem_ranges = [AddrRange(options.mem_size)])
# Create a top-level voltage domain and clock domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
Ruby.create_system(options, False, system)
# Create a seperate clock domain for Ruby
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
i = 0
for ruby_port in system.ruby._cpu_ports:
#
# Tie the cpu test ports to the ruby cpu port
#
cpus[i].test = ruby_port.slave
i += 1
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')
# instantiate configuration
m5.instantiate()
# simulate until program terminates
exit_event = m5.simulate(options.abs_max_tick)
print('Exiting @ tick', m5.curTick(), 'because', exit_event.getCause())
| 37.262443 | 79 | 0.664602 |
3d45fc30ab899b62ab8e13a78f05b881621256c2 | 9,329 | py | Python | tests/unit/service/test_messaging.py | davetobin/ignition | eb183dca3fb2041d3f6249467a3265e7eb1d8905 | [
"Apache-2.0"
] | 1 | 2019-09-02T15:23:08.000Z | 2019-09-02T15:23:08.000Z | tests/unit/service/test_messaging.py | davetobin/ignition | eb183dca3fb2041d3f6249467a3265e7eb1d8905 | [
"Apache-2.0"
] | 62 | 2019-09-16T14:51:32.000Z | 2020-07-08T13:28:50.000Z | tests/unit/service/test_messaging.py | accanto-systems/ignition | 87087b81dfa7f8f69525f4dd9c74db715e336eca | [
"Apache-2.0"
] | 4 | 2021-08-17T14:38:54.000Z | 2022-02-09T14:33:57.000Z | import unittest
import time
import copy
from unittest.mock import patch, MagicMock, call
from ignition.service.messaging import PostalService, KafkaDeliveryService, KafkaInboxService, Envelope, Message, MessagingProperties
from kafka import KafkaProducer
| 51.541436 | 209 | 0.731054 |
3d470989d588fa1b7b09836531c89bcfed89beee | 1,011 | py | Python | app/core/management/commands/wait_for_db.py | denis240997/recipe-app-api | c03c079b8df9d2b527c6d32a7c213be2b1478c6b | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | denis240997/recipe-app-api | c03c079b8df9d2b527c6d32a7c213be2b1478c6b | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | denis240997/recipe-app-api | c03c079b8df9d2b527c6d32a7c213be2b1478c6b | [
"MIT"
] | null | null | null | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
# NOTE: this wait-for-db workaround is incomplete -- the underlying problem
# is not fully solved. The first connection succeeds, but Postgres then
# closes the connection and reconnects; since this script has already
# reported success by that point, the application container crashes.
| 38.884615 | 98 | 0.680514 |
3d4788f3f357f54449458d8a9feead4ef160065f | 835 | py | Python | clusters/actions.py | bhaugen/localecon | ee3134f701e6a786767cf7eeb165ee03f077e9da | [
"MIT"
] | 10 | 2015-02-14T14:22:31.000Z | 2022-02-22T17:40:34.000Z | clusters/actions.py | bhaugen/localecon | ee3134f701e6a786767cf7eeb165ee03f077e9da | [
"MIT"
] | 3 | 2017-02-01T16:44:04.000Z | 2018-04-02T13:48:03.000Z | clusters/actions.py | bhaugen/localecon | ee3134f701e6a786767cf7eeb165ee03f077e9da | [
"MIT"
] | null | null | null | import csv
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
def export_as_csv(modeladmin, request, queryset):
    """
    Generic csv export admin action.

    Streams the selected objects as an ``attachment`` CSV named after
    the model (e.g. ``app_model.csv``), with one header row of field
    names followed by one row per object.  Restricted to staff users;
    raises PermissionDenied otherwise.
    """
    if not request.user.is_staff:
        raise PermissionDenied
    opts = modeladmin.model._meta
    # 'content_type' replaces the 'mimetype' kwarg, which was deprecated
    # in Django 1.5 and removed in Django 1.7.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')
    writer = csv.writer(response)
    field_names = [field.name for field in opts.fields]
    # Write a first row with header information
    writer.writerow(field_names)
    # Write data rows
    for obj in queryset:
        writer.writerow([getattr(obj, field) for field in field_names])
    return response
export_as_csv.short_description = "Export selected objects as csv file"
3d479358107ba6396633f05381cdd46111709044 | 37,605 | py | Python | rbac/common/protobuf/task_transaction_pb2.py | knagware9/sawtooth-next-directory | be80852e08d2b27e105d964c727509f2a974002d | [
"Apache-2.0"
] | 1 | 2019-04-14T20:16:59.000Z | 2019-04-14T20:16:59.000Z | rbac/common/protobuf/task_transaction_pb2.py | crazyrex/sawtooth-next-directory | 210b581c8c92c307fab2f6d2b9a55526b56b790a | [
"Apache-2.0"
] | null | null | null | rbac/common/protobuf/task_transaction_pb2.py | crazyrex/sawtooth-next-directory | 210b581c8c92c307fab2f6d2b9a55526b56b790a | [
"Apache-2.0"
] | 1 | 2018-12-07T10:55:08.000Z | 2018-12-07T10:55:08.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: task_transaction.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='task_transaction.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x16task_transaction.proto\"n\n\x13ProposeAddTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\x12\x10\n\x08metadata\x18\x05 \x01(\t\"q\n\x16ProposeRemoveTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\x12\x10\n\x08metadata\x18\x05 \x01(\t\"n\n\x13ProposeAddTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\x12\x10\n\x08metadata\x18\x05 \x01(\t\"q\n\x16ProposeRemoveTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\x12\x10\n\x08metadata\x18\x05 \x01(\t\"\\\n\x13\x43onfirmAddTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"_\n\x16\x43onfirmRemoveTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"\\\n\x13\x43onfirmAddTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"_\n\x16\x43onfirmRemoveTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"[\n\x12RejectAddTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"^\n\x15RejectRemoveTaskOwner\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 
\x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"[\n\x12RejectAddTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"^\n\x15RejectRemoveTaskAdmin\x12\x13\n\x0bproposal_id\x18\x01 \x01(\t\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0e\n\x06reason\x18\x04 \x01(\t\"]\n\nCreateTask\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06\x61\x64mins\x18\x03 \x03(\t\x12\x0e\n\x06owners\x18\x04 \x03(\t\x12\x10\n\x08metadata\x18\x05 \x01(\t\"b\n\nUpdateTask\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08new_name\x18\x02 \x01(\t\x12\x1b\n\x13old_metadata_sha512\x18\x03 \x01(\t\x12\x14\n\x0cnew_metadata\x18\x04 \x01(\tb\x06proto3')
)
_PROPOSEADDTASKOWNER = _descriptor.Descriptor(
name='ProposeAddTaskOwner',
full_name='ProposeAddTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ProposeAddTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ProposeAddTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ProposeAddTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ProposeAddTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='ProposeAddTaskOwner.metadata', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=26,
serialized_end=136,
)
_PROPOSEREMOVETASKOWNER = _descriptor.Descriptor(
name='ProposeRemoveTaskOwner',
full_name='ProposeRemoveTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ProposeRemoveTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ProposeRemoveTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ProposeRemoveTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ProposeRemoveTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='ProposeRemoveTaskOwner.metadata', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=138,
serialized_end=251,
)
_PROPOSEADDTASKADMIN = _descriptor.Descriptor(
name='ProposeAddTaskAdmin',
full_name='ProposeAddTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ProposeAddTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ProposeAddTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ProposeAddTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ProposeAddTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='ProposeAddTaskAdmin.metadata', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=253,
serialized_end=363,
)
_PROPOSEREMOVETASKADMIN = _descriptor.Descriptor(
name='ProposeRemoveTaskAdmin',
full_name='ProposeRemoveTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ProposeRemoveTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ProposeRemoveTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ProposeRemoveTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ProposeRemoveTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='ProposeRemoveTaskAdmin.metadata', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=365,
serialized_end=478,
)
_CONFIRMADDTASKOWNER = _descriptor.Descriptor(
name='ConfirmAddTaskOwner',
full_name='ConfirmAddTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ConfirmAddTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ConfirmAddTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ConfirmAddTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ConfirmAddTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=480,
serialized_end=572,
)
_CONFIRMREMOVETASKOWNER = _descriptor.Descriptor(
name='ConfirmRemoveTaskOwner',
full_name='ConfirmRemoveTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ConfirmRemoveTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ConfirmRemoveTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ConfirmRemoveTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ConfirmRemoveTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=574,
serialized_end=669,
)
_CONFIRMADDTASKADMIN = _descriptor.Descriptor(
name='ConfirmAddTaskAdmin',
full_name='ConfirmAddTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ConfirmAddTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ConfirmAddTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ConfirmAddTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ConfirmAddTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=671,
serialized_end=763,
)
_CONFIRMREMOVETASKADMIN = _descriptor.Descriptor(
name='ConfirmRemoveTaskAdmin',
full_name='ConfirmRemoveTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='ConfirmRemoveTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='ConfirmRemoveTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='ConfirmRemoveTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='ConfirmRemoveTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=765,
serialized_end=860,
)
_REJECTADDTASKOWNER = _descriptor.Descriptor(
name='RejectAddTaskOwner',
full_name='RejectAddTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='RejectAddTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='RejectAddTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='RejectAddTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='RejectAddTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=862,
serialized_end=953,
)
_REJECTREMOVETASKOWNER = _descriptor.Descriptor(
name='RejectRemoveTaskOwner',
full_name='RejectRemoveTaskOwner',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='RejectRemoveTaskOwner.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='RejectRemoveTaskOwner.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='RejectRemoveTaskOwner.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='RejectRemoveTaskOwner.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=955,
serialized_end=1049,
)
_REJECTADDTASKADMIN = _descriptor.Descriptor(
name='RejectAddTaskAdmin',
full_name='RejectAddTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='RejectAddTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='RejectAddTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='RejectAddTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='RejectAddTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1051,
serialized_end=1142,
)
_REJECTREMOVETASKADMIN = _descriptor.Descriptor(
name='RejectRemoveTaskAdmin',
full_name='RejectRemoveTaskAdmin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='proposal_id', full_name='RejectRemoveTaskAdmin.proposal_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='task_id', full_name='RejectRemoveTaskAdmin.task_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user_id', full_name='RejectRemoveTaskAdmin.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='RejectRemoveTaskAdmin.reason', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1144,
serialized_end=1238,
)
_CREATETASK = _descriptor.Descriptor(
name='CreateTask',
full_name='CreateTask',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='task_id', full_name='CreateTask.task_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='CreateTask.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='admins', full_name='CreateTask.admins', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='owners', full_name='CreateTask.owners', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='CreateTask.metadata', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1240,
serialized_end=1333,
)
_UPDATETASK = _descriptor.Descriptor(
name='UpdateTask',
full_name='UpdateTask',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='task_id', full_name='UpdateTask.task_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_name', full_name='UpdateTask.new_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='old_metadata_sha512', full_name='UpdateTask.old_metadata_sha512', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_metadata', full_name='UpdateTask.new_metadata', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1335,
serialized_end=1433,
)
DESCRIPTOR.message_types_by_name['ProposeAddTaskOwner'] = _PROPOSEADDTASKOWNER
DESCRIPTOR.message_types_by_name['ProposeRemoveTaskOwner'] = _PROPOSEREMOVETASKOWNER
DESCRIPTOR.message_types_by_name['ProposeAddTaskAdmin'] = _PROPOSEADDTASKADMIN
DESCRIPTOR.message_types_by_name['ProposeRemoveTaskAdmin'] = _PROPOSEREMOVETASKADMIN
DESCRIPTOR.message_types_by_name['ConfirmAddTaskOwner'] = _CONFIRMADDTASKOWNER
DESCRIPTOR.message_types_by_name['ConfirmRemoveTaskOwner'] = _CONFIRMREMOVETASKOWNER
DESCRIPTOR.message_types_by_name['ConfirmAddTaskAdmin'] = _CONFIRMADDTASKADMIN
DESCRIPTOR.message_types_by_name['ConfirmRemoveTaskAdmin'] = _CONFIRMREMOVETASKADMIN
DESCRIPTOR.message_types_by_name['RejectAddTaskOwner'] = _REJECTADDTASKOWNER
DESCRIPTOR.message_types_by_name['RejectRemoveTaskOwner'] = _REJECTREMOVETASKOWNER
DESCRIPTOR.message_types_by_name['RejectAddTaskAdmin'] = _REJECTADDTASKADMIN
DESCRIPTOR.message_types_by_name['RejectRemoveTaskAdmin'] = _REJECTREMOVETASKADMIN
DESCRIPTOR.message_types_by_name['CreateTask'] = _CREATETASK
DESCRIPTOR.message_types_by_name['UpdateTask'] = _UPDATETASK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ProposeAddTaskOwner = _reflection.GeneratedProtocolMessageType('ProposeAddTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _PROPOSEADDTASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ProposeAddTaskOwner)
))
_sym_db.RegisterMessage(ProposeAddTaskOwner)
ProposeRemoveTaskOwner = _reflection.GeneratedProtocolMessageType('ProposeRemoveTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _PROPOSEREMOVETASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ProposeRemoveTaskOwner)
))
_sym_db.RegisterMessage(ProposeRemoveTaskOwner)
ProposeAddTaskAdmin = _reflection.GeneratedProtocolMessageType('ProposeAddTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _PROPOSEADDTASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ProposeAddTaskAdmin)
))
_sym_db.RegisterMessage(ProposeAddTaskAdmin)
ProposeRemoveTaskAdmin = _reflection.GeneratedProtocolMessageType('ProposeRemoveTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _PROPOSEREMOVETASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ProposeRemoveTaskAdmin)
))
_sym_db.RegisterMessage(ProposeRemoveTaskAdmin)
ConfirmAddTaskOwner = _reflection.GeneratedProtocolMessageType('ConfirmAddTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _CONFIRMADDTASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ConfirmAddTaskOwner)
))
_sym_db.RegisterMessage(ConfirmAddTaskOwner)
ConfirmRemoveTaskOwner = _reflection.GeneratedProtocolMessageType('ConfirmRemoveTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _CONFIRMREMOVETASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ConfirmRemoveTaskOwner)
))
_sym_db.RegisterMessage(ConfirmRemoveTaskOwner)
ConfirmAddTaskAdmin = _reflection.GeneratedProtocolMessageType('ConfirmAddTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _CONFIRMADDTASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ConfirmAddTaskAdmin)
))
_sym_db.RegisterMessage(ConfirmAddTaskAdmin)
ConfirmRemoveTaskAdmin = _reflection.GeneratedProtocolMessageType('ConfirmRemoveTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _CONFIRMREMOVETASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:ConfirmRemoveTaskAdmin)
))
_sym_db.RegisterMessage(ConfirmRemoveTaskAdmin)
RejectAddTaskOwner = _reflection.GeneratedProtocolMessageType('RejectAddTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _REJECTADDTASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:RejectAddTaskOwner)
))
_sym_db.RegisterMessage(RejectAddTaskOwner)
RejectRemoveTaskOwner = _reflection.GeneratedProtocolMessageType('RejectRemoveTaskOwner', (_message.Message,), dict(
DESCRIPTOR = _REJECTREMOVETASKOWNER,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:RejectRemoveTaskOwner)
))
_sym_db.RegisterMessage(RejectRemoveTaskOwner)
RejectAddTaskAdmin = _reflection.GeneratedProtocolMessageType('RejectAddTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _REJECTADDTASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:RejectAddTaskAdmin)
))
_sym_db.RegisterMessage(RejectAddTaskAdmin)
RejectRemoveTaskAdmin = _reflection.GeneratedProtocolMessageType('RejectRemoveTaskAdmin', (_message.Message,), dict(
DESCRIPTOR = _REJECTREMOVETASKADMIN,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:RejectRemoveTaskAdmin)
))
_sym_db.RegisterMessage(RejectRemoveTaskAdmin)
CreateTask = _reflection.GeneratedProtocolMessageType('CreateTask', (_message.Message,), dict(
DESCRIPTOR = _CREATETASK,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:CreateTask)
))
_sym_db.RegisterMessage(CreateTask)
UpdateTask = _reflection.GeneratedProtocolMessageType('UpdateTask', (_message.Message,), dict(
DESCRIPTOR = _UPDATETASK,
__module__ = 'task_transaction_pb2'
# @@protoc_insertion_point(class_scope:UpdateTask)
))
_sym_db.RegisterMessage(UpdateTask)
# @@protoc_insertion_point(module_scope)
| 41.506623 | 2,776 | 0.737216 |
3d487f498a05799cec579339e7396f36837a8077 | 14,560 | py | Python | courses/machine_learning/asl/open_project/ASL_youtube8m_models/video_using_datasets/trainer/model.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | 2 | 2022-01-06T11:52:57.000Z | 2022-01-09T01:53:56.000Z | courses/machine_learning/asl/open_project/ASL_youtube8m_models/video_using_datasets/trainer/model.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | null | null | null | courses/machine_learning/asl/open_project/ASL_youtube8m_models/video_using_datasets/trainer/model.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import libraries and modules
import tensorflow as tf
# Set logging verbosity to INFO for richer output
tf.logging.set_verbosity(tf.logging.INFO)
# The number of video classes
NUM_CLASSES = 4716
# Create an input function to read our training and validation data
# Then provide the results to the Estimator API
# Create our model function to be used in our custom estimator
# Create our serving input function to accept the data at serving and send it in the right format to our custom estimator
# Create custom estimator's train and evaluate function
| 54.943396 | 196 | 0.682212 |
3d48f55b2e9c4409d2a293fd05fd3f37f16ba6df | 22,394 | py | Python | allennlp/tests/semparse/worlds/wikitables_world_test.py | kyleclo/allennlp | 0205c26f3db7ef44d7ee70fa9ebdf5a7f6b43baf | [
"Apache-2.0"
] | 24 | 2019-09-16T00:10:54.000Z | 2021-09-08T19:31:51.000Z | allennlp/tests/semparse/worlds/wikitables_world_test.py | TalSchuster/allennlp-MultiLang | dbb28b939652491d2f633326edccca2cd0e528c8 | [
"Apache-2.0"
] | 2 | 2019-01-12T00:19:06.000Z | 2019-02-27T05:29:31.000Z | allennlp/tests/semparse/worlds/wikitables_world_test.py | TalSchuster/allennlp-MultiLang | dbb28b939652491d2f633326edccca2cd0e528c8 | [
"Apache-2.0"
] | 10 | 2019-12-06T11:32:37.000Z | 2022-01-06T15:39:09.000Z | # pylint: disable=no-self-use,invalid-name
from typing import List
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers import Token
from allennlp.semparse import ParsingError
from allennlp.semparse.contexts import TableQuestionKnowledgeGraph
from allennlp.semparse.worlds import WikiTablesWorld
from allennlp.semparse.type_declarations import wikitables_lambda_dcs as types
| 51.958237 | 111 | 0.466732 |
3d4903f05506c73039c6cca6466ba4b87575d105 | 395 | py | Python | FishCDailyQuestion/ex001-010/Python3_008/008_05.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | FishCDailyQuestion/ex001-010/Python3_008/008_05.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | FishCDailyQuestion/ex001-010/Python3_008/008_05.py | YorkFish/git_study | 6e023244daaa22e12b24e632e76a13e5066f2947 | [
"MIT"
] | null | null | null | #!/usr/bin/evn python3
# coding:utf-8
from math import sqrt
count = 0
for i in range(100, 201):
if is_prime_num(i):
print(i, end=' ')
count += 1
print("\n\nThere are {} prime numbers in total.".format(count))
| 20.789474 | 63 | 0.582278 |
3d490f3f5ae32168776078a1279b5239c7a6960d | 4,324 | py | Python | models/015_bolasso.py | cmougan/Novartis2021 | 72a6f088929a5a4546760f4a453ec4a77faf5856 | [
"MIT"
] | null | null | null | models/015_bolasso.py | cmougan/Novartis2021 | 72a6f088929a5a4546760f4a453ec4a77faf5856 | [
"MIT"
] | null | null | null | models/015_bolasso.py | cmougan/Novartis2021 | 72a6f088929a5a4546760f4a453ec4a77faf5856 | [
"MIT"
] | null | null | null | # %% Imports
from numpy.lib import select
import pandas as pd
import sys
import numpy as np
import random
from functools import partial
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sktools import IsEmptyExtractor
from lightgbm import LGBMRegressor
from category_encoders import TargetEncoder
from sklearn.linear_model import QuantileRegressor
from sklego.preprocessing import ColumnSelector
from sklearn.preprocessing import StandardScaler
from memo import memlist, memfile, grid, time_taken, Runner
sys.path.append("../")
from metrics.metric_participants import (ComputeMetrics, print_metrics)
from eda.checker import check_train_test
random.seed(0)
sales_train = pd.read_csv("../data/data_raw/sales_train.csv")
df_full = pd.read_csv("../data/split.csv")
df_region = pd.read_csv("../data/data_raw/regions.csv")
regions_hcps = pd.read_csv("../data/data_raw/regions_hcps.csv")
activity_features = pd.read_csv("../data/features/activity_features.csv")
brands_3_12 = pd.read_csv("../data/features/brand_3_12_market_features_lagged.csv")
rte_basic = pd.read_csv("../data/features/rte_features_v2.csv").drop(
columns=["sales", "validation"]
)
market_size = pd.read_csv("../data/market_size.csv")
# For reproducibility
random.seed(0)
VAL_SIZE = 38
SUBMISSION_NAME = "linear_model_simple"
# %% Training weights
market_size = (
market_size
.assign(weight=lambda x: 1 / x['sales'])
)
# %% Add region data
df_feats = df_full.merge(df_region, on="region", how="left")
df_feats = pd.merge(left=df_feats, right=regions_hcps, how="left", on="region")
df_feats = df_feats.merge(
activity_features, on=["month", "region", "brand"], how="left"
)
df_feats = df_feats.merge(rte_basic, on=["month", "region", "brand"], how="left")
df_feats = df_feats.merge(brands_3_12, on=["month", "region"], how="left")
df_feats["whichBrand"] = np.where(df_feats.brand == "brand_1", 1, 0)
df_feats['month_brand'] = df_feats.month + '_' + df_feats.brand
# drop sum variables
cols_to_drop = ["region", "sales", "validation"]
# %% Split train val test
X_train = df_feats.query("validation == 0").drop(columns=cols_to_drop)
y_train = df_feats.query("validation == 0").sales
X_val = df_feats.query("validation == 1").drop(columns=cols_to_drop)
y_val = df_feats.query("validation == 1").sales
X_test = df_feats.query("validation.isnull()", engine="python").drop(
columns=cols_to_drop
)
y_test = df_feats.query("validation.isnull()", engine="python").sales
check_train_test(X_train, X_val)
check_train_test(X_train, X_test, threshold=0.3)
check_train_test(X_val, X_test)
# %%
for quantile in [0.5, 0.1, 0.9]:
selected = {}
for iter in range(100):
print("Quantile: ", quantile, "iter: ", iter)
df_train = df_feats.query("validation == 0")
sample = df_train.sample(replace=True, frac=1)
X_train = sample.drop(columns=cols_to_drop)
y_train = sample.sales
models = {}
pipes = {}
train_preds = {}
val_preds = {}
models[quantile] = QuantileRegressor(
quantile=quantile,
alpha=0.05,
solver="highs-ds"
)
pipes[quantile] = Pipeline(
[
("te", TargetEncoder(cols=["month_brand", "month", "brand"])),
("imputer", SimpleImputer(strategy="median")),
("scale", StandardScaler()),
("lgb", models[quantile])
]
)
# Fit cv model
pipes[quantile].fit(X_train, y_train)
train_preds[quantile] = pipes[quantile].predict(X_train)
coefs = models[quantile].coef_
cols_pipe = pipes[quantile][:1].fit_transform(X_train.head(), y_train.head()).columns
coefs_dict = dict(zip(cols_pipe, coefs))
selected_features = list({k: v for k, v in coefs_dict.items() if v != 0}.keys())
selected[iter] = selected_features
all_selected = {}
for k, v in selected.items():
for feature in v:
all_selected[feature] = all_selected.get(feature, 0) + 1
all_selected_df = pd.DataFrame(all_selected.items(), columns=["feature", "count"]).sort_values("count", ascending=False)
all_selected_df.to_csv(f"../data/features/bolasso_features_0{int(quantile * 10)}.csv", index=False)
| 32.268657 | 124 | 0.679695 |
3d49492a4f368cab1e5d3dbd044945f99690e2f6 | 40,274 | py | Python | docx.py | highcat/python-docx | 05627c6330970f91771174c9e5d849ce28703b3e | [
"MIT"
] | null | null | null | docx.py | highcat/python-docx | 05627c6330970f91771174c9e5d849ce28703b3e | [
"MIT"
] | null | null | null | docx.py | highcat/python-docx | 05627c6330970f91771174c9e5d849ce28703b3e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and 'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
'''
from copy import deepcopy
import logging
from lxml import etree
try:
from PIL import Image
except ImportError:
import Image
import zipfile
import shutil
import distutils.dir_util
import re
import time
import os
from os.path import join
# Module-level logger used by the debug branches in advReplace().
log = logging.getLogger(__name__)
# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
TEMPLATE_DIR = join(os.path.dirname(__file__), 'docx-template') # installed
if not os.path.isdir(TEMPLATE_DIR):
    TEMPLATE_DIR = join(os.path.dirname(__file__), 'template') # dev
# Name of the working copy of the template tree inside a caller-supplied
# scratch directory; picture() writes media files under <temp_dir>/<this>/word/media.
_DOCX_DIR_NAME = 'docx-template'
# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
# Keys are the conventional short prefixes ('w', 'a', 'pic', ...); values are
# the full namespace URIs that makeelement() expands into '{uri}tag' names.
nsprefixes = {
    # Text Content
    'mv':'urn:schemas-microsoft-com:mac:vml',
    'mo':'http://schemas.microsoft.com/office/mac/office/2008/main',
    've':'http://schemas.openxmlformats.org/markup-compatibility/2006',
    'o':'urn:schemas-microsoft-com:office:office',
    'r':'http://schemas.openxmlformats.org/officeDocument/2006/relationships',
    'm':'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'v':'urn:schemas-microsoft-com:vml',
    'w':'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10':'urn:schemas-microsoft-com:office:word',
    'wne':'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'wp':'http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing',
    'a':'http://schemas.openxmlformats.org/drawingml/2006/main',
    'pic':'http://schemas.openxmlformats.org/drawingml/2006/picture',
    # Properties (core and extended)
    'cp':"http://schemas.openxmlformats.org/package/2006/metadata/core-properties",
    'dc':"http://purl.org/dc/elements/1.1/",
    'dcterms':"http://purl.org/dc/terms/",
    'dcmitype':"http://purl.org/dc/dcmitype/",
    'xsi':"http://www.w3.org/2001/XMLSchema-instance",
    'ep':'http://schemas.openxmlformats.org/officeDocument/2006/extended-properties',
    # Content Types (we're just making up our own namespaces here to save time)
    'ct':'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships (we're just making up our own namespaces here to save time)
    'pr':'http://schemas.openxmlformats.org/package/2006/relationships'
    }
def opendocx(file):
    '''Open a docx file and return its document XML tree.

    @param file: path or file-like object of a .docx (zip) archive
    @return lxml.etree element parsed from word/document.xml
    '''
    # Use a context manager so the zip handle is released even if
    # word/document.xml is missing or malformed (the original leaked it).
    with zipfile.ZipFile(file) as mydoc:
        xmlcontent = mydoc.read('word/document.xml')
    document = etree.fromstring(xmlcontent)
    return document
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None, attrnsprefix=None):
    '''Build and return a single lxml element.

    @param tagname: local tag name, qualified with the namespace of `nsprefix`
    @param tagtext: optional text content for the element
    @param nsprefix: a key of `nsprefixes`, a list of such keys (all declared
                     in the element's nsmap, the first one used for the tag),
                     or a false value for no namespace
    @param attributes: optional dict of attribute name -> value
    @param attrnsprefix: namespace prefix for attribute names; when omitted,
                         'w'-namespaced tags reuse their own namespace and
                         everything else gets un-namespaced attributes
    '''
    nsmap = None
    if isinstance(nsprefix, list):
        # Several prefixes: declare them all on the element, tag with the first.
        nsmap = {prefix: nsprefixes[prefix] for prefix in nsprefix}
        nsprefix = nsprefix[0]
    namespace = '{' + nsprefixes[nsprefix] + '}' if nsprefix else ''
    element = etree.Element(namespace + tagname, nsmap=nsmap)
    if attributes:
        if attrnsprefix:
            attr_ns = '{' + nsprefixes[attrnsprefix] + '}'
        elif nsprefix == 'w':
            # Word output convention: 'w' elements namespace their attributes too.
            attr_ns = namespace
        else:
            attr_ns = ''
        for attr_name, attr_value in attributes.items():
            element.set(attr_ns + attr_name, attr_value)
    if tagtext:
        element.text = tagtext
    return element
def pagebreak(type='page', orient='portrait'):
    '''Return a <w:p> element that forces a break.

    @param type: 'page' for a plain page break, 'section' for a section
                 break carrying page-size properties
    @param orient: 'portrait' or 'landscape'; only used for section breaks
    @raise ValueError: on an unsupported break type
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    '''
    validtypes = ['page', 'section']
    if type not in validtypes:
        raise ValueError('Page break style "%s" not implemented. Valid styles: %s.' % (type, validtypes))
    brk = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        run.append(makeelement('br', attributes={'type': type}))
        brk.append(run)
    else:
        # Section break: the paragraph properties carry a sectPr/pgSz pair
        # with US-Letter dimensions in twentieths of a point.
        ppr = makeelement('pPr')
        sectpr = makeelement('sectPr')
        if orient == 'portrait':
            pgsz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgsz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840', 'orient': 'landscape'})
        sectpr.append(pgsz)
        ppr.append(sectpr)
        brk.append(ppr)
    return brk
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    '''Make a new paragraph element, containing one run per text chunk.

    @param paratext: a plain string, a list of strings, or a list of
                     (text, style) pairs. The style part is either a string
                     combining the characters 'b' (bold), 'u' (underline) and
                     'i' (italic), or a list of ready-made rPr child elements
                     that are appended verbatim as run properties.
    @param string style: paragraph style name (w:pStyle val)
    @param bool breakbefore: insert a lastRenderedPageBreak marker in each run
    @param string jc: paragraph alignment: left, center, right, both
                      (justified), ...; see
                      http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
    @return the assembled <w:p> element

    example:
        paratext = [
            ('some bold text', 'b'),
            ('some normal text', ''),
            ('some italic underlined text', 'iu'),
        ]
    '''
    # Make our elements.  Normalise paratext into a list of
    # [<w:t element>, style-spec] pairs so the run loop below is uniform.
    paragraph = makeelement('p')
    if isinstance(paratext, list):
        text = []
        for pt in paratext:
            if isinstance(pt, (list,tuple)):
                text.append([makeelement('t',tagtext=pt[0]), pt[1]])
            else:
                text.append([makeelement('t',tagtext=pt), ''])
    else:
        text = [[makeelement('t',tagtext=paratext),''],]
    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle',attributes={'val':style})
    pJc = makeelement('jc',attributes={'val':jc})
    pPr.append(pStyle)
    pPr.append(pJc)
    # Add the text the run, and the run to the paragraph
    paragraph.append(pPr)
    # Each text chunk becomes its own <w:r> run carrying its own properties.
    for t in text:
        run = makeelement('r')
        rPr = makeelement('rPr')
        if isinstance(t[1], list):
            for prop in t[1]: # custom properties supplied as prebuilt elements
                rPr.append(prop)
        else:
            # Apply styles encoded as characters in the style string
            if t[1].find('b') > -1:
                b = makeelement('b')
                rPr.append(b)
            if t[1].find('u') > -1:
                u = makeelement('u',attributes={'val':'single'})
                rPr.append(u)
            if t[1].find('i') > -1:
                i = makeelement('i')
                rPr.append(i)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(t[0])
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def heading(headingtext, headinglevel, lang='en'):
    '''Return a paragraph styled as a heading of the given level.

    @param headingtext: the heading's text content
    @param headinglevel: numeric level appended to the style name
    @param lang: 'en' or 'it'; selects the localized style name
                 ('Heading' / 'Titolo'). Other values raise KeyError.
    '''
    lmap = {
        'en': 'Heading',
        'it': 'Titolo',
    }
    stylename = lmap[lang] + str(headinglevel)
    # Assemble <w:p><w:pPr><w:pStyle/></w:pPr><w:r><w:t>text</w:t></w:r></w:p>
    par = makeelement('p')
    props = makeelement('pPr')
    props.append(makeelement('pStyle', attributes={'val': stylename}))
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    par.append(props)
    par.append(run)
    return par
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0, twunit='auto', borders={}, celstyle=None, rowstyle=None, table_props=None):
    '''Get a list of lists, return a table

    @param list contents: A list of lists describing contents
                          Every item in the list can be a string or a valid
                          XML element itself. It can also be a list. In that case
                          all the listed elements will be merged into the cell.
                          A cell may also be a dict {'content': ..., 'style': {...}}
                          whose 'style' entries override the column style.
    @param bool heading: Tells whether first line should be treated as heading
                         or not
    @param list colw: A list of integers, one per column, giving widths in cwunit
    @param string cwunit: Unit used for column width:
                          'pct': fifties of a percent
                          'dxa': twenties of a point
                          'nil': no width
                          'auto': automagically determined
    @param int tblw: Table width
    @param int twunit: Unit used for table width. Same as cwunit
    @param dict borders: Dictionary defining table border. Supported keys are:
                         'top', 'left', 'bottom', 'right', 'insideH', 'insideV', 'all'.
                         When specified, the 'all' key has precedence over others.
                         Each key must define a dict of border attributes:
                         color: The color of the border, in hex or 'auto'
                         space: The space, measured in points
                         sz: The size of the border, in eights of a point
                         val: The style of the border, see http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
                         (read-only here, so the shared mutable default is safe)
    @param list celstyle: Per-column style dicts; the 'align' key sets paragraph
                          alignment, every other key becomes a tcPr child element
                          built from its attribute dict.
    @param dict rowstyle: Optional content-row style; supports 'height'
                          (trHeight val, exact rule).
    @param dict table_props: Extra tblPr children: prebuilt etree elements are
                             appended as-is, anything else is built via makeelement.
    @return lxml.etree: Generated XML etree element
    '''
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle',attributes={'val':''})
    tableprops.append(tablestyle)
    if not table_props:
        table_props = {}
    for k, attr in table_props.items():
        if isinstance(attr, etree._Element):
            tableprops.append(attr)
        else:
            prop = makeelement(k, attributes=attr)
            tableprops.append(prop)
    tablewidth = makeelement('tblW',attributes={'w':str(tblw),'type':str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # 'all' overrides the per-side definition when present.
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = str(borders[k][a])
                borderelem = makeelement(b,attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook',attributes={'val':'0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        tablegrid.append(makeelement('gridCol',attributes={'w':str(colw[i]) if colw else '2390'}))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle',attributes={'val':'000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        # BUGFIX: the loop variable used to be named `heading`, clobbering the
        # parameter; a falsy last heading cell then made the content loop
        # below start from row 0 again.
        for headingcell in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w':str(colw[i]),'type':cwunit}
            else:
                wattr = {'w':'0','type':'auto'}
            cellwidth = makeelement('tcW',attributes=wattr)
            cellstyle = makeelement('shd',attributes={'val':'clear','color':'auto','fill':'FFFFFF','themeFill':'text2','themeFillTint':'99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(headingcell, (list, tuple)):
                headingcell = [headingcell,]
            for h in headingcell:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h,jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        if rowstyle:
            rowprops = makeelement('trPr')
            if 'height' in rowstyle:
                rowHeight = makeelement('trHeight', attributes={'val': str(rowstyle['height']),
                                                                'hRule': 'exact'})
                rowprops.append(rowHeight)
            row.append(rowprops)
        i = 0
        for content_cell in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w':str(colw[i]),'type':cwunit}
            else:
                wattr = {'w':'0','type':'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            align = 'left'
            # Merge column style with an optional per-cell style override.
            cell_spec_style = {}
            if celstyle:
                cell_spec_style = deepcopy(celstyle[i])
            if isinstance(content_cell, dict):
                cell_spec_style.update(content_cell['style'])
                content_cell = content_cell['content']
            # 'align' is interpreted here; every other key becomes a tcPr child.
            SPEC_PROPS = ['align',]
            if 'align' in cell_spec_style:
                # BUGFIX: read from the merged per-cell style instead of
                # celstyle[i], so a cell-level override takes effect and this
                # no longer crashes when celstyle is None but the cell dict
                # supplies its own 'align'.
                align = cell_spec_style['align']
            for cs, attrs in cell_spec_style.items():
                if cs in SPEC_PROPS:
                    continue
                cell_prop = makeelement(cs, attributes=attrs)
                cellprops.append(cell_prop)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content_cell, (list, tuple)):
                content_cell = [content_cell,]
            for c in content_cell:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(relationshiplist, picname, picdescription, pixelwidth=None,
            pixelheight=None, nochangeaspect=True, nochangearrowheads=True,
            temp_dir=None):
    '''Take a relationshiplist and a picture file name; copy the image into
    the working docx media directory and return (updated relationshiplist,
    paragraph element embedding the image).

    @param relationshiplist: list of [type, target] pairs; mutated in place
                             by appending the new image relationship
    @param picname: image file name (also used as the in-archive media name)
    @param picdescription: alt-text description for the drawing
    @param pixelwidth/pixelheight: display size in pixels; when either is
                                   missing, the image's own pixel size is used
    @param nochangeaspect/nochangearrowheads: lock flags on the picture
    @param temp_dir: scratch directory holding the unpacked docx template;
                     required (asserted below)
    '''
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture'''
    # Copy the file into the media dir
    # NOTE(review): assert is stripped under `python -O`; an explicit
    # ValueError would be more robust for a required argument.
    assert temp_dir
    media_dir = join(temp_dir, _DOCX_DIR_NAME, 'word', 'media')
    if not os.path.isdir(media_dir):
        os.makedirs(media_dir)
    shutil.copyfile(picname, join(media_dir,picname))
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth,pixelheight = Image.open(picname).size[0:2]
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    # NOTE(review): 12667 EMU/pixel corresponds to roughly 72 dpi
    # (914400 EMU/inch / 72 ≈ 12700) — confirm the intended density.
    emuperpixel = 12667
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # Set relationship ID to the first available
    picid = '2'
    picrelid = 'rId'+str(len(relationshiplist)+1)
    relationshiplist.append([
        'http://schemas.openxmlformats.org/officeDocument/2006/relationships/image',
        'media/'+picname])
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area (stretch, tile, etc.)
    blipfill = makeelement('blipFill',nsprefix='pic')
    blipfill.append(makeelement('blip',nsprefix='a',attrnsprefix='r',attributes={'embed':picrelid}))
    stretch = makeelement('stretch',nsprefix='a')
    stretch.append(makeelement('fillRect',nsprefix='a'))
    blipfill.append(makeelement('srcRect',nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr',nsprefix='pic')
    cnvpr = makeelement('cNvPr',nsprefix='pic',
                        attributes={'id':'0','name':'Picture 1','descr':picname})
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr',nsprefix='pic')
    cnvpicpr.append(makeelement('picLocks', nsprefix='a',
                    attributes={'noChangeAspect':str(int(nochangeaspect)),
                    'noChangeArrowheads':str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties
    sppr = makeelement('spPr',nsprefix='pic',attributes={'bwMode':'auto'})
    xfrm = makeelement('xfrm',nsprefix='a')
    xfrm.append(makeelement('off',nsprefix='a',attributes={'x':'0','y':'0'}))
    xfrm.append(makeelement('ext',nsprefix='a',attributes={'cx':width,'cy':height}))
    prstgeom = makeelement('prstGeom',nsprefix='a',attributes={'prst':'rect'})
    prstgeom.append(makeelement('avLst',nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic',nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement('graphicData',nsprefix='a',
        attributes={'uri':'http://schemas.openxmlformats.org/drawingml/2006/picture'})
    graphicdata.append(pic)
    graphic = makeelement('graphic',nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks',nsprefix='a',attributes={'noChangeAspect':'1'})
    framepr = makeelement('cNvGraphicFramePr',nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr',nsprefix='wp',
        attributes={'id':picid,'name':'Picture 1','descr':picdescription})
    effectextent = makeelement('effectExtent',nsprefix='wp',
        attributes={'l':'25400','t':'0','r':'0','b':'0'})
    extent = makeelement('extent',nsprefix='wp',attributes={'cx':width,'cy':height})
    inline = makeelement('inline',
        attributes={'distT':"0",'distB':"0",'distL':"0",'distR':"0"},nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    paragraph = makeelement('p')
    paragraph.append(run)
    return relationshiplist,paragraph
def search(document,search):
    '''Search a document's text nodes for a regex.

    @param document: etree document tree to scan
    @param search: regular expression string
    @return bool: True as soon as any w:t element matches, else False
    '''
    searchre = re.compile(search)
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text and searchre.search(element.text):
                # Short-circuit: the original kept scanning the whole
                # document after the first hit for no benefit.
                return True
    return False
def replace(document, search, replace):
    '''Substitute every regex match of `search` with `replace` inside all
    w:t text nodes, mutating the tree in place; return the document.'''
    pattern = re.compile(search)
    texttag = '{%s}t' % nsprefixes['w']
    for node in document.iter():
        # Only w:t elements carry visible text.
        if node.tag == texttag and node.text:
            if pattern.search(node.text):
                node.text = re.sub(search, replace, node.text)
    return document
def clean(document):
    """Remove empty w:t and w:r elements from the document in place.

    An element is considered empty when it has neither text nor children.
    Returns the cleaned document.
    """
    for tagname in ('t', 'r'):
        fulltag = '{%s}%s' % (nsprefixes['w'], tagname)
        # Collect first, then remove: deleting while iterating an lxml
        # tree skips siblings.
        doomed = [el for el in document.iter()
                  if el.tag == fulltag and not el.text and not len(el)]
        for el in doomed:
            el.getparent().remove(el)
    return document
def findTypeParent(element, tag):
    """ Finds first ancestor of `element` with the given tag.

    @param object element: etree element
    @param string tag: the fully-qualified tag of the parent to search for
    @return object element: the found parent or None when not found
    """
    # BUGFIX: the original looped `p = p.getparent()` unconditionally, so on
    # a miss it dereferenced None (`p.tag`) and raised AttributeError —
    # the documented `return None` was unreachable.
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Reached the tree root without a match.
    return None
def AdvSearch(document, search, bs=3):
    '''Return the set of all regex matches, allowing matches that span up to
    `bs` consecutive w:t text elements.

    This is an advanced version of python-docx.search() that takes into
    account blocks of <bs> elements at a time: a sliding window of the last
    `bs` text elements is kept, and every contiguous sub-run of the window
    is concatenated and tested against the pattern.

    Examples:
        original text blocks : [ 'Hel', 'lo,', ' world!' ]
        search : 'Hello,'            -> matches

        original text blocks : [ 'Hel', 'lo', ' __', 'name', '__!' ]
        search : '(__[a-z]+__)'      -> matches '__name__'

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: Window size: maximum number of adjacent text elements a
                   single match may span
    @return set: All distinct occurrences of the search string
    '''
    # Compile the search regexp
    searchre = re.compile(search)
    matches = []
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in document.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop the oldest element
                    searchels.pop(0)
                # Try every contiguous sub-run of the window, shortest first:
                # l = run length (number of elements concatenated)
                # s = run start index within the window
                # e = element IDs to merge
                found = False
                for l in range(1,len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s,s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the pattern in the concatenated text
                            match = searchre.search(txtsearch)
                            if match:
                                matches.append(match.group())
                                found = True
    return set(matches)
def advReplace(document,search,replace,bs=3):
    '''Replace all occurrences of `search` with `replace`; return the document.

    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> text elements at a time: a sliding window of the
    last `bs` w:t elements is kept, and matches may span any contiguous run
    of up to `bs` of them. The smallest matching run is adopted; when it
    covers several elements, the replacement lands in the element containing
    the match start and the remaining elements of the run are cleared.

    Examples:
        blocks [ 'Hel', 'lo,', ' world!' ], 'Hello,' -> 'Hi!'
            => [ 'Hi!', '', ' world!' ]
        blocks [ 'Hel', 'lo,', ' world!' ], 'Hello, world' -> 'Hi!'
            => [ 'Hi!!', '', '' ]
        blocks [ 'Hel', 'lo,', ' world!' ], 'Hel' -> 'Hal'
            => [ 'Hal', 'lo,', ' world!' ]

    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text, or an lxml.etree element (or
                          list of elements) appended after the parent paragraph
    @param int bs: Window size, see above
    @return instance: The document with replacement applied
    '''
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']: # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, drop the oldest element
                    searchels.pop(0)
                # Try every contiguous sub-run of the window, shortest first:
                # l = run length, s = run start, e = element IDs to merge
                found = False
                for l in range(1,len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s,s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the pattern in the concatenated text
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s", searchre.pattern)
                                    log.debug("Requested replacement: %s", replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    # list() so the texts (not a map object) are logged
                                    log.debug("Matched text (splitted): %s", list(map(lambda i:i.text,searchels)))
                                    log.debug("Matched at position: %s", match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUGFIX: the original read
                                    # `isinstance(replace (list, tuple))` — a
                                    # missing comma that *called* replace and
                                    # raised TypeError whenever DEBUG was on.
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF ELEMENTS")
                                    else:
                                        # BUGFIX: the original passed the value as a
                                        # stray logging arg with no %s placeholder.
                                        log.debug("Will replace with: %s", re.sub(search,replace,txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element. Put in the
                                        # whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process it later
                                            replace = [ replace, ]
                                        if isinstance(replace, (list,tuple)):
                                            # I'm replacing with a list of etree elements
                                            # clear the text in the tag and append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have childs)
                                            p = findTypeParent(searchels[i], '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(search,'',txtsearch)
                                            insindex = p.getparent().index(p) + 1
                                            for r in replace:
                                                p.getparent().insert(insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(search,replace,txtsearch)
                                        replaced = True
                                        log.debug("Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraph strings.

    Each w:p element yields one string built from its w:t children (w:tab
    elements contribute a tab character); paragraphs with no text are skipped.
    '''
    wns = nsprefixes['w']
    ptag = '{' + wns + '}p'
    ttag = '{' + wns + '}t'
    tabtag = '{' + wns + '}tab'
    # Collect every paragraph element in document order.
    paragraphs = [el for el in document.iter() if el.tag == ptag]
    paratextlist = []
    for par in paragraphs:
        # A sentence may be spread over several w:t children; join them.
        pieces = []
        for el in par.iter():
            if el.tag == ttag:
                if el.text:
                    pieces.append(el.text)
            elif el.tag == tabtag:
                pieces.append('\t')
        paratext = u''.join(pieces)
        if paratext:
            paratextlist.append(paratext)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    '''Create core properties (common document properties referred to in the 'Dublin Core' specification).
    See appproperties() for other stuff.'''
    coreprops = makeelement('coreProperties', nsprefix='cp')
    # The last-modifier defaults to the creator when not given.
    if not lastmodifiedby:
        lastmodifiedby = creator
    # Append the simple child properties in the order Word expects them.
    children = (
        ('title', title, 'dc'),
        ('subject', subject, 'dc'),
        ('creator', creator, 'dc'),
        ('keywords', ','.join(keywords), 'cp'),
        ('lastModifiedBy', lastmodifiedby, 'cp'),
        ('revision', '1', 'cp'),
        ('category', 'Examples', 'cp'),
        ('description', 'Examples', 'dc'),
    )
    for tag, tagtext, prefix in children:
        coreprops.append(makeelement(tag, tagtext=tagtext, nsprefix=prefix))
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times.
    # Problem here: the attribute name uses one namespace while its value uses
    # another, which makeelement cannot express — so build the element from a
    # string as a workaround.
    for doctime in ('created', 'modified'):
        coreprops.append(etree.fromstring(
            '<dcterms:' + doctime
            + ' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
            + ' xmlns:dcterms="http://purl.org/dc/terms/"'
            + ' xsi:type="dcterms:W3CDTF">'
            + currenttime
            + '</dcterms:' + doctime + '>'))
    return coreprops
def appproperties():
    '''Create app-specific properties. See docproperties() for more common document properties.

    Returns an extended-properties ``<Properties>`` element pre-filled with
    boilerplate document statistics matching the bundled template.
    '''
    # The root element must declare two namespaces (the default
    # extended-properties one and "vt"), which makeelement cannot express,
    # so it is parsed from a string. The previous version first built a
    # throwaway element with makeelement and immediately overwrote it; that
    # dead assignment has been removed.
    appprops = etree.fromstring(
        b'''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
        <Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes"></Properties>''')
    # A tuple of pairs (rather than a dict) makes the child-element order
    # deterministic on every interpreter version.
    props = (
        ('Template', 'Normal.dotm'),
        ('TotalTime', '6'),
        ('Pages', '1'),
        ('Words', '83'),
        ('Characters', '475'),
        ('Application', 'Microsoft Word 12.0.0'),
        ('DocSecurity', '0'),
        ('Lines', '12'),
        ('Paragraphs', '8'),
        ('ScaleCrop', 'false'),
        ('LinksUpToDate', 'false'),
        ('CharactersWithSpaces', '583'),
        ('SharedDoc', 'false'),
        ('HyperlinksChanged', 'false'),
        ('AppVersion', '12.0000'),
    )
    for name, value in props:
        appprops.append(makeelement(name, tagtext=value, nsprefix=None))
    return appprops
def websettings():
    '''Generate websettings'''
    # A webSettings element with two empty flag children.
    web = makeelement('webSettings')
    for flag in ('allowPNG', 'doNotSaveAsSingleFile'):
        web.append(makeelement(flag))
    return web
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file'''
    # Default list of relationships
    # FIXME: using string hack instead of making element
    #relationships = makeelement('Relationships',nsprefix='pr')
    relationships = etree.fromstring(
        '''<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">
        </Relationships>'''
    )
    # Relationship IDs (rId) are 1-based, hence start=1.
    for rid, relationship in enumerate(relationshiplist, start=1):
        attrs = {
            'Id': 'rId' + str(rid),
            'Type': relationship[0],
            'Target': relationship[1],
        }
        relationships.append(
            makeelement('Relationship', attributes=attrs, nsprefix=None))
    return relationships
def savedocx(document, coreprops, appprops, contenttypes, websettings, wordrelationships, output,
             temp_dir=None):
    '''Save a modified document.

    Serializes the supplied XML trees into the appropriate member files of
    the .docx zip archive at *output*, alongside the static support files
    copied from the template directory.

    document, coreprops, appprops, contenttypes, websettings,
    wordrelationships: trees produced by the corresponding builder functions.
    output: path of the .docx file to write.
    temp_dir: required, existing directory used as scratch space.

    Fix over the previous version: the working directory is now restored and
    the zip archive closed even when serialization raises, instead of leaking
    a changed cwd and an open file handle.
    '''
    assert temp_dir
    assert os.path.isdir(temp_dir)
    docx_dir = join(temp_dir, _DOCX_DIR_NAME)
    # Copy whole template to temporary directory
    distutils.dir_util.copy_tree(TEMPLATE_DIR, docx_dir)  # directory can already exist
    docxfile = zipfile.ZipFile(output, mode='w', compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(docx_dir)
    try:
        # Serialize our trees into the zip file.
        treesandfiles = {document: 'word/document.xml',
                         coreprops: 'docProps/core.xml',
                         appprops: 'docProps/app.xml',
                         contenttypes: '[Content_Types].xml',
                         websettings: 'word/webSettings.xml',
                         wordrelationships: 'word/_rels/document.xml.rels'}
        for tree in treesandfiles:
            log.info('Saving: '+treesandfiles[tree] )
            treestring = etree.tostring(tree, pretty_print=True)
            docxfile.writestr(treesandfiles[tree], treestring)
        # Add & compress the static support files copied from the template.
        files_to_ignore = ['.DS_Store']  # nuisance from some os's
        for dirpath, dirnames, filenames in os.walk('.'):
            for filename in filenames:
                if filename in files_to_ignore:
                    continue
                templatefile = join(dirpath, filename)
                archivename = templatefile[2:]  # strip the leading './'
                log.info('Saving: %s', archivename)
                docxfile.write(templatefile, archivename)
        log.info('Saved new file to: %r', output)
    finally:
        # Always close the archive and restore the previous working
        # directory, even if serialization failed part-way through.
        docxfile.close()
        os.chdir(prev_dir)
    return
| 43.305376 | 242 | 0.590133 |
3d49f7eaf598f54df886dcfb77904d84e8c9f173 | 108 | py | Python | nylas/util/__init__.py | nylas/nylas-production-python | a0979cd104a43f80750b2361aa580516b8dbfcfc | [
"Apache-2.0",
"MIT"
] | 19 | 2015-11-20T12:38:34.000Z | 2022-01-13T15:40:25.000Z | nylas/api/__init__.py | nylas/nylas-production-python | a0979cd104a43f80750b2361aa580516b8dbfcfc | [
"Apache-2.0",
"MIT"
] | null | null | null | nylas/api/__init__.py | nylas/nylas-production-python | a0979cd104a43f80750b2361aa580516b8dbfcfc | [
"Apache-2.0",
"MIT"
] | 10 | 2016-03-12T00:38:54.000Z | 2018-12-13T05:58:13.000Z | from pkgutil import extend_path
# Allow out-of-tree submodules.
__path__ = extend_path(__path__, __name__)
| 21.6 | 42 | 0.805556 |
3d4abb2320ad6d11a7ab8694b9e07545a91044dd | 885 | py | Python | project/migrations/0002_auto_20180801_1907.py | mcdale/django-material | 3bd5725cc4a4b6f2fb1439333e9033d0cd2b6a9c | [
"MIT"
] | null | null | null | project/migrations/0002_auto_20180801_1907.py | mcdale/django-material | 3bd5725cc4a4b6f2fb1439333e9033d0cd2b6a9c | [
"MIT"
] | 2 | 2020-07-21T12:52:29.000Z | 2021-06-17T20:23:36.000Z | project/migrations/0002_auto_20180801_1907.py | mcdale/django-material | 3bd5725cc4a4b6f2fb1439333e9033d0cd2b6a9c | [
"MIT"
] | null | null | null | # Generated by Django 2.0.8 on 2018-08-01 19:07
from django.db import migrations, models
import django.db.models.deletion
| 30.517241 | 121 | 0.628249 |
3d4dc9ef0428e142bdd3d4e674dd5dce9410a4ab | 8,925 | py | Python | src/core/src/tortuga/objects/softwareProfile.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 33 | 2018-03-02T17:07:39.000Z | 2021-05-21T18:02:51.000Z | src/core/src/tortuga/objects/softwareProfile.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 201 | 2018-03-05T14:28:24.000Z | 2020-11-23T19:58:27.000Z | src/core/src/tortuga/objects/softwareProfile.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 23 | 2018-03-02T17:21:59.000Z | 2020-11-18T14:52:38.000Z | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
from functools import cmp_to_key
from typing import Dict, Iterable, Optional
import tortuga.objects.admin
import tortuga.objects.component
import tortuga.objects.hardwareProfile
import tortuga.objects.kitSource
import tortuga.objects.nic
import tortuga.objects.node
import tortuga.objects.osInfo
import tortuga.objects.partition
from tortuga.objects.tortugaObject import TortugaObject, TortugaObjectList
from tortuga.utility.helper import str2bool
from .validators import RegexValidator
| 27.631579 | 77 | 0.607731 |
3d4e4b8f64fdbc0b44c87b38d3ece2354dc7dd2f | 579 | py | Python | src/utils/workspace.py | sidcmsft/ResponsibleAI | a8c691574690a8316e054c21ec9e6d0e0ca4e494 | [
"MIT"
] | 2 | 2020-09-03T16:13:56.000Z | 2021-02-18T15:58:41.000Z | src/utils/workspace.py | sidcmsft/ResponsibleAI | a8c691574690a8316e054c21ec9e6d0e0ca4e494 | [
"MIT"
] | null | null | null | src/utils/workspace.py | sidcmsft/ResponsibleAI | a8c691574690a8316e054c21ec9e6d0e0ca4e494 | [
"MIT"
] | 4 | 2020-09-03T16:14:19.000Z | 2021-05-05T05:59:59.000Z | import sys
from azureml.core import Workspace
from azureml.core.authentication import ServicePrincipalAuthentication
| 26.318182 | 70 | 0.680484 |
3d4f711206b2fd9dbd8a3177d589e3c33373c8b1 | 822 | py | Python | tools/test_tmp.py | Z-XQ/mmdetection | 9f3756889969c0c21e6d84e0d993f302e7f07460 | [
"Apache-2.0"
] | null | null | null | tools/test_tmp.py | Z-XQ/mmdetection | 9f3756889969c0c21e6d84e0d993f302e7f07460 | [
"Apache-2.0"
] | null | null | null | tools/test_tmp.py | Z-XQ/mmdetection | 9f3756889969c0c21e6d84e0d993f302e7f07460 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/9/28 9:49
# @Author : zxq
# @File : test_tmp.py
# @Software: PyCharm
import mmcv
import torch
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.apis import train_detector, inference_detector, show_result_pyplot
from tools.train_tmp import CustomerTrain
customer_train = CustomerTrain()
cfg = customer_train.cfg
# Build dataset
datasets = [build_dataset(cfg.data.train)]
# Build the detector
model = build_detector(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
# Add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
img = mmcv.imread('../data/kitti_tiny/training/image_2/000068.jpeg')
model.cfg = cfg
result = inference_detector(model, img)
show_result_pyplot(model, img, result) | 27.4 | 77 | 0.770073 |
3d4fb10a65167e4ffb44c4897ed5483e2f0d23c0 | 2,439 | py | Python | users/models.py | Mansi3546/CareerCradle | e040e763b1058aef937deb9eac4e1f9b2421ae25 | [
"MIT"
] | null | null | null | users/models.py | Mansi3546/CareerCradle | e040e763b1058aef937deb9eac4e1f9b2421ae25 | [
"MIT"
] | 1 | 2021-04-14T12:24:41.000Z | 2021-04-18T07:33:11.000Z | users/models.py | Mansi3546/CareerCradle | e040e763b1058aef937deb9eac4e1f9b2421ae25 | [
"MIT"
] | 3 | 2021-04-06T13:54:44.000Z | 2021-05-03T17:28:59.000Z | from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
from django.utils import timezone
from django.db.models import BooleanField
| 33.410959 | 94 | 0.660107 |
3d4fe154eecbbf658beca88c248a4a382f051e30 | 2,656 | py | Python | scripts/dump_ubd.py | sbreuers/BiternionNets-ROS | 954d6a2fbd97a01231f3411b366f3a3cccae5cf9 | [
"MIT"
] | 1 | 2018-08-29T07:11:22.000Z | 2018-08-29T07:11:22.000Z | scripts/dump_ubd.py | sbreuers/BiternionNets-ROS | 954d6a2fbd97a01231f3411b366f3a3cccae5cf9 | [
"MIT"
] | null | null | null | scripts/dump_ubd.py | sbreuers/BiternionNets-ROS | 954d6a2fbd97a01231f3411b366f3a3cccae5cf9 | [
"MIT"
] | 1 | 2018-10-20T12:09:58.000Z | 2018-10-20T12:09:58.000Z | #!/usr/bin/env python
# encoding: utf-8
from os.path import abspath, expanduser, join as pjoin
import os
from sys import stderr
import cv2
import rospy
from cv_bridge import CvBridge
import message_filters
from sensor_msgs.msg import Image as ROSImage
# Distinguish between STRANDS and SPENCER.
try:
from rwth_perception_people_msgs.msg import UpperBodyDetector
except ImportError:
from upper_body_detector.msg import UpperBodyDetector
if __name__ == "__main__":
rospy.init_node("dump_ubd")
d = Dumper()
rospy.spin()
rospy.loginfo("Dumped a total of {} UBDs.".format(d.counter))
| 34.947368 | 120 | 0.651355 |
3d5041bc56fbfaccca116aec98a24987eddba5f7 | 2,046 | py | Python | site_scons/site_tools/findPkgPath.py | fermi-lat/SConsFiles | 54124ec1031142b4fee76b12fdcfe839845e9fda | [
"BSD-3-Clause"
] | null | null | null | site_scons/site_tools/findPkgPath.py | fermi-lat/SConsFiles | 54124ec1031142b4fee76b12fdcfe839845e9fda | [
"BSD-3-Clause"
] | null | null | null | site_scons/site_tools/findPkgPath.py | fermi-lat/SConsFiles | 54124ec1031142b4fee76b12fdcfe839845e9fda | [
"BSD-3-Clause"
] | null | null | null | import os,platform,os.path
# Usual case: find where package is; add to env include path
# If 'subdir' argument, instead set construction env variable
# to point to it
| 40.117647 | 75 | 0.544966 |
3d506074ec9756c4fb5eb16d2309de5778a6c989 | 1,380 | py | Python | examples/model_zoo/test_binaries.py | Embracing/unrealcv | 19305da8554c3a0e683a5e27a1e487cc2cf42776 | [
"MIT"
] | 1,617 | 2016-09-10T04:41:33.000Z | 2022-03-31T20:03:28.000Z | examples/model_zoo/test_binaries.py | Embracing/unrealcv | 19305da8554c3a0e683a5e27a1e487cc2cf42776 | [
"MIT"
] | 199 | 2016-09-13T09:40:59.000Z | 2022-03-16T02:37:23.000Z | examples/model_zoo/test_binaries.py | Embracing/unrealcv | 19305da8554c3a0e683a5e27a1e487cc2cf42776 | [
"MIT"
] | 431 | 2016-09-10T03:20:35.000Z | 2022-03-19T13:44:21.000Z | import subprocess, os
win_binary_path = 'UE4Binaries/{project_name}/WindowsNoEditor/{project_name}.exe'
linux_binary_path = './UE4Binaries/{project_name}/LinuxNoEditor/{project_name}/Binaries/Linux/{project_name}'
mac_binary_path = './UE4Binaries/{project_name}/MacNoEditor/{project_name}.app'
project_names = [
'RealisticRendering', 'ArchinteriorsVol2Scene1', 'ArchinteriorsVol2Scene2',
'ArchinteriorsVol2Scene3', 'UrbanCity', 'Matinee', 'PhotorealisticCharacter'
]
binaries = []
binaries += [linux_binary_path.format(project_name = v) for v in project_names]
binaries += [win_binary_path.format(project_name = v) for v in project_names]
binaries += [mac_binary_path.format(project_name = v) for v in project_names]
if __name__ == '__main__':
if not os.path.isdir('output'):
os.mkdir('output')
for binary_path in binaries:
project_name = os.path.basename(binary_path).split('.')[0]
output_folder = os.path.join('output', project_name)
if not os.path.isfile(binary_path) and not os.path.isdir(binary_path):
print('Can not find binary "%s", skip' % binary_path)
continue
print('Testing %s ..., output will be saved to "%s"' % (binary_path, output_folder))
subprocess.call([
'python', 'examples/commands_demo.py',
binary_path, '--output', output_folder
])
| 41.818182 | 109 | 0.698551 |
3d50aca1b7a9e65ec91502519f8c8985d2d96649 | 4,629 | py | Python | pyslam/feature_tracker_configs.py | velvetThunder25/Feature-based-Monocular-Visual-Odometry | e6b108e8ce71ec0ec535932e2fc1023fc6fcaf92 | [
"MIT"
] | 7 | 2022-01-12T22:46:06.000Z | 2022-03-16T13:57:52.000Z | pyslam/feature_tracker_configs.py | velvetThunder25/Feature-based-Monocular-Visual-Odometry | e6b108e8ce71ec0ec535932e2fc1023fc6fcaf92 | [
"MIT"
] | null | null | null | pyslam/feature_tracker_configs.py | velvetThunder25/Feature-based-Monocular-Visual-Odometry | e6b108e8ce71ec0ec535932e2fc1023fc6fcaf92 | [
"MIT"
] | 1 | 2022-01-12T22:52:29.000Z | 2022-01-12T22:52:29.000Z | """
* This file is part of PYSLAM
*
* Copyright (C) 2016-present Luigi Freda <luigi dot freda at gmail dot com>
*
* PYSLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PYSLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PYSLAM. If not, see <http://www.gnu.org/licenses/>.
"""
from feature_tracker import feature_tracker_factory, FeatureTrackerTypes
from feature_manager import feature_manager_factory
from feature_types import FeatureDetectorTypes, FeatureDescriptorTypes, FeatureInfo
from feature_matcher import feature_matcher_factory, FeatureMatcherTypes
from parameters import Parameters
# some default parameters
kNumFeatures=Parameters.kNumFeatures
kRatioTest=Parameters.kFeatureMatchRatioTest
kTrackerType = FeatureTrackerTypes.DES_BF # default descriptor-based, brute force matching with knn
#kTrackerType = FeatureTrackerTypes.DES_FLANN # default descriptor-based, FLANN-based matching
"""
A collection of ready-to-used feature tracker configurations
"""
| 44.509615 | 159 | 0.579175 |
3d524c3bd35810437426c4644ee0f769511b58ea | 152 | py | Python | bindings/python/examples/05b_get_output.py | GoldenPedro/iota.rs | 71464f96b8e29d9fbed34a6ff77e757a112fedd4 | [
"Apache-2.0"
] | 256 | 2017-06-27T02:37:21.000Z | 2022-03-28T07:51:48.000Z | bindings/python/examples/05b_get_output.py | GoldenPedro/iota.rs | 71464f96b8e29d9fbed34a6ff77e757a112fedd4 | [
"Apache-2.0"
] | 379 | 2017-06-25T05:49:14.000Z | 2022-03-29T18:57:11.000Z | bindings/python/examples/05b_get_output.py | GoldenPedro/iota.rs | 71464f96b8e29d9fbed34a6ff77e757a112fedd4 | [
"Apache-2.0"
] | 113 | 2017-06-25T14:07:05.000Z | 2022-03-30T09:10:12.000Z | import iota_client
client = iota_client.Client()
print(
client.get_output("a22cba0667c922cbb1f8bdcaf970b2a881ccd6e88e2fcce50374de2aac7c37720000")
) | 25.333333 | 93 | 0.848684 |
3d5394f2af4816cbcec8e499c06b15d66ed6fb8e | 920 | py | Python | simple_ml/__init__.py | Yangruipis/simple_ml | 09657f6b017b973a5201aa611774d6ac8f0fc0a2 | [
"MIT"
] | 25 | 2018-04-17T04:38:51.000Z | 2021-10-09T04:07:53.000Z | simple_ml/__init__.py | Yangruipis/simple_ml | 09657f6b017b973a5201aa611774d6ac8f0fc0a2 | [
"MIT"
] | null | null | null | simple_ml/__init__.py | Yangruipis/simple_ml | 09657f6b017b973a5201aa611774d6ac8f0fc0a2 | [
"MIT"
] | 5 | 2018-04-17T05:27:00.000Z | 2020-12-01T02:55:15.000Z | # -*- coding:utf-8 -*-
"""
==================================
Simple Machine Learning
==================================
"""
from simple_ml.bayes import *
from simple_ml.classify_data import *
from simple_ml.auto import *
from simple_ml.classify_data import *
from simple_ml.ensemble import *
from simple_ml.evaluation import *
from simple_ml.feature_select import *
from simple_ml.knn import *
from simple_ml.logistic import *
from simple_ml.neural_network import *
from simple_ml.pca import *
from simple_ml.regression import *
from simple_ml.support_vector import *
# from simple_ml.svm import *
from simple_ml.tree import *
__all__ = [
'bayes',
'auto',
'classify_data',
'cluster',
'data_handle',
'ensemble',
'evaluation',
'feature_select',
'knn',
'svm',
'logistic',
'neural_network',
'pca',
'regression',
'support_vector',
'tree',
]
| 20 | 38 | 0.644565 |
3d53c39285c2bdec8b3434c91e5427cdb7617eb5 | 5,470 | py | Python | Modelos/Game.py | joaofanti/TrabRedesIIFinal | 3cae5db7ef88e20d9426043e926260ccedc79d10 | [
"MIT"
] | 1 | 2017-07-05T01:24:20.000Z | 2017-07-05T01:24:20.000Z | Modelos/Game.py | joaofanti/TrabRedesIIFinal | 3cae5db7ef88e20d9426043e926260ccedc79d10 | [
"MIT"
] | null | null | null | Modelos/Game.py | joaofanti/TrabRedesIIFinal | 3cae5db7ef88e20d9426043e926260ccedc79d10 | [
"MIT"
] | null | null | null | import sys
sys.path.insert(0, "Modelos/Mapa")
from Map import *
from Item import Item
"""
Define a classe que manipula a logica do jogo.
"""
| 28.051282 | 101 | 0.67404 |
3d550a112ff51ab3601284d3bb247c868ab1d733 | 2,062 | py | Python | test/sample_data/get_observation_histogram_week.py | eduramirezh/pyinaturalist | e5da7ced7fae31f27310868bdb2d349bdff8e0d4 | [
"MIT"
] | 47 | 2019-07-23T08:18:02.000Z | 2022-03-17T16:32:17.000Z | test/sample_data/get_observation_histogram_week.py | eduramirezh/pyinaturalist | e5da7ced7fae31f27310868bdb2d349bdff8e0d4 | [
"MIT"
] | 219 | 2019-08-22T14:45:20.000Z | 2022-03-30T02:39:35.000Z | test/sample_data/get_observation_histogram_week.py | eduramirezh/pyinaturalist | e5da7ced7fae31f27310868bdb2d349bdff8e0d4 | [
"MIT"
] | 9 | 2020-02-28T04:29:13.000Z | 2022-02-23T03:02:32.000Z | from datetime import datetime
{
datetime(2019, 12, 30, 0, 0): 35,
datetime(2020, 1, 6, 0, 0): 27,
datetime(2020, 1, 13, 0, 0): 39,
datetime(2020, 1, 20, 0, 0): 120,
datetime(2020, 1, 27, 0, 0): 73,
datetime(2020, 2, 3, 0, 0): 48,
datetime(2020, 2, 10, 0, 0): 35,
datetime(2020, 2, 17, 0, 0): 89,
datetime(2020, 2, 24, 0, 0): 81,
datetime(2020, 3, 2, 0, 0): 116,
datetime(2020, 3, 9, 0, 0): 90,
datetime(2020, 3, 16, 0, 0): 195,
datetime(2020, 3, 23, 0, 0): 406,
datetime(2020, 3, 30, 0, 0): 642,
datetime(2020, 4, 6, 0, 0): 652,
datetime(2020, 4, 13, 0, 0): 684,
datetime(2020, 4, 20, 0, 0): 1393,
datetime(2020, 4, 27, 0, 0): 1755,
datetime(2020, 5, 4, 0, 0): 1251,
datetime(2020, 5, 11, 0, 0): 1566,
datetime(2020, 5, 18, 0, 0): 1986,
datetime(2020, 5, 25, 0, 0): 2141,
datetime(2020, 6, 1, 0, 0): 1581,
datetime(2020, 6, 8, 0, 0): 1640,
datetime(2020, 6, 15, 0, 0): 1406,
datetime(2020, 6, 22, 0, 0): 1902,
datetime(2020, 6, 29, 0, 0): 2078,
datetime(2020, 7, 6, 0, 0): 1821,
datetime(2020, 7, 13, 0, 0): 1854,
datetime(2020, 7, 20, 0, 0): 2308,
datetime(2020, 7, 27, 0, 0): 2637,
datetime(2020, 8, 3, 0, 0): 2275,
datetime(2020, 8, 10, 0, 0): 1717,
datetime(2020, 8, 17, 0, 0): 1474,
datetime(2020, 8, 24, 0, 0): 2234,
datetime(2020, 8, 31, 0, 0): 2275,
datetime(2020, 9, 7, 0, 0): 2180,
datetime(2020, 9, 14, 0, 0): 1824,
datetime(2020, 9, 21, 0, 0): 1609,
datetime(2020, 9, 28, 0, 0): 1714,
datetime(2020, 10, 5, 0, 0): 2849,
datetime(2020, 10, 12, 0, 0): 1425,
datetime(2020, 10, 19, 0, 0): 569,
datetime(2020, 10, 26, 0, 0): 210,
datetime(2020, 11, 2, 0, 0): 331,
datetime(2020, 11, 9, 0, 0): 229,
datetime(2020, 11, 16, 0, 0): 162,
datetime(2020, 11, 23, 0, 0): 164,
datetime(2020, 11, 30, 0, 0): 102,
datetime(2020, 12, 7, 0, 0): 75,
datetime(2020, 12, 14, 0, 0): 55,
datetime(2020, 12, 21, 0, 0): 150,
datetime(2020, 12, 28, 0, 0): 11,
}
| 35.551724 | 39 | 0.532978 |
3d553fd4a5642d493db1017f36467ff8b535228c | 65 | py | Python | wave_1d_fwi_tf/__init__.py | ar4/wave_1d_fwi_tf | 0a543149dc3bd5ca6ec0e5bfe34add4796e0b879 | [
"MIT"
] | 2 | 2017-08-07T13:35:50.000Z | 2019-02-28T08:26:49.000Z | wave_1d_fwi_tf/__init__.py | ar4/wave_1d_fwi_tf | 0a543149dc3bd5ca6ec0e5bfe34add4796e0b879 | [
"MIT"
] | null | null | null | wave_1d_fwi_tf/__init__.py | ar4/wave_1d_fwi_tf | 0a543149dc3bd5ca6ec0e5bfe34add4796e0b879 | [
"MIT"
] | 5 | 2018-06-26T20:43:44.000Z | 2021-12-11T20:00:03.000Z | """1D FWI implemented using TensorFlow
"""
__version__ = '0.0.1'
| 16.25 | 38 | 0.692308 |
3d555476cff1bc071aa2e2a1ea0c596baf77825f | 1,586 | py | Python | scripts/space_heating_demand/ecofys_space_heating_demand.py | quintel/etmoses | e1e682d0ef68928e5a015c44d916ec151917b1ff | [
"MIT"
] | 16 | 2015-09-22T11:33:52.000Z | 2019-09-09T13:37:14.000Z | scripts/space_heating_demand/ecofys_space_heating_demand.py | quintel/etmoses | e1e682d0ef68928e5a015c44d916ec151917b1ff | [
"MIT"
] | 1,445 | 2015-05-20T22:42:50.000Z | 2022-02-26T19:16:02.000Z | scripts/space_heating_demand/ecofys_space_heating_demand.py | quintel/etloader | e1e682d0ef68928e5a015c44d916ec151917b1ff | [
"MIT"
] | 3 | 2015-11-03T10:41:26.000Z | 2017-02-11T07:39:52.000Z | import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
import os
time_steps = 8760
file_name = "../input_data/Ecofys_ECN_heating_profiles.csv"
data = zip(*genfromtxt(file_name, delimiter=','))
names = ["tussenwoning_laag", "tussenwoning_midden", "tussenwoning_hoog",
"hoekwoning_laag", "hoekwoning_midden", "hoekwoning_hoog",
"twee_onder_een_kapwoning_laag", "twee_onder_een_kapwoning_midden", "twee_onder_een_kapwoning_hoog",
"appartement_laag", "appartement_midden", "appartement_hoog",
"vrijstaande_woning_laag", "vrijstaande_woning_midden", "vrijstaande_woning_hoog"]
profiles = []
totals = []
counter = 0
for profile in data:
if len(profile) == time_steps:
profiles.append(profile)
totals.append(np.sum(profile))
print "Writing: ", names[counter]+".csv"
out_file = open("../output_data/"+names[counter]+".csv","w")
for item in profile:
for i in range(4):
out_file.write(str(item) + "\n")
out_file.close()
else:
print "Error! profile #"+str(counter)+" has "+ str(len(profile)) + " lines"
counter += 1
print totals
plt.close()
plt.figure(figsize=(19, 7))
mini = 0
maxi = 24 * 7
for name,profile in zip(names,profiles):
#if "appartement" in name:
#plt.plot(profile[mini:maxi]/np.sum(profile),linewidth=1.0, label=name)
plt.plot(profile[mini:maxi],linewidth=1.0, label=name)
plt.xlabel('time (hours)')
plt.ylabel('kW')
plt.legend()
plt.show() | 26.433333 | 109 | 0.645019 |
3d55a052fc466e9d762d5638ce7970aab1dc7f8b | 1,362 | py | Python | parsers/lyrics_az.py | taynaron/lyrics2mp3 | 339f4dfd94c88896278a7be4143ea586ada8194f | [
"MIT"
] | null | null | null | parsers/lyrics_az.py | taynaron/lyrics2mp3 | 339f4dfd94c88896278a7be4143ea586ada8194f | [
"MIT"
] | null | null | null | parsers/lyrics_az.py | taynaron/lyrics2mp3 | 339f4dfd94c88896278a7be4143ea586ada8194f | [
"MIT"
] | null | null | null | from .lyrics import Lyrics
| 31.674419 | 69 | 0.592511 |
3d56210042ea856581699506b54c8a673f17ffaa | 1,414 | py | Python | senorge/listfiles.py | kojitominaga/scratch | 5eaf4de30c89ff1e855a6be493105d1201f07f74 | [
"FSFAP"
] | null | null | null | senorge/listfiles.py | kojitominaga/scratch | 5eaf4de30c89ff1e855a6be493105d1201f07f74 | [
"FSFAP"
] | null | null | null | senorge/listfiles.py | kojitominaga/scratch | 5eaf4de30c89ff1e855a6be493105d1201f07f74 | [
"FSFAP"
] | null | null | null | import os
d = '/Volumes/Seagate Expansion Drive/SeNorge'
vars = ['bn', 'eva', 'frd', 'gwt', 'is', 'os', 'q', 'rr',
'sd', 'smd', 'swe', 'tm']
# Massebalanse isbre (mm/dgn) gwb_bn_2014_06_15.asc
# Fordampning (mm/dgn) gwb_eva_2014_06_15.asc
# Frostdyp (mm/dgn) gwb_frd_2014_06_15.asc
# Grunnvannsmagasin (mm) gwb_gwt_2014_06_15.asc
# Infiltrasjon i rotsonen (mm/dgn) gwb_is_2014_06_15.asc
# Perkolasjon fra rotsonen til grunnvansonen (mm/dgn) gwb_os_2014_06_15.asc
# Avrenning (mm/dgn) gwb_q_2014_06_15.asc
# Nedbr (mm/dgn) gwb_rr_2014_06_15.asc
# Sndyp (mm) gwb_sd_2014_06_15.asc
# Markvannsunderskudd (mm) gwb_smd_2014_06_15.asc
# Snens vannekvivalent (mm) gwb_swe_2014_06_15.asc
# Temperatur (C) gwb_tm_2014_06_15.asc
counts = {}
for year in range(1957, 2015):
fns = os.listdir(os.path.join(d, 'gwb_ascii_%s' % year))
counts[year] = [len([f for f in fns if v in f]) for v in vars]
out = ' '.join(['year'] + vars)
out += '\n'
out += '\n'.join([' '.join(map(str, [e] + counts[e])) for e in counts.keys()])
out += '\n'
counts2 = {}
for year in range(1957, 2015):
fns = os.listdir(os.path.join(d, 'gwb_ascii_%s' % year))
counts2[year] = [len([f for f in fns if v in f and '.gz' in f])
for v in vars]
out2 = ' '.join(['year'] + vars)
out2 += '\n'
out2 += '\n'.join([' '.join(map(str, [e] + counts2[e])) for e in counts2.keys()])
out2 += '\n'
| 33.666667 | 81 | 0.642857 |
3d56d13a865c0fd22d417834c65ef6529f433ba4 | 104 | py | Python | Python/jump-to-python/Exponential.py | leeheefull/blog-source | 5f8370de5b0f62801fffc9e5f0f0bcb98dc2e6d1 | [
"MIT"
] | null | null | null | Python/jump-to-python/Exponential.py | leeheefull/blog-source | 5f8370de5b0f62801fffc9e5f0f0bcb98dc2e6d1 | [
"MIT"
] | null | null | null | Python/jump-to-python/Exponential.py | leeheefull/blog-source | 5f8370de5b0f62801fffc9e5f0f0bcb98dc2e6d1 | [
"MIT"
] | null | null | null | #
a = 1e9
print(a) # 1000000000.0
a = 7.525e2
print(a) # 752.5
a = 3954e-3
print(a) # 3.954
| 10.4 | 24 | 0.576923 |
3d56d5d2a7208245fa6af52b9cc12f9423e31653 | 11,289 | py | Python | src/lib_yolo_detect.py | felixchenfy/ros_yolo_as_template_matching | 0d5c0a52ba5540d2a644e0b426f9041a2a5e7858 | [
"MIT"
] | 29 | 2019-12-02T01:54:18.000Z | 2022-02-15T09:23:27.000Z | src/lib_yolo_detect.py | felixchenfy/ros_yolo_as_template_matching | 0d5c0a52ba5540d2a644e0b426f9041a2a5e7858 | [
"MIT"
] | 8 | 2019-12-24T13:13:44.000Z | 2022-02-10T00:16:31.000Z | src/lib_yolo_detect.py | felixchenfy/ros_yolo_as_template_matching | 0d5c0a52ba5540d2a644e0b426f9041a2a5e7858 | [
"MIT"
] | 5 | 2020-01-31T00:31:37.000Z | 2022-03-28T06:14:09.000Z | # -*- coding: future_fstrings -*-
from __future__ import division
if 1: # Set path
import sys, os
ROOT = os.path.dirname(os.path.abspath(__file__))+"/../" # root of the project
sys.path.append(ROOT)
import sys
from src.PyTorch_YOLOv3.models import Darknet
from src.PyTorch_YOLOv3.utils.utils import non_max_suppression, load_classes
from src.PyTorch_YOLOv3.utils.datasets import ImgfolderDataset
from utils.lib_yolo_datasets import ImgfolderDataset, UsbcamDataset, VideofileDataset
from utils.lib_yolo_plot import Yolo_Detection_Plotter_CV2
import utils.lib_common_funcs as cf
from config.config import read_all_args
import os
import sys
import time
import datetime
import argparse
import cv2
import numpy as np
from PIL import Image
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torchvision.transforms as transforms
import torch.nn.functional as F
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
def tensor_images_to_list_numpy_images(tensor_imgs):
'''
Arguments:
tensor_imgs {tensor, BxCxHxW}
Return:
list_of_imgs {list of numpy images}
'''
imgs = tensor_imgs.permute(0, 2, 3, 1).data.numpy() # convert to: RGB, float, (20, H, W, 3)
list_of_imgs = [img for img in imgs] # convert to: list of numpy images
return list_of_imgs
def rescale_boxes(boxes, current_dim, original_shape):
''' Rescales bounding boxes to the original shape
This is copied from src/PyTorch_YOLOv3/utils/utils.py
'''
orig_h, orig_w = original_shape
# The amount of padding that was added
pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))
pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))
# Image height and width after padding is removed
unpad_h = current_dim - pad_y
unpad_w = current_dim - pad_x
# Rescale bounding boxes to dimension of original image
boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h
boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w
boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h
return boxes
def resize(image, size):
''' Resize image to `size` '''
image = F.interpolate(image.unsqueeze(0), size=size, mode="nearest").squeeze(0)
return image
def rgbimg_to_yoloimg(img, img_size):
'''
Input:
img: 3xHxW, tensor, rgb
img_size: int
Output:
(let Z = img_size)
img: 3xZxZ, tensor, rgb
'''
# img = np.moveaxis(img, -1, 0) # no need for this. torchvision.transforms does this for us.
# img = transforms.ToTensor()(img) # numpy, HxWx3 --> tensor, 3xHxW
# img = img[np.newaxis, ...] # no need for this. DataLoader itself will add the additional channel.
# Pad to square resolution
img, _ = pad_to_square(img, 0) # 3 x H(W) x H(W)
# Resize
img = resize(img, img_size) # 3 x img_size x img_size
return img
def rgbimgs_to_yoloimgs(imgs, img_size):
'''
Input:
imgs: Batch x (3xHxW), tensor, rgb, uint8
img_size: int
Output:
(let Z = img_size)
yoloimgs: Batch x (3xZxZ), tensor, rgb, float
'''
imgs = imgs.type(torch.float32)
imgs = imgs.permute(0, 3, 1, 2) # [B, W, H, 3] --> [B, 3, W, H]
imgs /= 255.0
yoloimgs = [rgbimg_to_yoloimg(img, img_size) for img in imgs]
yoloimgs = torch.stack((yoloimgs))
return yoloimgs
# ------------------ Main functions used for inference ------------------
def detetions_to_labels_and_pos(self, detections, classes):
'''
Input:
detections: the output of "detect_targets()"
'''
labels_and_pos = []
for x1, y1, x2, y2, conf, cls_conf, cls_idx in detections:
label = classes[int(cls_idx)]
pos = (int((x1+x2)/2), int((y1+y2)/2))
labels_and_pos.append((label, pos))
return labels_and_pos
Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
def detect_targets(args_inference, model,
rgb_imgs, # Batch x (3xHxW), tensor, rgb, uint8
is_one_obj_per_class=False, # single instance for each class
):
'''
Output:
detections: [bbox, conf, cls_conf, cls_idx]
where: bbox = [x1, y1, x2, y2] is represented in the original image coordinate
'''
# -- Convert images to required type
Z = args_inference.img_size
yolo_imgs = rgbimgs_to_yoloimgs(rgb_imgs, Z) # [B, 3, W, H] --> [B, 3, Z, Z], uint8 --> float
imgs_on_gpu = Variable(yolo_imgs.type(Tensor))
# Get detections
with torch.no_grad():
imgs_detections = model(imgs_on_gpu)
N_elements = 7 # format of imgs_detections[jth_img]: x1, y1, x2, y2, conf, cls_conf, cls_idx
idx_conf = 5
imgs_detections = non_max_suppression(imgs_detections, args_inference.conf_thres, args_inference.nms_thres)
# convert to numpy array
imgs_detections = [d.numpy() if d is not None else None for d in imgs_detections]
# Sort detections based on confidence;
# Convert box to the current image coordinate;
# Convert detections to 2d list
for jth_img in range(len(imgs_detections)):
if imgs_detections[jth_img] is None: # no detected object
imgs_detections[jth_img] = []
continue
# sort
detections = sorted(imgs_detections[jth_img], key=lambda x: x[idx_conf])
detections = np.array(detections)
# change bbox pos to yoloimg
detections = rescale_boxes(detections, args_inference.img_size, rgb_imgs[jth_img].shape[:2])
# save result
imgs_detections[jth_img] = detections.tolist()
# Remove duplicated objects in the single-instance mode
if is_one_obj_per_class:
for jth_img, jth_detections in enumerate(imgs_detections):
if not imgs_detections[jth_img]:
continue
detected_objects = set()
jth_unique_detections = []
for kth_object in jth_detections:
x1, y1, x2, y2, conf, cls_conf, cls_idx = kth_object
if cls_idx not in detected_objects: # Add object if not detected before
detected_objects.add(cls_idx)
jth_unique_detections.append(kth_object)
imgs_detections[jth_img] = jth_unique_detections
return imgs_detections
| 36.182692 | 111 | 0.639738 |
3d582b494cb98544a7b8b83f15184b7f8c7c6d2b | 43 | py | Python | python/parse_ddl/tests/ddl_examples/test_vs.py | jared-ong/data-projects | 21ceccacb8e408ca45fe95c1c4d311f48e8f7708 | [
"MIT"
] | null | null | null | python/parse_ddl/tests/ddl_examples/test_vs.py | jared-ong/data-projects | 21ceccacb8e408ca45fe95c1c4d311f48e8f7708 | [
"MIT"
] | null | null | null | python/parse_ddl/tests/ddl_examples/test_vs.py | jared-ong/data-projects | 21ceccacb8e408ca45fe95c1c4d311f48e8f7708 | [
"MIT"
] | null | null | null | import json
import re
print("Hello world") | 10.75 | 20 | 0.767442 |
3d58e1aeb6209bbf0ac5b1e7058c942f20cd4768 | 733 | py | Python | tests/test_missing_variable.py | specfault/GreenerPython | 976260c3e78969cfd3e1e40639325f104325c703 | [
"MIT"
] | null | null | null | tests/test_missing_variable.py | specfault/GreenerPython | 976260c3e78969cfd3e1e40639325f104325c703 | [
"MIT"
] | null | null | null | tests/test_missing_variable.py | specfault/GreenerPython | 976260c3e78969cfd3e1e40639325f104325c703 | [
"MIT"
] | null | null | null | from tests.framework import AbstractFilePair
from tests.framework import in_test_function
from tests.framework import standard_test_spec
from tests.framework import SavingFixesSUT
from tests.framework import fixing_test
variable_names = ('x', 'y')
| 29.32 | 73 | 0.718963 |
3d59c021cf7fb75f7a11d364d01cd243b711a413 | 3,186 | py | Python | aiida/storage/psql_dos/migrations/versions/django_0040_data_migration_legacy_process_attributes.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 153 | 2016-12-23T20:59:03.000Z | 2019-07-02T06:47:52.000Z | aiida/storage/psql_dos/migrations/versions/django_0040_data_migration_legacy_process_attributes.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 2,466 | 2016-12-24T01:03:52.000Z | 2019-07-04T13:41:08.000Z | aiida/storage/psql_dos/migrations/versions/django_0040_data_migration_legacy_process_attributes.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 88 | 2016-12-23T16:28:00.000Z | 2019-07-01T15:55:20.000Z | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Migrate some legacy process attributes.
Attribute keys that are renamed:
* `_sealed` -> `sealed`
Attribute keys that are removed entirely:
* `_finished`
* `_failed`
* `_aborted`
* `_do_abort`
Finally, after these first migrations, any remaining process nodes that still do not have a sealed attribute and have
it set to `True`. Excluding the nodes that have a `process_state` attribute of one of the active states `created`,
running` or `waiting`, because those are actual valid active processes that are not yet sealed.
This is identical to migration e734dd5e50d7
Revision ID: django_0040
Revises: django_0039
"""
from alembic import op
import sqlalchemy as sa
revision = 'django_0040'
down_revision = 'django_0039'
branch_labels = None
depends_on = None
def upgrade():
"""Migrations for the upgrade."""
conn = op.get_bind()
statement = sa.text(
"""
UPDATE db_dbnode
SET attributes = jsonb_set(attributes, '{"sealed"}', attributes->'_sealed')
WHERE attributes ? '_sealed' AND node_type LIKE 'process.%';
-- Copy `_sealed` -> `sealed`
UPDATE db_dbnode SET attributes = attributes - '_sealed'
WHERE attributes ? '_sealed' AND node_type LIKE 'process.%';
-- Delete `_sealed`
UPDATE db_dbnode SET attributes = attributes - '_finished'
WHERE attributes ? '_finished' AND node_type LIKE 'process.%';
-- Delete `_finished`
UPDATE db_dbnode SET attributes = attributes - '_failed'
WHERE attributes ? '_failed' AND node_type LIKE 'process.%';
-- Delete `_failed`
UPDATE db_dbnode SET attributes = attributes - '_aborted'
WHERE attributes ? '_aborted' AND node_type LIKE 'process.%';
-- Delete `_aborted`
UPDATE db_dbnode SET attributes = attributes - '_do_abort'
WHERE attributes ? '_do_abort' AND node_type LIKE 'process.%';
-- Delete `_do_abort`
UPDATE db_dbnode
SET attributes = jsonb_set(attributes, '{"sealed"}', to_jsonb(True))
WHERE
node_type LIKE 'process.%' AND
NOT (attributes ? 'sealed') AND
attributes->>'process_state' NOT IN ('created', 'running', 'waiting');
-- Set `sealed=True` for process nodes that do not yet have a `sealed` attribute AND are not in an active state
"""
)
conn.execute(statement)
def downgrade():
"""Migrations for the downgrade."""
raise NotImplementedError('Downgrade of django_0040.')
| 35.797753 | 119 | 0.607031 |
3d5a102883a7bb1dd52786e30fc8cbb5261af1f1 | 1,108 | py | Python | hdvw/ops/matrix.py | shaoshitong/hdvw | fbb39da9ad8a765f74225eec7e9614978c740dde | [
"Apache-2.0"
] | 2 | 2022-03-26T09:08:43.000Z | 2022-03-26T09:09:27.000Z | hdvw/ops/matrix.py | shaoshitong/hdvw | fbb39da9ad8a765f74225eec7e9614978c740dde | [
"Apache-2.0"
] | null | null | null | hdvw/ops/matrix.py | shaoshitong/hdvw | fbb39da9ad8a765f74225eec7e9614978c740dde | [
"Apache-2.0"
] | null | null | null | from sklearn.metrics import confusion_matrix
import torch
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from tensorflow.keras.utils import to_categorical | 38.206897 | 81 | 0.652527 |
3d5a3b5d8a7ee8e5a8d60b3408e8aa8d46c512c1 | 346 | py | Python | {{cookiecutter.app_name}}/search_indexes.py | rickydunlop/cookiecutter-django-app-template-drf-haystack | 8ea9034c371950628b3d312639964753899c8c5d | [
"MIT"
] | null | null | null | {{cookiecutter.app_name}}/search_indexes.py | rickydunlop/cookiecutter-django-app-template-drf-haystack | 8ea9034c371950628b3d312639964753899c8c5d | [
"MIT"
] | null | null | null | {{cookiecutter.app_name}}/search_indexes.py | rickydunlop/cookiecutter-django-app-template-drf-haystack | 8ea9034c371950628b3d312639964753899c8c5d | [
"MIT"
] | null | null | null | from haystack import indexes
from .models import {{ cookiecutter.model_name }}
class {{ cookiecutter.model_name }}Index(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
name = indexes.CharField(model_attr='name')
| 28.833333 | 81 | 0.736994 |
3d5db5e05861ba4f7444a52667354a11e6f370f2 | 6,018 | py | Python | utility_functions.py | andrewli2403/California-Basketball-Data-Processor | 19582bef72d6a4f4281ddb61eceb4bee033b5ceb | [
"MIT"
] | null | null | null | utility_functions.py | andrewli2403/California-Basketball-Data-Processor | 19582bef72d6a4f4281ddb61eceb4bee033b5ceb | [
"MIT"
] | null | null | null | utility_functions.py | andrewli2403/California-Basketball-Data-Processor | 19582bef72d6a4f4281ddb61eceb4bee033b5ceb | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup as bs
import re
import pandas as pd
#collect & process data based on GAME ID
#find date of game
#rounds data based on stat parameters
#converts datetime object into MM/DD/YYYY format | 47.015625 | 1,105 | 0.633599 |
3d5eb8fb4fedfe6ddb55250652317a407099a204 | 3,787 | py | Python | site_crawler/cleaner/cleaner.py | kwoshvick/NSE-Financial-News-Crawler-and-Predictor | 8acee7c660c5487d18321dc7a169eba3043ef2b8 | [
"MIT"
] | 11 | 2018-04-24T12:05:45.000Z | 2021-07-12T05:30:41.000Z | site_crawler/cleaner/cleaner.py | kwoshvick/NSE-Financial-News-Crawler-and-Predictor | 8acee7c660c5487d18321dc7a169eba3043ef2b8 | [
"MIT"
] | null | null | null | site_crawler/cleaner/cleaner.py | kwoshvick/NSE-Financial-News-Crawler-and-Predictor | 8acee7c660c5487d18321dc7a169eba3043ef2b8 | [
"MIT"
] | 5 | 2019-08-09T04:43:23.000Z | 2021-08-28T18:05:56.000Z | import csv
import re
import string
import html
if __name__ == "__main__":
c = Cleaner()
tweets_csvs = [
'Business_KE',
'MadeItInAfrica',
'IFCAfrica',
'africareview',
'AfDB_Group',
'_AfricanUnion',
'Taifa_Leo',
'BD_Africa',
'RadioCitizenFM',
'citizentvkenya',
'KTNKenya',
'K24Tv',
'StandardKenya',
'TheStarKenya',
'radiomaisha',
'KBCChannel1',
'CapitalFMKenya',
'African_Markets',
'Africafinancial',
'InvestInAfrica',
'AfricanInvestor',
'forbesafrica',
'cnbcafrica',
'BBCAfrica',
'CNNAfrica',
'allafrica',
'ReutersAfrica',
'VenturesAfrica',
'BBGAfrica',
'GhettoRadio895',
'kenyanwalstreet',
'SokoAnalyst',
'NSEKenya',
'wazua'
]
for tweets_csv in tweets_csvs:
c.save_pre_labled_csv(tweets_csv)
| 29.818898 | 113 | 0.578558 |
3d5f940e0e5788ca23c26f2a301fe14e51745333 | 1,161 | py | Python | 003.branch/if.py | cjp1016/python-samples | ca5a7284cf4cb9fe42fa1487d4944815a00487ec | [
"Apache-2.0"
] | null | null | null | 003.branch/if.py | cjp1016/python-samples | ca5a7284cf4cb9fe42fa1487d4944815a00487ec | [
"Apache-2.0"
] | null | null | null | 003.branch/if.py | cjp1016/python-samples | ca5a7284cf4cb9fe42fa1487d4944815a00487ec | [
"Apache-2.0"
] | null | null | null | """
Version: 0.1
Author: cjp
"""
username = input(': ')
password = input(': ')
# admin123456
if username == 'admin' and password == '123456':
print('!')
else:
print('!')
"""
Python
if
"""
# ifelifelse
"""
3x - 5 (x > 1)
f(x) = x + 2 (-1 <= x <= 1)
5x + 3 (x < -1)
Version: 0.1
Author: cjp
"""
x = float(input('x = '))
if x > 1:
y = 3 * x - 5
elif x >= -1:
y = x + 2
else:
y = 5 * x + 3
print('f(%.2f) = %.2f' % (x, y))
"""
elifelse
"""
"""
3x - 5 (x > 1)
f(x) = x + 2 (-1 <= x <= 1)
5x + 3 (x < -1)
Version: 0.1
Author: cjp
"""
x = float(input('x = '))
if x > 1:
y = 3 * x - 5
else:
if x >= -1:
y = x+2
else:
y = 5 * x + 3
print('f(%.2f) = %.2f' % (x, y))
"""
PythonFlat is better than nested.
""" | 15.077922 | 53 | 0.564169 |
3d602a949005e0184acfd82e6822740a19d36fb9 | 7,210 | bzl | Python | bazel/antlr4_cc.bzl | kyle-winkelman/fhir | 01038aa235189fd043fd2981ebf40f4dc1e826e0 | [
"Apache-2.0"
] | null | null | null | bazel/antlr4_cc.bzl | kyle-winkelman/fhir | 01038aa235189fd043fd2981ebf40f4dc1e826e0 | [
"Apache-2.0"
] | 2 | 2020-07-24T14:20:45.000Z | 2020-07-24T19:43:52.000Z | bazel/antlr4_cc.bzl | kyle-winkelman/fhir | 01038aa235189fd043fd2981ebf40f4dc1e826e0 | [
"Apache-2.0"
] | 1 | 2020-07-10T15:03:45.000Z | 2020-07-10T15:03:45.000Z | """Build rules to create C++ code from an Antlr4 grammar."""
def antlr4_cc_lexer(name, src, namespaces = None, imports = None, deps = None, lib_import = None):
"""Generates the C++ source corresponding to an antlr4 lexer definition.
Args:
name: The name of the package to use for the cc_library.
src: The antlr4 g4 file containing the lexer rules.
namespaces: The namespace used by the generated files. Uses an array to
support nested namespaces. Defaults to [name].
imports: A list of antlr4 source imports to use when building the lexer.
deps: Dependencies for the generated code.
lib_import: Optional target for importing grammar and token files.
"""
namespaces = namespaces or [name]
imports = imports or []
deps = deps or []
if not src.endswith(".g4"):
fail("Grammar must end with .g4", "src")
if (any([not imp.endswith(".g4") for imp in imports])):
fail("Imported files must be Antlr4 grammar ending with .g4", "imports")
file_prefix = src[:-3]
base_file_prefix = _strip_end(file_prefix, "Lexer")
out_files = [
"%sLexer.h" % base_file_prefix,
"%sLexer.cpp" % base_file_prefix,
]
native.java_binary(
name = "antlr_tool",
jvm_flags = ["-Xmx256m"],
main_class = "org.antlr.v4.Tool",
runtime_deps = ["@maven//:org_antlr_antlr4_4_7_1"],
)
command = ";\n".join([
# Use the first namespace, we'll add the others afterwards.
_make_tool_invocation_command(namespaces[0], lib_import),
_make_namespace_adjustment_command(namespaces, out_files),
])
native.genrule(
name = name + "_source",
srcs = [src] + imports,
outs = out_files,
cmd = command,
heuristic_label_expansion = 0,
tools = ["antlr_tool"],
)
native.cc_library(
name = name,
srcs = [f for f in out_files if f.endswith(".cpp")],
hdrs = [f for f in out_files if f.endswith(".h")],
deps = ["@antlr_cc_runtime//:antlr4_runtime"] + deps,
copts = [
"-fexceptions",
],
features = ["-use_header_modules"], # Incompatible with -fexceptions.
)
def antlr4_cc_parser(
name,
src,
namespaces = None,
token_vocab = None,
imports = None,
listener = True,
visitor = False,
deps = None,
lib_import = None):
"""Generates the C++ source corresponding to an antlr4 parser definition.
Args:
name: The name of the package to use for the cc_library.
src: The antlr4 g4 file containing the parser rules.
namespaces: The namespace used by the generated files. Uses an array to
support nested namespaces. Defaults to [name].
token_vocab: The antlr g4 file containing the lexer tokens.
imports: A list of antlr4 source imports to use when building the parser.
listener: Whether or not to include listener generated files.
visitor: Whether or not to include visitor generated files.
deps: Dependencies for the generated code.
lib_import: Optional target for importing grammar and token files.
"""
suffixes = ()
if listener:
suffixes += (
"%sBaseListener.cpp",
"%sListener.cpp",
"%sBaseListener.h",
"%sListener.h",
)
if visitor:
suffixes += (
"%sBaseVisitor.cpp",
"%sVisitor.cpp",
"%sBaseVisitor.h",
"%sVisitor.h",
)
namespaces = namespaces or [name]
imports = imports or []
deps = deps or []
if not src.endswith(".g4"):
fail("Grammar must end with .g4", "src")
if token_vocab != None and not token_vocab.endswith(".g4"):
fail("Token Vocabulary must end with .g4", "token_vocab")
if (any([not imp.endswith(".g4") for imp in imports])):
fail("Imported files must be Antlr4 grammar ending with .g4", "imports")
file_prefix = src[:-3]
base_file_prefix = _strip_end(file_prefix, "Parser")
out_files = [
"%sParser.h" % base_file_prefix,
"%sParser.cpp" % base_file_prefix,
] + _make_outs(file_prefix, suffixes)
if token_vocab:
imports.append(token_vocab)
command = ";\n".join([
# Use the first namespace, we'll add the others afterwardsm thi .
_make_tool_invocation_command(namespaces[0], lib_import, listener, visitor),
_make_namespace_adjustment_command(namespaces, out_files),
])
native.genrule(
name = name + "_source",
srcs = [src] + imports,
outs = out_files,
cmd = command,
heuristic_label_expansion = 0,
tools = [
":antlr_tool",
],
)
native.cc_library(
name = name,
srcs = [f for f in out_files if f.endswith(".cpp")],
hdrs = [f for f in out_files if f.endswith(".h")],
deps = ["@antlr_cc_runtime//:antlr4_runtime"] + deps,
copts = [
"-fexceptions",
# FIXME: antlr generates broken C++ code that attempts to construct
# a std::string from nullptr. It's not clear whether the relevant
# constructs are reachable.
"-Wno-nonnull",
],
features = ["-use_header_modules"], # Incompatible with -fexceptions.
)
| 38.55615 | 142 | 0.600971 |
3d62c9779cfa7f3da2b542252bdcb812a8982541 | 234 | py | Python | src/scenic/simulators/gta/map.py | cahartsell/Scenic | 2e7979011aef426108687947668d9ba6f5439136 | [
"BSD-3-Clause"
] | 141 | 2019-03-07T07:17:19.000Z | 2022-03-19T16:15:48.000Z | src/scenic/simulators/gta/map.py | cahartsell/Scenic | 2e7979011aef426108687947668d9ba6f5439136 | [
"BSD-3-Clause"
] | 27 | 2019-06-18T23:04:29.000Z | 2022-03-31T13:42:05.000Z | src/scenic/simulators/gta/map.py | cahartsell/Scenic | 2e7979011aef426108687947668d9ba6f5439136 | [
"BSD-3-Clause"
] | 59 | 2019-04-08T15:20:15.000Z | 2022-03-29T07:23:26.000Z |
# stub to allow changing the map without having to alter gta_model.sc
import os
mapPath = 'map.npz'
| 19.5 | 69 | 0.717949 |
e9e2bdbc8442df5b9a587f4296d83d87e0d66ce8 | 6,982 | py | Python | bot/messages.py | pyaf/tpobot | d96a3650de46f6d43ab346d61b922b170cd5fdb2 | [
"MIT"
] | 4 | 2017-07-19T19:18:15.000Z | 2017-11-24T16:15:51.000Z | bot/messages.py | rishabhiitbhu/tpobot | d96a3650de46f6d43ab346d61b922b170cd5fdb2 | [
"MIT"
] | 5 | 2020-02-11T23:53:50.000Z | 2021-12-13T19:45:22.000Z | bot/messages.py | pyaf/tpobot | d96a3650de46f6d43ab346d61b922b170cd5fdb2 | [
"MIT"
] | 1 | 2017-08-27T20:40:50.000Z | 2017-08-27T20:40:50.000Z | # -*- coding: utf-8 -*-
message_dict = {
'welcome': "Hi! TPO Baba is here to give you updates about TPO portal, set willingness reminders, ppt "\
"reminders, exam date reminders and lot more...:D \n\n"\
"To personalise your experience, I gotta register you. It's simple two step process.\n",
'greetings': "Hello pal :)",
'haalchaal': "hamaar to mauj ahaai guru , tohaar batawa kaa haal chaal bate?"\
" ;P",
'no_idea': "Oops, didn't get you, Baba is a simple AI bot not Jarvis, don't be so cryptic. \n"\
"Baba has gotta master, Baba will learn this soon. B) \n\n"\
"Ask for help to know what options you have.",
'user_invalid': "You account is Invalid.\n"\
"Contact https://m.me/rishabh.ags/ for help",
'get_email': "Baba needs to know your official IIT email id, drop it as a text message.",
'email_set': "Baba has set your email to {0}",
'not_iit_email': "Oops!, seems like you didn't enter your official email id\n"\
"As I am running on a heroku server, which costs 7$ pm. Don't misuse this. "\
"I cannot afford offering services to others,.\nIf you ain't student of IIT (BHU), please"\
" don't register ,.. Bhawnao ko samjho yaaar ",
'get_course': "Baba needs to know your course, select your course among btech, idd or imd, "\
"then drop a text message.",
'course_set': "Baba has set your course to {0}",
'reg_error': "Oops!, you got me wrong, retry entering it correctly..\n\n"\
"And you gotta register first, we'll chat afterwards. :)\n"\
"if you're facing issues contact https://m.me/rishabh.ags",
'email_already_set': "Pal, you already got your email set to {0}",
'invalid_email': "Baba wants a valid email id.\nRetry please.",
'course_already_set': "Pal, you already got your email set to {0}",
'reg_success': "And congratulations! you have successfully registered!, your email id "\
"will be verified soon. :) \n\nIf found misleading or wrong, I'll find you and I'll "\
"deregister you ;P \n\n"\
"Ask for features to know what I've got for you in my Jhola B) \n\n"\
"Ask for help to know what options you have. :)",
'features': "Baba is a messenger bot created by a high functioning sociopathic nerd of IIT (BHU) :D\n"\
"\nI have got a simple AI brain powered by Wit and has not been trained too much, "\
"so please don't use too off the track keywords \n\n",
'features1': "What I currently do:\n"\
"1. Text you whenever a new company opens for your course and department, "\
"you'll get all details of such companies.\n"\
"2. Text you whenever companies your course and department get any changes in their "\
"parameters like willingness deadlines, exam dates, ppt dates, etc.. \n\n",
'features2':"What I plan to do pretty soon:\n"\
"1. Remind you about deadlines of willingness application, ppt dates "\
"and exam dates etc.. B) \n" \
"2. Give replies to your queries about companies...\n\n"\
"P.S. To know why that nerd made me? you are free to ask me :P\n"\
"Ask for help to know what options you have.",
'help': "Baba has got you some help:\n\n"\
"1. You can ask me to unsubscribe/deactivate you from receiving updates .\n"\
"2. You can ask me subscribe/activate your account. from receiving updates.\n",
'deactivate': "Alright pal, It's been a good chat with you, deactivating your account.\n"\
"You can ask me to reactivate it if necessary.",
'activate': "Welcome back!, your account is reactivated",
'wit_error': "Ohho, I'm sick, my brain is not working, Please call my master! \n"\
"https:/m.me/rishabhags/",
'new_company': "Hola!\nNew Company Open for you! \n\n"\
"Company Name: {company_name}\n"\
"Open for: {course}\n"\
"Departments: {department}\n"\
"BTech CTC: {btech_ctc}\n"\
"IDD/IMD CTC: {idd_imd_ctc}\n"\
"X cutoff: {x}\n"\
"XII cutoff: {xii}\n"\
"CGPA cutoff: {cgpa}\n"\
"Status: {status}\n\n"\
"Will keep you updated with this company :D.\n"\
"Cya :)",
'updated_company': "Baba has updates to deliver!\n\n"\
"{0} got updated on the portal\n\n"\
"Updated fields are: \n\n"\
"{1}\n"\
"{2}"\
"\n\nThis is it for now.\nCya :)",
#{1} will store update message
'abuse': "You are so abusive, next time, I'll deactivate your account ",
'lol': "Lol, I was kidding,,. ",
'master': "My master made me because TPO developers ko to `` ne barbaad karke rakkha hai.. "\
"and he knows very well, that jab tak iss des me `` hai, tab tak log * "\
"bante rahege ;P \n\n"\
"P.S. This was a joke, it has nothing to do with anything, we respect TPO portal "\
"developers they have made a great portal. \n"\
"Ask for me for help, if you wanna know what you have got to do.",
'idd_imd_4th_year': "Ops!, you are from 4rth year IDD/IMD, I don't wanna disturb you with updates. \n"\
"I'll have to set your account Invalid.\n\n"\
"For further queries contact https://m.me/rishabh.ags/"
}
field_msg_dict = {
'company_profile': 'Company Profile',
'x': 'X',
'xii': 'XII',
'cgpa': 'CGPA',
'course': 'Course',
'purpose': 'Purpose',
'department': 'Department',
'a_backlog': 'Active backlogs allowed',
't_backlog': 'Total backlogs allowed',
'ppt_date': 'PPT date',
'exam_date': 'Exam date',
'status': 'Status',
'branch_issue_dead': 'Branch issue deadline',
'willingness_dead': 'Willingness deadline',
'btech_ctc': 'B.Tech CTC',
'idd_imd_ctc':'IDD/IMD CTC',
# 'jd': 'JD',
}
# "TPO developers ko to `` ne barbaad karke rakkha hai.. ;P\n"
# "So, hum denge aapko sare updates, about new companies listed in the portal,willingness opening "\
# "and closing reminders ppt reminders, exam date reminders aur bhi bahot kuchh..\n"\
# 'invalid_course': "Baba wants valid course name (btech or idd or imd).\n retry please.",
# "Active backlogs allowed: {8}\n"\
# "Total backlogs allowed: {9}\n"\
| 48.151724 | 112 | 0.560011 |
e9e2e74f010f4bd4956a3cbde97bcbf8f121ba63 | 5,208 | py | Python | geomstats/geometry/matrices.py | PabloJ-1/geomstats | b53f62b745b21972b80bd7222df9af2549b66d64 | [
"MIT"
] | null | null | null | geomstats/geometry/matrices.py | PabloJ-1/geomstats | b53f62b745b21972b80bd7222df9af2549b66d64 | [
"MIT"
] | null | null | null | geomstats/geometry/matrices.py | PabloJ-1/geomstats | b53f62b745b21972b80bd7222df9af2549b66d64 | [
"MIT"
] | null | null | null | """Module exposing the `Matrices` and `MatricesMetric` class."""
from functools import reduce
import geomstats.backend as gs
from geomstats.geometry.euclidean import Euclidean
from geomstats.geometry.riemannian_metric import RiemannianMetric
TOLERANCE = 1e-5
| 29.590909 | 78 | 0.576421 |
e9e2f70538bbc55ae42d19558eee76ef0345309a | 2,338 | py | Python | gamutrf/mqtt_reporter.py | cglewis/gamutRF | d95b36f5893f165ff02701636c82662727d6e275 | [
"Apache-2.0"
] | null | null | null | gamutrf/mqtt_reporter.py | cglewis/gamutRF | d95b36f5893f165ff02701636c82662727d6e275 | [
"Apache-2.0"
] | null | null | null | gamutrf/mqtt_reporter.py | cglewis/gamutRF | d95b36f5893f165ff02701636c82662727d6e275 | [
"Apache-2.0"
] | null | null | null | import gpsd
import json
import logging
import socket
import httpx
import paho.mqtt.client as mqtt
| 33.4 | 107 | 0.579983 |
e9e522181523a4e229d498e313189c98d24c3d87 | 7,377 | py | Python | 2onnx.py | Yifanfanfanfan/flops-counter.pytorch | 5e7670106511f42f258083a01318b386605b61e7 | [
"MIT"
] | null | null | null | 2onnx.py | Yifanfanfanfan/flops-counter.pytorch | 5e7670106511f42f258083a01318b386605b61e7 | [
"MIT"
] | null | null | null | 2onnx.py | Yifanfanfanfan/flops-counter.pytorch | 5e7670106511f42f258083a01318b386605b61e7 | [
"MIT"
] | null | null | null | import os, sys, time, shutil, argparse
from functools import partial
import pickle
sys.path.append('../')
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import datasets, transforms
#import torchvision.models as models
import torch.optim as optim
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim as optim
import torch.multiprocessing as mp
from collections import OrderedDict
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx as torch_onnx
import onnx
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import lab2rgb
from skimage import io
# import prune_util
# from prune_util import GradualWarmupScheduler
# from prune_util import CrossEntropyLossMaybeSmooth
# from prune_util import mixup_data, mixup_criterion
# from utils import save_checkpoint, AverageMeter, visualize_image, GrayscaleImageFolder
# from model import ColorNet
#from wdsr_b import *
#from args import *
import captioning.utils.opts as opts
import captioning.models as models
import captioning.utils.misc as utils
import onnxruntime
if __name__ == '__main__':
main()
check()
| 38.222798 | 143 | 0.682256 |
e9e8878237d9fdf426e86b2606cac1e238054e1a | 8,888 | py | Python | arapheno/phenotypedb/migrations/0001_initial.py | svengato/AraPheno | d6918e2e69c497b7096d9291d904c69310e84d06 | [
"MIT"
] | 5 | 2018-03-24T08:54:50.000Z | 2021-01-19T03:19:42.000Z | arapheno/phenotypedb/migrations/0001_initial.py | svengato/AraPheno | d6918e2e69c497b7096d9291d904c69310e84d06 | [
"MIT"
] | 38 | 2016-08-14T12:09:15.000Z | 2020-10-30T06:02:24.000Z | arapheno/phenotypedb/migrations/0001_initial.py | svengato/AraPheno | d6918e2e69c497b7096d9291d904c69310e84d06 | [
"MIT"
] | 8 | 2016-08-15T06:07:32.000Z | 2020-11-06T06:43:56.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-27 14:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 50.5 | 159 | 0.588884 |
e9e93ad17a56b7c2432a305bc659635d4fd17d0c | 1,870 | py | Python | prior_library_release.py | DReichLab/adna-workflow | 07c6da8e64234decb7373fe7109e09395a45cb58 | [
"BSD-3-Clause"
] | 9 | 2019-05-28T11:16:14.000Z | 2022-02-24T01:22:47.000Z | prior_library_release.py | DReichLab/adna-workflow | 07c6da8e64234decb7373fe7109e09395a45cb58 | [
"BSD-3-Clause"
] | 3 | 2020-01-09T20:12:02.000Z | 2020-11-17T14:50:28.000Z | prior_library_release.py | DReichLab/adna-workflow | 07c6da8e64234decb7373fe7109e09395a45cb58 | [
"BSD-3-Clause"
] | 1 | 2019-08-04T12:46:01.000Z | 2019-08-04T12:46:01.000Z | from release_libraries import LibraryParameters
from bam_finder import getBamPath, library_default_dir, MT_default_dir, ShopVersion
import argparse
import re
from has_read_groups import read_group_checks
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Augment the bam list for a release with a prior existing version of the library")
parser.add_argument("bam_list", help="Each line contains the parameters to build a library bam for release. This includes the library ID, the individual ID, experiment, read group description (sequencing run name with experiment type and udg treatment), experiment, and (bam, sequencing run date) pairs ")
args = parser.parse_args()
with open(args.bam_list) as f:
library_parameters = [LibraryParameters(line) for line in f]
for x in library_parameters:
experiment = x.experiment
if '1240k' in experiment:
experiment = '1240k'
search_directory = MT_default_dir if x.reference == 'rsrs' else library_default_dir
existingBAM = getBamPath(x.library_id, experiment=experiment, reference=x.reference, version_policy='latest', shop_parent_directory=search_directory)
bam = str(existingBAM)
#print(bam)
if len(bam) > 0:
try: # this will match a new pipeline bam
match = re.search('v([0-9]+).bam', bam)
new_version = int(match.group(1)) + 1
has_read_groups, has_real_library_name, date_string = read_group_checks(bam)
except: # if the existing version is Shop's
new_version = 1
shop = ShopVersion(bam)
date_string = shop.date_string
#print('{}\t{}\t{:d}'.format(x.library_id, bam, new_version))
x.version = new_version
x.bam_filenames.append(str(existingBAM))
x.bam_date_strings.append(date_string) # the bam date string is used for generating read groups, which the existing bam does not need
#print('{}\t{}'.format(x.library_id, bam))
print(x)
| 49.210526 | 306 | 0.758289 |
e9e95132c690c91397faab36e332edee82e1ac48 | 3,818 | py | Python | scratch/msf/fast_sample_data.py | sasgc6/pysmurf | a370b515ab717c982781223da147bea3c8fb3a9c | [
"BSD-3-Clause-LBNL"
] | 3 | 2019-10-17T02:37:59.000Z | 2022-03-09T16:42:34.000Z | scratch/msf/fast_sample_data.py | sasgc6/pysmurf | a370b515ab717c982781223da147bea3c8fb3a9c | [
"BSD-3-Clause-LBNL"
] | 446 | 2019-04-10T04:46:20.000Z | 2022-03-15T20:27:57.000Z | scratch/msf/fast_sample_data.py | sasgc6/pysmurf | a370b515ab717c982781223da147bea3c8fb3a9c | [
"BSD-3-Clause-LBNL"
] | 13 | 2019-02-05T18:02:05.000Z | 2021-03-02T18:41:49.000Z | import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
plt.ion()
bands = [2,3]
single_channel_readout = 2
nsamp = 2**25
new_chans = False
#For resonator I/Q high sampled data use eta_mag + eta_phase found in eta scans for Q and +/- 90 deg for I, for off resonance data to look at HEMT, etc set eta_mag = 1 and eta_phase = 0 & 90 or the eta_phase from the closest resonator for "Q" and that +/- 90 for "I"
#In single_channel_readout mode 2 you take data at 2.4MHz and don't need to worry about decimation & filter_alpha, for single_channel_reaout = 1 600 kHz data you do, see confluence page https://confluence.slac.stanford.edu/display/SMuRF/SMuRF+firmware#SMuRFfirmware-Datamodes
if new_chans == True:
chans = {}
freqs = {}
sbs = {}
eta_mags_scaled = {}
eta_phases = {}
for band in bands:
chans[band] = S.which_on(band)
freqs[band] = []
sbs[band] = []
eta_mags_scaled[band] = []
eta_phases[band] = []
for chan in chans[band]:
freqs[band].append(S.channel_to_freq(band,chan))
sbs[band].append(S.freq_to_subband(band,S.channel_to_freq(band,chan))[0])
eta_mags_scaled[band].append(S.get_eta_mag_scaled_channel(band,chan))
eta_phases[band].append(S.get_eta_phase_degree_channel(band,chan))
S.channel_off(band,chan)
freqs[band] = np.asarray(freqs[band])
sbs[band] = np.asarray(sbs[band])
eta_mags_scaled[band] = np.asarray(eta_mags_scaled[band])
eta_phases[band] = np.asarray(eta_phases[band])
for band in bands:
for i,chan in enumerate(chans[band]):
plt.figure()
S.set_fixed_tone(freqs[band][i],12)
S.set_feedback_enable(band,0)
#S.run_serial_gradient_descent(band)
#S.run_serial_eta_scan(band)
S.flux_ramp_off()
#qEtaPhaseDegree = eta_phases[band][i]
qEtaPhaseDegree = 0
#EtaMag = eta_mags_scaled[band][i]
EtaMag = 1
channel = S.which_on(band)[0]
S.set_eta_mag_scaled_channel(band,channel,EtaMag)
alpha = 1.0
for IorQ in ['Q0','Q+','I+','I-']:
if IorQ is 'Q0':
S.set_eta_phase_degree_channel(band,channel,qEtaPhaseDegree)
if IorQ is 'Q+':
S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree+180))
if IorQ is 'I+':
S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree+90))
if IorQ is 'I-':
S.set_eta_phase_degree_channel(band,channel,etaPhaseModDegree(qEtaPhaseDegree-90))
ctime1=int(S.get_timestamp())
filename='%d.dat'%ctime1
# take ~56 sec of data (18750 Hz)^-1 * (2^20) ~ 55.9sec. Have to set kludge_sec=60.
f, df, sync = S.take_debug_data(band, channel=channel, IQstream=False, single_channel_readout=single_channel_readout, nsamp=nsamp,filename=str(ctime1));
f,Pxx = signal.welch(df,nperseg = 2**16,fs=2.4e6)
Pxx = np.sqrt(Pxx)
plt.loglog(f,Pxx,alpha=alpha,label = IorQ+': '+str(ctime1))
alpha = alpha*0.8
#dfs.append(df)
#data=fmt.format([str(ctime1),'%0.6f'%(S.channel_to_freq(band,channel)),filename,IorQ])
#of.write(data)
#of.flush()
plt.xlabel('Frequency [Hz]',fontsize = 16)
plt.ylabel('I/Q Noise',fontsize = 16)
plt.title('Resonator at '+str(np.round(freqs[band][i],1))+ 'MHz')
plt.legend()
plt.show()
plt.savefig(S.plot_dir+'/'+str(ctime1)+'_band_'+str(band)+'_chan_'+str(chan)+'.png')
plt.close()
S.channel_off(band,channel)
S.flux_ramp_on()
| 41.956044 | 275 | 0.628078 |
e9e9975a7e35ce3210ca6631964e51dc707d8e9b | 2,667 | py | Python | kwiklib/utils/settings.py | fiath/test | b50898dafa90e93da48f573e0b3feb1bb6acd8de | [
"MIT",
"BSD-3-Clause"
] | 7 | 2015-01-20T13:55:51.000Z | 2018-02-06T09:31:21.000Z | kwiklib/utils/settings.py | fiath/test | b50898dafa90e93da48f573e0b3feb1bb6acd8de | [
"MIT",
"BSD-3-Clause"
] | 6 | 2015-01-08T18:13:53.000Z | 2016-06-22T09:53:53.000Z | kwiklib/utils/settings.py | fiath/test | b50898dafa90e93da48f573e0b3feb1bb6acd8de | [
"MIT",
"BSD-3-Clause"
] | 8 | 2015-01-22T22:57:19.000Z | 2020-03-19T11:43:56.000Z | """Internal persistent settings store with cPickle."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import cPickle
import os
from kwiklib.utils.globalpaths import ensure_folder_exists
# -----------------------------------------------------------------------------
# Utility functions
# -----------------------------------------------------------------------------
def load(filepath):
    """Return the settings dict stored at *filepath*.

    If the file does not exist yet, an empty settings store is first
    created on disk via :func:`save`, so a fresh dict is returned.
    """
    if not os.path.exists(filepath):
        # First run: materialise an empty settings file.
        save(filepath)
    f = open(filepath, 'rb')
    try:
        return cPickle.load(f)
    finally:
        f.close()
def save(filepath, settings=None):
    """Pickle *settings* to *filepath* and return the saved dict.

    Parameters
    ----------
    filepath : str
        Destination path of the pickled settings store.
    settings : dict, optional
        Settings to persist; defaults to an empty dict.

    Returns
    -------
    dict
        The settings that were written (useful for chaining).
    """
    # Fixed: avoid the shared-mutable-default-argument pitfall; an
    # omitted argument now always yields a fresh empty dict.
    if settings is None:
        settings = {}
    with open(filepath, 'wb') as f:
        cPickle.dump(settings, f)
    return settings
# -----------------------------------------------------------------------------
# Settings
# -----------------------------------------------------------------------------
| 31.011628 | 79 | 0.490064 |
e9e9ed2bd4fb85cec280f41104f00f0f5fe284be | 24,098 | py | Python | cpu.py | philippechataignon/applepy | 1b9d1709a4490f49fa06739bb44c0602bb07b730 | [
"MIT"
] | null | null | null | cpu.py | philippechataignon/applepy | 1b9d1709a4490f49fa06739bb44c0602bb07b730 | [
"MIT"
] | null | null | null | cpu.py | philippechataignon/applepy | 1b9d1709a4490f49fa06739bb44c0602bb07b730 | [
"MIT"
] | null | null | null | import sys
import pygame
from utils import signed
| 36.960123 | 197 | 0.606523 |
e9ead4efec2b488b003bd50670c0f814058b8f19 | 29 | py | Python | router/tasks/__init__.py | smallwat3r/shopify-webhook-processor | 4f16017cb9695ca00eb6d95e4381a8442b3dc0e3 | [
"MIT"
] | 1 | 2021-08-30T14:01:03.000Z | 2021-08-30T14:01:03.000Z | router/tasks/__init__.py | smallwat3r/shopify-webhook-processor | 4f16017cb9695ca00eb6d95e4381a8442b3dc0e3 | [
"MIT"
] | null | null | null | router/tasks/__init__.py | smallwat3r/shopify-webhook-processor | 4f16017cb9695ca00eb6d95e4381a8442b3dc0e3 | [
"MIT"
] | 2 | 2021-08-30T14:01:04.000Z | 2021-09-07T01:07:41.000Z | from .tasks import Processor
| 14.5 | 28 | 0.827586 |
e9ebfd8edc0153bf61129fe91fefdc9f0a9e4300 | 1,392 | py | Python | dogs/dogs.py | RafaelBadaro-zz/dogtour-backend | 30a83eac46dddaf29c3c643e2dc4dd71948484f0 | [
"Unlicense"
] | null | null | null | dogs/dogs.py | RafaelBadaro-zz/dogtour-backend | 30a83eac46dddaf29c3c643e2dc4dd71948484f0 | [
"Unlicense"
] | 2 | 2019-11-10T18:08:39.000Z | 2020-07-11T21:22:42.000Z | dogs/dogs.py | RafaelBadaro-zz/dogtour-backend | 30a83eac46dddaf29c3c643e2dc4dd71948484f0 | [
"Unlicense"
] | 1 | 2022-02-12T12:14:40.000Z | 2022-02-12T12:14:40.000Z | import uuid
from nameko.rpc import RpcProxy, rpc
from nameko_redis import Redis
| 19.068493 | 56 | 0.481322 |
e9ec78a38e45c3ed801db04c7a18df698501ab39 | 1,531 | py | Python | examples/demo_OT_2D_samples.py | agramfort/POT | 8dbfd3edae649f5f3e87be4a3ce446c59729b2f7 | [
"MIT"
] | null | null | null | examples/demo_OT_2D_samples.py | agramfort/POT | 8dbfd3edae649f5f3e87be4a3ce446c59729b2f7 | [
"MIT"
] | null | null | null | examples/demo_OT_2D_samples.py | agramfort/POT | 8dbfd3edae649f5f3e87be4a3ce446c59729b2f7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Demo for 2D Optimal transport between empirical distributions
@author: rflamary
"""
import numpy as np
import matplotlib.pylab as pl
import ot
#%% parameters and data generation
n=20 # nb samples
mu_s=np.array([0,0])
cov_s=np.array([[1,0],[0,1]])
mu_t=np.array([4,4])
cov_t=np.array([[1,-.8],[-.8,1]])
xs=ot.datasets.get_2D_samples_gauss(n,mu_s,cov_s)
xt=ot.datasets.get_2D_samples_gauss(n,mu_t,cov_t)
a,b = ot.unif(n),ot.unif(n) # uniform distribution on samples
# loss matrix
M=ot.dist(xs,xt)
M/=M.max()
#%% plot samples
# Scatter the two empirical distributions on one figure.
pl.figure(1)
pl.plot(xs[:,0],xs[:,1],'+b',label='Source samples')
pl.plot(xt[:,0],xt[:,1],'xr',label='Target samples')
pl.legend(loc=0)
# Fixed typo in the title ('traget' -> 'target').
pl.title('Source and target distributions')
pl.figure(2)
pl.imshow(M,interpolation='nearest')
pl.title('Cost matrix M')
#%% EMD
G0=ot.emd(a,b,M)
pl.figure(3)
pl.imshow(G0,interpolation='nearest')
pl.title('OT matrix G0')
pl.figure(4)
ot.plot.plot2D_samples_mat(xs,xt,G0,c=[.5,.5,1])
pl.plot(xs[:,0],xs[:,1],'+b',label='Source samples')
pl.plot(xt[:,0],xt[:,1],'xr',label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix with samples')
#%% sinkhorn
# reg term
lambd=5e-3
Gs=ot.sinkhorn(a,b,M,lambd)
pl.figure(5)
pl.imshow(Gs,interpolation='nearest')
pl.title('OT matrix sinkhorn')
pl.figure(6)
ot.plot.plot2D_samples_mat(xs,xt,Gs,color=[.5,.5,1])
pl.plot(xs[:,0],xs[:,1],'+b',label='Source samples')
pl.plot(xt[:,0],xt[:,1],'xr',label='Target samples')
pl.legend(loc=0)
pl.title('OT matrix Sinkhorn with samples')
| 19.379747 | 61 | 0.677335 |
e9ed96eda4a6de7f5ebf0c1ccffa4e86b1a28787 | 7,594 | py | Python | src/morphometrics/utils/surface_utils.py | kevinyamauchi/morphometrics | f48cb4fa8c06b726f0b699940c32ac8df466f71c | [
"BSD-3-Clause"
] | 5 | 2022-03-17T18:14:18.000Z | 2022-03-23T00:48:17.000Z | src/morphometrics/utils/surface_utils.py | kevinyamauchi/morphometrics | f48cb4fa8c06b726f0b699940c32ac8df466f71c | [
"BSD-3-Clause"
] | 11 | 2022-01-27T14:10:43.000Z | 2022-03-20T18:22:30.000Z | src/morphometrics/utils/surface_utils.py | kevinyamauchi/morphometrics | f48cb4fa8c06b726f0b699940c32ac8df466f71c | [
"BSD-3-Clause"
] | 1 | 2022-03-17T18:17:21.000Z | 2022-03-17T18:17:21.000Z | from typing import List, Tuple
import numpy as np
import pymeshfix
import trimesh.voxel.creation
from skimage.measure import marching_cubes
from trimesh import Trimesh
from trimesh.smoothing import filter_taubin
from ..types import BinaryImage, LabelImage
def _round_to_pitch(coordinate: np.ndarray, pitch: float) -> np.ndarray:
"""Round a point to the nearest point on a grid that starts at the origin
with a specified pitch.
Parameters
----------
coordinate : np.ndarray
The coordinate to round
pitch : float
The pitch of the grid. Assumed to the be same in all directions.
Returns
-------
rounded_point : np.ndarray
The point after rounding to the nearest grid point.
"""
return pitch * np.round(coordinate / pitch, decimals=0)
def repair_mesh(mesh: Trimesh) -> Trimesh:
    """Return a watertight copy of ``mesh`` repaired with pymeshfix.

    Parameters
    ----------
    mesh : Trimesh
        The mesh to be repaired.

    Returns
    -------
    Trimesh
        The repaired, watertight mesh.
    """
    # pymeshfix operates on raw vertex/face arrays rather than Trimesh objects.
    clean_vertices, clean_faces = pymeshfix.clean_from_arrays(
        np.asarray(mesh.vertices), np.asarray(mesh.faces)
    )
    fixed = Trimesh(vertices=clean_vertices, faces=clean_faces)
    # Fail loudly if pymeshfix could not close the surface.
    assert fixed.is_watertight, "Mesh was unable to be repaired"
    return fixed
def binary_mask_to_surface(
    object_mask: BinaryImage, n_mesh_smoothing_interations: int = 50
) -> Trimesh:
    """Convert the surface of a 3D binary mask into a watertight mesh.

    Parameters
    ----------
    object_mask : BinaryImage
        A 3D binary image corresponding to the object you want to mesh.
    n_mesh_smoothing_interations : int
        Number of iterations of trimesh's taubin smoothing filter to
        apply (0 disables smoothing):
        https://trimsh.org/trimesh.smoothing.html#trimesh.smoothing.filter_taubin
        Default value is 50.

    Returns
    -------
    Trimesh
        The resulting watertight mesh as a trimesh.Trimesh object.
        https://trimsh.org/trimesh.base.html#github-com-mikedh-trimesh
    """
    # Extract the iso-surface at level 0, then let pymeshfix close any
    # holes or self-intersections left behind by marching cubes.
    raw_vertices, raw_faces, _, _ = marching_cubes(object_mask, 0)
    fixed_vertices, fixed_faces = pymeshfix.clean_from_arrays(raw_vertices, raw_faces)
    surface = Trimesh(vertices=fixed_vertices, faces=fixed_faces)
    # Smooth in place; taubin filtering limits the shrinkage plain
    # Laplacian smoothing would introduce.
    if n_mesh_smoothing_interations > 0:
        filter_taubin(surface, iterations=n_mesh_smoothing_interations)
    return surface
def voxelize_closed_surface(
    mesh: Trimesh, pitch: float, repair_mesh: bool = True
) -> Tuple[BinaryImage, np.ndarray]:
    """Voxelize a closed surface mesh.

    Parameters
    ----------
    mesh : Trimesh
        The surface to voxelize.
    pitch : float
        The voxel width in mesh units. Voxels have the
        same width in each dimension (i.e., are cubes).
    repair_mesh : bool
        If True, attempt to repair a non-watertight mesh with pymeshfix
        before voxelizing. Default value is True.

    Returns
    -------
    image : BinaryImage
        The binary mask created from the voxelized mesh.
    image_origin : np.ndarray
        The upper left hand corner of the voxelized image in mesh units
        (i.e., minimum of the axis aligned bounding box).
    """
    # Bug fix: the repair_mesh flag was previously ignored (its branch was
    # commented out and syntactically broken). The repair is inlined here
    # because the parameter name shadows the module-level repair_mesh()
    # helper inside this function.
    if repair_mesh and not mesh.is_watertight:
        clean_vertices, clean_faces = pymeshfix.clean_from_arrays(
            np.asarray(mesh.vertices), np.asarray(mesh.faces)
        )
        mesh = Trimesh(vertices=clean_vertices, faces=clean_faces)

    bounding_box = mesh.bounds
    centroid = np.mean(bounding_box, axis=0)

    # convert the centroid to the nearest integer multiple of the pitch
    rounded_centroid = _round_to_pitch(coordinate=centroid, pitch=pitch)

    # find the minimum cube half-width that encompasses the full mesh
    cube_half_width = np.max(bounding_box - rounded_centroid)

    # get the number of voxels for the cube half-width, then pad with one
    # voxel on each side to make sure the full mesh is in range
    n_voxels_cube_half_width = int(np.ceil(cube_half_width / pitch))
    n_voxels_cube_half_width += 1

    # upper left hand (i.e., minimum) corner of the voxelized image in mesh coordinates
    image_origin = rounded_centroid - (n_voxels_cube_half_width * pitch)

    voxel_grid = trimesh.voxel.creation.local_voxelize(
        mesh=mesh,
        point=rounded_centroid,
        pitch=pitch,
        radius=n_voxels_cube_half_width,
        fill=True,
    )

    return voxel_grid.matrix.astype(bool), image_origin
def closed_surfaces_to_label_image(
    meshes: List[Trimesh],
    pitch: float,
    crop_around_mesh: bool = False,
    repair_mesh: bool = False,
) -> Tuple[LabelImage, np.ndarray]:
    """Create a label image from a set of meshes with closed surfaces.

    Notes:
        - meshes must be water tight for accurate voxelization.
        - Labels are assigned in the order the meshes appear in the list.
        - all meshes must be in the same coordinate system and scale.

    Parameters
    ----------
    meshes : List[Trimesh]
        The meshes to convert to a label image.
    pitch : float
        The width of a voxel in mesh units. Voxels are assumed to be cubes.
    crop_around_mesh : bool
        When set to True, the image is cropped around the axis aligned bounding box
        of the set of meshes with a one voxel pad in each direction.
        The default value is False
    repair_mesh : bool
        When set to True, will attempt to repair meshes with PyMeshFix.
        Default value is False.

    Returns
    -------
    label_image : LabelImage
        The label image generated from the meshes.
    image_origin : np.ndarray
        The coordinate of the upper left hand corner (i.e., minimum) of the
        label_image in mesh coordinates.
    """
    # get the bounding box around each mesh (2x3 min/max corners)
    bounding_boxes = [mesh.bounds for mesh in meshes]
    # get the bounding box around all of them
    all_corners = np.concatenate(bounding_boxes, axis=0)
    min_corner = np.min(all_corners, axis=0)
    max_corner = np.max(all_corners, axis=0)
    # round the corners to the nearest voxel (in mesh coordinates)
    min_corner_rounded = _round_to_pitch(coordinate=min_corner, pitch=pitch)
    max_corner_rounded = _round_to_pitch(coordinate=max_corner, pitch=pitch)
    # pad the bounding box by one voxel to make sure everything is accounted for
    min_corner_rounded -= pitch
    max_corner_rounded += pitch
    if crop_around_mesh is True:
        image_origin = min_corner_rounded
    else:
        # NOTE(review): with crop_around_mesh=False the origin is fixed at
        # (0, 0, 0); meshes with negative coordinates would yield negative
        # voxel indices below (numpy wrap-around) — confirm inputs live in
        # the positive octant when cropping is disabled.
        image_origin = np.array([0, 0, 0])
    # determine the size of the image in pixels
    image_shape_mesh_units = max_corner_rounded - image_origin
    image_shape_voxels = np.round(image_shape_mesh_units / pitch, decimals=0).astype(
        int
    )
    # create the blank label image (uint16 caps the label count at 65535)
    label_image = np.zeros(image_shape_voxels, dtype=np.uint16)
    for i, mesh in enumerate(meshes):
        voxelized, origin = voxelize_closed_surface(
            mesh, pitch=pitch, repair_mesh=repair_mesh
        )
        # get the coordinates of the voxels inside of the mesh
        filled_voxel_coordinates = np.argwhere(voxelized)
        # get the offset between the label image indices and the voxelized mesh indices
        mesh_offset = np.round((origin - image_origin) / pitch, decimals=0)
        # offset the voxel coordinates into label-image index space
        filled_voxel_indices = np.round(
            filled_voxel_coordinates + mesh_offset, decimals=0
        ).astype(int)
        # set the label value; overlapping meshes are overwritten by
        # whichever mesh appears later in the list
        label_value = i + 1
        label_image[
            filled_voxel_indices[:, 0],
            filled_voxel_indices[:, 1],
            filled_voxel_indices[:, 2],
        ] = label_value
    return label_image, image_origin
| 32.314894 | 95 | 0.684093 |
e9eda2a3fc73ffe30b97e1cd86e60cd02bdf72a7 | 1,402 | py | Python | bigflow_python/python/bigflow/pipeline/test/testdata/columns/columns/column_sum.py | advancedxy/bigflow_python | 8a244b483404fde7afc42eee98bc964da8ae03e2 | [
"Apache-2.0"
] | 1,236 | 2017-11-14T11:10:10.000Z | 2022-03-08T11:54:41.000Z | bigflow_python/python/bigflow/pipeline/test/testdata/columns/columns/column_sum.py | advancedxy/bigflow_python | 8a244b483404fde7afc42eee98bc964da8ae03e2 | [
"Apache-2.0"
] | 38 | 2017-11-14T16:29:12.000Z | 2020-01-23T08:32:04.000Z | bigflow_python/python/bigflow/pipeline/test/testdata/columns/columns/column_sum.py | advancedxy/bigflow_python | 8a244b483404fde7afc42eee98bc964da8ae03e2 | [
"Apache-2.0"
] | 184 | 2017-11-27T07:23:36.000Z | 2022-03-14T02:54:16.000Z | #!/usr/bin/env python
# encoding: utf-8
########################################################################
#
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
########################################################################
from bigflow import transforms
def column_sum(pcollection, columns):
    """
    Sum the selected columns of each element in a PCollection.

    Args:
      pcollection (PCollection): input PCollection whose elements are
        indexable records (e.g. tuples).
      columns (list): indices of the columns to sum.

    Returns:
      PObject: a PObject holding the per-column sums.

    >>> import columns
    >>> _p = _pipeline.parallelize([(1, 1, 1), (1, 2, 2), (1, 3, 1)])
    >>> columns.column_sum(_p, [0, 1]).get()
    [3, 6]
    """
    # NOTE(review): `cols` appears unused here; presumably `_get_columns`
    # (defined elsewhere in this module, not visible in this chunk) reads
    # the column selection — confirm how it is threaded through.
    cols = columns
    return pcollection.map(_get_columns) \
            .reduce(lambda x, y: [a + b for a, b in zip(x, y)])
| 28.04 | 74 | 0.597004 |
e9ee58711825a498c9db3c3f37e476c5e56bb0a6 | 282 | py | Python | auction/models/bidbasket.py | littlepea/django-auction | fe0219faabe17efbeca1be51869d750e82299941 | [
"MIT"
] | 10 | 2015-01-13T02:51:35.000Z | 2021-01-25T21:02:29.000Z | auction/models/bidbasket.py | JohnRomanski/django-auction | bc6982c8f34a9a6914badb203424eca7f3219685 | [
"MIT"
] | 2 | 2016-08-05T09:24:30.000Z | 2020-06-28T06:00:11.000Z | auction/models/bidbasket.py | JohnRomanski/django-auction | bc6982c8f34a9a6914badb203424eca7f3219685 | [
"MIT"
] | 22 | 2015-03-12T10:41:52.000Z | 2021-11-23T14:33:09.000Z | import importlib
from django.conf import settings
from auction.utils.loader import load_class
AUCTION_BIDBASKET_MODEL = getattr(settings, 'AUCTION_BIDBASKET_MODEL',
'auction.models.defaults.BidBasket')
BidBasket = load_class(AUCTION_BIDBASKET_MODEL, 'AUCTION_BIDBASKET_MODEL') | 35.25 | 74 | 0.840426 |
e9eed597103f69eb9973238f713e70a5ed271b2e | 551 | py | Python | stixpy/timeseries/tests/test_quicklook.py | nicHoch/stixpy | cdb86094995590da36f3ae5e01f4ca4b9aac819c | [
"BSD-3-Clause"
] | 4 | 2021-07-06T14:42:09.000Z | 2022-02-24T10:19:18.000Z | stixpy/timeseries/tests/test_quicklook.py | nicHoch/stixpy | cdb86094995590da36f3ae5e01f4ca4b9aac819c | [
"BSD-3-Clause"
] | 30 | 2020-10-02T20:24:28.000Z | 2022-03-31T18:29:07.000Z | stixpy/timeseries/tests/test_quicklook.py | nicHoch/stixpy | cdb86094995590da36f3ae5e01f4ca4b9aac819c | [
"BSD-3-Clause"
] | 8 | 2021-04-16T11:00:13.000Z | 2022-03-31T10:09:29.000Z | from pathlib import Path
import pytest
from sunpy.timeseries import TimeSeries
from stixpy.data import test
from stixpy.timeseries.quicklook import *
| 22.958333 | 58 | 0.787659 |
e9f001a0eb4f10eb622617d07d8ad3650ace4a3c | 2,284 | py | Python | roberta_ses/datasets/sst_dataset.py | sythello/Roberta_SES | 289d575b9330cb6ae61190846448bd5368d73453 | [
"Apache-2.0"
] | null | null | null | roberta_ses/datasets/sst_dataset.py | sythello/Roberta_SES | 289d575b9330cb6ae61190846448bd5368d73453 | [
"Apache-2.0"
] | null | null | null | roberta_ses/datasets/sst_dataset.py | sythello/Roberta_SES | 289d575b9330cb6ae61190846448bd5368d73453 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file : sst_dataset.py
@author: zijun
@contact : zijun_sun@shannonai.com
@date : 2020/11/17 11:45
@version: 1.0
@desc : sst5 and imdb task use the same dataset
"""
import os
from functools import partial
import torch
from transformers import RobertaTokenizer
from torch.utils.data import Dataset, DataLoader
from roberta_ses.datasets.collate_functions import collate_to_max_length
if __name__ == '__main__':
unit_test()
| 30.453333 | 87 | 0.651926 |
e9f017283f2c9870d465de8537e58d7f7588313c | 8,068 | py | Python | tests/gem5/configs/boot_kvm_fork_run.py | darchr/gem5 | 0feb0a34db519523a8595f6d1543f7412259ba17 | [
"BSD-3-Clause"
] | 19 | 2018-07-20T15:08:50.000Z | 2022-03-26T16:15:59.000Z | tests/gem5/configs/boot_kvm_fork_run.py | darchr/gem5 | 0feb0a34db519523a8595f6d1543f7412259ba17 | [
"BSD-3-Clause"
] | 148 | 2018-07-20T00:58:36.000Z | 2021-11-16T01:52:33.000Z | tests/gem5/configs/boot_kvm_fork_run.py | darchr/gem5 | 0feb0a34db519523a8595f6d1543f7412259ba17 | [
"BSD-3-Clause"
] | 10 | 2019-01-10T03:01:30.000Z | 2022-01-21T18:36:18.000Z | # Copyright (c) 2021 The University of Texas at Austin
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Austin Harris
#
"""
This script tests forking gem5 with the KVM cores and switching cores in the
child process. First, the test boots linux with KVM and tests fast-forwarding
with instruction exit events. Then the test forks the simulation, waits for the
child to simulate until completion, and then simulates to completion in the
parent process.
"""
import argparse
import os
import sys
from textwrap import dedent
import m5
from m5.objects import Root
from gem5.components.boards.x86_board import X86Board
from gem5.coherence_protocol import CoherenceProtocol
from gem5.isas import ISA
from gem5.components.memory.single_channel import SingleChannelDDR3_1600
from gem5.components.processors.cpu_types import CPUTypes
from gem5.components.processors.simple_switchable_processor import (
SimpleSwitchableProcessor,
)
from gem5.resources.resource import Resource
from gem5.runtime import (
get_runtime_coherence_protocol, get_runtime_isa
)
from gem5.utils.requires import requires
parser = argparse.ArgumentParser(
description="A script to test forking gem5 and switching cpus."
)
parser.add_argument(
"-m",
"--mem-system",
type=str,
choices=("classic", "mi_example", "mesi_two_level"),
required=True,
help="The memory system.",
)
parser.add_argument(
"-n",
"--num-cpus",
type=int,
choices=(1, 2, 4, 8),
default=4,
help="The number of CPUs.",
)
parser.add_argument(
"-c",
"--cpu",
type=str,
choices=("kvm", "atomic", "timing", "o3"),
required=True,
help="The CPU type.",
)
parser.add_argument(
"-r",
"--resource-directory",
type=str,
required=False,
help="The directory in which resources will be downloaded or exist.",
)
parser.add_argument(
"-o",
"--override-download",
action="store_true",
help="Override a local resource if the hashes do not match.",
)
parser.add_argument(
"-k",
"--kernel-args",
type=str,
default="init=/root/gem5_init.sh",
help="Additional kernel boot arguments.",
)
parser.add_argument(
"-f",
"--num-forks",
type=int,
default=4,
help="The number of times to fork gem5.",
)
args = parser.parse_args()
coherence_protocol_required = None
if args.mem_system == "mi_example":
coherence_protocol_required = CoherenceProtocol.MI_EXAMPLE
elif args.mem_system == "mesi_two_level":
coherence_protocol_required = CoherenceProtocol.MESI_TWO_LEVEL
requires(
isa_required=ISA.X86,
coherence_protocol_required=coherence_protocol_required,
kvm_required=(args.cpu == "kvm"),
)
cache_hierarchy = None
if args.mem_system == "mi_example":
from gem5.components.cachehierarchies.ruby.\
mi_example_cache_hierarchy import (
MIExampleCacheHierarchy,
)
cache_hierarchy = MIExampleCacheHierarchy(size="32kB", assoc=8)
elif args.mem_system == "mesi_two_level":
from gem5.components.cachehierarchies.ruby.\
mesi_two_level_cache_hierarchy import (
MESITwoLevelCacheHierarchy,
)
cache_hierarchy = MESITwoLevelCacheHierarchy(
l1d_size="16kB",
l1d_assoc=8,
l1i_size="16kB",
l1i_assoc=8,
l2_size="256kB",
l2_assoc=16,
num_l2_banks=1,
)
elif args.mem_system == "classic":
from gem5.components.cachehierarchies.classic.\
private_l1_cache_hierarchy import (
PrivateL1CacheHierarchy,
)
cache_hierarchy = PrivateL1CacheHierarchy(l1d_size="16kB", l1i_size="16kB")
else:
raise NotImplementedError(
"Memory system '{}' is not supported in the boot tests.".format(
args.mem_system
)
)
# Sanity check: one of the branches above must have set a hierarchy.
# (fixed: compare to None with identity, not equality)
assert cache_hierarchy is not None
# Setup the system memory.
memory = SingleChannelDDR3_1600(size="3GB")
# Setup a Processor.
cpu_type = None
if args.cpu == "kvm":
cpu_type = CPUTypes.KVM
elif args.cpu == "atomic":
cpu_type = CPUTypes.ATOMIC
elif args.cpu == "timing":
cpu_type = CPUTypes.TIMING
elif args.cpu == "o3":
cpu_type = CPUTypes.O3
else:
raise NotImplementedError(
"CPU type '{}' is not supported in the boot tests.".format(args.cpu)
)
# Sanity check: one of the branches above must have set a CPU type.
# (fixed: compare to None with identity, not equality)
assert cpu_type is not None
processor = SimpleSwitchableProcessor(
starting_core_type=CPUTypes.KVM,
switch_core_type=cpu_type,
num_cores=args.num_cpus,
)
# Setup the motherboard.
motherboard = X86Board(
clk_freq="3GHz",
processor=processor,
memory=memory,
cache_hierarchy=cache_hierarchy,
exit_on_work_items=True,
)
motherboard.connect_things()
# Set the Full System workload.
motherboard.set_workload(
kernel=Resource(
"x86-linux-kernel-5.4.49",
override=args.override_download,
resource_directory=args.resource_directory,
),
disk_image=Resource(
"x86-ubuntu-img",
override=args.override_download,
resource_directory=args.resource_directory,
),
command=dedent(
"""
m5 exit # signal end of boot
m5 exit # exit in children and parent
"""
),
kernel_args=[args.kernel_args]
)
# Begin running of the simulation. This will exit once the Linux system boot
# is complete.
print("Running with ISA: " + get_runtime_isa().name)
print("Running with protocol: " + get_runtime_coherence_protocol().name)
print()
root = Root(full_system=True, system=motherboard)
# TODO: This is annoying -- is there a way to make the simulation quantum
# be set automatically when running with KVM cores?
root.sim_quantum = int(1e9)
# Disable the gdb ports. Required for forking.
m5.disableAllListeners()
m5.instantiate()
# Simulate the inital boot with the starting KVM cpu
exit_event = m5.simulate()
print("Boot finished", exit_event.getCause())
print("Starting fork and switch processors test")
pids = []
for i in range(args.num_forks):
pid = m5.fork("%(parent)s/" + str(m5.curTick()))
if pid == 0: # in child
print(f"Switching processors in child {i}.")
processor.switch()
exit_event = m5.simulate()
if exit_event.getCause() != "m5_exit instruction encountered":
raise Exception(f"Expected m5 exit, got {exit_event.getCause()}")
print("Child finished, exiting: ", exit_event.getCause())
sys.exit(0)
else:
pids.append(pid)
print("Waiting for children...")
for pid in pids:
print (os.waitpid(pid, 0))
print("Children finished! Running to completion in parent.")
exit_event = m5.simulate()
if exit_event.getCause() != "m5_exit instruction encountered":
raise Exception(f"Expected m5 exit, got {exit_event.getCause()}")
| 29.661765 | 79 | 0.716534 |
e9f050b89ff8d6e83255108084e3c376a0039fc7 | 1,203 | py | Python | rioxarray/write.py | kadyb/raster-benchmark | 78733ff75181713071cc0694e187a2ac83f76752 | [
"MIT"
] | 11 | 2021-04-15T09:51:48.000Z | 2022-02-08T13:01:28.000Z | rioxarray/write.py | kadyb/raster-benchmark | 78733ff75181713071cc0694e187a2ac83f76752 | [
"MIT"
] | 11 | 2021-02-16T12:43:07.000Z | 2021-12-14T19:57:10.000Z | rioxarray/write.py | kadyb/raster-benchmark | 78733ff75181713071cc0694e187a2ac83f76752 | [
"MIT"
] | 2 | 2021-07-22T14:01:46.000Z | 2021-07-25T05:24:51.000Z | # -*- coding: utf-8 -*-
import os
import timeit
import xarray
import rioxarray
import pandas as pd
wd = os.getcwd()
catalog = os.path.join('data', 'LC08_L1TP_190024_20200418_20200822_02_T1')
rasters = os.listdir(catalog)
rasters = [r for r in rasters if r.endswith(('.TIF'))]
rasters = [os.path.join(wd, catalog, r) for r in rasters]
### raster stack
band_names = ["B1", "B10", "B11", "B2", "B3", "B4", "B5", "B6", "B7", "B9"]
ras = []
for i, path in enumerate(rasters):
ras.append(rioxarray.open_rasterio(path, masked = True).squeeze())
ras = xarray.concat(ras, "band")
ras.coords["band"] = band_names
t_list = [None] * 10
stack_file = 'stack.TIF'
for i in range(10):
tic = timeit.default_timer()
ras.rio.to_raster(stack_file, dtype = "uint16", compress = "LZW")
toc = timeit.default_timer()
t_list[i] = round(toc - tic, 2)
os.remove(stack_file)
df = {'task': ['write'] * 10, 'package': ['rioxarray'] * 10, 'time': t_list}
df = pd.DataFrame.from_dict(df)
if not os.path.isdir('results'): os.mkdir('results')
savepath = os.path.join('results', 'write-rioxarray.csv')
df.to_csv(savepath, index = False, decimal = ',', sep = ';') | 27.340909 | 77 | 0.633416 |
e9f15a2385f1ea0dee9385406e24c070bd322820 | 14,534 | py | Python | manifold/manifold.py | timotheosh/Manifest | d3917cb386aa351335c38f08e4c7d36136d8863f | [
"MIT"
] | 2 | 2021-08-13T12:38:24.000Z | 2021-08-21T19:36:42.000Z | manifold/manifold.py | timotheosh/Manifold | d3917cb386aa351335c38f08e4c7d36136d8863f | [
"MIT"
] | null | null | null | manifold/manifold.py | timotheosh/Manifold | d3917cb386aa351335c38f08e4c7d36136d8863f | [
"MIT"
] | null | null | null | # encoding: utf-8
'''manifold
An SMF service manifest creation tool.
'''
__author__ = 'Chris Miles'
__copyright__ = '(c) Chris Miles 2008. All rights reserved.'
__license__ = 'GPL http://www.gnu.org/licenses/gpl.txt'
__id__ = '$Id: manifold.py 7 2009-03-24 09:10:48Z miles.chris $'
__url__ = '$URL: https://manifold.googlecode.com/svn/trunk/manifold/manifold.py $'
# ---- Imports ----
# - Python Modules -
import logging
import os
import optparse
import sys
# - Genshi Modules -
from genshi.template import MarkupTemplate
# - Project Modules -
from .release import version
# ---- Genshi Templates ----
MANIFEST_TEMPLATE = """<?xml version="1.0"?>
<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
<!--
Created by Manifold
-->
<service_bundle type='manifest' name='${service_name}' xmlns:py='http://genshi.edgewall.org/'>
<service
name='${service_category}/${service_name}'
type='service'
version='${service_version}'>
<create_default_instance py:if="not multi_instance" enabled='${instance_enabled}' />
<single_instance py:if="not multi_instance" />
<dependency py:if="depends_on_network"
name='network'
grouping='require_all'
restart_on='error'
type='service'>
<service_fmri value='svc:/milestone/network:default'/>
</dependency>
<dependency py:if="depends_on_filesystem"
name='filesystem'
grouping='require_all'
restart_on='error'
type='service'>
<service_fmri value='svc:/system/filesystem/local'/>
</dependency>
<instance py:if="multi_instance" name='${instance_name}' enabled='${instance_enabled}'>
<!--! This part used for a multi instance service. -->
<method_context>
<method_credential py:if="method_credential_user and method_credential_group" user='${method_credential_user}' group='${method_credential_group}' />
</method_context>
<exec_method
type='method'
name='start'
exec='${exec_method_start}'
timeout_seconds='60' />
<exec_method
type='method'
name='stop'
exec='${exec_method_stop}'
timeout_seconds='60' />
<property_group name='startd' type='framework'>
<propval py:if="startd_model=='wait'" name='duration' type='astring' value='child' />
<propval py:if="startd_model=='transient'" name='duration' type='astring' value='transient' />
<propval py:if="startd_model=='contract'" name='duration' type='astring' value='contract' />
<propval name='ignore_error' type='astring' value='core,signal' />
</property_group>
<property_group name='application' type='application'>
<propval py:if="config_file" name='config_file' type='astring' value='${config_file}' />
</property_group>
</instance>
<a_single_instance py:if="not multi_instance" py:strip="True">
<!--! This part used for a single instance only service. -->
<method_context>
<method_credential py:if="method_credential_user and method_credential_group" user='${method_credential_user}' group='${method_credential_group}' />
</method_context>
<exec_method
type='method'
name='start'
exec='${exec_method_start}'
timeout_seconds='60' />
<exec_method
type='method'
name='stop'
exec='${exec_method_stop}'
timeout_seconds='60' />
<property_group name='startd' type='framework'>
<propval py:if="startd_model=='wait'" name='duration' type='astring' value='child' />
<propval py:if="startd_model=='transient'" name='duration' type='astring' value='transient' />
<propval py:if="startd_model=='contract'" name='duration' type='astring' value='contract' />
<propval name='ignore_error' type='astring' value='core,signal' />
</property_group>
<property_group name='application' type='application'>
<propval py:if="config_file" name='config_file' type='astring' value='${config_file}' />
</property_group>
</a_single_instance>
<stability value='Evolving' />
<template>
<common_name>
<loctext xml:lang='C'>
${common_name}
</loctext>
</common_name>
</template>
</service>
</service_bundle>
"""
# ---- Classes ----
# ---- Functions ----
def ask_user(service_questions):
    """Prompt the user with each question in turn and collect the answers.

    Each question object exposes a ``name`` attribute and an
    ``ask(answers)`` method; the partially-filled answer dict is passed to
    ``ask`` so later questions can depend on earlier responses.

    Returns:
        dict mapping question name -> user response.
    """
    answers = {}
    for question in service_questions:
        print()  # blank line between prompts for readability
        answers[question.name] = question.ask(answers)
    return answers
def generate_service_config():
    """Interactively collect the settings needed to render an SMF manifest.

    Builds the ordered list of questions (service identity, exec methods,
    process-management model, dependencies, credentials) and delegates the
    prompting to ask_user().  Question order matters: later questions may
    consult earlier answers.

    Returns:
        dict mapping question name -> user-supplied value, as produced by
        ask_user().
    """
    # NOTE(review): CONFIG_STR / CONFIG_IF / CONFIG_BOOL are project-defined
    # question types declared elsewhere in this file — presumably string,
    # conditional-subquestion and yes/no prompts respectively; confirm there.
    service_questions = [
        # Service identity: FMRI is built from category/name, e.g. site/myapp.
        CONFIG_STR(
            'service_category',
            require_value=True,
            default='site',
            description='The service category',
            example="'site' or '/application/database'"
        ),
        CONFIG_STR(
            'service_name',
            require_value=True,
            description="""The name of the service, which follows the service category
""",
            example="'myapp'"
        ),
        CONFIG_STR(
            'service_version',
            require_value=True,
            description="The version of the service manifest",
            default='1',
            example="'1'"
        ),
        CONFIG_STR(
            'common_name',
            require_value=False,
            description="""The human readable name of the service
""",
            example="'My service.'"
        ),
        # Conditional block: only asks for an instance name when the service
        # supports multiple instances.
        CONFIG_IF(
            'multi_instance',
            description="Can this service run multiple instances",
            default=False,
            questions=[
                CONFIG_STR('instance_name', require_value=True, default='default', example="default")
            ]
        ),
        CONFIG_STR(
            'config_file',
            require_value=False,
            description="""Full path to a config file; leave blank if no config file
required""",
            example="'/etc/myservice.conf'"
        ),
        # Start/stop commands become the manifest's exec_method elements.
        CONFIG_STR(
            'exec_method_start',
            require_value=True,
            description="""The full command to start the service; may contain
'%{config_file}' to substitute the configuration file
""",
            example="'/usr/bin/myservice %{config_file}'"
        ),
        CONFIG_STR(
            'exec_method_stop',
            require_value=True,
            default = ':kill',
            description="""The full command to stop the service; may specify ':kill' to let
SMF kill the service processes automatically
""",
            example="""'/usr/bin/myservice_ctl stop' or ':kill' to let SMF kill
the service processes automatically"""
        ),
        # SMF duration model; restricted to the three values svc.startd knows.
        CONFIG_STR(
            'startd_model',
            require_value=True,
            default = 'wait',
            description="""Choose a process management model:
'wait' : long-running process that runs in the foreground (default)
'contract' : long-running process that daemonizes or forks itself
(i.e. start command returns immediately)
'transient' : short-lived process, performs an action and ends quickly
""",
            # example="",
            accepted_values = ('wait', 'contract', 'transient'),
        ),
        # Standard SMF dependencies on milestone services.
        CONFIG_BOOL(
            'depends_on_network',
            description="Does this service depend on the network being ready",
            default=True
        ),
        CONFIG_BOOL(
            'depends_on_filesystem',
            description="Does this service depend on the local filesystems being ready",
            default=True
        ),
        CONFIG_BOOL(
            'instance_enabled',
            default=False,
            description="Should the service be enabled by default"
        ),
        # Optional privilege drop for the method_credential element.
        CONFIG_STR(
            'method_credential_user',
            require_value=False,
            description="""The user to change to when executing the
start/stop/refresh methods""",
            example="'webservd'"
        ),
        CONFIG_STR(
            'method_credential_group',
            require_value=False,
            description="""The group to change to when executing the
start/stop/refresh methods""",
            example="'webservd'"
        ),
    ]
    service_config = ask_user(service_questions)
    logging.debug(service_config)
    return service_config
def create_manifest(outfp, service_config):
    """Render the SMF manifest template with *service_config* and write the
    resulting XML to the open file object *outfp*."""
    template = MarkupTemplate(MANIFEST_TEMPLATE)
    stream = template.generate(**service_config)
    outfp.write(stream.render('xml', strip_whitespace=False))
def main(argv=None):
    """Command-line entry point: prompt for service details and write a manifest.

    Interactively collects the service configuration, renders the SMF
    manifest XML, and writes it to the output file named on the command line.

    Parameters:
        argv: argument vector to parse (argv[0] is the program name);
            defaults to sys.argv.

    Returns:
        0 on success.  Usage errors exit via parser.error() (status 2).
    """
    if argv is None:
        argv = sys.argv

    # define usage and version messages (use argv, not sys.argv, so an
    # explicitly passed argument vector is honoured)
    usageMsg = "usage: %s [options] output.xml" % argv[0]
    versionMsg = """%s %s""" % (os.path.basename(argv[0]), version)
    description = """Create an SMF service manifest file. The resulting
XML file can be validated and imported into SMF using the 'svccfg' command.
For example, "svccfg validate myservice.xml", "svccfg -v import myservice.xml".
"""

    # get a parser object and define our options
    parser = optparse.OptionParser(usage=usageMsg, version=versionMsg,
                                   description=description)
    # Switches
    parser.add_option('-v', '--verbose', dest='verbose',
                      action='store_true', default=False,
                      help="verbose output")
    parser.add_option('-d', '--debug', dest='debug',
                      action='store_true', default=False,
                      help="debugging output (very verbose)")

    # Parse options & arguments.  BUG FIX: the original called parse_args()
    # with no arguments, which always read sys.argv and ignored the argv
    # parameter of this function.
    (options, args) = parser.parse_args(argv[1:])
    if len(args) < 1:
        parser.error("Output file must be specified.")
    if len(args) > 1:
        parser.error("Only one output file can be specified.")

    # --debug is "very verbose", so it must take precedence over --verbose
    # (the original checked verbose first, silently downgrading -v -d to INFO).
    if options.debug:
        loglevel = logging.DEBUG
    elif options.verbose:
        loglevel = logging.INFO
    else:
        loglevel = logging.WARNING
    logging.basicConfig(
        level=loglevel,
        format='%(message)s',
    )

    output_filename = args[0]
    # Prompt first, then open: avoids leaving an empty/truncated file behind
    # if the user aborts the interactive questions.
    service_config = generate_service_config()
    # Context manager guarantees the file is closed even if rendering fails.
    with open(output_filename, 'w') as output:
        create_manifest(output, service_config)
    print("\nManifest written to %s" % output_filename)
    print('You can validate the XML file with "svccfg validate %s"' % output_filename)
    print('And create the SMF service with "svccfg import %s"' % output_filename)
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| 30.923404 | 164 | 0.555938 |
e9f182577a3561deeedd13bd4f63beb75d349a4d | 7,183 | py | Python | lemon_boy.py | hug58/Lemon-Boy-platformer | 5ec5dd8974088fce5084e6249d13e7bb47621669 | [
"MIT"
] | 4 | 2019-03-12T09:02:17.000Z | 2019-05-06T20:31:18.000Z | lemon_boy.py | hug58/Lemon-Boy-platformer | 5ec5dd8974088fce5084e6249d13e7bb47621669 | [
"MIT"
] | null | null | null | lemon_boy.py | hug58/Lemon-Boy-platformer | 5ec5dd8974088fce5084e6249d13e7bb47621669 | [
"MIT"
] | 2 | 2019-03-11T06:51:06.000Z | 2020-09-01T16:17:06.000Z | from script import *
from script.menu import Menu
from script import image,sound,resolve_route
from script.player import Player
from script.enemy import Apple
from script.elementos import Trap,Door,Trampoline,Key,Lemon
from script.camera import Camera
from script.tilemap import TileMap
# Initialise only the pygame subsystems the game actually uses.
pg.display.init()
pg.joystick.init()
pg.font.init()

# Window dimensions in pixels.
WIDTH = 620
HEIGHT = 480

# Palette colours (RGB tuples).
WHITE2 = (252,252,238)
LEMON = (249,215,0)
GREEN = (140,196,51)

# Main display surface plus window caption and icon.
SCREEN = pg.display.set_mode((WIDTH,HEIGHT))
pg.display.set_caption("Project Hugo")
pg.display.set_icon(pg.image.load(resolve_route("lemon.ico") ))
def main():
    """Top-level game loop: build the menu and game objects, then run the
    event / update / draw cycle at 60 FPS until the player quits.

    NOTE(review): original indentation was lost in extraction; the nesting
    below is reconstructed from statement order — confirm against upstream.
    """
    exit = False
    clock = pg.time.Clock()
    # Level files, in order; the menu lets the player pick one.
    maps= ["map/map1.tmx",
           "map/map2.tmx",
           "map/map3.tmx",
           "map/map4.tmx",
           "map/map5.tmx",
           "map/map6.tmx",
           "map/map7.tmx"]
    menu = Menu(maps)
    game = Game(menu.maps)
    game.load()
    # Creating a joystick object and initialising it (only if one is plugged in).
    joystick = pg.joystick.Joystick(0) if pg.joystick.get_count() > 0 else None
    joystick.init() if joystick != None else None
    # Static background, pre-scaled once to the window size.
    background = pg.Surface((WIDTH,HEIGHT)).convert()
    background.blit(pg.transform.scale(image["background"],(WIDTH,HEIGHT)),(0,0))
    draw_background = lambda background: SCREEN.blit(background,(0,0))
    while exit != True and menu.exit_game != True:
        clock.tick(60)  # cap the loop at 60 frames per second
        for event in pg.event.get():
            if event.type == pg.QUIT: exit = True
            if event.type == pg.KEYDOWN:
                # X: jump, while jump charges remain (supports double-jump).
                if event.key == pg.K_x:
                    if game.player.cont_jump > 0:
                        game.player.diffx = 0
                        game.sound["jump"].stop()
                        game.sound["jump"].play()
                        game.player.vly = -8
                        game.player.cont_jump -=1
                        game.player.direcciony = -1
                # Return: leave the menu and start playing.
                if event.key == pg.K_RETURN: menu.exit = False
            if event.type == pg.KEYUP:
                # Releasing left/right stops horizontal movement.
                if event.key == pg.K_RIGHT or event.key == pg.K_LEFT: game.player.stop = True
                # C: fire once the shot counter has charged past the cooldown.
                if event.key == pg.K_c:
                    if game.player.cont_shot >= 13:
                        game.player.shot()
                        game.player.cont_shot = 0
                    else: game.player.cont_shot = 0
        # Menu requested a level change: point the game at the selected map.
        if menu.changes_maps == True:
            game.map_cont = menu.position
            game.changes_maps = True
            menu.changes_maps = False
        if menu.exit != True:
            menu.update(SCREEN)
        draw_background(background)
        # Close the videogame completely without drawing the current level (lvl1 by default)
        if menu.exit_game != True:
            game.draw()
            game.update()
        pg.display.flip()
# Run the game loop only when executed as a script.
if __name__ == "__main__":
    main()
| 24.599315 | 93 | 0.667688 |
e9f1fbbda761ade5d0893da97c048863bb481369 | 4,915 | py | Python | pliers/utils/base.py | jsmentch/pliers | ef13552793ab5789065249a89230baced407c472 | [
"BSD-3-Clause"
] | null | null | null | pliers/utils/base.py | jsmentch/pliers | ef13552793ab5789065249a89230baced407c472 | [
"BSD-3-Clause"
] | null | null | null | pliers/utils/base.py | jsmentch/pliers | ef13552793ab5789065249a89230baced407c472 | [
"BSD-3-Clause"
] | null | null | null | ''' Miscellaneous internal utilities. '''
import collections
import os
from abc import ABCMeta, abstractmethod, abstractproperty
from types import GeneratorType
from itertools import islice
from tqdm import tqdm
import pandas as pd
from pliers import config
from pliers.support.exceptions import MissingDependencyError
def listify(obj):
    ''' Wrap a scalar argument in a single-element list so callers can
    accept either one value or a sequence.  Lists, tuples and None pass
    through unchanged. '''
    if isinstance(obj, (list, tuple, type(None))):
        return obj
    return [obj]
def flatten_dict(d, parent_key='', sep='_'):
    ''' Flattens a multi-level dictionary into a single level by concatenating
    nested keys with the char provided in the sep argument.

    Args:
        d (dict): the (possibly nested) mapping to flatten.
        parent_key (str): prefix prepended to every key (used in recursion).
        sep (str): separator inserted between concatenated key segments.

    Returns:
        dict: single-level dict whose keys are the joined nested keys.

    Solution from https://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys'''
    # BUG FIX: the collections.MutableMapping alias was removed in
    # Python 3.10; the ABC lives in collections.abc.  Local import keeps
    # this fix self-contained.
    from collections.abc import MutableMapping

    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            # Recurse into nested mappings, carrying the joined key prefix.
            items.extend(flatten_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
def set_iterable_type(obj):
    ''' Normalise an iterable to a generator or a list, depending on the
    'use_generators' config option.  Non-iterable objects are returned
    untouched.  When lists are requested, elements are converted
    recursively so no nested generators remain. '''
    if not isiterable(obj):
        return obj
    if config.get_option('use_generators'):
        if isgenerator(obj):
            return obj
        return (item for item in obj)
    return [set_iterable_type(item) for item in obj]
def isiterable(obj):
    ''' Returns True if the object is one of allowable iterable types. '''
    allowed_types = (list, tuple, pd.Series, GeneratorType, tqdm)
    return isinstance(obj, allowed_types)
def isgenerator(obj):
    ''' Returns True when obj is a generator, or an object (such as a tqdm
    wrapper) whose 'iterable' attribute is a generator. '''
    if isinstance(obj, GeneratorType):
        return True
    # getattr with a None default replaces the hasattr/getattr pair:
    # None is never a GeneratorType, so the result is identical.
    return isinstance(getattr(obj, 'iterable', None), GeneratorType)
def progress_bar_wrapper(iterable, **kwargs):
    ''' Wrap the iterable in a tqdm progress bar when the 'progress_bar'
    config option is enabled and the iterable is not already wrapped.
    '''
    show_bar = config.get_option('progress_bar') and not isinstance(iterable, tqdm)
    if show_bar:
        return tqdm(iterable, **kwargs)
    return iterable
# NOTE(review): populated elsewhere in pliers — appears to cache a mapping
# of module identifiers; confirm usage at the call sites.
module_names = {}
# Lightweight record pairing a dependency's package name with its value
# (as consumed by the dependency-checking helpers below — TODO confirm).
Dependency = collections.namedtuple('Dependency', 'package value')
| 29.431138 | 126 | 0.673042 |