hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
12c5c79e7f95fd34c8892f6f44952e889b0051d1 | 111 | py | Python | backend/venv/src/api/ordercampproduct/apps.py | AkashSDas/camps_for_champs | 1bf7e51905b5b3efc47f94ffcfde7167dace4475 | [
"MIT"
] | null | null | null | backend/venv/src/api/ordercampproduct/apps.py | AkashSDas/camps_for_champs | 1bf7e51905b5b3efc47f94ffcfde7167dace4475 | [
"MIT"
] | null | null | null | backend/venv/src/api/ordercampproduct/apps.py | AkashSDas/camps_for_champs | 1bf7e51905b5b3efc47f94ffcfde7167dace4475 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 18.5 | 40 | 0.792793 |
12c65927c0458f39714e96cf3347972f4ddf2a65 | 691 | py | Python | onnx_tf/handlers/backend/identity.py | ZemingZhao/onnx-tensorflow | 9ab9b934c2c8494b6309d20f15acabcb3abd126d | [
"Apache-2.0"
] | null | null | null | onnx_tf/handlers/backend/identity.py | ZemingZhao/onnx-tensorflow | 9ab9b934c2c8494b6309d20f15acabcb3abd126d | [
"Apache-2.0"
] | null | null | null | onnx_tf/handlers/backend/identity.py | ZemingZhao/onnx-tensorflow | 9ab9b934c2c8494b6309d20f15acabcb3abd126d | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
| 25.592593 | 59 | 0.726483 |
12c759447ac7e05d73a693a7af973c9ec776f540 | 42,699 | py | Python | raiden/tests/integration/api/test_restapi.py | litexnetwork/raiden | b084c77e994d1f08f72947e57ce5bd6c8aa9c2a9 | [
"MIT"
] | 1 | 2018-11-26T01:40:37.000Z | 2018-11-26T01:40:37.000Z | raiden/tests/integration/api/test_restapi.py | litexnetwork/raiden | b084c77e994d1f08f72947e57ce5bd6c8aa9c2a9 | [
"MIT"
] | null | null | null | raiden/tests/integration/api/test_restapi.py | litexnetwork/raiden | b084c77e994d1f08f72947e57ce5bd6c8aa9c2a9 | [
"MIT"
] | null | null | null | from http import HTTPStatus
import time
import logging
import pytest
import grequests
from flask import url_for
from eth_utils import (
to_checksum_address,
to_canonical_address,
is_checksum_address,
)
from raiden_contracts.constants import (
CONTRACT_HUMAN_STANDARD_TOKEN,
MAX_TOKENS_DEPLOY,
TEST_SETTLE_TIMEOUT_MIN,
TEST_SETTLE_TIMEOUT_MAX,
)
from raiden.api.v1.encoding import (
AddressField,
HexAddressConverter,
)
from raiden.transfer.state import (
CHANNEL_STATE_OPENED,
CHANNEL_STATE_CLOSED,
)
from raiden.tests.utils import assert_dicts_are_equal
from raiden.tests.utils.client import burn_all_eth
from raiden.tests.utils.smartcontracts import deploy_contract_web3
# pylint: disable=too-many-locals,unused-argument,too-many-lines
def test_url_with_invalid_address(rest_api_port_number, api_backend):
""" Addresses require the leading 0x in the urls. """
url_without_prefix = (
'http://localhost:{port}/api/1/'
'channels/ea674fdde714fd979de3edf0f56aa9716b898ec8'
).format(port=rest_api_port_number)
request = grequests.patch(
url_without_prefix,
json=dict(state='CHANNEL_STATE_SETTLED'),
)
response = request.send().response
assert_response_with_code(response, HTTPStatus.NOT_FOUND)
def test_api_close_insufficient_eth(
api_backend,
token_addresses,
reveal_timeout,
):
# let's create a new channel
partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
token_address = token_addresses[0]
settle_timeout = 1650
channel_data_obj = {
'partner_address': partner_address,
'token_address': to_checksum_address(token_address),
'settle_timeout': settle_timeout,
}
request = grequests.put(
api_url_for(
api_backend,
'channelsresource',
),
json=channel_data_obj,
)
response = request.send().response
balance = 0
assert_proper_response(response, status_code=HTTPStatus.CREATED)
response = response.json()
expected_response = channel_data_obj
expected_response['balance'] = balance
expected_response['state'] = CHANNEL_STATE_OPENED
expected_response['reveal_timeout'] = reveal_timeout
expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
assert_dicts_are_equal(response, expected_response)
# let's burn all eth and try to close the channel
api_server, _ = api_backend
burn_all_eth(api_server.rest_api.raiden_api.raiden)
request = grequests.patch(
api_url_for(
api_backend,
'channelsresourcebytokenandpartneraddress',
token_address=token_address,
partner_address=partner_address,
),
json={'state': CHANNEL_STATE_CLOSED},
)
response = request.send().response
assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
response = response.json()
assert 'Insufficient ETH' in response['errors']
#demo
#demo
def test_api_getcrosstransation_by_id(api_backend, raiden_network, token_addresses,cross_id):
_, app1 = raiden_network
api_server, _ = api_backend
cross_id = cross_id
request = grequests.get(
api_url_for(
api_backend,
'getcrosstransactionbyid',
cross_id = cross_id,
)
)
response = request.send().response
assert_proper_response(response, HTTPStatus.OK)
assert response.json() != []
def test_api_crosstransation_hash(api_backend, raiden_network, token_addresses,hash_r):
_, app1 = raiden_network
api_server, _ = api_backend
hash_r = str(hash_r)
request = grequests.get(
api_url_for(
api_backend,
'recivehashresource',
hash_r = hash_r,
)
)
response = request.send().response
assert_proper_response(response, HTTPStatus.OK)
assert response.json() == 'hash_r is ok'
| 32.769762 | 176 | 0.686035 |
12c7cbd02b14e09531a4f5ea52a53834f3434799 | 6,946 | py | Python | contents/MyExperiment/Exp3_test/cluster_env.py | Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master | 011594083410f9b2f8e16eb5deed26e730ed849e | [
"MIT"
] | null | null | null | contents/MyExperiment/Exp3_test/cluster_env.py | Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master | 011594083410f9b2f8e16eb5deed26e730ed849e | [
"MIT"
] | null | null | null | contents/MyExperiment/Exp3_test/cluster_env.py | Feng-XiaoYue/Reinforcement-learning-with-tensorflow-master | 011594083410f9b2f8e16eb5deed26e730ed849e | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import random
import time
import sys
if sys.version_info.major == 2:
import Tkinter as tk
else:
import tkinter as tk
# if __name__ == '__main__':
# server_attribute = pd.DataFrame(np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
# 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0,
# 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
# 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
# 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
# 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
# 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]).
# reshape(8, 24),
# columns=np.arange(24))
# env = Cluster(state_init(), server_attribute)
# Qss = env.QSs
# print(Qss)
# for i in range(len(Qss)):
# q = i
# for j in range(len(server_attribute)):
# index_server = j
# print(env.cost_init)
# print("The reward of initial state is:")
# print(env.reward(env.cost_all(env.cost_init), env.state_init))
# print(env.state_init)
# actions=list(range(env.n_actions))
# print(actions)
# env.after(100, update)
# env.mainloop() | 37.144385 | 145 | 0.460121 |
12c7d079f923030d66c22a1b6cf6b9b674f39635 | 2,589 | py | Python | libensemble/tests/regression_tests/test_6-hump_camel_elapsed_time_abort.py | Kardyne/libensemble | 566c8f5daafe2ad4deebc13198a1e131e4ce6542 | [
"BSD-2-Clause"
] | null | null | null | libensemble/tests/regression_tests/test_6-hump_camel_elapsed_time_abort.py | Kardyne/libensemble | 566c8f5daafe2ad4deebc13198a1e131e4ce6542 | [
"BSD-2-Clause"
] | null | null | null | libensemble/tests/regression_tests/test_6-hump_camel_elapsed_time_abort.py | Kardyne/libensemble | 566c8f5daafe2ad4deebc13198a1e131e4ce6542 | [
"BSD-2-Clause"
] | null | null | null | # """
# Runs libEnsemble on the 6-hump camel problem. Documented here:
# https://www.sfu.ca/~ssurjano/camel6.html
#
# Execute via the following command:
# mpiexec -np 4 python3 test_6-hump_camel_elapsed_time_abort.py
# The number of concurrent evaluations of the objective function will be 4-1=3.
# """
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from mpi4py import MPI # for libE communicator
import sys, os # for adding to path
import numpy as np
# Import libEnsemble main
from libensemble.libE import libE
# Import sim_func
from libensemble.sim_funcs.six_hump_camel import six_hump_camel
# Import gen_func
from libensemble.gen_funcs.uniform_sampling import uniform_random_sample
script_name = os.path.splitext(os.path.basename(__file__))[0]
#State the objective function, its arguments, output, and necessary parameters (and their sizes)
sim_specs = {'sim_f': six_hump_camel, # This is the function whose output is being minimized
'in': ['x'], # These keys will be given to the above function
'out': [('f',float), # This is the output from the function being minimized
],
'pause_time': 2,
# 'save_every_k': 10
}
# State the generating function, its arguments, output, and necessary parameters.
gen_specs = {'gen_f': uniform_random_sample,
'in': ['sim_id'],
'out': [('x',float,2),
],
'lb': np.array([-3,-2]),
'ub': np.array([ 3, 2]),
'gen_batch_size': 5,
'num_active_gens': 1,
'batch_mode': False,
# 'save_every_k': 10
}
# Tell libEnsemble when to stop
exit_criteria = {'elapsed_wallclock_time': 1}
np.random.seed(1)
persis_info = {}
for i in range(MPI.COMM_WORLD.Get_size()):
persis_info[i] = {'rand_stream': np.random.RandomState(i)}
# Perform the run
H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info)
if MPI.COMM_WORLD.Get_rank() == 0:
eprint(flag)
eprint(H)
assert flag == 2
short_name = script_name.split("test_", 1).pop()
filename = short_name + '_results_History_length=' + str(len(H)) + '_evals=' + str(sum(H['returned'])) + '_ranks=' + str(MPI.COMM_WORLD.Get_size())
print("\n\n\nRun completed.\nSaving results to file: " + filename)
# if flag == 2:
# print("\n\n\nKilling COMM_WORLD")
# MPI.COMM_WORLD.Abort()
| 34.52 | 151 | 0.653148 |
12c8a53eac5c028a5e825aaa86f201c528a2f671 | 1,329 | py | Python | do_like_javac/tools/graphtools.py | zcai1/do-like-javac | 3eb4a43521ae181a9b777a589e477b0c6ab7cb6e | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 1 | 2020-10-10T20:24:08.000Z | 2020-10-10T20:24:08.000Z | do_like_javac/tools/graphtools.py | zcai1/do-like-javac | 3eb4a43521ae181a9b777a589e477b0c6ab7cb6e | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 13 | 2019-06-20T23:16:15.000Z | 2022-03-26T21:19:20.000Z | do_like_javac/tools/graphtools.py | zcai1/do-like-javac | 3eb4a43521ae181a9b777a589e477b0c6ab7cb6e | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 5 | 2016-09-23T00:52:12.000Z | 2021-09-08T01:24:36.000Z | import os
import argparse
from . import common
argparser = argparse.ArgumentParser(add_help=False)
graph_group = argparser.add_argument_group('graphtool arguments')
graph_group.add_argument('--graph-jar', metavar='<graphtool-jar>',
action='store',default=None, dest='graph_jar',
help='Path to prog2dfg.jar or apilearner.jar')
| 30.906977 | 88 | 0.611738 |
12c8ff9bf299511a1712cec875fde79e159c64f4 | 507 | py | Python | boss_grabbing/pipelines.py | shansb/boss_grabbing | 20aabd6b2062099eb287d7586dcf619648569ba2 | [
"MIT"
] | null | null | null | boss_grabbing/pipelines.py | shansb/boss_grabbing | 20aabd6b2062099eb287d7586dcf619648569ba2 | [
"MIT"
] | null | null | null | boss_grabbing/pipelines.py | shansb/boss_grabbing | 20aabd6b2062099eb287d7586dcf619648569ba2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from boss_grabbing.sqlite import Sqlite
| 25.35 | 65 | 0.65286 |
12c9169d04a1b953b055c11fb6f8b67fa66071ff | 344 | py | Python | core/jobs/urls.py | InKyrNet/inkyrnet | fdb5c8def9b74049c4b48f2fccf5d52b040a4435 | [
"MIT"
] | null | null | null | core/jobs/urls.py | InKyrNet/inkyrnet | fdb5c8def9b74049c4b48f2fccf5d52b040a4435 | [
"MIT"
] | 4 | 2021-06-04T21:36:18.000Z | 2021-09-22T17:44:09.000Z | core/jobs/urls.py | InKyrNet/inkyrnet | fdb5c8def9b74049c4b48f2fccf5d52b040a4435 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import *
from django_filters.views import FilterView
app_name = 'jobs'
urlpatterns = [
path('', FilterView.as_view(filterset_class=JobFilter,
template_name='jobs/job_list.html'), name='index'),
path('companies/', CompanyListView.as_view(), name='companies'),
]
| 28.666667 | 83 | 0.674419 |
12c9326e60a2f14e4ff7c33d36e504ccc28441b7 | 2,010 | py | Python | src/compas/datastructures/mesh/transformations_numpy.py | arpastrana/compas | ed677a162c14dbe562c82d72f370279259faf7da | [
"MIT"
] | 2 | 2021-03-17T18:14:22.000Z | 2021-09-19T13:50:02.000Z | src/compas/datastructures/mesh/transformations_numpy.py | arpastrana/compas | ed677a162c14dbe562c82d72f370279259faf7da | [
"MIT"
] | null | null | null | src/compas/datastructures/mesh/transformations_numpy.py | arpastrana/compas | ed677a162c14dbe562c82d72f370279259faf7da | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import transform_points_numpy
__all__ = [
'mesh_transform_numpy',
'mesh_transformed_numpy',
]
def mesh_transform_numpy(mesh, transformation):
"""Transform a mesh.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh.
transformation : compas.geometry.Transformation
The transformation.
Notes
-----
The mesh is modified in-place.
Examples
--------
>>> mesh = Mesh.from_obj(compas.get('cube.obj'))
>>> T = matrix_from_axis_and_angle([0, 0, 1], pi / 4)
>>> tmesh = mesh.copy()
>>> mesh_transform(tmesh, T)
"""
vertices = list(mesh.vertices())
xyz = [mesh.vertex_coordinates(vertex) for vertex in vertices]
xyz[:] = transform_points_numpy(xyz, transformation)
for index, vertex in enumerate(vertices):
mesh.vertex_attributes(vertex, 'xyz', xyz[index])
def mesh_transformed_numpy(mesh, transformation):
"""Transform a copy of ``mesh``.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh.
transformation : compas.geometry.Transformation
The transformation.
Returns
-------
Mesh
A transformed independent copy of ``mesh``.
Notes
-----
The original mesh is not modified.
Instead a transformed independent copy is returned.
Examples
--------
>>> mesh = Mesh.from_obj(compas.get('cube.obj'))
>>> T = matrix_from_axis_and_angle([0, 0, 1], pi / 4)
>>> tmesh = mesh_transformed(mesh, T)
"""
mesh_copy = mesh.copy()
mesh_transform_numpy(mesh_copy, transformation)
return mesh_copy
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import doctest
doctest.testmod(globs=globals())
| 24.216867 | 80 | 0.595025 |
12c93b56f0fe4bfd1cf140c773e7ff17f7dd5689 | 17,860 | py | Python | selfdrive/car/gm/carcontroller.py | CTyrell/openpilot | 1ef27823882eed575266983175f106af1e293082 | [
"MIT"
] | null | null | null | selfdrive/car/gm/carcontroller.py | CTyrell/openpilot | 1ef27823882eed575266983175f106af1e293082 | [
"MIT"
] | null | null | null | selfdrive/car/gm/carcontroller.py | CTyrell/openpilot | 1ef27823882eed575266983175f106af1e293082 | [
"MIT"
] | null | null | null | from cereal import car
from common.realtime import DT_CTRL
from common.numpy_fast import interp
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.gm import gmcan
from selfdrive.car.gm.values import DBC, AccState, CanBus, CarControllerParams
from opendbc.can.packer import CANPacker
VisualAlert = car.CarControl.HUDControl.VisualAlert
| 54.45122 | 203 | 0.693729 |
12ca7aec9c936b7e376b5d6d2ed2e6e550f43708 | 8,570 | py | Python | src/rprblender/__init__.py | ralic/RadeonProRenderBlenderAddon | 310c650d4230289ac5d5407cc24a13b4c7ce0a90 | [
"Apache-2.0"
] | 1 | 2021-03-29T05:55:49.000Z | 2021-03-29T05:55:49.000Z | src/rprblender/__init__.py | ralic/RadeonProRenderBlenderAddon | 310c650d4230289ac5d5407cc24a13b4c7ce0a90 | [
"Apache-2.0"
] | 1 | 2021-04-03T09:39:28.000Z | 2021-04-03T09:39:28.000Z | src/rprblender/__init__.py | isabella232/RadeonProRenderBlenderAddon | ff4ede164c1e1e909f182be709422bc8c8878b1c | [
"Apache-2.0"
] | null | null | null | #**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
import traceback
import bpy
bl_info = {
"name": "Radeon ProRender",
"author": "AMD",
"version": (3, 1, 0),
"blender": (2, 80, 0),
"location": "Info header, render engine menu",
"description": "Radeon ProRender rendering plugin for Blender 2.8x",
"warning": "",
"tracker_url": "",
"wiki_url": "",
"category": "Render"
}
version_build = ""
from .utils import logging, version_updater
from .utils import install_libs
from .engine.engine import Engine
from . import (
nodes,
properties,
ui,
operators,
material_library,
)
from .engine.render_engine import RenderEngine
from .engine.render_engine_2 import RenderEngine2
from .engine.preview_engine import PreviewEngine
from .engine.viewport_engine import ViewportEngine
from .engine.viewport_engine_2 import ViewportEngine2
from .engine.animation_engine import AnimationEngine, AnimationEngine2
from .engine.render_engine_hybrid import RenderEngine as RenderEngineHybrid
from .engine.viewport_engine_hybrid import ViewportEngine as ViewportEngineHybrid
from .engine.animation_engine_hybrid import AnimationEngine as AnimationEngineHybrid
log = logging.Log(tag='init')
log("Loading RPR addon {}".format(bl_info['version']))
render_engine_cls = {
'FULL': RenderEngine,
'HIGH': RenderEngineHybrid,
'MEDIUM': RenderEngineHybrid,
'LOW': RenderEngineHybrid,
'FULL2': RenderEngine2,
}
animation_engine_cls = {
'FULL': AnimationEngine,
'HIGH': AnimationEngineHybrid,
'MEDIUM': AnimationEngineHybrid,
'LOW': AnimationEngineHybrid,
'FULL2': AnimationEngine2,
}
viewport_engine_cls = {
'FULL': ViewportEngine,
'HIGH': ViewportEngineHybrid,
'MEDIUM': ViewportEngineHybrid,
'LOW': ViewportEngineHybrid,
'FULL2': ViewportEngine2,
}
def register():
""" Register all addon classes in Blender """
log("register")
install_libs.ensure_boto3()
bpy.utils.register_class(RPREngine)
material_library.register()
properties.register()
operators.register()
nodes.register()
ui.register()
bpy.app.handlers.save_pre.append(on_save_pre)
bpy.app.handlers.load_pre.append(on_load_pre)
bpy.app.handlers.version_update.append(on_version_update)
def unregister():
""" Unregister all addon classes from Blender """
log("unregister")
bpy.app.handlers.version_update.remove(on_version_update)
bpy.app.handlers.load_pre.remove(on_load_pre)
bpy.app.handlers.save_pre.remove(on_save_pre)
ui.unregister()
nodes.unregister()
operators.unregister()
properties.unregister()
material_library.unregister()
bpy.utils.unregister_class(RPREngine)
| 31.391941 | 116 | 0.655076 |
12cc0f45c792a01e3a5bd5c42c13138e07ace531 | 1,561 | py | Python | plot_metric_err_vs_dim.py | wchen459/design_embeddings_jmd_2016 | 30dfec40b14c81e6cbe1c57efc2abe1a28dbdd5f | [
"MIT"
] | 9 | 2017-07-13T19:17:48.000Z | 2022-03-17T02:19:06.000Z | plot_metric_err_vs_dim.py | wchen459/design_embeddings_jmd_2016 | 30dfec40b14c81e6cbe1c57efc2abe1a28dbdd5f | [
"MIT"
] | null | null | null | plot_metric_err_vs_dim.py | wchen459/design_embeddings_jmd_2016 | 30dfec40b14c81e6cbe1c57efc2abe1a28dbdd5f | [
"MIT"
] | 2 | 2018-08-31T22:46:03.000Z | 2020-06-19T16:17:38.000Z | """
Plots reconstruction error vs semantic space dimensionality
Usage: python metric_err_vs_dim.py
Author(s): Wei Chen (wchen459@umd.edu)
"""
import matplotlib.pyplot as plt
import numpy as np
plt.rc("font", size=18)
examples = ['glass', 'sf_linear', 'sf_s_nonlinear', 'sf_v_nonlinear']
titles = {'glass': 'Glass',
'sf_linear': 'Superformula (linear)',
'sf_s_nonlinear': 'Superformula (slightly nonlinear)',
'sf_v_nonlinear': 'Superformula (very nonlinear)'}
n = len(examples)
x = range(1, 6)
for i in range(n):
plt.figure()
plt.xticks(np.arange(min(x), max(x)+1, dtype=np.int))
plt.xlabel('Semantic space dimensionality')
plt.ylabel('Reconstruction error')
plt.xlim(0.5, 5.5)
errs = np.zeros((3,5))
for j in x:
# Read reconstruction errors in rec_err.txt
txtfile = open('./results/'+examples[i]+'/n_samples = 115/n_control_points = 20/semantic_dim = '
+str(j)+'/rec_err.txt', 'r')
k = 0
for line in txtfile:
errs[k, j-1] = float(line)
k += 1
line_pca, = plt.plot(x, errs[0], '-ob', label='PCA')
line_kpca, = plt.plot(x, errs[1], '-vg', label='Kernel PCA')
line_ae, = plt.plot(x, errs[2], '-sr', label='Autoencoder')
plt.legend(handles=[line_pca, line_kpca, line_ae], fontsize=16)
plt.title(titles[examples[i]])
fig_name = 'err_vs_dim_'+examples[i]+'.png'
plt.tight_layout()
plt.savefig('./results/'+fig_name, dpi=300)
print fig_name+' saved!'
| 31.22 | 104 | 0.606022 |
12cc8345dd761da772a7145052f730ec8abb45f7 | 621 | py | Python | tools/pot/openvino/tools/pot/graph/gpu_patterns.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 1,127 | 2018-10-15T14:36:58.000Z | 2020-04-20T09:29:44.000Z | tools/pot/openvino/tools/pot/graph/gpu_patterns.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 439 | 2018-10-20T04:40:35.000Z | 2020-04-19T05:56:25.000Z | tools/pot/openvino/tools/pot/graph/gpu_patterns.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 414 | 2018-10-17T05:53:46.000Z | 2020-04-16T17:29:53.000Z | # Copyright (C) 2020-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from .pattern_utils import check_fused_scale_shift_patterns, get_fused_scale_shift_patterns, \
check_fused_op_const_patterns, get_fused_op_const_pattern, get_clamp_mult_const_pattern
| 41.4 | 113 | 0.756844 |
12ccbde3bf71864760496c1e1f0963111fba9314 | 638 | py | Python | test/environments/instances/8x8/gen.py | Multi-Agent-Research-Group/hog2 | 544d7c0e933fd69025944a0a3abcf9a40e59f0be | [
"MIT"
] | 5 | 2020-08-03T09:43:26.000Z | 2022-01-11T08:28:30.000Z | test/environments/instances/8x8/gen.py | Multi-Agent-Research-Group/hog2 | 544d7c0e933fd69025944a0a3abcf9a40e59f0be | [
"MIT"
] | null | null | null | test/environments/instances/8x8/gen.py | Multi-Agent-Research-Group/hog2 | 544d7c0e933fd69025944a0a3abcf9a40e59f0be | [
"MIT"
] | 7 | 2017-07-31T13:01:28.000Z | 2021-05-16T10:15:49.000Z | #!/usr/bin/python
import random
import os
import errno
for i in range(100):
s=set()
g=set()
while len(s) < 50:
s.add((random.randint(0,7),random.randint(0,7)))
while len(g) < 50:
g.add((random.randint(0,7),random.randint(0,7)))
start=list(s)
goal=list(g)
for size in range(21,50):
if not os.path.exists("./%d"%size):
try:
os.makedirs("./%d"%size)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open("./%d/%d.csv"%(size,i), "w") as f:
for j in range(size):
f.write("%d,%d %d,%d\n"%(start[j][0],start[j][1],goal[j][0],goal[j][1]))
| 22.785714 | 80 | 0.548589 |
12ccd738c589b9032a098324390886166233073c | 2,308 | py | Python | pose_recognition_from_camera_demo.py | amazingchow/capture-dance-using-mediapipe | 1963d461b4e047308da78b1bb88b9ed1f2c3c7d1 | [
"MIT"
] | null | null | null | pose_recognition_from_camera_demo.py | amazingchow/capture-dance-using-mediapipe | 1963d461b4e047308da78b1bb88b9ed1f2c3c7d1 | [
"MIT"
] | null | null | null | pose_recognition_from_camera_demo.py | amazingchow/capture-dance-using-mediapipe | 1963d461b4e047308da78b1bb88b9ed1f2c3c7d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
import cv2 as cv
import mediapipe as mp
import sys
import time
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--video_device", type=int, default=0)
parser.add_argument("--video_file", type=str, default="")
args = parser.parse_args()
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils
with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
cap = object()
if args.video_file != "":
cap = cv.VideoCapture(args.video_file)
else:
cap = cv.VideoCapture(args.video_device)
if not cap.isOpened():
print("Cannot open camera device-0")
sys.exit(-1)
else:
print("Video <width: {}, height: {}, fps: {}>".format(
cap.get(cv.CAP_PROP_FRAME_WIDTH),
cap.get(cv.CAP_PROP_FRAME_HEIGHT),
cap.get(cv.CAP_PROP_FPS)
))
fps = int(cap.get(cv.CAP_PROP_FPS))
frame_idx = 0
while 1:
ret, frame = cap.read()
if not ret:
print("Cannot receive frame, exiting ...")
break
frame_idx += 1
st = time.time()
# flip the frame horizontally for a later selfie-view display
frame = cv.cvtColor(cv.flip(frame, 1), cv.COLOR_BGR2RGB)
# to improve performance, optionally mark the frame as not writeable to pass by reference
frame.flags.writeable = False
results = pose.process(frame)
frame.flags.writeable = True
frame = cv.cvtColor(frame, cv.COLOR_RGB2BGR)
# draw the pose annotation on the frame
mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
ed = time.time()
print("Used {:.3f} secs to process frame-{:05}".format(ed - st, frame_idx))
gap = 1000//fps - int(1000 * (ed - st))
if gap < 5:
gap = 5
cv.imshow("pose_recognition_from_camera_demo", frame)
if cv.waitKey(gap) & 0xFF == 27:
break
cap.release()
cv.destroyAllWindows()
| 35.507692 | 101 | 0.561958 |
12ce678d7b9581bc7d8e71fefb2ce7346256d86f | 1,901 | py | Python | reference/data_dict_export.py | TBody/atomic1D | fcab88f3b303468f23ac75b847c76244593f4b7f | [
"MIT"
] | 1 | 2019-05-18T22:32:21.000Z | 2019-05-18T22:32:21.000Z | reference/data_dict_export.py | TBody/atomic1D | fcab88f3b303468f23ac75b847c76244593f4b7f | [
"MIT"
] | null | null | null | reference/data_dict_export.py | TBody/atomic1D | fcab88f3b303468f23ac75b847c76244593f4b7f | [
"MIT"
] | null | null | null | # Program name: atomic1D/reference/build_json.py
# Author: Thomas Body
# Author email: tajb500@york.ac.uk
# Date of creation: 14 July 2017
#
#
# Makes data_dict and copies it into a .json file 'sd1d-case-05.json'
filename = 'sd1d-case-05'
from boutdata.collect import collect
data_dict = {}
# Normalisation factor for temperature - T * Tnorm returns in eV
data_dict["Tnorm"] = collect("Tnorm")
# Normalisation factor for density - N * Nnorm returns in m^-3
data_dict["Nnorm"] = collect("Nnorm")
# Plasma pressure (normalised). Pe = 2 Ne Te => P/Ne = Te (and assume Ti=Te)
data_dict["P"] = collect("P")
# Electron density (normalised)
data_dict["Ne"] = collect("Ne")
# Neutral density (normalised)
data_dict["Nn"] = collect("Nn")
# Help for user
data_dict["help"] = "Contains outputs from Boutprojects/SD1D/case-05 example. Created with data_dict_export.py - stored in Github.com/TBody/atomic1D/reference"
from copy import deepcopy
import numpy as np
import json
# Need to 'jsonify' the numpy arrays (i.e. convert to nested lists) so that they can be stored in plain-text
# Deep-copy data to a new dictionary and then edit that one (i.e. break the data pointer association - keep data_dict unchanged in case you want to run a copy-verify on it)
data_dict_jsonified = deepcopy(data_dict)
numpy_ndarrays = [];
for key, element in data_dict.items():
if type(element) == np.ndarray:
# Store which keys correspond to numpy.ndarray, so that you can de-jsonify the arrays when reading
numpy_ndarrays.append(key)
data_dict_jsonified[key] = data_dict_jsonified[key].tolist()
data_dict_jsonified['numpy_ndarrays'] = numpy_ndarrays
# Encode help
# >> data_dict['help'] = 'help string'
# <<Use original filename, except with .json instead of .dat extension>>
with open('{}.json'.format(filename),'w') as fp:
json.dump(data_dict_jsonified, fp, sort_keys=True, indent=4) | 36.557692 | 172 | 0.730668 |
12cf323ab36261eee5e0ca79f3a3c93c62ed377b | 3,300 | py | Python | wordDocComposite.py | flyonok/image2text | 0c16e6bf35eb486e6ff28e9e402a18bea6bd338c | [
"Apache-1.1"
] | null | null | null | wordDocComposite.py | flyonok/image2text | 0c16e6bf35eb486e6ff28e9e402a18bea6bd338c | [
"Apache-1.1"
] | null | null | null | wordDocComposite.py | flyonok/image2text | 0c16e6bf35eb486e6ff28e9e402a18bea6bd338c | [
"Apache-1.1"
] | null | null | null | from docx import Document
def CompositeTwoDocs(srcDocFullName, dstDocFullName, compositeName):
'''
srcDocFullName:
dstDocFullName:
compositeName:
return: ->True->False
'''
try:
srcDoc = Document(srcDocFullName)
dstDoc = Document(dstDocFullName)
srcParasMap = {} # Heading 2 => [paras list]
dstParasMap = {} # Heading 2 => [paras list]
firstPage = False
secondPage = False
currentLabelStyleContent = None #
#
for srcPara in srcDoc.paragraphs:
if (srcPara.style.name.find('Heading 2') >= 0 and srcPara.text.find(compositeName) >= 0):
print('find {0}'.format(srcPara))
firstPage = True
elif (srcPara.style.name.find('Heading 2') >= 0 and firstPage):
secondPage = True
break
else:
if (firstPage and not secondPage):
if (srcPara.style.name.find('Heading 3') >= 0):
srcParasMap[srcPara.text] = []
currentLabelStyleContent = srcPara.text
else:
if currentLabelStyleContent is None:
raise ValueError('word')
srcParasMap[currentLabelStyleContent].append(srcPara)
firstPage = False
secondPage = False
currentLabelStyleContent = None #
#
for dstPara in dstDoc.paragraphs:
if (dstPara.style.name.find('Heading 2') >= 0 and dstPara.text.find(compositeName) >= 0):
print('find {0}'.format(dstPara))
firstPage = True
elif (dstPara.style.name.find('Heading 2') >= 0 and firstPage):
secondPage = True
break
else:
if (firstPage and not secondPage):
if (dstPara.style.name.find('Heading 3') >= 0):
dstParasMap[dstPara.text] = []
currentLabelStyleContent = dstPara.text
else:
if currentLabelStyleContent is None:
raise ValueError('word')
dstParasMap[currentLabelStyleContent].append(dstPara)
#
for key, dstParas in dstParasMap.items():
srcParas = srcParasMap[key]
if len(srcParas) <= 0:
print('--{0}--'.format(key))
continue
else:
for index, item in enumerate(dstParas):
if (index <= len(srcParas)):
dstParas[index].text = srcParas[index].text
else:
print('{0}--{1}----{2}'.format(key, index, len(srcParas)))
dstDoc.save(dstDocFullName)
except Exception as e:
print('...')
print(e)
return False
return True
if __name__ == '__main__':
srcDocFullName = r'D:\\20208\-111\-111.docx'
dstDocFullName = r'D:\\20208\-456\-456.docx'
CompositeTwoDocs(srcDocFullName, dstDocFullName, '')
| 40.740741 | 101 | 0.538485 |
12d0afe950ed445eb9f7e907ee14e9a851acd904 | 4,853 | py | Python | app/cover.py | mrwiwi/tydom2mqtt | 293322033b67521bb981af1c8c2245ca9af6c646 | [
"MIT"
] | 26 | 2020-04-07T17:58:24.000Z | 2022-02-12T16:28:44.000Z | app/cover.py | mrwiwi/tydom2mqtt | 293322033b67521bb981af1c8c2245ca9af6c646 | [
"MIT"
] | 19 | 2020-03-25T09:46:46.000Z | 2021-11-29T09:55:57.000Z | app/cover.py | mrwiwi/tydom2mqtt | 293322033b67521bb981af1c8c2245ca9af6c646 | [
"MIT"
] | 26 | 2020-04-27T21:40:12.000Z | 2022-01-06T14:44:22.000Z | import json
import time
from datetime import datetime
from sensors import sensor
# MQTT topic templates for Tydom cover devices; "{id}" is substituted with the
# device id before publishing/subscribing.
cover_command_topic = "cover/tydom/{id}/set_positionCmd"
# Published under the "homeassistant/" prefix — presumably the Home Assistant
# MQTT discovery config topic for the cover entity.
cover_config_topic = "homeassistant/cover/tydom/{id}/config"
cover_position_topic = "cover/tydom/{id}/current_position"
# NOTE: "postion" typo kept as-is — renaming would break external references.
cover_set_postion_topic = "cover/tydom/{id}/set_position"
cover_attributes_topic = "cover/tydom/{id}/attributes"
| 37.914063 | 147 | 0.622708 |
12d29fab22f07b19b231bdfe08bc053825594e45 | 56,823 | py | Python | edx/config/lms/docker_run.py | openfun/learning-analytics-playground | dca80d89ca781d9060bd69927af4aa1462cc53ef | [
"MIT"
] | 1 | 2021-12-13T09:05:59.000Z | 2021-12-13T09:05:59.000Z | edx/config/lms/docker_run.py | openfun/learning-analytics-playground | dca80d89ca781d9060bd69927af4aa1462cc53ef | [
"MIT"
] | 3 | 2021-05-18T08:26:51.000Z | 2022-03-14T10:34:36.000Z | edx/config/lms/docker_run.py | openfun/learning-analytics-playground | dca80d89ca781d9060bd69927af4aa1462cc53ef | [
"MIT"
] | 1 | 2021-06-03T14:21:56.000Z | 2021-06-03T14:21:56.000Z | """
This is the default template for our main set of servers. This does NOT
cover the content machines, which use content.py
Common traits:
* Use memcached, and cache-backed sessions
* Use a MySQL 5.1 database
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import datetime
import dateutil
from glob import glob
import json
import os
from path import Path as path
import pkgutil
import platform
from django.utils.translation import ugettext_lazy
from django.conf import global_settings
from celery_redis_sentinel import register
from openedx.core.lib.logsettings import get_logger_config
from path import Path as path
from xmodule.modulestore.modulestore_settings import (
convert_module_store_setting_if_needed,
update_module_store_settings,
)
from ..common import *
from .utils import Configuration, prefer_fun_video
# Load custom configuration parameters from yaml files
config = Configuration(os.path.dirname(__file__))
# edX has now started using "settings.ENV_TOKENS" and "settings.AUTH_TOKENS" everywhere in the
# project, not just in the settings. Let's make sure our settings still work in this case
ENV_TOKENS = config
AUTH_TOKENS = config
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = config("SERVICE_VARIANT", default=None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(config("CONFIG_ROOT", default=ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is use, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
################################ ALWAYS THE SAME ##############################
RELEASE = config("RELEASE", default=None)
DEBUG = False
DEFAULT_TEMPLATE_ENGINE["OPTIONS"]["debug"] = False
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
###################################### CELERY ################################
CELERY_ALWAYS_EAGER = config("CELERY_ALWAYS_EAGER", default=False, formatter=bool)
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = config(
"CELERY_RESULT_BACKEND", default="djcelery.backends.cache:CacheBackend"
)
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 60.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Celery queues
DEFAULT_PRIORITY_QUEUE = config(
"DEFAULT_PRIORITY_QUEUE", default="edx.lms.core.default"
)
HIGH_PRIORITY_QUEUE = config("HIGH_PRIORITY_QUEUE", default="edx.lms.core.high")
LOW_PRIORITY_QUEUE = config("LOW_PRIORITY_QUEUE", default="edx.lms.core.low")
HIGH_MEM_QUEUE = config("HIGH_MEM_QUEUE", default="edx.lms.core.high_mem")
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = config(
"CELERY_QUEUES",
default={
DEFAULT_PRIORITY_QUEUE: {},
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
},
formatter=json.loads,
)
CELERY_ROUTES = "lms.celery.Router"
# Force accepted content to "json" only. If we also accept pickle-serialized
# messages, the worker will crash when it's running with a privileged user (even
# if it's not the root user but a user belonging to the root group, which is our
# case with OpenShift).
CELERY_ACCEPT_CONTENT = ["json"]
CELERYBEAT_SCHEDULE = {} # For scheduling tasks, entries can be added to this dict
########################## NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
STATIC_ROOT_BASE = path("/edx/app/edxapp/staticfiles")
STATIC_ROOT = STATIC_ROOT_BASE
STATIC_URL = "/static/"
STATICFILES_STORAGE = config(
"STATICFILES_STORAGE", default="lms.envs.fun.storage.CDNProductionStorage"
)
CDN_BASE_URL = config("CDN_BASE_URL", default=None)
MEDIA_ROOT = path("/edx/var/edxapp/media/")
MEDIA_URL = "/media/"
# DEFAULT_COURSE_ABOUT_IMAGE_URL specifies the default image to show for courses that don't provide one
DEFAULT_COURSE_ABOUT_IMAGE_URL = config(
"DEFAULT_COURSE_ABOUT_IMAGE_URL", default=DEFAULT_COURSE_ABOUT_IMAGE_URL
)
PLATFORM_NAME = config("PLATFORM_NAME", default=PLATFORM_NAME)
# For displaying on the receipt. At Stanford PLATFORM_NAME != MERCHANT_NAME, but PLATFORM_NAME is a fine default
PLATFORM_TWITTER_ACCOUNT = config(
"PLATFORM_TWITTER_ACCOUNT", default=PLATFORM_TWITTER_ACCOUNT
)
PLATFORM_FACEBOOK_ACCOUNT = config(
"PLATFORM_FACEBOOK_ACCOUNT", default=PLATFORM_FACEBOOK_ACCOUNT
)
SOCIAL_SHARING_SETTINGS = config(
"SOCIAL_SHARING_SETTINGS", default=SOCIAL_SHARING_SETTINGS, formatter=json.loads
)
# Social media links for the page footer
SOCIAL_MEDIA_FOOTER_URLS = config(
"SOCIAL_MEDIA_FOOTER_URLS", default=SOCIAL_MEDIA_FOOTER_URLS, formatter=json.loads
)
CC_MERCHANT_NAME = config("CC_MERCHANT_NAME", default=PLATFORM_NAME)
# Outbound e-mail configuration (SMTP backend by default).
EMAIL_BACKEND = config(
    "EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# Only used when EMAIL_BACKEND is the file-based backend.
EMAIL_FILE_PATH = config("EMAIL_FILE_PATH", default=None)
EMAIL_HOST = config("EMAIL_HOST", default="localhost")
# formatter=int/bool added for consistency with the other numeric and boolean
# settings in this file (e.g. SESSION_REDIS_PORT, SESSION_COOKIE_HTTPONLY),
# so environment-provided values are coerced instead of left as raw strings.
EMAIL_PORT = config("EMAIL_PORT", default=25, formatter=int)  # django default is 25
EMAIL_USE_TLS = config(
    "EMAIL_USE_TLS", default=False, formatter=bool
)  # django default is False
HTTPS = config("HTTPS", default=HTTPS)
SESSION_COOKIE_DOMAIN = config("SESSION_COOKIE_DOMAIN", default=None)
SESSION_COOKIE_HTTPONLY = config(
"SESSION_COOKIE_HTTPONLY", default=True, formatter=bool
)
SESSION_COOKIE_SECURE = config(
"SESSION_COOKIE_SECURE", default=SESSION_COOKIE_SECURE, formatter=bool
)
SESSION_ENGINE = config("SESSION_ENGINE", default="redis_sessions.session")
SESSION_SAVE_EVERY_REQUEST = config(
"SESSION_SAVE_EVERY_REQUEST", default=SESSION_SAVE_EVERY_REQUEST, formatter=bool
)
# Configuration to use session with redis
# To use redis, change SESSION_ENGINE to "redis_sessions.session"
SESSION_REDIS_HOST = config("SESSION_REDIS_HOST", default="redis")
SESSION_REDIS_PORT = config("SESSION_REDIS_PORT", default=6379, formatter=int)
SESSION_REDIS_DB = config("SESSION_REDIS_DB", default=1, formatter=int)
SESSION_REDIS_PASSWORD = config("SESSION_REDIS_PASSWORD", default=None)
SESSION_REDIS_PREFIX = config("SESSION_REDIS_PREFIX", default="session")
SESSION_REDIS_SOCKET_TIMEOUT = config(
"SESSION_REDIS_SOCKET_TIMEOUT", default=1, formatter=int
)
SESSION_REDIS_RETRY_ON_TIMEOUT = config(
"SESSION_REDIS_RETRY_ON_TIMEOUT", default=False, formatter=bool
)
SESSION_REDIS = config(
"SESSION_REDIS",
default={
"host": SESSION_REDIS_HOST,
"port": SESSION_REDIS_PORT,
"db": SESSION_REDIS_DB, # db 0 is used for Celery Broker
"password": SESSION_REDIS_PASSWORD,
"prefix": SESSION_REDIS_PREFIX,
"socket_timeout": SESSION_REDIS_SOCKET_TIMEOUT,
"retry_on_timeout": SESSION_REDIS_RETRY_ON_TIMEOUT,
},
formatter=json.loads,
)
SESSION_REDIS_SENTINEL_LIST = config(
"SESSION_REDIS_SENTINEL_LIST", default=None, formatter=json.loads
)
SESSION_REDIS_SENTINEL_MASTER_ALIAS = config(
"SESSION_REDIS_SENTINEL_MASTER_ALIAS", default=None
)
REGISTRATION_EXTRA_FIELDS = config(
"REGISTRATION_EXTRA_FIELDS", default=REGISTRATION_EXTRA_FIELDS, formatter=json.loads
)
# Set the names of cookies shared with the marketing site
# These have the same cookie domain as the session, which in production
# usually includes subdomains.
EDXMKTG_LOGGED_IN_COOKIE_NAME = config(
"EDXMKTG_LOGGED_IN_COOKIE_NAME", default=EDXMKTG_LOGGED_IN_COOKIE_NAME
)
EDXMKTG_USER_INFO_COOKIE_NAME = config(
"EDXMKTG_USER_INFO_COOKIE_NAME", default=EDXMKTG_USER_INFO_COOKIE_NAME
)
# Override feature by feature by whatever is being redefined in the settings.yaml file
CONFIG_FEATURES = config("FEATURES", default={}, formatter=json.loads)
FEATURES.update(CONFIG_FEATURES)
LMS_BASE = config("LMS_BASE", default="localhost:8072")
CMS_BASE = config("CMS_BASE", default="localhost:8082")
LMS_ROOT_URL = config("LMS_ROOT_URL", default="http://{:s}".format(LMS_BASE))
LMS_INTERNAL_ROOT_URL = config("LMS_INTERNAL_ROOT_URL", default=LMS_ROOT_URL)
SITE_NAME = config("SITE_NAME", default=LMS_BASE)
ALLOWED_HOSTS = config(
"ALLOWED_HOSTS", default=[LMS_BASE.split(":")[0]], formatter=json.loads
)
if FEATURES.get("PREVIEW_LMS_BASE"):
ALLOWED_HOSTS.append(FEATURES["PREVIEW_LMS_BASE"])
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if config("SESSION_COOKIE_NAME", default=None):
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this
# being a str()
SESSION_COOKIE_NAME = str(config("SESSION_COOKIE_NAME"))
CACHE_REDIS_HOST = config("CACHE_REDIS_HOST", default="redis")
CACHE_REDIS_PORT = config("CACHE_REDIS_PORT", default=6379, formatter=int)
CACHE_REDIS_DB = config("CACHE_REDIS_DB", default=1, formatter=int)
CACHE_REDIS_BACKEND = config(
"CACHE_REDIS_BACKEND", default="django_redis.cache.RedisCache"
)
CACHE_REDIS_URI = "redis://{}:{}/{}".format(
CACHE_REDIS_HOST, CACHE_REDIS_PORT, CACHE_REDIS_DB
)
CACHE_REDIS_CLIENT = config(
"CACHE_REDIS_CLIENT", default="django_redis.client.DefaultClient"
)
# Base configuration shared by every Django cache alias defined in CACHES below.
CACHES_DEFAULT_CONFIG = {
    "BACKEND": CACHE_REDIS_BACKEND,
    "LOCATION": CACHE_REDIS_URI,
    "OPTIONS": {"CLIENT_CLASS": CACHE_REDIS_CLIENT},
}
# When a Sentinel-aware backend is configured, LOCATION becomes a list of
# (host, port) pairs instead of a redis:// URI, and the target database is
# passed through the redis client kwargs rather than the URI path.
if "Sentinel" in CACHE_REDIS_BACKEND:
    CACHES_DEFAULT_CONFIG["LOCATION"] = [(CACHE_REDIS_HOST, CACHE_REDIS_PORT)]
    CACHES_DEFAULT_CONFIG["OPTIONS"]["SENTINEL_SERVICE_NAME"] = config(
        "CACHE_REDIS_SENTINEL_SERVICE_NAME", default="mymaster"
    )
    CACHES_DEFAULT_CONFIG["OPTIONS"]["REDIS_CLIENT_KWARGS"] = {"db": CACHE_REDIS_DB}
CACHES = config(
"CACHES",
default={
"default": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "default"}),
"general": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "general"}),
"celery": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "celery"}),
"mongo_metadata_inheritance": dict(
CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "mongo_metadata_inheritance"}
),
"openassessment_submissions": dict(
CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "openassessment_submissions"}
),
"loc_cache": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "edx_location_mem_cache",
},
# Cache backend used by Django 1.8 storage backend while processing static files
"staticfiles": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "edx_location_mem_cache",
},
},
formatter=json.loads,
)
# Email overrides
DEFAULT_FROM_EMAIL = config("DEFAULT_FROM_EMAIL", default=DEFAULT_FROM_EMAIL)
DEFAULT_FEEDBACK_EMAIL = config(
"DEFAULT_FEEDBACK_EMAIL", default=DEFAULT_FEEDBACK_EMAIL
)
ADMINS = config("ADMINS", default=ADMINS, formatter=json.loads)
SERVER_EMAIL = config("SERVER_EMAIL", default=SERVER_EMAIL)
TECH_SUPPORT_EMAIL = config("TECH_SUPPORT_EMAIL", default=TECH_SUPPORT_EMAIL)
CONTACT_EMAIL = config("CONTACT_EMAIL", default=CONTACT_EMAIL)
BUGS_EMAIL = config("BUGS_EMAIL", default=BUGS_EMAIL)
PAYMENT_SUPPORT_EMAIL = config("PAYMENT_SUPPORT_EMAIL", default=PAYMENT_SUPPORT_EMAIL)
FINANCE_EMAIL = config("FINANCE_EMAIL", default=FINANCE_EMAIL)
UNIVERSITY_EMAIL = config("UNIVERSITY_EMAIL", default=UNIVERSITY_EMAIL)
PRESS_EMAIL = config("PRESS_EMAIL", default=PRESS_EMAIL)
# Currency
PAID_COURSE_REGISTRATION_CURRENCY = config(
"PAID_COURSE_REGISTRATION_CURRENCY", default=["EUR", u"\N{euro sign}"]
)
# Payment Report Settings
PAYMENT_REPORT_GENERATOR_GROUP = config(
"PAYMENT_REPORT_GENERATOR_GROUP", default=PAYMENT_REPORT_GENERATOR_GROUP
)
# Bulk Email overrides
BULK_EMAIL_DEFAULT_FROM_EMAIL = config(
"BULK_EMAIL_DEFAULT_FROM_EMAIL", default=BULK_EMAIL_DEFAULT_FROM_EMAIL
)
BULK_EMAIL_EMAILS_PER_TASK = config(
"BULK_EMAIL_EMAILS_PER_TASK", default=BULK_EMAIL_EMAILS_PER_TASK, formatter=int
)
BULK_EMAIL_DEFAULT_RETRY_DELAY = config(
"BULK_EMAIL_DEFAULT_RETRY_DELAY",
default=BULK_EMAIL_DEFAULT_RETRY_DELAY,
formatter=int,
)
BULK_EMAIL_MAX_RETRIES = config(
"BULK_EMAIL_MAX_RETRIES", default=BULK_EMAIL_MAX_RETRIES, formatter=int
)
BULK_EMAIL_INFINITE_RETRY_CAP = config(
"BULK_EMAIL_INFINITE_RETRY_CAP",
default=BULK_EMAIL_INFINITE_RETRY_CAP,
formatter=int,
)
BULK_EMAIL_LOG_SENT_EMAILS = config(
"BULK_EMAIL_LOG_SENT_EMAILS", default=BULK_EMAIL_LOG_SENT_EMAILS, formatter=bool
)
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = config(
"BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS",
default=BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS,
formatter=int,
)
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
# We have to reset the value here, since we have changed the value of the queue name.
BULK_EMAIL_ROUTING_KEY = config("BULK_EMAIL_ROUTING_KEY", default=HIGH_PRIORITY_QUEUE)
# We can run smaller jobs on the low priority queue. See note above for why
# we have to reset the value here.
BULK_EMAIL_ROUTING_KEY_SMALL_JOBS = LOW_PRIORITY_QUEUE
# Theme overrides
THEME_NAME = config("THEME_NAME", default=None)
COMPREHENSIVE_THEME_DIR = path(
config("COMPREHENSIVE_THEME_DIR", default=COMPREHENSIVE_THEME_DIR)
)
# Marketing link overrides
MKTG_URL_LINK_MAP = config("MKTG_URL_LINK_MAP", default={}, formatter=json.loads)
SUPPORT_SITE_LINK = config("SUPPORT_SITE_LINK", default=SUPPORT_SITE_LINK)
# Mobile store URL overrides.
# formatter=json.loads added for consistency with the other dict-valued
# settings here (e.g. MKTG_URL_LINK_MAP just above), so an environment-provided
# value is parsed instead of kept as a raw string.
MOBILE_STORE_URLS = config(
    "MOBILE_STORE_URLS", default=MOBILE_STORE_URLS, formatter=json.loads
)
# Timezone overrides
TIME_ZONE = config("TIME_ZONE", default=TIME_ZONE)

# Translation overrides
LANGUAGES = config("LANGUAGES", default=LANGUAGES, formatter=json.loads)
# Lookup of language name by code, built from the (code, name) pairs.
LANGUAGE_DICT = dict(LANGUAGES)
LANGUAGE_CODE = config("LANGUAGE_CODE", default=LANGUAGE_CODE)
# formatter=bool added for consistency with the other boolean settings in this
# file (e.g. WIKI_ENABLED below), so an environment-provided value is coerced.
USE_I18N = config("USE_I18N", default=USE_I18N, formatter=bool)
# Additional installed apps
for app in config("ADDL_INSTALLED_APPS", default=[], formatter=json.loads):
INSTALLED_APPS.append(app)
WIKI_ENABLED = config("WIKI_ENABLED", default=WIKI_ENABLED, formatter=bool)
local_loglevel = config("LOCAL_LOGLEVEL", default="INFO")
# Configure Logging
LOG_DIR = config("LOG_DIR", default=path("/edx/var/logs/edx"), formatter=path)
DATA_DIR = config("DATA_DIR", default=path("/edx/app/edxapp/data"), formatter=path)
# Default format for syslog logging
standard_format = "%(asctime)s %(levelname)s %(process)d [%(name)s] %(filename)s:%(lineno)d - %(message)s"
syslog_format = (
"[variant:lms][%(name)s][env:sandbox] %(levelname)s "
"[{hostname} %(process)d] [%(filename)s:%(lineno)d] - %(message)s"
).format(hostname=platform.node().split(".")[0])
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"local": {
"formatter": "syslog_format",
"class": "logging.StreamHandler",
"level": "INFO",
},
"tracking": {
"formatter": "raw",
"class": "logging.StreamHandler",
"level": "DEBUG",
},
"console": {
"formatter": "standard",
"class": "logging.StreamHandler",
"level": "INFO",
},
},
"formatters": {
"raw": {"format": "%(message)s"},
"syslog_format": {"format": syslog_format},
"standard": {"format": standard_format},
},
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"loggers": {
"": {"level": "INFO", "propagate": False, "handlers": ["console", "local"]},
"tracking": {"level": "DEBUG", "propagate": False, "handlers": ["tracking"]},
},
}
SENTRY_DSN = config("SENTRY_DSN", default=None)
if SENTRY_DSN:
LOGGING["loggers"][""]["handlers"].append("sentry")
LOGGING["handlers"]["sentry"] = {
"class": "raven.handlers.logging.SentryHandler",
"dsn": SENTRY_DSN,
"level": "ERROR",
"environment": "production",
"release": RELEASE,
}
# Course listing / branding configuration blobs; all four are JSON-parsed so
# values can be supplied via the environment.
COURSE_LISTINGS = config("COURSE_LISTINGS", default={}, formatter=json.loads)
SUBDOMAIN_BRANDING = config("SUBDOMAIN_BRANDING", default={}, formatter=json.loads)
# formatter=json.loads added for consistency with the sibling settings above
# and below, which all parse their values as JSON.
VIRTUAL_UNIVERSITIES = config(
    "VIRTUAL_UNIVERSITIES", default=[], formatter=json.loads
)
META_UNIVERSITIES = config("META_UNIVERSITIES", default={}, formatter=json.loads)
COMMENTS_SERVICE_URL = config("COMMENTS_SERVICE_URL", default="")
COMMENTS_SERVICE_KEY = config("COMMENTS_SERVICE_KEY", default="")
CERT_NAME_SHORT = config("CERT_NAME_SHORT", default=CERT_NAME_SHORT)
CERT_NAME_LONG = config("CERT_NAME_LONG", default=CERT_NAME_LONG)
CERT_QUEUE = config("CERT_QUEUE", default="test-pull")
ZENDESK_URL = config("ZENDESK_URL", default=None)
FEEDBACK_SUBMISSION_EMAIL = config("FEEDBACK_SUBMISSION_EMAIL", default=None)
MKTG_URLS = config("MKTG_URLS", default=MKTG_URLS, formatter=json.loads)
# Badgr API
BADGR_API_TOKEN = config("BADGR_API_TOKEN", default=BADGR_API_TOKEN)
BADGR_BASE_URL = config("BADGR_BASE_URL", default=BADGR_BASE_URL)
BADGR_ISSUER_SLUG = config("BADGR_ISSUER_SLUG", default=BADGR_ISSUER_SLUG)
# git repo loading environment
GIT_REPO_DIR = config(
    "GIT_REPO_DIR", default=path("/edx/var/edxapp/course_repos"), formatter=path
)
# formatter=bool added so an environment-provided string is coerced, matching
# the other boolean settings in this file (e.g. CELERY_ALWAYS_EAGER).
GIT_IMPORT_STATIC = config("GIT_IMPORT_STATIC", default=True, formatter=bool)
# Merge CODE_JAIL overrides from the configuration into the defaults inherited
# from the common settings: dict-valued entries are merged key by key so
# unspecified sub-keys keep their defaults; anything else replaces the entry
# wholesale.
for jail_key, jail_override in config(
    "CODE_JAIL", default={}, formatter=json.loads
).items():
    existing = CODE_JAIL.get(jail_key)
    if isinstance(existing, dict):
        for sub_key, sub_value in jail_override.items():
            existing[sub_key] = sub_value
    else:
        CODE_JAIL[jail_key] = jail_override
COURSES_WITH_UNSAFE_CODE = config(
"COURSES_WITH_UNSAFE_CODE", default=[], formatter=json.loads
)
ASSET_IGNORE_REGEX = config("ASSET_IGNORE_REGEX", default=ASSET_IGNORE_REGEX)
# Event Tracking
TRACKING_IGNORE_URL_PATTERNS = config(
"TRACKING_IGNORE_URL_PATTERNS",
default=TRACKING_IGNORE_URL_PATTERNS,
formatter=json.loads,
)
# SSL external authentication settings
SSL_AUTH_EMAIL_DOMAIN = config("SSL_AUTH_EMAIL_DOMAIN", default="MIT.EDU")
SSL_AUTH_DN_FORMAT_STRING = config("SSL_AUTH_DN_FORMAT_STRING", default=None)
# Django CAS external authentication settings
CAS_EXTRA_LOGIN_PARAMS = config(
"CAS_EXTRA_LOGIN_PARAMS", default=None, formatter=json.loads
)
if FEATURES.get("AUTH_USE_CAS"):
CAS_SERVER_URL = config("CAS_SERVER_URL", default=None)
INSTALLED_APPS.append("django_cas")
MIDDLEWARE_CLASSES.append("django_cas.middleware.CASMiddleware")
CAS_ATTRIBUTE_CALLBACK = config(
"CAS_ATTRIBUTE_CALLBACK", default=None, formatter=json.loads
)
if CAS_ATTRIBUTE_CALLBACK:
import importlib
CAS_USER_DETAILS_RESOLVER = getattr(
importlib.import_module(CAS_ATTRIBUTE_CALLBACK["module"]),
CAS_ATTRIBUTE_CALLBACK["function"],
)
# Video Caching. Pairing country codes with CDN URLs.
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='}
VIDEO_CDN_URL = config("VIDEO_CDN_URL", default={}, formatter=json.loads)
# Branded footer
FOOTER_OPENEDX_URL = config("FOOTER_OPENEDX_URL", default=FOOTER_OPENEDX_URL)
FOOTER_OPENEDX_LOGO_IMAGE = config(
"FOOTER_OPENEDX_LOGO_IMAGE", default=FOOTER_OPENEDX_LOGO_IMAGE
)
FOOTER_ORGANIZATION_IMAGE = config(
"FOOTER_ORGANIZATION_IMAGE", default=FOOTER_ORGANIZATION_IMAGE
)
FOOTER_CACHE_TIMEOUT = config(
"FOOTER_CACHE_TIMEOUT", default=FOOTER_CACHE_TIMEOUT, formatter=int
)
FOOTER_BROWSER_CACHE_MAX_AGE = config(
"FOOTER_BROWSER_CACHE_MAX_AGE", default=FOOTER_BROWSER_CACHE_MAX_AGE, formatter=int
)
# Credit notifications settings
NOTIFICATION_EMAIL_CSS = config(
"NOTIFICATION_EMAIL_CSS", default=NOTIFICATION_EMAIL_CSS
)
NOTIFICATION_EMAIL_EDX_LOGO = config(
"NOTIFICATION_EMAIL_EDX_LOGO", default=NOTIFICATION_EMAIL_EDX_LOGO
)
############# CORS headers for cross-domain requests #################
if FEATURES.get("ENABLE_CORS_HEADERS") or FEATURES.get(
"ENABLE_CROSS_DOMAIN_CSRF_COOKIE"
):
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = config(
"CORS_ORIGIN_WHITELIST", default=(), formatter=json.loads
)
CORS_ORIGIN_ALLOW_ALL = config(
"CORS_ORIGIN_ALLOW_ALL", default=False, formatter=bool
)
CORS_ALLOW_INSECURE = config("CORS_ALLOW_INSECURE", default=False, formatter=bool)
# If setting a cross-domain cookie, it's really important to choose
# a name for the cookie that is DIFFERENT than the cookies used
# by each subdomain. For example, suppose the applications
# at these subdomains are configured to use the following cookie names:
#
# 1) foo.example.com --> "csrftoken"
# 2) baz.example.com --> "csrftoken"
# 3) bar.example.com --> "csrftoken"
#
# For the cross-domain version of the CSRF cookie, you need to choose
# a name DIFFERENT than "csrftoken"; otherwise, the new token configured
# for ".example.com" could conflict with the other cookies,
# non-deterministically causing 403 responses.
#
# Because of the way Django stores cookies, the cookie name MUST
# be a `str`, not unicode. Otherwise there will `TypeError`s will be raised
# when Django tries to call the unicode `translate()` method with the wrong
# number of parameters.
CROSS_DOMAIN_CSRF_COOKIE_NAME = str(config("CROSS_DOMAIN_CSRF_COOKIE_NAME"))
# When setting the domain for the "cross-domain" version of the CSRF
# cookie, you should choose something like: ".example.com"
# (note the leading dot), where both the referer and the host
# are subdomains of "example.com".
#
# Browser security rules require that
# the cookie domain matches the domain of the server; otherwise
# the cookie won't get set. And once the cookie gets set, the client
# needs to be on a domain that matches the cookie domain, otherwise
# the client won't be able to read the cookie.
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN = config("CROSS_DOMAIN_CSRF_COOKIE_DOMAIN")
# Field overrides. To use the IDDE feature, add
# 'courseware.student_field_overrides.IndividualStudentOverrideProvider'.
FIELD_OVERRIDE_PROVIDERS = tuple(
config("FIELD_OVERRIDE_PROVIDERS", default=[], formatter=json.loads)
)
############################## SECURE AUTH ITEMS ###############
# Secret things: passwords, access keys, etc.
############### XBlock filesystem field config ##########
DJFS = config(
"DJFS",
default={
"directory_root": "/edx/var/edxapp/django-pyfs/static/django-pyfs",
"type": "osfs",
"url_root": "/static/django-pyfs",
},
formatter=json.loads,
)
############### Module Store Items ##########
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = config(
"HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS", default={}, formatter=json.loads
)
# PREVIEW DOMAIN must be present in HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS for the preview to show draft changes
if "PREVIEW_LMS_BASE" in FEATURES and FEATURES["PREVIEW_LMS_BASE"] != "":
PREVIEW_DOMAIN = FEATURES["PREVIEW_LMS_BASE"].split(":")[0]
# update dictionary with preview domain regex
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS.update({PREVIEW_DOMAIN: "draft-preferred"})
############### Mixed Related(Secure/Not-Secure) Items ##########
LMS_SEGMENT_KEY = config("LMS_SEGMENT_KEY", default=None)
CC_PROCESSOR_NAME = config("CC_PROCESSOR_NAME", default=CC_PROCESSOR_NAME)
CC_PROCESSOR = config("CC_PROCESSOR", default=CC_PROCESSOR)
SECRET_KEY = config("SECRET_KEY", default="ThisisAnExampleKeyForDevPurposeOnly")
# Authentication backends
# - behind a proxy, use: "lms.envs.fun.backends.ProxyRateLimitModelBackend"
# - for LTI provider, add: "lti_provider.users.LtiBackend"
# - for CAS, add: "django_cas.backends.CASBackend"
AUTHENTICATION_BACKENDS = config(
"AUTHENTICATION_BACKENDS",
default=("lms.envs.fun.backends.ProxyRateLimitModelBackend",),
)
DEFAULT_FILE_STORAGE = config(
"DEFAULT_FILE_STORAGE", default="django.core.files.storage.FileSystemStorage"
)
# Specific setting for the File Upload Service to store media in a bucket.
FILE_UPLOAD_STORAGE_BUCKET_NAME = config(
"FILE_UPLOAD_STORAGE_BUCKET_NAME", default="uploads"
)
FILE_UPLOAD_STORAGE_PREFIX = config(
"FILE_UPLOAD_STORAGE_PREFIX", default=FILE_UPLOAD_STORAGE_PREFIX
)
# If there is a database called 'read_replica', you can use the use_read_replica_if_available
# function in util/query.py, which is useful for very large database reads
DATABASE_ENGINE = config("DATABASE_ENGINE", default="django.db.backends.mysql")
DATABASE_HOST = config("DATABASE_HOST", default="mysql")
DATABASE_PORT = config("DATABASE_PORT", default=3306, formatter=int)
DATABASE_NAME = config("DATABASE_NAME", default="edxapp")
DATABASE_USER = config("DATABASE_USER", default="edxapp_user")
DATABASE_PASSWORD = config("DATABASE_PASSWORD", default="password")
DATABASES = config(
"DATABASES",
default={
"default": {
"ENGINE": DATABASE_ENGINE,
"HOST": DATABASE_HOST,
"PORT": DATABASE_PORT,
"NAME": DATABASE_NAME,
"USER": DATABASE_USER,
"PASSWORD": DATABASE_PASSWORD,
}
},
formatter=json.loads,
)
# Enable automatic transaction management on all databases
# https://docs.djangoproject.com/en/1.8/topics/db/transactions/#tying-transactions-to-http-requests
# This needs to be true for all databases
for _db_settings in DATABASES.values():
    _db_settings["ATOMIC_REQUESTS"] = True
XQUEUE_INTERFACE = config(
"XQUEUE_INTERFACE",
default={"url": None, "basic_auth": None, "django_auth": None},
formatter=json.loads,
)
# Configure the MODULESTORE
MODULESTORE = convert_module_store_setting_if_needed(
config("MODULESTORE", default=MODULESTORE, formatter=json.loads)
)
MONGODB_PASSWORD = config("MONGODB_PASSWORD", default="")
MONGODB_HOST = config("MONGODB_HOST", default="mongodb")
MONGODB_PORT = config("MONGODB_PORT", default=27017, formatter=int)
MONGODB_NAME = config("MONGODB_NAME", default="edxapp")
MONGODB_USER = config("MONGODB_USER", default=None)
MONGODB_SSL = config("MONGODB_SSL", default=False, formatter=bool)
MONGODB_REPLICASET = config("MONGODB_REPLICASET", default=None)
# Accepted read_preference value can be found here https://github.com/mongodb/mongo-python-driver/blob/2.9.1/pymongo/read_preferences.py#L54
MONGODB_READ_PREFERENCE = config("MONGODB_READ_PREFERENCE", default="PRIMARY")
DOC_STORE_CONFIG = config(
"DOC_STORE_CONFIG",
default={
"collection": "modulestore",
"host": MONGODB_HOST,
"port": MONGODB_PORT,
"db": MONGODB_NAME,
"user": MONGODB_USER,
"password": MONGODB_PASSWORD,
"ssl": MONGODB_SSL,
"replicaSet": MONGODB_REPLICASET,
"read_preference": MONGODB_READ_PREFERENCE,
},
formatter=json.loads,
)
update_module_store_settings(MODULESTORE, doc_store_settings=DOC_STORE_CONFIG)
MONGODB_LOG = config("MONGODB_LOG", default={}, formatter=json.loads)
CONTENTSTORE = config(
"CONTENTSTORE",
default={
"DOC_STORE_CONFIG": DOC_STORE_CONFIG,
"ENGINE": "xmodule.contentstore.mongo.MongoContentStore",
},
formatter=json.loads,
)
EMAIL_HOST_USER = config("EMAIL_HOST_USER", default="") # django default is ''
EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD", default="") # django default is ''
# Datadog for events!
DATADOG = config("DATADOG", default={}, formatter=json.loads)
# TODO: deprecated (compatibility with previous settings)
DATADOG_API = config("DATADOG_API", default=None)
# Analytics dashboard server
ANALYTICS_SERVER_URL = config("ANALYTICS_SERVER_URL", default=None)
ANALYTICS_API_KEY = config("ANALYTICS_API_KEY", default="")
# Analytics data source
ANALYTICS_DATA_URL = config("ANALYTICS_DATA_URL", default=ANALYTICS_DATA_URL)
ANALYTICS_DATA_TOKEN = config("ANALYTICS_DATA_TOKEN", default=ANALYTICS_DATA_TOKEN)
# Analytics Dashboard
# when True this setting add a link in instructor dashbord to analytics insight service
ANALYTICS_DASHBOARD_URL = config(
"ANALYTICS_DASHBOARD_URL", default=False, formatter=bool
)
ANALYTICS_DASHBOARD_NAME = config(
"ANALYTICS_DASHBOARD_NAME", default=PLATFORM_NAME + " Insights"
)
# Mailchimp New User List
MAILCHIMP_NEW_USER_LIST_ID = config("MAILCHIMP_NEW_USER_LIST_ID", default=None)
# Zendesk
ZENDESK_USER = config("ZENDESK_USER", default=None)
ZENDESK_API_KEY = config("ZENDESK_API_KEY", default=None)
# API Key for inbound requests from Notifier service
EDX_API_KEY = config("EDX_API_KEY", default=None)
# Celery Broker
# For redis sentinel use the redis-sentinel transport
CELERY_BROKER_TRANSPORT = config("CELERY_BROKER_TRANSPORT", default="redis")
CELERY_BROKER_USER = config("CELERY_BROKER_USER", default="")
CELERY_BROKER_PASSWORD = config("CELERY_BROKER_PASSWORD", default="")
CELERY_BROKER_HOST = config("CELERY_BROKER_HOST", default="redis")
CELERY_BROKER_PORT = config("CELERY_BROKER_PORT", default=6379, formatter=int)
CELERY_BROKER_VHOST = config("CELERY_BROKER_VHOST", default=0, formatter=int)
if CELERY_BROKER_TRANSPORT == "redis-sentinel":
# register redis sentinel schema in celery
register()
BROKER_URL = "{transport}://{user}:{password}@{host}:{port}/{vhost}".format(
transport=CELERY_BROKER_TRANSPORT,
user=CELERY_BROKER_USER,
password=CELERY_BROKER_PASSWORD,
host=CELERY_BROKER_HOST,
port=CELERY_BROKER_PORT,
vhost=CELERY_BROKER_VHOST,
)
# To use redis-sentinel, refer to the documenation here
# https://celery-redis-sentinel.readthedocs.io/en/latest/
BROKER_TRANSPORT_OPTIONS = config(
"BROKER_TRANSPORT_OPTIONS", default={}, formatter=json.loads
)
# upload limits
STUDENT_FILEUPLOAD_MAX_SIZE = config(
"STUDENT_FILEUPLOAD_MAX_SIZE", default=STUDENT_FILEUPLOAD_MAX_SIZE, formatter=int
)
# Event tracking
TRACKING_BACKENDS.update(config("TRACKING_BACKENDS", default={}, formatter=json.loads))
EVENT_TRACKING_BACKENDS["tracking_logs"]["OPTIONS"]["backends"].update(
config("EVENT_TRACKING_BACKENDS", default={}, formatter=json.loads)
)
EVENT_TRACKING_BACKENDS["segmentio"]["OPTIONS"]["processors"][0]["OPTIONS"][
"whitelist"
].extend(
config("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", default=[], formatter=json.loads)
)
TRACKING_SEGMENTIO_WEBHOOK_SECRET = config(
"TRACKING_SEGMENTIO_WEBHOOK_SECRET", default=TRACKING_SEGMENTIO_WEBHOOK_SECRET
)
TRACKING_SEGMENTIO_ALLOWED_TYPES = config(
"TRACKING_SEGMENTIO_ALLOWED_TYPES",
default=TRACKING_SEGMENTIO_ALLOWED_TYPES,
formatter=json.loads,
)
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES = config(
"TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES",
default=TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES,
formatter=json.loads,
)
TRACKING_SEGMENTIO_SOURCE_MAP = config(
"TRACKING_SEGMENTIO_SOURCE_MAP",
default=TRACKING_SEGMENTIO_SOURCE_MAP,
formatter=json.loads,
)
# Student identity verification settings
VERIFY_STUDENT = config("VERIFY_STUDENT", default=VERIFY_STUDENT, formatter=json.loads)
# Grades download
GRADES_DOWNLOAD_ROUTING_KEY = config(
    "GRADES_DOWNLOAD_ROUTING_KEY", default=HIGH_MEM_QUEUE
)
# Bug fix: a second, formatter-less lookup of the same key used to follow this
# one, overwriting the JSON-parsed value with the raw environment string.
# The redundant call has been removed so the parsed dict is kept.
GRADES_DOWNLOAD = config(
    "GRADES_DOWNLOAD", default=GRADES_DOWNLOAD, formatter=json.loads
)
# financial reports
FINANCIAL_REPORTS = config(
"FINANCIAL_REPORTS", default=FINANCIAL_REPORTS, formatter=json.loads
)
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = config(
"MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", default=5, formatter=int
)
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = config(
"MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", default=15 * 60, formatter=int
)
MICROSITE_CONFIGURATION = config(
"MICROSITE_CONFIGURATION", default={}, formatter=json.loads
)
MICROSITE_ROOT_DIR = path(config("MICROSITE_ROOT_DIR", default=""))
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = config("PASSWORD_MIN_LENGTH", default=12, formatter=int)
PASSWORD_MAX_LENGTH = config("PASSWORD_MAX_LENGTH", default=None, formatter=int)
PASSWORD_COMPLEXITY = config(
"PASSWORD_COMPLEXITY",
default={"UPPER": 1, "LOWER": 1, "DIGITS": 1},
formatter=json.loads,
)
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = config(
"PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD",
default=PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD,
formatter=int,
)
PASSWORD_DICTIONARY = config("PASSWORD_DICTIONARY", default=[], formatter=json.loads)
### INACTIVITY SETTINGS ####
SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = config(
"SESSION_INACTIVITY_TIMEOUT_IN_SECONDS", default=None, formatter=int
)
##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = config(
"TIME_ZONE_DISPLAYED_FOR_DEADLINES", default=TIME_ZONE_DISPLAYED_FOR_DEADLINES
)
##### X-Frame-Options response header settings #####
X_FRAME_OPTIONS = config("X_FRAME_OPTIONS", default=X_FRAME_OPTIONS)
##### Third-party auth options ################################################
if FEATURES.get("ENABLE_THIRD_PARTY_AUTH"):
# The reduced session expiry time during the third party login pipeline. (Value in seconds)
SOCIAL_AUTH_PIPELINE_TIMEOUT = config("SOCIAL_AUTH_PIPELINE_TIMEOUT", default=600)
# The SAML private/public key values do not need the delimiter lines (such as
# "-----BEGIN PRIVATE KEY-----", default="-----END PRIVATE KEY-----" etc.) but they may be included
# if you want (though it's easier to format the key values as JSON without the delimiters).
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = config(
"SOCIAL_AUTH_SAML_SP_PRIVATE_KEY", default=""
)
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = config(
"SOCIAL_AUTH_SAML_SP_PUBLIC_CERT", default=""
)
SOCIAL_AUTH_OAUTH_SECRETS = config(
"SOCIAL_AUTH_OAUTH_SECRETS", default={}, formatter=json.loads
)
SOCIAL_AUTH_LTI_CONSUMER_SECRETS = config(
"SOCIAL_AUTH_LTI_CONSUMER_SECRETS", default={}, formatter=json.loads
)
# third_party_auth config moved to ConfigurationModels. This is for data migration only:
THIRD_PARTY_AUTH_OLD_CONFIG = config("THIRD_PARTY_AUTH", default=None)
if (
config("THIRD_PARTY_AUTH_SAML_FETCH_PERIOD_HOURS", default=24, formatter=int)
is not None
):
CELERYBEAT_SCHEDULE["refresh-saml-metadata"] = {
"task": "third_party_auth.fetch_saml_metadata",
"schedule": datetime.timedelta(
hours=config(
"THIRD_PARTY_AUTH_SAML_FETCH_PERIOD_HOURS",
default=24,
formatter=int,
)
),
}
# The following can be used to integrate a custom login form with third_party_auth.
# It should be a dict where the key is a word passed via ?auth_entry=, and the value is a
# dict with an arbitrary 'secret_key' and a 'url'.
THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS = config(
"THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS", default={}, formatter=json.loads
)
##### OAUTH2 Provider ##############
if FEATURES.get("ENABLE_OAUTH2_PROVIDER"):
OAUTH_OIDC_ISSUER = config("OAUTH_OIDC_ISSUER", default=None)
OAUTH_ENFORCE_SECURE = config("OAUTH_ENFORCE_SECURE", default=True, formatter=bool)
OAUTH_ENFORCE_CLIENT_SECURE = config(
"OAUTH_ENFORCE_CLIENT_SECURE", default=True, formatter=bool
)
##### ADVANCED_SECURITY_CONFIG #####
ADVANCED_SECURITY_CONFIG = config(
"ADVANCED_SECURITY_CONFIG", default={}, formatter=json.loads
)
##### GOOGLE ANALYTICS IDS #####
GOOGLE_ANALYTICS_ACCOUNT = config("GOOGLE_ANALYTICS_ACCOUNT", default=None)
GOOGLE_ANALYTICS_LINKEDIN = config("GOOGLE_ANALYTICS_LINKEDIN", default=None)
##### OPTIMIZELY PROJECT ID #####
OPTIMIZELY_PROJECT_ID = config("OPTIMIZELY_PROJECT_ID", default=OPTIMIZELY_PROJECT_ID)
#### Course Registration Code length ####
REGISTRATION_CODE_LENGTH = config("REGISTRATION_CODE_LENGTH", default=8, formatter=int)
# REGISTRATION CODES DISPLAY INFORMATION
INVOICE_CORP_ADDRESS = config("INVOICE_CORP_ADDRESS", default=INVOICE_CORP_ADDRESS)
INVOICE_PAYMENT_INSTRUCTIONS = config(
"INVOICE_PAYMENT_INSTRUCTIONS", default=INVOICE_PAYMENT_INSTRUCTIONS
)
# Which access.py permission names to check;
# We default this to the legacy permission 'see_exists'.
COURSE_CATALOG_VISIBILITY_PERMISSION = config(
"COURSE_CATALOG_VISIBILITY_PERMISSION", default=COURSE_CATALOG_VISIBILITY_PERMISSION
)
COURSE_ABOUT_VISIBILITY_PERMISSION = config(
"COURSE_ABOUT_VISIBILITY_PERMISSION", default=COURSE_ABOUT_VISIBILITY_PERMISSION
)
# Enrollment API Cache Timeout
ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT = config(
"ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT", default=60, formatter=int
)
# PDF RECEIPT/INVOICE OVERRIDES
PDF_RECEIPT_TAX_ID = config("PDF_RECEIPT_TAX_ID", default=PDF_RECEIPT_TAX_ID)
PDF_RECEIPT_FOOTER_TEXT = config(
"PDF_RECEIPT_FOOTER_TEXT", default=PDF_RECEIPT_FOOTER_TEXT
)
PDF_RECEIPT_DISCLAIMER_TEXT = config(
"PDF_RECEIPT_DISCLAIMER_TEXT", default=PDF_RECEIPT_DISCLAIMER_TEXT
)
PDF_RECEIPT_BILLING_ADDRESS = config(
"PDF_RECEIPT_BILLING_ADDRESS", default=PDF_RECEIPT_BILLING_ADDRESS
)
PDF_RECEIPT_TERMS_AND_CONDITIONS = config(
"PDF_RECEIPT_TERMS_AND_CONDITIONS", default=PDF_RECEIPT_TERMS_AND_CONDITIONS
)
PDF_RECEIPT_TAX_ID_LABEL = config(
"PDF_RECEIPT_TAX_ID_LABEL", default=PDF_RECEIPT_TAX_ID_LABEL
)
PDF_RECEIPT_LOGO_PATH = config("PDF_RECEIPT_LOGO_PATH", default=PDF_RECEIPT_LOGO_PATH)
PDF_RECEIPT_COBRAND_LOGO_PATH = config(
"PDF_RECEIPT_COBRAND_LOGO_PATH", default=PDF_RECEIPT_COBRAND_LOGO_PATH
)
PDF_RECEIPT_LOGO_HEIGHT_MM = config(
"PDF_RECEIPT_LOGO_HEIGHT_MM", default=PDF_RECEIPT_LOGO_HEIGHT_MM, formatter=int
)
PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM = config(
"PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM",
default=PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM,
formatter=int,
)
if (
FEATURES.get("ENABLE_COURSEWARE_SEARCH")
or FEATURES.get("ENABLE_DASHBOARD_SEARCH")
or FEATURES.get("ENABLE_COURSE_DISCOVERY")
or FEATURES.get("ENABLE_TEAMS")
):
# Use ElasticSearch as the search engine herein
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
ELASTIC_SEARCH_CONFIG = config(
"ELASTIC_SEARCH_CONFIG", default=[{}], formatter=json.loads
)
# Facebook app
FACEBOOK_API_VERSION = config("FACEBOOK_API_VERSION", default=None)
FACEBOOK_APP_SECRET = config("FACEBOOK_APP_SECRET", default=None)
FACEBOOK_APP_ID = config("FACEBOOK_APP_ID", default=None)
XBLOCK_SETTINGS = config("XBLOCK_SETTINGS", default={}, formatter=json.loads)
XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get(
"LICENSING", False
)
XBLOCK_SETTINGS.setdefault("VideoModule", {})["YOUTUBE_API_KEY"] = config(
"YOUTUBE_API_KEY", default=YOUTUBE_API_KEY
)
##### CDN EXPERIMENT/MONITORING FLAGS #####
CDN_VIDEO_URLS = config("CDN_VIDEO_URLS", default=CDN_VIDEO_URLS)
ONLOAD_BEACON_SAMPLE_RATE = config(
"ONLOAD_BEACON_SAMPLE_RATE", default=ONLOAD_BEACON_SAMPLE_RATE
)
##### ECOMMERCE API CONFIGURATION SETTINGS #####
ECOMMERCE_PUBLIC_URL_ROOT = config(
"ECOMMERCE_PUBLIC_URL_ROOT", default=ECOMMERCE_PUBLIC_URL_ROOT
)
ECOMMERCE_API_URL = config("ECOMMERCE_API_URL", default=ECOMMERCE_API_URL)
ECOMMERCE_API_TIMEOUT = config(
"ECOMMERCE_API_TIMEOUT", default=ECOMMERCE_API_TIMEOUT, formatter=int
)
ECOMMERCE_SERVICE_WORKER_USERNAME = config(
"ECOMMERCE_SERVICE_WORKER_USERNAME", default=ECOMMERCE_SERVICE_WORKER_USERNAME
)
ECOMMERCE_API_TIMEOUT = config("ECOMMERCE_API_TIMEOUT", default=ECOMMERCE_API_TIMEOUT)
ECOMMERCE_API_SIGNING_KEY = config(
"ECOMMERCE_API_SIGNING_KEY", default=ECOMMERCE_API_SIGNING_KEY
)
##### Custom Courses for EdX #####
if FEATURES.get("CUSTOM_COURSES_EDX"):
INSTALLED_APPS += ("lms.djangoapps.ccx",)
FIELD_OVERRIDE_PROVIDERS += (
"lms.djangoapps.ccx.overrides.CustomCoursesForEdxOverrideProvider",
)
CCX_MAX_STUDENTS_ALLOWED = config(
"CCX_MAX_STUDENTS_ALLOWED", default=CCX_MAX_STUDENTS_ALLOWED
)
##### Individual Due Date Extensions #####
if FEATURES.get("INDIVIDUAL_DUE_DATES"):
FIELD_OVERRIDE_PROVIDERS += (
"courseware.student_field_overrides.IndividualStudentOverrideProvider",
)
##### Self-Paced Course Due Dates #####
FIELD_OVERRIDE_PROVIDERS += (
"courseware.self_paced_overrides.SelfPacedDateOverrideProvider",
)
# PROFILE IMAGE CONFIG
PROFILE_IMAGE_BACKEND = config("PROFILE_IMAGE_BACKEND", default=PROFILE_IMAGE_BACKEND)
PROFILE_IMAGE_SECRET_KEY = config(
"PROFILE_IMAGE_SECRET_KEY", default=PROFILE_IMAGE_SECRET_KEY
)
PROFILE_IMAGE_MAX_BYTES = config(
"PROFILE_IMAGE_MAX_BYTES", default=PROFILE_IMAGE_MAX_BYTES, formatter=int
)
PROFILE_IMAGE_MIN_BYTES = config(
"PROFILE_IMAGE_MIN_BYTES", default=PROFILE_IMAGE_MIN_BYTES, formatter=int
)
PROFILE_IMAGE_DEFAULT_FILENAME = "images/profiles/default"
# EdxNotes config
EDXNOTES_PUBLIC_API = config("EDXNOTES_PUBLIC_API", default=EDXNOTES_PUBLIC_API)
EDXNOTES_INTERNAL_API = config("EDXNOTES_INTERNAL_API", default=EDXNOTES_INTERNAL_API)
##### Credit Provider Integration #####
CREDIT_PROVIDER_SECRET_KEYS = config(
"CREDIT_PROVIDER_SECRET_KEYS", default={}, formatter=json.loads
)
##################### LTI Provider #####################
if FEATURES.get("ENABLE_LTI_PROVIDER"):
INSTALLED_APPS += ("lti_provider",)
LTI_USER_EMAIL_DOMAIN = config("LTI_USER_EMAIL_DOMAIN", default="lti.example.com")
# For more info on this, see the notes in common.py
LTI_AGGREGATE_SCORE_PASSBACK_DELAY = config(
"LTI_AGGREGATE_SCORE_PASSBACK_DELAY", default=LTI_AGGREGATE_SCORE_PASSBACK_DELAY
)
##################### Credit Provider help link ####################
CREDIT_HELP_LINK_URL = config("CREDIT_HELP_LINK_URL", default=CREDIT_HELP_LINK_URL)
#### JWT configuration ####
JWT_ISSUER = config("JWT_ISSUER", default=JWT_ISSUER)
JWT_EXPIRATION = config("JWT_EXPIRATION", default=JWT_EXPIRATION)
################# PROCTORING CONFIGURATION ##################
PROCTORING_BACKEND_PROVIDER = config(
"PROCTORING_BACKEND_PROVIDER", default=PROCTORING_BACKEND_PROVIDER
)
PROCTORING_SETTINGS = config(
"PROCTORING_SETTINGS", default=PROCTORING_SETTINGS, formatter=json.loads
)
################# MICROSITE ####################
MICROSITE_CONFIGURATION = config(
"MICROSITE_CONFIGURATION", default={}, formatter=json.loads
)
MICROSITE_ROOT_DIR = path(config("MICROSITE_ROOT_DIR", default=""))
# Cutoff date for granting audit certificates
if config("AUDIT_CERT_CUTOFF_DATE", default=None):
AUDIT_CERT_CUTOFF_DATE = dateutil.parser.parse(
config("AUDIT_CERT_CUTOFF_DATE", default=AUDIT_CERT_CUTOFF_DATE)
)
################ CONFIGURABLE LTI CONSUMER ###############
# Add just the standard LTI consumer by default, forcing it to open in a new window and ask
# the user before sending email and username:
LTI_XBLOCK_CONFIGURATIONS = config(
"LTI_XBLOCK_CONFIGURATIONS",
default=[
{
"display_name": "LTI consumer",
"pattern": ".*",
"hidden_fields": [
"ask_to_send_email",
"ask_to_send_username",
"new_window",
],
"defaults": {
"ask_to_send_email": True,
"ask_to_send_username": True,
"launch_target": "new_window",
},
}
],
formatter=json.loads,
)
LTI_XBLOCK_SECRETS = config("LTI_XBLOCK_SECRETS", default={}, formatter=json.loads)
################################ FUN stuff ################################
SITE_VARIANT = "lms"
# Environment's name displayed in FUN's backoffice
ENVIRONMENT = config("ENVIRONMENT", default="no set")
BASE_ROOT = path("/edx/app/edxapp/")
# Fun-apps configuration
INSTALLED_APPS += (
"backoffice",
"bootstrapform",
"ckeditor",
"course_dashboard",
"course_pages",
"courses_api",
"courses",
"easy_thumbnails",
"edx_gea",
"forum_contributors",
"fun_api",
"fun_certificates",
"fun_instructor",
"fun",
"funsite",
"haystack",
"masquerade",
"newsfeed",
"password_container",
"payment_api",
"payment",
"pure_pagination",
"raven.contrib.django.raven_compat",
"rest_framework.authtoken",
"teachers",
"universities",
"videoproviders",
)
ROOT_URLCONF = "fun.lms.urls"
# Related Richie platform url
PLATFORM_RICHIE_URL = config("PLATFORM_RICHIE_URL", default=None)
# Haystack configuration (default is minimal working configuration)
HAYSTACK_CONNECTIONS = config(
"HAYSTACK_CONNECTIONS",
default={
"default": {"ENGINE": "courses.search_indexes.ConfigurableElasticSearchEngine"}
},
formatter=json.loads,
)
CKEDITOR_UPLOAD_PATH = "./"
CKEDITOR_CONFIGS = {
"default": {
"toolbar": [
[
"Undo",
"Redo",
"-",
"Bold",
"Italic",
"Underline",
"-",
"Link",
"Unlink",
"Anchor",
"-",
"Format",
"-",
"SpellChecker",
"Scayt",
"-",
"Maximize",
],
[
"HorizontalRule",
"-",
"Table",
"-",
"BulletedList",
"NumberedList",
"-",
"Cut",
"Copy",
"Paste",
"PasteText",
"PasteFromWord",
"-",
"SpecialChar",
"-",
"Source",
],
],
"toolbarCanCollapse": False,
"entities": False,
"width": 955,
"uiColor": "#9AB8F3",
},
"news": {
# Redefine path where the news images/files are uploaded. This would
# better be done at runtime with the 'reverse' function, but
# unfortunately there is no way around defining this in the settings
# file.
"filebrowserUploadUrl": "/news/ckeditor/upload/",
"filebrowserBrowseUrl": "/news/ckeditor/browse/",
"toolbar_Full": [
[
"Styles",
"Format",
"Bold",
"Italic",
"Underline",
"Strike",
"SpellChecker",
"Undo",
"Redo",
],
["Image", "Flash", "Table", "HorizontalRule"],
["NumberedList", "BulletedList", "Blockquote", "TextColor", "BGColor"],
["Smiley", "SpecialChar"],
["Source"],
],
},
}
# ### FUN-APPS SETTINGS ###
# This is dist-packages path where all fun-apps are
FUN_BASE_ROOT = path(os.path.dirname(pkgutil.get_loader("funsite").filename))
SHARED_ROOT = DATA_DIR / "shared"
# Add FUN applications templates directories to MAKO template finder before edX's ones
MAKO_TEMPLATES["main"] = [
# overrides template in edx-platform/lms/templates
FUN_BASE_ROOT / "funsite/templates/lms",
FUN_BASE_ROOT / "funsite/templates",
FUN_BASE_ROOT / "course_pages/templates",
FUN_BASE_ROOT / "payment/templates",
FUN_BASE_ROOT / "course_dashboard/templates",
FUN_BASE_ROOT / "newsfeed/templates",
FUN_BASE_ROOT / "fun_certificates/templates",
] + MAKO_TEMPLATES["main"]
# JS static override
DEFAULT_TEMPLATE_ENGINE["DIRS"].append(FUN_BASE_ROOT / "funsite/templates/lms")
FUN_SMALL_LOGO_RELATIVE_PATH = "funsite/images/logos/funmooc173.png"
FUN_BIG_LOGO_RELATIVE_PATH = "funsite/images/logos/funmoocfp.png"
FAVICON_PATH = "fun/images/favicon.ico"
# Locale paths
# Here we rewrite LOCAL_PATHS to give precedence to our applications above edx-platform's ones,
# then we add xblocks which provide translations as there is no native mechanism to handle this
# See Xblock i18n: http://www.libremente.eu/2017/12/06/edx-translation/
LOCALIZED_FUN_APPS = [
"backoffice",
"course_dashboard",
"course_pages",
"courses",
"fun_api",
"fun_certificates",
"funsite",
"newsfeed",
"payment",
"universities",
"videoproviders",
]
LOCALE_PATHS = [FUN_BASE_ROOT / app / "locale" for app in LOCALIZED_FUN_APPS]
LOCALE_PATHS.append(REPO_ROOT / "conf/locale") # edx-platform locales
LOCALE_PATHS.append(path(pkgutil.get_loader("proctor_exam").filename) / "locale")
# -- Certificates
CERTIFICATES_DIRECTORY_NAME = "attestations"
FUN_LOGO_PATH = FUN_BASE_ROOT / "funsite/static" / FUN_BIG_LOGO_RELATIVE_PATH
FUN_ATTESTATION_LOGO_PATH = (
FUN_BASE_ROOT / "funsite/static" / "funsite/images/logos/funmoocattest.png"
)
STUDENT_NAME_FOR_TEST_CERTIFICATE = "Test User"
# Videofront subtitles cache
CACHES["video_subtitles"] = {
"BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
"KEY_PREFIX": "video_subtitles",
"LOCATION": DATA_DIR / "video_subtitles_cache",
}
# Course image thumbnails
FUN_THUMBNAIL_OPTIONS = {
"small": {"size": (270, 152), "crop": "smart"},
"big": {"size": (337, 191), "crop": "smart"},
"about": {"size": (730, 412), "crop": "scale"},
"facebook": {
"size": (600, 315),
"crop": "smart",
}, # https://developers.facebook.com/docs/sharing/best-practices
}
THUMBNAIL_PRESERVE_EXTENSIONS = True
THUMBNAIL_EXTENSION = "png"
##### ORA2 ######
ORA2_FILEUPLOAD_BACKEND = "swift"
ORA2_SWIFT_KEY = config("ORA2_SWIFT_KEY", default="")
ORA2_SWIFT_URL = config("ORA2_SWIFT_URL", default="")
# Prefix for uploads of example-based assessment AI classifiers
# This can be used to separate uploads for different environments
ORA2_FILE_PREFIX = config("ORA2_FILE_PREFIX", default=ORA2_FILE_PREFIX)
# Profile image upload
PROFILE_IMAGE_BACKEND = {
"class": "storages.backends.overwrite.OverwriteStorage",
"options": {
"location": os.path.join(MEDIA_ROOT, "profile-images/"),
"base_url": os.path.join(MEDIA_URL, "profile-images/"),
},
}
ENABLE_ADWAYS_FOR_COURSES = config(
"ENABLE_ADWAYS_FOR_COURSES", default=[], formatter=json.loads
)
# Add our v3 CSS and JS files to assets compilation pipeline to make them available in courseware.
# On FUN v3 frontend, which do not use edX's templates, those files are loaded
# by funsite/templates/funsite/parts/base.html and css/lms-main.css
PIPELINE_CSS["style-vendor"]["source_filenames"].append("fun/css/cookie-banner.css")
PIPELINE_CSS["style-vendor"]["source_filenames"].append("funsite/css/header.css")
PIPELINE_CSS["style-vendor"]["source_filenames"].append("funsite/css/footer.css")
# can't find any common group
for group in ["base_vendor", "main_vendor"]:
PIPELINE_JS[group]["source_filenames"].append("funsite/js/header.js")
PIPELINE_JS[group]["source_filenames"].append("fun/js/cookie-banner.js")
# Glowbl
GLOWBL_LTI_ENDPOINT = config(
"GLOWBL_LTI_ENDPOINT", default="http://ltiapps.net/test/tp.php"
)
GLOWBL_LTI_KEY = config("GLOWBL_LTI_KEY", default="jisc.ac.uk")
GLOWBL_LTI_SECRET = config("GLOWBL_LTI_SECRET", default="secret")
GLOWBL_LTI_ID = config("GLOWBL_LTI_ID", default="testtoolconsumer")
GLOWBL_LAUNCH_URL = config(
"GLOWBL_LAUNCH_URL", default="http://ltiapps.net/test/tp.php"
)
GLOWBL_COLL_OPT = config("GLOWBL_COLL_OPT", default="FunMoocJdR")
DEFAULT_TEMPLATE_ENGINE["DIRS"].append(FUN_BASE_ROOT / "funsite/templates/lms")
DEFAULT_TEMPLATE_ENGINE["OPTIONS"]["context_processors"].append(
"fun.context_processor.fun_settings"
)
TEMPLATES = [DEFAULT_TEMPLATE_ENGINE]
# This force Edx Studio to use our own video provider Xblock on default button
FUN_DEFAULT_VIDEO_PLAYER = "libcast_xblock"
MIDDLEWARE_CLASSES += (
"fun.middleware.LegalAcceptance",
"backoffice.middleware.PathLimitedMasqueradeMiddleware",
)
# These are the allowed subtitle languages, we have the same list on Videofront server
# We remove 2 deprecated chinese language codes which do not exist on Django 1.10 VideoFront
SUBTITLE_SUPPORTED_LANGUAGES = LazyChoicesSorter(
(code, ugettext_lazy(lang))
for code, lang in global_settings.LANGUAGES
if code not in ("zh-cn", "zh-tw")
)
ANONYMIZATION_KEY = config("ANONYMIZATION_KEY", default="")
RAVEN_CONFIG = config("RAVEN_CONFIG", default={"dsn": ""}, formatter=json.loads)
ELASTICSEARCH_INDEX_SETTINGS = {
"settings": {
"analysis": {
"filter": {
"elision": {
"type": "elision",
"articles": ["l", "m", "t", "qu", "n", "s", "j", "d"],
}
},
"analyzer": {
"custom_french_analyzer": {
"tokenizer": "letter",
"filter": [
"asciifolding",
"lowercase",
"french_stem",
"elision",
"stop",
"word_delimiter",
],
}
},
}
}
}
FUN_MKTG_URLS = config("FUN_MKTG_URLS", default={}, formatter=json.loads)
# Default visibility of student's profile to other students
ACCOUNT_VISIBILITY_CONFIGURATION["default_visibility"] = "private"
# A user is verified if he has an approved SoftwareSecurePhotoVerification entry
# this setting will create a dummy SoftwareSecurePhotoVerification for user in
# paybox success callback view. A this point, we think it's better to create a
# dummy one than to remove verifying process in edX
FUN_ECOMMERCE_DEBUG_NO_NOTIFICATION = config(
"FUN_ECOMMERCE_DEBUG_NO_NOTIFICATION", default=False, formatter=bool
)
ECOMMERCE_NOTIFICATION_URL = config("ECOMMERCE_NOTIFICATION_URL", default=None)
PAYMENT_ADMIN = "paybox@fun-mooc.fr"
# List of pattern definitions to automatically add verified users to a cohort
# If value is [] this feature is disabled
# Otherwise this setting is a list of
# tuple values (r"<course id regex>", "<cohort name>").
# e.g: if you want to enable this feature for a particular course you can set
# this setting to
# [
# (r"<course id>", "cohort name"),
# ]
VERIFIED_COHORTS = config("VERIFIED_COHORTS", default=[])
# Force Edx to use `libcast_xblock` as default video player
# in the studio (big green button) and if any xblock is called `video`
XBLOCK_SELECT_FUNCTION = prefer_fun_video
if "sentry" in LOGGING.get("handlers"):
LOGGING["handlers"]["sentry"]["environment"] = "development"
# Configure gelf handler to listen on graylog server
LOGGING["loggers"][""]["handlers"].append("gelf")
LOGGING["loggers"]["tracking"]["handlers"].append("gelf")
LOGGING["handlers"]["gelf"] = {
"level": "DEBUG",
"class": "djehouty.libgelf.handlers.GELFTCPSocketHandler",
"host": "graylog",
"port": 12201,
"null_character": True,
}
DEBUG = True
REQUIRE_DEBUG = True
EMAIL_BACKEND = config(
"EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
PIPELINE_ENABLED = False
STATICFILES_STORAGE = "openedx.core.storage.DevelopmentStorage"
ALLOWED_HOSTS = ["*"]
FEATURES["AUTOMATIC_AUTH_FOR_TESTING"] = True
# ORA2 fileupload
ORA2_FILEUPLOAD_BACKEND = "filesystem"
ORA2_FILEUPLOAD_ROOT = os.path.join(SHARED_ROOT, "openassessment_submissions")
ORA2_FILEUPLOAD_CACHE_ROOT = os.path.join(
SHARED_ROOT, "openassessment_submissions_cache"
)
AUTHENTICATION_BACKENDS = config(
"AUTHENTICATION_BACKENDS",
default=["django.contrib.auth.backends.ModelBackend"],
formatter=json.loads
)
| 36.239158 | 140 | 0.724671 |
12d2af7e340f2c0b16013db0e187eff0a983f2ec | 14,028 | py | Python | stashboard/handlers/site.py | kelnos/stashboard | 5f92ed14b8cf17f4b1be8441005b187e97ca74b8 | [
"MIT"
] | 1 | 2015-02-24T23:30:06.000Z | 2015-02-24T23:30:06.000Z | stashboard/handlers/site.py | ratchetio/stashboard | f8e4e6d175f48701a154e4baca10de2a4a577ab4 | [
"MIT"
] | null | null | null | stashboard/handlers/site.py | ratchetio/stashboard | f8e4e6d175f48701a154e4baca10de2a4a577ab4 | [
"MIT"
] | null | null | null | # The MIT License
#
# Copyright (c) 2008 William T. Katz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
__author__ = 'Kyle Conroy'
import datetime
import calendar
import logging
import os
import re
import string
import urllib
import urlparse
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from datetime import date, timedelta
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import simplejson as json
from time import mktime
from models import List, Status, Service, Event, Profile
import xml.etree.ElementTree as et
from utils import authorized
from wsgiref.handlers import format_date_time
| 32.852459 | 149 | 0.590961 |
12d4213a1f0e884d767b82004cfca76be19c9038 | 1,375 | py | Python | bga/forms.py | KarmaPoliceT2/bga | f7708ddae72bc83d68a2294d8f1b600345ebec30 | [
"MIT"
] | null | null | null | bga/forms.py | KarmaPoliceT2/bga | f7708ddae72bc83d68a2294d8f1b600345ebec30 | [
"MIT"
] | 3 | 2019-12-26T16:57:34.000Z | 2021-06-01T23:08:35.000Z | bga/forms.py | KarmaPoliceT2/bga | f7708ddae72bc83d68a2294d8f1b600345ebec30 | [
"MIT"
] | null | null | null | from wtforms import Form, StringField, PasswordField, DecimalField, IntegerField, SelectField, validators
from wtforms.fields.html5 import DateField
| 42.96875 | 105 | 0.696 |
12d68f272974ae7982471fbca3af702e552c3c1f | 597 | py | Python | ejercicios_python/Clase05/practica5-9.py | hcgalvan/UNSAM-Python-programming | c4b3f5ae0702dc03ea6010cb8051c7eec6aef42f | [
"MIT"
] | null | null | null | ejercicios_python/Clase05/practica5-9.py | hcgalvan/UNSAM-Python-programming | c4b3f5ae0702dc03ea6010cb8051c7eec6aef42f | [
"MIT"
] | null | null | null | ejercicios_python/Clase05/practica5-9.py | hcgalvan/UNSAM-Python-programming | c4b3f5ae0702dc03ea6010cb8051c7eec6aef42f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 15 08:32:03 2021
@author: User
"""
import numpy as np
import matplotlib.pyplot as plt
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
print(a)
print(a[0])
print(a.ndim) #te dice la cantidad de ejes (o dimensiones) del arreglo
print(a.shape) #Te va a dar una tupla de enteros que indican la cantidad de elementos en cada eje.
print(a.size)
#%%
vec_fila = a[np.newaxis, :]
print(vec_fila.shape, a.shape)
#%%
print(a.sum())
print(a.min())
print(a.max())
#%%
print(a)
print(a.max(axis=1))
print(a.max(axis=0))
#%%
print(np.random.random(3)) | 22.111111 | 98 | 0.649916 |
12d6fdba24bc3c779da8bc89c659942cc66fb630 | 9,284 | py | Python | cluster_toolkit/xi.py | jhod0/cluster_toolkit | b515b39fc4d0a17c19be4530a75d089d190f50cb | [
"MIT"
] | null | null | null | cluster_toolkit/xi.py | jhod0/cluster_toolkit | b515b39fc4d0a17c19be4530a75d089d190f50cb | [
"MIT"
] | 6 | 2019-08-14T18:54:23.000Z | 2019-09-19T22:10:42.000Z | cluster_toolkit/xi.py | jhod0/cluster_toolkit | b515b39fc4d0a17c19be4530a75d089d190f50cb | [
"MIT"
] | null | null | null | """Correlation functions for matter and halos.
"""
import cluster_toolkit
from cluster_toolkit import _ArrayWrapper, _handle_gsl_error
import numpy as np
def xi_nfw_at_r(r, M, c, Omega_m, delta=200):
"""NFW halo profile correlation function.
Args:
r (float or array like): 3d distances from halo center in Mpc/h comoving
M (float): Mass in Msun/h
c (float): Concentration
Omega_m (float): Omega_matter, matter fraction of the density
delta (int; optional): Overdensity, default is 200
Returns:
float or array like: NFW halo profile.
"""
r = _ArrayWrapper(r, 'r')
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_nfw(r.cast(), len(r), M, c, delta,
Omega_m, xi.cast())
return xi.finish()
def xi_einasto_at_r(r, M, conc, alpha, om, delta=200, rhos=-1.):
"""Einasto halo profile.
Args:
r (float or array like): 3d distances from halo center in Mpc/h comoving
M (float): Mass in Msun/h; not used if rhos is specified
conc (float): Concentration
alpha (float): Profile exponent
om (float): Omega_matter, matter fraction of the density
delta (int): Overdensity, default is 200
rhos (float): Scale density in Msun h^2/Mpc^3 comoving; optional
Returns:
float or array like: Einasto halo profile.
"""
r = _ArrayWrapper(r, 'r')
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_einasto(r.cast(), len(r), M, rhos,
conc, alpha, delta, om, xi.cast())
return xi.finish()
def xi_mm_at_r(r, k, P, N=500, step=0.005, exact=False):
"""Matter-matter correlation function.
Args:
r (float or array like): 3d distances from halo center in Mpc/h comoving
k (array like): Wavenumbers of power spectrum in h/Mpc comoving
P (array like): Matter power spectrum in (Mpc/h)^3 comoving
N (int; optional): Quadrature step count, default is 500
step (float; optional): Quadrature step size, default is 5e-3
exact (boolean): Use the slow, exact calculation; default is False
Returns:
float or array like: Matter-matter correlation function
"""
r = _ArrayWrapper(r, 'r')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(r)
if not exact:
rc = cluster_toolkit._lib.calc_xi_mm(r.cast(), len(r), k.cast(),
P.cast(), len(k), xi.cast(),
N, step)
_handle_gsl_error(rc, xi_mm_at_r)
else:
if r.arr.max() > 1e3:
raise Exception("max(r) cannot be >1e3 for numerical stability.")
rc = cluster_toolkit._lib.calc_xi_mm_exact(r.cast(), len(r),
k.cast(), P.cast(),
len(k), xi.cast())
_handle_gsl_error(rc, xi_mm_at_r)
return xi.finish()
def xi_2halo(bias, xi_mm):
"""2-halo term in halo-matter correlation function
Args:
bias (float): Halo bias
xi_mm (float or array like): Matter-matter correlation function
Returns:
float or array like: 2-halo term in halo-matter correlation function
"""
xi_mm = _ArrayWrapper(xi_mm, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(xi_mm)
cluster_toolkit._lib.calc_xi_2halo(len(xi_mm), bias, xi_mm.cast(),
xi.cast())
return xi.finish()
def xi_hm(xi_1halo, xi_2halo, combination="max"):
"""Halo-matter correlation function
Note: at the moment you can combine the 1-halo and 2-halo terms by either taking the max of the two or the sum of the two. The 'combination' field must be set to either 'max' (default) or 'sum'.
Args:
xi_1halo (float or array like): 1-halo term
xi_2halo (float or array like, same size as xi_1halo): 2-halo term
combination (string; optional): specifies how the 1-halo and 2-halo terms are combined, default is 'max' which takes the max of the two
Returns:
float or array like: Halo-matter correlation function
"""
if combination == "max":
switch = 0
elif combination == 'sum':
switch = 1
else:
raise Exception("Combinations other than maximum not implemented yet")
xi_1halo = _ArrayWrapper(xi_1halo, allow_multidim=True)
xi_2halo = _ArrayWrapper(xi_2halo, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(xi_1halo)
cluster_toolkit._lib.calc_xi_hm(len(xi_1halo), xi_1halo.cast(),
xi_2halo.cast(), xi.cast(), switch)
return xi.finish()
def xi_DK(r, M, conc, be, se, k, P, om, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.):
"""Diemer-Kravtsov 2014 profile.
Args:
r (float or array like): radii in Mpc/h comoving
M (float): mass in Msun/h
conc (float): Einasto concentration
be (float): DK transition parameter
se (float): DK transition parameter
k (array like): wavenumbers in h/Mpc
P (array like): matter power spectrum in [Mpc/h]^3
Omega_m (float): matter density fraction
delta (float): overdensity of matter. Optional, default is 200
rhos (float): Einasto density. Optional, default is compute from the mass
alpha (float): Einasto parameter. Optional, default is computed from peak height
beta (float): DK 2-halo parameter. Optional, default is 4
gamma (float): DK 2-halo parameter. Optional, default is 8
Returns:
float or array like: DK profile evaluated at the input radii
"""
r = _ArrayWrapper(r, 'r')
k = _ArrayWrapper(k, allow_multidim=True)
P = _ArrayWrapper(P, allow_multidim=True)
xi = _ArrayWrapper.zeros_like(r)
cluster_toolkit._lib.calc_xi_DK(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, xi.cast())
return xi.finish()
def xi_DK_appendix1(r, M, conc, be, se, k, P, om, bias, xi_mm, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.):
    """Diemer-Kravtsov 2014 profile, first form from the appendix, eq. A3.
    Args:
        r (float or array like): radii in Mpc/h comoving
        M (float): mass in Msun/h
        conc (float): Einasto concentration
        be (float): DK transition parameter
        se (float): DK transition parameter
        k (array like): wavenumbers in h/Mpc
        P (array like): matter power spectrum in [Mpc/h]^3
        Omega_m (float): matter density fraction
        bias (float): halo bias
        xi_mm (float or array like): matter correlation function at r
        delta (float): overdensity of matter. Optional, default is 200
        rhos (float): Einasto density. Optional, default is compute from the mass
        alpha (float): Einasto parameter. Optional, default is computed from peak height
        beta (float): DK 2-halo parameter. Optional, default is 4
        gamma (float): DK 2-halo parameter. Optional, default is 8
    Returns:
        float or array like: DK profile evaluated at the input radii
    """
    r = _ArrayWrapper(r, 'r')
    k = _ArrayWrapper(k, allow_multidim=True)
    P = _ArrayWrapper(P, allow_multidim=True)
    xi_mm = _ArrayWrapper(xi_mm, allow_multidim=True)
    # Bug fix: the output buffer must be an _ArrayWrapper, as in the other
    # xi_DK variants.  The original used np.zeros_like(r), whose result has
    # neither .cast() nor .finish(), so the call below would crash.
    xi = _ArrayWrapper.zeros_like(r)
    cluster_toolkit._lib.calc_xi_DK_app1(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, bias, xi_mm.cast(), xi.cast())
    return xi.finish()
def xi_DK_appendix2(r, M, conc, be, se, k, P, om, bias, xi_mm, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.):
    """Diemer-Kravtsov 2014 profile, second form from the appendix, eq. A4.
    Args:
        r (float or array like): radii in Mpc/h comoving
        M (float): mass in Msun/h
        conc (float): Einasto concentration
        be (float): DK transition parameter
        se (float): DK transition parameter
        k (array like): wavenumbers in h/Mpc
        P (array like): matter power spectrum in [Mpc/h]^3
        Omega_m (float): matter density fraction
        bias (float): halo bias
        xi_mm (float or array like): matter correlation function at r
        delta (float): overdensity of matter. Optional, default is 200
        rhos (float): Einasto density. Optional, default is compute from the mass
        alpha (float): Einasto parameter. Optional, default is computed from peak height
        beta (float): DK 2-halo parameter. Optional, default is 4
        gamma (float): DK 2-halo parameter. Optional, default is 8
    Returns:
        float or array like: DK profile evaluated at the input radii
    """
    # Wrap the Python-side inputs so they can be handed to the C library.
    radii = _ArrayWrapper(r, 'r')
    wavenumbers = _ArrayWrapper(k)
    power = _ArrayWrapper(P)
    ximm_wrapped = _ArrayWrapper(xi_mm)
    # Output buffer, filled in-place by the C routine.
    profile = _ArrayWrapper.zeros_like(radii)
    cluster_toolkit._lib.calc_xi_DK_app2(
        radii.cast(), len(radii), M, rhos, conc, be, se, alpha, beta,
        gamma, delta, wavenumbers.cast(), power.cast(), len(wavenumbers),
        om, bias, ximm_wrapped.cast(), profile.cast())
    return profile.finish()
| 40.190476 | 198 | 0.62193 |
12d736e2a136d27d71bf7901bba9c44692b70118 | 1,500 | py | Python | Tools/scripts/rgrep.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | 1 | 2015-05-21T23:47:54.000Z | 2015-05-21T23:47:54.000Z | Tools/scripts/rgrep.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | 1 | 2015-10-29T20:51:31.000Z | 2015-10-29T20:51:31.000Z | Tools/scripts/rgrep.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | [
"PSF-2.0"
] | 2 | 2018-08-06T04:37:38.000Z | 2022-02-27T18:07:12.000Z | #! /usr/bin/env python
"""Reverse grep.
Usage: rgrep [-i] pattern file
"""
import sys
import re
import getopt
if __name__ == '__main__':
main()
| 23.076923 | 66 | 0.519333 |
12d758ba9b3d6c5825fba951fa8141e8f0dd86e9 | 5,161 | py | Python | licel_format_parser/main.py | IFAEControl/lidar-cli | 02480ecd932cad1e11a04d866eb2eafc214f678d | [
"BSD-3-Clause"
] | null | null | null | licel_format_parser/main.py | IFAEControl/lidar-cli | 02480ecd932cad1e11a04d866eb2eafc214f678d | [
"BSD-3-Clause"
] | null | null | null | licel_format_parser/main.py | IFAEControl/lidar-cli | 02480ecd932cad1e11a04d866eb2eafc214f678d | [
"BSD-3-Clause"
] | null | null | null | import struct
f = open("c0610400.102200", 'rb')
def read_dataset(file):
    """Read one Licel record from *file*: every byte up to and including
    the first CRLF ("\\r\\n") terminator.

    Args:
        file: binary file-like object positioned at the record start.

    Returns:
        bytes: the record, CRLF terminator included.

    Raises:
        EOFError: if the stream ends before a CRLF is found (the
            original code crashed here with an opaque IndexError).
    """
    buf = bytearray()
    while True:
        ch = file.read(1)
        if not ch:
            # End of stream without a terminator -- fail explicitly.
            raise EOFError('stream ended before CRLF record terminator')
        buf.append(ch[0])
        # Stop once the buffer ends in the CRLF pair.  This is also safe
        # when the very first byte is '\n' (the original indexed buf[-1]
        # on an empty list and raised IndexError).
        if buf[-2:] == b'\r\n':
            break
    return bytes(buf)
h = Header()
print(h) | 33.083333 | 118 | 0.571207 |
12d85c3f8e0b325f0104a7462f8c848f6627e0a1 | 7,073 | py | Python | built-in/TensorFlow/Official/nlp/BertLarge_ID0634_for_TensorFlow2.X/bert/tf2_common/training/optimizer_v2modified.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/TensorFlow/Official/nlp/BertLarge_ID0634_for_TensorFlow2.X/bert/tf2_common/training/optimizer_v2modified.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/TensorFlow/Official/nlp/BertLarge_ID0634_for_TensorFlow2.X/bert/tf2_common/training/optimizer_v2modified.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Modified optimizer_v2 implementation enabling XLA across variable updates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import utils as optimizer_utils
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables as tf_variables
| 42.10119 | 88 | 0.713276 |
12d9793b66d488d4aab6750551143953a771ab71 | 4,828 | py | Python | src/data/utils.py | behavioral-data/multiverse | 82b7265de0aa3e9d229ce9f3f86b8b48435ca365 | [
"MIT"
] | null | null | null | src/data/utils.py | behavioral-data/multiverse | 82b7265de0aa3e9d229ce9f3f86b8b48435ca365 | [
"MIT"
] | null | null | null | src/data/utils.py | behavioral-data/multiverse | 82b7265de0aa3e9d229ce9f3f86b8b48435ca365 | [
"MIT"
] | 1 | 2021-08-19T15:21:50.000Z | 2021-08-19T15:21:50.000Z | import os
import errno
import requests
import glob
import os
import json
from tqdm import tqdm
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
| 31.555556 | 94 | 0.570008 |
12da373705e611aa87f9b708815df70bbd6ae325 | 14,870 | py | Python | jocular/calibrator.py | MartinCooke/jocular | 635816d4ef6aa6ea75187137e25386dad2d551e9 | [
"MIT"
] | 6 | 2021-03-21T16:46:44.000Z | 2021-11-27T14:07:06.000Z | jocular/calibrator.py | MartinCooke/jocular | 635816d4ef6aa6ea75187137e25386dad2d551e9 | [
"MIT"
] | null | null | null | jocular/calibrator.py | MartinCooke/jocular | 635816d4ef6aa6ea75187137e25386dad2d551e9 | [
"MIT"
] | null | null | null | ''' Handles calibration library and calibration of subs.
'''
import os.path
import numpy as np
from scipy.stats import trimboth
from kivy.app import App
from loguru import logger
from kivy.properties import BooleanProperty, DictProperty, NumericProperty
from kivy.core.window import Window
from jocular.table import Table
from jocular.utils import make_unique_filename
from jocular.component import Component
from jocular.settingsmanager import Settings
from jocular.image import Image, save_image, fits_in_dir
date_time_format = '%d %b %y %H:%M'
| 39.028871 | 123 | 0.571688 |
12dbd5bf3d381ee625187e0ae26efd79aef7f23a | 1,128 | py | Python | test/office_schema.py | chrismaille/marshmallow-pynamodb | 1e799041ff1053a6aa67ce72729e7262cb0f746f | [
"MIT"
] | 3 | 2020-05-17T15:04:27.000Z | 2021-08-12T14:27:15.000Z | test/office_schema.py | chrismaille/marshmallow-pynamodb | 1e799041ff1053a6aa67ce72729e7262cb0f746f | [
"MIT"
] | 2 | 2020-05-06T00:11:49.000Z | 2022-02-23T11:45:54.000Z | test/office_schema.py | chrismaille/marshmallow-pynamodb | 1e799041ff1053a6aa67ce72729e7262cb0f746f | [
"MIT"
] | 1 | 2020-04-30T19:34:22.000Z | 2020-04-30T19:34:22.000Z | from test.office_model import Headquarters, Office
from marshmallow import fields
from pynamodb.attributes import DiscriminatorAttribute
from marshmallow_pynamodb import ModelSchema
| 22.117647 | 63 | 0.721631 |
12ddf9c1d17cbd9db7aea277570f0278393c93a6 | 1,599 | py | Python | energy_demand/initalisations/initialisations.py | willu47/energy_demand | 59a2712f353f47e3dc237479cc6cc46666b7d0f1 | [
"MIT"
] | null | null | null | energy_demand/initalisations/initialisations.py | willu47/energy_demand | 59a2712f353f47e3dc237479cc6cc46666b7d0f1 | [
"MIT"
] | null | null | null | energy_demand/initalisations/initialisations.py | willu47/energy_demand | 59a2712f353f47e3dc237479cc6cc46666b7d0f1 | [
"MIT"
] | null | null | null | """Helper initialising functions
"""
#pylint: disable=I0011, C0321, C0301, C0103, C0325, R0902, R0913, no-member, E0213
def init_fuel_tech_p_by(all_enduses_with_fuels, nr_of_fueltypes):
    """Helper function to define stocks for all enduse and fueltype
    Parameters
    ----------
    all_enduses_with_fuels : dict
        Provided fuels
    nr_of_fueltypes : int
        Nr of fueltypes
    Returns
    -------
    fuel_tech_p_by : dict
        For every enduse, a dict mapping each fueltype index to its own
        (initially empty) dict of technology fractions
    """
    fuel_tech_p_by = {}
    for enduse in all_enduses_with_fuels:
        # Bug fix: dict.fromkeys(range(n), {}) shares ONE dict object across
        # every fueltype, so populating one fueltype mutated all of them.
        # Build a fresh dict per fueltype instead.
        fuel_tech_p_by[enduse] = {
            fueltype: {} for fueltype in range(nr_of_fueltypes)}
    return fuel_tech_p_by
def dict_zero(first_level_keys):
    """Build a flat dictionary with every key initialised to zero
    Parameters
    ----------
    first_level_keys : list
        First level data
    Returns
    -------
    one_level_dict : dict
        dictionary mapping each provided key to 0
    """
    return {key: 0 for key in first_level_keys}
def service_type_tech_by_p(lu_fueltypes, fuel_tech_p_by):
    """Initialise the per-fueltype, per-technology service share dict
    with zeros
    Parameters
    ----------
    lu_fueltypes : dict
        Look-up dictionary
    fuel_tech_p_by : dict
        Fuel fraction per technology for base year
    Return
    -------
    service_fueltype_tech_by_p : dict
        Fraction of service per fueltype and technology for base year
    """
    # One zero entry per technology, for every fueltype in the look-up.
    return {
        fueltype_int: {tech: 0 for tech in fuel_tech_p_by[fueltype_int]}
        for fueltype_int in lu_fueltypes.values()}
| 24.984375 | 104 | 0.676048 |
12df0714eb5fa8ab8f6068ed158fd58746d6bc32 | 37 | py | Python | npd_well_decoder/__init__.py | fmell/npd-well-name-decoder | a44ec28a6ef3b32ba38751eeffff479008b53e2d | [
"MIT"
] | null | null | null | npd_well_decoder/__init__.py | fmell/npd-well-name-decoder | a44ec28a6ef3b32ba38751eeffff479008b53e2d | [
"MIT"
] | null | null | null | npd_well_decoder/__init__.py | fmell/npd-well-name-decoder | a44ec28a6ef3b32ba38751eeffff479008b53e2d | [
"MIT"
] | null | null | null | from .npd import parse_wellbore_name
| 18.5 | 36 | 0.864865 |
12e000a4e8578ea58e111e55e0187884ea14b784 | 26,842 | py | Python | Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/util.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | [
"bzip2-1.0.6"
] | 5 | 2019-03-11T14:30:31.000Z | 2021-12-04T14:11:54.000Z | Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/util.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | [
"bzip2-1.0.6"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/util.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | [
"bzip2-1.0.6"
] | 2 | 2019-12-02T01:39:10.000Z | 2021-02-13T22:41:00.000Z | ###############################################################################
# Name: util.py #
# Purpose: Misc utility functions used through out Editra #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
This file contains various helper functions and utilities that the program uses.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: util.py 72623 2012-10-06 19:33:06Z CJP $"
__revision__ = "$Revision: 72623 $"
#--------------------------------------------------------------------------#
# Imports
import os
import sys
import mimetypes
import encodings
import codecs
import urllib2
import wx
# Editra Libraries
import ed_glob
import ed_event
import ed_crypt
import dev_tool
import syntax.syntax as syntax
import syntax.synglob as synglob
import ebmlib
_ = wx.GetTranslation
#--------------------------------------------------------------------------#
#---- End FileDropTarget ----#
#---- Misc Common Function Library ----#
# Used for holding the primary selection on mac/msw
FAKE_CLIPBOARD = None
def GetClipboardText(primary=False):
    """Get the primary selection from the clipboard if there is one
    @keyword primary: read the X11 primary selection instead of the normal
                      clipboard (emulated via FAKE_CLIPBOARD on mac/msw)
    @return: str or None
    """
    if primary and wx.Platform == '__WXGTK__':
        wx.TheClipboard.UsePrimarySelection(True)
    elif primary:
        # Fake the primary selection on mac/msw
        global FAKE_CLIPBOARD
        return FAKE_CLIPBOARD
    else:
        pass
    text_obj = wx.TextDataObject()
    rtxt = None
    # Open the clipboard if not already open, then try to fetch text data
    if wx.TheClipboard.IsOpened() or wx.TheClipboard.Open():
        if wx.TheClipboard.GetData(text_obj):
            rtxt = text_obj.GetText()
        wx.TheClipboard.Close()
    # Switch back to the regular clipboard selection on X11
    if primary and wx.Platform == '__WXGTK__':
        wx.TheClipboard.UsePrimarySelection(False)
    return rtxt
def SetClipboardText(txt, primary=False):
    """Copies text to the clipboard
    @param txt: text to put in clipboard
    @keyword primary: Set txt as primary selection (x11)
    @return: bool - True if the text was stored, False otherwise
    """
    # Check if using primary selection
    if primary and wx.Platform == '__WXGTK__':
        wx.TheClipboard.UsePrimarySelection(True)
    elif primary:
        # Fake the primary selection on mac/msw
        global FAKE_CLIPBOARD
        FAKE_CLIPBOARD = txt
        return True
    else:
        pass
    data_o = wx.TextDataObject()
    data_o.SetText(txt)
    # Open the clipboard if not already open, then store the data
    if wx.TheClipboard.IsOpened() or wx.TheClipboard.Open():
        wx.TheClipboard.SetData(data_o)
        wx.TheClipboard.Close()
        # Restore the regular clipboard selection on X11
        if primary and wx.Platform == '__WXGTK__':
            wx.TheClipboard.UsePrimarySelection(False)
        return True
    else:
        return False
def FilterFiles(file_list):
    """Filter a list of paths down to those that look like text files
    and can therefore probably be opened in the editor.
    @param file_list: list of files/folders to filter for good files in
    @return: list of non-binary paths, original order preserved
    """
    checker = ebmlib.FileTypeChecker()
    return [fname for fname in file_list if not checker.IsBinary(fname)]
def GetFileType(fname):
    """Get what the type of the file is as Editra sees it
    in a formatted string.
    @param fname: file path
    @return: string (formatted/translated filetype)
    """
    if os.path.isdir(fname):
        return _("Folder")
    # Ask the syntax manager what language the extension maps to
    eguess = syntax.GetTypeFromExt(fname.split('.')[-1])
    if eguess == synglob.LANG_TXT and fname.split('.')[-1] == 'txt':
        return _("Text Document")
    elif eguess == synglob.LANG_TXT:
        # Unrecognized extension; fall back to the system mime type
        mtype = mimetypes.guess_type(fname)[0]
        if mtype is not None:
            return mtype
        else:
            return _("Unknown")
    else:
        # Known source file language
        return _("%s Source File") % eguess
def GetFileReader(file_name, enc='utf-8'):
    """Returns a file stream reader object for reading the
    supplied file name. It returns a file reader using the encoding
    (enc) which defaults to utf-8. If lookup of the reader fails on
    the host system it will return an ascii reader.
    If there is an error in creating the file reader the function
    will return a negative number.
    @param file_name: name of file to get a reader for
    @keyword enc: encoding to use for reading the file
    @return: file reader, or int if error.
    """
    try:
        # Bug fix: use the open() builtin instead of the legacy py2-only
        # file() constructor (GetFileWriter below already uses open()).
        file_h = open(file_name, "rb")
    except (IOError, OSError):
        dev_tool.DEBUGP("[file_reader] Failed to open file %s" % file_name)
        return -1
    try:
        reader = codecs.getreader(enc)(file_h)
    except (LookupError, IndexError, ValueError):
        # Unknown encoding; fall back to the raw byte stream
        dev_tool.DEBUGP('[file_reader] Failed to get %s Reader' % enc)
        reader = file_h
    return reader
def GetFileWriter(file_name, enc='utf-8'):
    """Return a stream writer for writing text to *file_name* in the
    requested encoding (utf-8 by default). If the encoding lookup fails
    the raw byte stream is returned instead, and if the file cannot be
    opened a negative number is returned.
    @param file_name: path of file to get writer for
    @keyword enc: encoding to write text to file with
    @return: encoded stream writer, raw file handle, or -1 on error
    """
    try:
        handle = open(file_name, "wb")
    except IOError:
        dev_tool.DEBUGP("[file_writer][err] Failed to open file %s" % file_name)
        return -1
    try:
        return codecs.getwriter(enc)(handle)
    except (LookupError, IndexError, ValueError):
        # Unknown encoding; hand back the raw byte stream instead
        dev_tool.DEBUGP('[file_writer][err] Failed to get %s Writer' % enc)
        return handle
# TODO: DEPRECATED - remove once callers migrate to ebmlib
GetFileManagerCmd = ebmlib.GetFileManagerCmd
def GetUserConfigBase():
    """Get the base user configuration directory path
    @return: unicode - configuration base path, always ending in os.sep
    """
    cbase = ed_glob.CONFIG['CONFIG_BASE']
    if cbase is None:
        # No explicit base configured; use the toolkit's per-user data dir
        cbase = wx.StandardPaths_Get().GetUserDataDir()
        if wx.Platform == '__WXGTK__':
            if u'.config' not in cbase and not os.path.exists(cbase):
                # If no existing configuration return xdg config path
                base, cfgdir = os.path.split(cbase)
                tmp_path = os.path.join(base, '.config')
                if os.path.exists(tmp_path):
                    # e.g. ~/.editra -> ~/.config/editra
                    cbase = os.path.join(tmp_path, cfgdir.lstrip(u'.'))
    return cbase + os.sep
def HasConfigDir(loc=u""):
    """Check whether the named user configuration directory exists.
    @keyword loc: sub-directory name relative to the user config base
    @return: bool - True if the directory exists, False otherwise
    """
    return os.path.exists(os.path.join(GetUserConfigBase(), loc))
def MakeConfigDir(name):
    """Create a directory under the user configuration base.
    Failures (e.g. the directory already exists) are silently ignored.
    @param name: name of config directory to make in user config dir
    """
    target = GetUserConfigBase() + name
    try:
        os.mkdir(target)
    except (OSError, IOError):
        # Best effort only: directory may already exist or be unwritable
        pass
def RepairConfigState(path):
    """Repair the state of profile path, updating and creating it
    if it does not exist.
    @param path: path of profile
    @return: a usable profile path
    """
    if os.path.isabs(path) and os.path.exists(path):
        return path
    # Profile is missing or relative: rebuild the configuration tree and
    # fall back to the profile recorded by the profiler.
    CreateConfigDir()
    import profiler
    return profiler.Profile_Get("MYPROFILE")
def CreateConfigDir():
    """ Creates the user config directory its default sub
    directories and any of the default config files.
    @postcondition: all default configuration files/folders are created
    """
    #---- Resolve Paths ----#
    config_dir = GetUserConfigBase()
    profile_dir = os.path.join(config_dir, u"profiles")
    dest_file = os.path.join(profile_dir, u"default.ppb")
    ext_cfg = [u"cache", u"styles", u"plugins"]
    #---- Create Directories ----#
    if not os.path.exists(config_dir):
        os.mkdir(config_dir)
    if not os.path.exists(profile_dir):
        os.mkdir(profile_dir)
    # Create the remaining config sub-directories (errors are ignored
    # inside MakeConfigDir)
    for cfg in ext_cfg:
        if not HasConfigDir(cfg):
            MakeConfigDir(cfg)
    # Imported here rather than at module level, presumably to avoid a
    # circular import -- TODO confirm
    import profiler
    # Write out a default profile and point the loader at it
    profiler.TheProfile.LoadDefaults()
    profiler.Profile_Set("MYPROFILE", dest_file)
    profiler.TheProfile.Write(dest_file)
    profiler.UpdateProfileLoader()
def ResolvConfigDir(config_dir, sys_only=False):
    """Checks for a user config directory and if it is not
    found it then resolves the absolute path of the executables
    directory from the relative execution path. This is then used
    to find the location of the specified directory as it relates
    to the executable directory, and returns that path as a
    string.
    @param config_dir: name of config directory to resolve
    @keyword sys_only: only get paths of system config directory or user one
    @return: resolved directory path, ending in os.sep
    @note: This method is probably much more complex than it needs to be but
           the code has proven itself.
    """
    # Try to get a User config directory
    if not sys_only:
        user_config = GetUserConfigBase()
        user_config = os.path.join(user_config, config_dir)
        if os.path.exists(user_config):
            return user_config + os.sep
    # Check if the system install path has already been resolved once before
    if ed_glob.CONFIG['INSTALL_DIR'] != u"":
        tmp = os.path.join(ed_glob.CONFIG['INSTALL_DIR'], config_dir)
        tmp = os.path.normpath(tmp) + os.sep
        if os.path.exists(tmp):
            return tmp
        else:
            del tmp
    # The following lines are used only when Editra is being run as a
    # source package. If the found path does not exist then Editra is
    # running as as a built package.
    if not hasattr(sys, 'frozen'):
        path = __file__
        if not ebmlib.IsUnicode(path):
            path = path.decode(sys.getfilesystemencoding())
        # Resolve config_dir relative to the package root (two levels up
        # from this module)
        path = os.sep.join(path.split(os.sep)[:-2])
        path = path + os.sep + config_dir + os.sep
        if os.path.exists(path):
            if not ebmlib.IsUnicode(path):
                path = unicode(path, sys.getfilesystemencoding())
            return path
    # If we get here we need to do some platform dependent lookup
    # to find everything.
    path = sys.argv[0]
    if not ebmlib.IsUnicode(path):
        path = unicode(path, sys.getfilesystemencoding())
    # If it is a link get the real path
    if os.path.islink(path):
        path = os.path.realpath(path)
    # Tokenize path
    pieces = path.split(os.sep)
    if wx.Platform == u'__WXMSW__':
        # On Windows the exe is in same dir as config directories
        pro_path = os.sep.join(pieces[:-1])
        if os.path.isabs(pro_path):
            pass
        elif pro_path == u"":
            # Launched from the current working directory
            pro_path = os.getcwd()
            pieces = pro_path.split(os.sep)
            pro_path = os.sep.join(pieces[:-1])
        else:
            pro_path = os.path.abspath(pro_path)
    elif wx.Platform == u'__WXMAC__':
        # On OS X the config directories are in the applet under Resources
        stdpath = wx.StandardPaths_Get()
        pro_path = stdpath.GetResourcesDir()
        pro_path = os.path.join(pro_path, config_dir)
    else:
        # Other unix-likes: executable lives two levels below the prefix
        pro_path = os.sep.join(pieces[:-2])
        if pro_path.startswith(os.sep):
            pass
        elif pro_path == u"":
            # Launched from the current working directory
            pro_path = os.getcwd()
            pieces = pro_path.split(os.sep)
            if pieces[-1] not in [ed_glob.PROG_NAME.lower(), ed_glob.PROG_NAME]:
                pro_path = os.sep.join(pieces[:-1])
        else:
            pro_path = os.path.abspath(pro_path)
    # On mac config_dir was already joined in above
    if wx.Platform != u'__WXMAC__':
        pro_path = pro_path + os.sep + config_dir + os.sep
    path = os.path.normpath(pro_path) + os.sep
    # Make sure path is unicode
    if not ebmlib.IsUnicode(path):
        path = unicode(path, sys.getdefaultencoding())
    return path
def GetResources(resource):
    """Returns a list of resource directories from a given toplevel config dir
    @param resource: config directory name
    @return: list of title-cased sub-directory names, or -1 if the
             resource directory does not exist
    """
    rec_dir = ResolvConfigDir(resource)
    if not os.path.exists(rec_dir):
        return -1
    # Keep only visible sub-directories, title-cased for display
    return [item.title()
            for item in os.listdir(rec_dir)
            if os.path.isdir(rec_dir + item) and item[0] != u"."]
def GetResourceFiles(resource, trim=True, get_all=False,
                     suffix=None, title=True):
    """Gets a list of resource files from a directory and trims the
    file extentions from the names if trim is set to True (default).
    If the get_all parameter is set to True the function will return
    a set of unique items by looking up both the user and system level
    files and combining them, the default behavior returns the user
    level files if they exist or the system level files if the
    user ones do not exist.
    @param resource: name of config directory to look in (i.e cache)
    @keyword trim: trim file extensions or not
    @keyword get_all: get a set of both system/user files or just user level
    @keyword suffix: Get files that have the specified suffix or all (default)
    @keyword title: Titlize the results
    @return: sorted list of unique resource names, or -1 if the resource
             directory does not exist
    """
    rec_dir = ResolvConfigDir(resource)
    if get_all:
        rec_dir2 = ResolvConfigDir(resource, True)
    rec_list = list()
    if not os.path.exists(rec_dir):
        return -1
    recs = os.listdir(rec_dir)
    if get_all and os.path.exists(rec_dir2):
        recs.extend(os.listdir(rec_dir2))
    for rec in recs:
        if os.path.isfile(rec_dir + rec) or \
           (get_all and os.path.isfile(rec_dir2 + rec)):
            # If a suffix was specified only keep files that match
            if suffix is not None:
                if not rec.endswith(suffix):
                    continue
            # Trim the last part of an extension if one exists
            if trim:
                rec = ".".join(rec.split(u".")[:-1]).strip()
            # Make the resource name a title if requested
            if title and len(rec):
                rec = rec[0].upper() + rec[1:]
            if len(rec):
                rec_list.append(rec)
    # Bug fix: the original called rec_list.sort() and THEN list(set(...)),
    # which discarded the ordering. Deduplicate first, then sort.
    return sorted(set(rec_list))
def GetAllEncodings():
    """Get all encodings found on the system
    @return: sorted list of unique encoding names, codec helper entries
             excluded
    """
    known = set(encodings.aliases.aliases.values())
    return sorted(enc for enc in known if not enc.endswith('codec'))
def Log(msg, *args):
    """Push the message to the apps log
    @param msg: message string to log
    @param args: optional positional arguments to use as a printf formatting
                 to the message.
    """
    try:
        wx.GetApp().GetLog()(msg, args)
    except Exception:
        # Bug fix: the original bare "except:" also swallowed SystemExit
        # and KeyboardInterrupt; logging stays best-effort but no longer
        # blocks interpreter shutdown or Ctrl-C.
        pass
def GetProxyOpener(proxy_set):
    """Get a urlopener for use with a proxy
    @param proxy_set: proxy settings to use
    @return: urllib2 opener configured with an http proxy handler
    """
    Log("[util][info] Making proxy opener with %s" % str(proxy_set))
    proxy_info = dict(proxy_set)
    # Template filled from proxy_info below; assumes keys 'uname',
    # 'passwd', 'url' (and optionally 'port', 'pid') are present --
    # TODO confirm against the settings producer
    auth_str = "%(uname)s:%(passwd)s@%(url)s"
    url = proxy_info['url']
    if url.startswith('http://'):
        # Move the scheme in front of the credentials
        auth_str = "http://" + auth_str
        proxy_info['url'] = url.replace('http://', '')
    else:
        pass
    if len(proxy_info.get('port', '')):
        auth_str = auth_str + ":%(port)s"
    # Password is stored encrypted; decrypt it just for this request
    proxy_info['passwd'] = ed_crypt.Decrypt(proxy_info['passwd'],
                                            proxy_info['pid'])
    # Mask the password when logging the formatted request
    Log("[util][info] Formatted proxy request: %s" % \
        (auth_str.replace('%(passwd)s', '****') % proxy_info))
    proxy = urllib2.ProxyHandler({"http" : auth_str % proxy_info})
    opener = urllib2.build_opener(proxy, urllib2.HTTPHandler)
    return opener
#---- GUI helper functions ----#
def SetWindowIcon(window):
    """Sets the given windows icon to be the programs
    application icon.
    @param window: window to set app icon for
    @note: the original wrapped this in ``try/finally: pass``, which is a
           no-op (``finally: pass`` suppresses nothing); the useless
           wrapper was removed, so exceptions propagate exactly as before.
    """
    if wx.Platform == "__WXMSW__":
        ed_icon = ed_glob.CONFIG['SYSPIX_DIR'] + u"editra.ico"
        window.SetIcon(wx.Icon(ed_icon, wx.BITMAP_TYPE_ICO))
    else:
        ed_icon = ed_glob.CONFIG['SYSPIX_DIR'] + u"editra.png"
        window.SetIcon(wx.Icon(ed_icon, wx.BITMAP_TYPE_PNG))
#-----------------------------------------------------------------------------#
| 33.891414 | 80 | 0.595671 |
12e061c5c6e2f04c0f2228f70f6bcd0e8dd58774 | 1,105 | py | Python | genrl/environments/vec_env/utils.py | matrig/genrl | 25eb018f18a9a1d0865c16e5233a2a7ccddbfd78 | [
"MIT"
] | 390 | 2020-05-03T17:34:02.000Z | 2022-03-05T11:29:07.000Z | genrl/environments/vec_env/utils.py | matrig/genrl | 25eb018f18a9a1d0865c16e5233a2a7ccddbfd78 | [
"MIT"
] | 306 | 2020-05-03T05:53:53.000Z | 2022-03-12T00:27:28.000Z | genrl/environments/vec_env/utils.py | matrig/genrl | 25eb018f18a9a1d0865c16e5233a2a7ccddbfd78 | [
"MIT"
] | 64 | 2020-05-05T20:23:30.000Z | 2022-03-30T08:43:10.000Z | from typing import Tuple
import torch
| 28.333333 | 77 | 0.611765 |
12e064fd8ee7774d0bfca223891f1c72e7cca90f | 2,752 | py | Python | releases/pota-windows-1.3-ai5.0.2.0/ae/aiPotaTemplate.py | sumitneup/pota | a1d7a59b5ca29813d8b7f3fa77cca0a47404b785 | [
"MIT"
] | null | null | null | releases/pota-windows-1.3-ai5.0.2.0/ae/aiPotaTemplate.py | sumitneup/pota | a1d7a59b5ca29813d8b7f3fa77cca0a47404b785 | [
"MIT"
] | null | null | null | releases/pota-windows-1.3-ai5.0.2.0/ae/aiPotaTemplate.py | sumitneup/pota | a1d7a59b5ca29813d8b7f3fa77cca0a47404b785 | [
"MIT"
] | null | null | null | import mtoa.ui.ae.templates as templates
import pymel.core as pm
import maya.cmds as cmds
import mtoa.ui.ae.utils as aeUtils
templates.registerTranslatorUI(aiPotaTemplate, "camera", "pota") | 39.884058 | 135 | 0.673328 |
12e101d3d1c0a3624036d3fc55bbec2095eca800 | 2,690 | py | Python | tests/test_user.py | munniomer/Send-IT-Api-v1 | 17041c987638c7e47c7c2ebed29bf7e2b5156bed | [
"CNRI-Python",
"OML"
] | null | null | null | tests/test_user.py | munniomer/Send-IT-Api-v1 | 17041c987638c7e47c7c2ebed29bf7e2b5156bed | [
"CNRI-Python",
"OML"
] | null | null | null | tests/test_user.py | munniomer/Send-IT-Api-v1 | 17041c987638c7e47c7c2ebed29bf7e2b5156bed | [
"CNRI-Python",
"OML"
] | 1 | 2019-02-05T07:44:19.000Z | 2019-02-05T07:44:19.000Z | import unittest
from app import create_app
import json
from tests.basetest import BaseTest
| 42.03125 | 99 | 0.637546 |
12e130d67ccfe9de3b3564473e4a39882ddb1111 | 583 | py | Python | authors/apps/profiles/migrations/0023_auto_20190124_1222.py | andela/ah-django-unchained | a4e5f6cd11fdc0b9422020693ac1200b849cf0f3 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/profiles/migrations/0023_auto_20190124_1222.py | andela/ah-django-unchained | a4e5f6cd11fdc0b9422020693ac1200b849cf0f3 | [
"BSD-3-Clause"
] | 26 | 2019-01-07T14:22:05.000Z | 2019-02-28T17:11:48.000Z | authors/apps/profiles/migrations/0023_auto_20190124_1222.py | andela/ah-django-unchained | a4e5f6cd11fdc0b9422020693ac1200b849cf0f3 | [
"BSD-3-Clause"
] | 3 | 2019-09-19T22:16:09.000Z | 2019-10-16T21:16:16.000Z | # Generated by Django 2.1.4 on 2019-01-24 12:22
from django.db import migrations, models
| 24.291667 | 63 | 0.595197 |
12e2d80d29d4efd869955ca94be7cd962776dc80 | 811 | py | Python | Algorithm/ShellSort/pyShellSort.py | commanderHR1/algorithms | d077364e8b08ae2b7b93bc01a73f622421086365 | [
"MIT"
] | 1 | 2020-07-17T20:49:55.000Z | 2020-07-17T20:49:55.000Z | Algorithm/ShellSort/pyShellSort.py | commanderHR1/algorithms | d077364e8b08ae2b7b93bc01a73f622421086365 | [
"MIT"
] | null | null | null | Algorithm/ShellSort/pyShellSort.py | commanderHR1/algorithms | d077364e8b08ae2b7b93bc01a73f622421086365 | [
"MIT"
] | null | null | null | # Implementation of Shell Sort algorithm in Python
l = [4, 1, 2, 5, 3]
print("Initial list: " + str(l))
shellSort(l)
print("Sorted list: " + str(l))
| 26.16129 | 70 | 0.477189 |
12e5018fbac310b4e1d16e7744a8549158b1a76a | 1,943 | py | Python | photos/models.py | benjaminbills/galleria | 4c89f265a2f4f853a5685828d5bc505b51b9bb74 | [
"MIT"
] | null | null | null | photos/models.py | benjaminbills/galleria | 4c89f265a2f4f853a5685828d5bc505b51b9bb74 | [
"MIT"
] | null | null | null | photos/models.py | benjaminbills/galleria | 4c89f265a2f4f853a5685828d5bc505b51b9bb74 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here. | 26.986111 | 79 | 0.691199 |
12e533fd59ecf8d6a32514514fcb290ff13e6ec1 | 1,322 | py | Python | main.py | kramrm/gcf-alerting-discord | c73d88520a783f9c4d12099bb8e21f03a950eebc | [
"MIT"
] | null | null | null | main.py | kramrm/gcf-alerting-discord | c73d88520a783f9c4d12099bb8e21f03a950eebc | [
"MIT"
] | null | null | null | main.py | kramrm/gcf-alerting-discord | c73d88520a783f9c4d12099bb8e21f03a950eebc | [
"MIT"
] | null | null | null | import base64
import json
from webhook import post_webhook
from datetime import datetime
def hello_pubsub(event, context):
    """Triggered from a message on a Cloud Pub/Sub topic.

    Decodes the base64 Pub/Sub payload, extracts the monitoring incident
    and forwards a formatted summary to the Discord webhook.

    Args:
         event (dict): Event payload.
         context (google.cloud.functions.Context): Metadata for the event.
    """
    pubsub_message = base64.b64decode(event['data']).decode('utf-8')
    incident = json.loads(pubsub_message)['incident']
    title = incident['policy_name']
    status = incident['state'].title()
    timestamp = datetime.utcfromtimestamp(incident["started_at"]).isoformat()
    log_message = f'Started: {timestamp} UTC'
    # Amber while the incident is open; green once it has ended.
    color = 16772608
    if incident['ended_at'] is not None:
        timestamp = datetime.utcfromtimestamp(incident["ended_at"]).isoformat()
        log_message += f'\nEnded: {timestamp} UTC'
        color = 65297
    log_message += f'\n{incident["summary"]}'
    log_message += f'\n[Monitor Event]({incident["url"]})'
    post_webhook(message=log_message, timestamp=timestamp, status=status,
                 title=title, color=color)
| 38.882353 | 99 | 0.683812 |
12e58dae1b7214722dcef0c29dfe11fbbf4c0b51 | 358 | py | Python | libzyre.py | brettviren/wafit | 39e9f2748c095dc4c3421a5de0f10f300d8da30b | [
"BSD-3-Clause"
] | null | null | null | libzyre.py | brettviren/wafit | 39e9f2748c095dc4c3421a5de0f10f300d8da30b | [
"BSD-3-Clause"
] | null | null | null | libzyre.py | brettviren/wafit | 39e9f2748c095dc4c3421a5de0f10f300d8da30b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env waf
'''
This is a wafit tool for using zyre
'''
import util
| 21.058824 | 68 | 0.667598 |
12e5fe65e4d8ed7a4606ea760b1a56fc1a8485e1 | 6,226 | py | Python | scripts/run-gmm.py | vr100/nfl-kaggle | 74386b672ef4bb894bdf943df866855c4b555ede | [
"MIT"
] | null | null | null | scripts/run-gmm.py | vr100/nfl-kaggle | 74386b672ef4bb894bdf943df866855c4b555ede | [
"MIT"
] | null | null | null | scripts/run-gmm.py | vr100/nfl-kaggle | 74386b672ef4bb894bdf943df866855c4b555ede | [
"MIT"
] | null | null | null | import argparse, os, fnmatch, json, joblib
import pandas as pd
from sklearn.mixture import GaussianMixture
from sklearn.metrics import adjusted_rand_score
# Reference paper - https://arxiv.org/abs/1906.11373
# "Unsupervised Methods for Identifying Pass Coverage Among Defensive Backs with NFL Player Tracking Data"
STATS_PREFIX = "week"
SKIP_COLS_KEY = "global_skip_cols"
ONLY_CLOSEST_KEY = "only_closest"
CLOSE_TO_BR_KEY = "close_to_br"
SELECT_GROUP_KEY = "select_group_by"
GROUP_BY = ["gameId", "playId"]
MAX_COL = "closest_frames"
main()
| 32.092784 | 106 | 0.734661 |
12e658cebecc095f8910cdb95d0ccbd190f22eff | 106 | py | Python | module01/classes/class06b.py | LauroHBrant/python-course | 2154181ca4b684b0d1fa635706bcb1647a753bc3 | [
"MIT"
] | 2 | 2021-01-07T23:59:36.000Z | 2021-01-18T00:23:52.000Z | module01/classes/class06b.py | LauroHBrant/python-course | 2154181ca4b684b0d1fa635706bcb1647a753bc3 | [
"MIT"
] | null | null | null | module01/classes/class06b.py | LauroHBrant/python-course | 2154181ca4b684b0d1fa635706bcb1647a753bc3 | [
"MIT"
] | null | null | null | from style import blue, none
n = input(f'Type {blue}something{none}: ')
print(f'{blue}{n.isnumeric()}')
| 17.666667 | 42 | 0.669811 |
12e805833151bd1898679d1e39b89a2e7fde7f1c | 2,600 | py | Python | custom_components/panasonic_cc/__init__.py | shyne99/panasonic_cc | ec7912e4067ebd0c08ea2a16c123c50d69a2fca6 | [
"MIT"
] | null | null | null | custom_components/panasonic_cc/__init__.py | shyne99/panasonic_cc | ec7912e4067ebd0c08ea2a16c123c50d69a2fca6 | [
"MIT"
] | null | null | null | custom_components/panasonic_cc/__init__.py | shyne99/panasonic_cc | ec7912e4067ebd0c08ea2a16c123c50d69a2fca6 | [
"MIT"
] | null | null | null | """Platform for the Panasonic Comfort Cloud."""
from datetime import timedelta
import logging
from typing import Any, Dict
import asyncio
from async_timeout import timeout
import voluptuous as vol
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_USERNAME, CONF_PASSWORD)
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers import discovery
from .const import TIMEOUT
from .panasonic import PanasonicApiDevice
_LOGGER = logging.getLogger(__name__)
DOMAIN = "panasonic_cc"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
PANASONIC_DEVICES = "panasonic_devices"
COMPONENT_TYPES = ["climate", "sensor", "switch"]
| 27.368421 | 83 | 0.699231 |
12e82d4517d5644cd0b40eba9d476a8a70aa842c | 5,806 | py | Python | django/bossingest/test/test_ingest_manager.py | jhuapl-boss/boss | c2e26d272bd7b8d54abdc2948193163537e31291 | [
"Apache-2.0"
] | 20 | 2016-05-16T21:08:13.000Z | 2021-11-16T11:50:19.000Z | django/bossingest/test/test_ingest_manager.py | jhuapl-boss/boss | c2e26d272bd7b8d54abdc2948193163537e31291 | [
"Apache-2.0"
] | 31 | 2016-10-28T17:51:11.000Z | 2022-02-10T08:07:31.000Z | django/bossingest/test/test_ingest_manager.py | jhuapl-boss/boss | c2e26d272bd7b8d54abdc2948193163537e31291 | [
"Apache-2.0"
] | 12 | 2016-10-28T17:47:01.000Z | 2021-05-18T23:47:06.000Z | # Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from unittest.mock import patch, MagicMock
from bossingest.ingest_manager import IngestManager
from bossingest.models import IngestJob
from bossingest.test.setup import SetupTests
from bosscore.test.setup_db import SetupTestDB
from bosscore.error import ErrorCodes
from bosscore.lookup import LookUpKey
import bossutils.aws
from django.contrib.auth.models import User
from ndingest.ndqueue.uploadqueue import UploadQueue
from rest_framework.test import APITestCase
| 40.887324 | 110 | 0.707544 |
12e8353d99830242965335f0aba978e3cb0ab443 | 5,505 | py | Python | sanic_devtools/log.py | yunstanford/sanic-devtools | 9e8a6d011db025d53ddd6012b5542dc18825d4b0 | [
"MIT"
] | 12 | 2019-09-06T05:14:46.000Z | 2022-02-17T09:26:38.000Z | sanic_devtools/log.py | yunstanford/sanic-devtools | 9e8a6d011db025d53ddd6012b5542dc18825d4b0 | [
"MIT"
] | null | null | null | sanic_devtools/log.py | yunstanford/sanic-devtools | 9e8a6d011db025d53ddd6012b5542dc18825d4b0 | [
"MIT"
] | 1 | 2019-09-10T03:57:21.000Z | 2019-09-10T03:57:21.000Z | import json
import logging
import logging.config
import platform
import re
import traceback
from io import StringIO
import pygments
from devtools import pformat
from devtools.ansi import isatty, sformat
from pygments.formatters import Terminal256Formatter
from pygments.lexers import Python3TracebackLexer
# Named loggers for the dev-server sub-systems; wired up by
# log_config()/setup_logging() further down in this module.
rs_dft_logger = logging.getLogger('sdev.server.dft')
rs_aux_logger = logging.getLogger('sdev.server.aux')
tools_logger = logging.getLogger('sdev.tools')
main_logger = logging.getLogger('sdev.main')
# ANSI style applied per log level (presumably consulted by the
# formatter classes in this module — confirm at the use site).
LOG_FORMATS = {
    logging.DEBUG: sformat.dim,
    logging.INFO: sformat.green,
    logging.WARN: sformat.yellow,
}
# Pygments lexer/formatter pair for syntax-highlighting Python tracebacks.
pyg_lexer = Python3TracebackLexer()
pyg_formatter = Terminal256Formatter(style='vim')
# Captures a leading "[...]" prefix of a log line (e.g. the timestamp).
split_log = re.compile(r'^(\[.*?\])')
def log_config(verbose: bool) -> dict:
    """
    Setup default config. for dictConfig.
    :param verbose: level: DEBUG if True, INFO if False
    :return: dict suitable for ``logging.config.dictConfig``
    """
    log_level = 'DEBUG' if verbose else 'INFO'
    return {
        'version': 1,
        # Keep any loggers configured before this call alive instead of
        # silencing them (dictConfig's default would disable them).
        'disable_existing_loggers': False,
        'formatters': {
            'default': {
                'format': '[%(asctime)s] %(message)s',
                'datefmt': '%H:%M:%S',
                'class': 'sanic_devtools.log.DefaultFormatter',
            },
            # Same formatter class but without the timestamp prefix.
            'no_ts': {
                'format': '%(message)s',
                'class': 'sanic_devtools.log.DefaultFormatter',
            },
            'sanic': {
                'format': '%(message)s',
                'class': 'sanic_devtools.log.AccessFormatter',
            },
        },
        'handlers': {
            'default': {
                'level': log_level,
                'class': 'sanic_devtools.log.HighlightStreamHandler',
                'formatter': 'default'
            },
            'no_ts': {
                'level': log_level,
                'class': 'sanic_devtools.log.HighlightStreamHandler',
                'formatter': 'no_ts'
            },
            'sanic_access': {
                'level': log_level,
                'class': 'sanic_devtools.log.HighlightStreamHandler',
                'formatter': 'sanic'
            },
            # No explicit 'level' here: the handler defaults to NOTSET and
            # emits whatever the 'sanic.server' logger passes through.
            'sanic_server': {
                'class': 'sanic_devtools.log.HighlightStreamHandler',
                'formatter': 'sanic'
            },
        },
        'loggers': {
            rs_dft_logger.name: {
                'handlers': ['default'],
                'level': log_level,
            },
            rs_aux_logger.name: {
                'handlers': ['default'],
                'level': log_level,
            },
            tools_logger.name: {
                'handlers': ['default'],
                'level': log_level,
            },
            main_logger.name: {
                'handlers': ['no_ts'],
                'level': log_level,
            },
            # Access log is terminal output only; don't propagate to root.
            'sanic.access': {
                'handlers': ['sanic_access'],
                'level': log_level,
                'propagate': False,
            },
            'sanic.server': {
                'handlers': ['sanic_server'],
                'level': log_level,
            },
        },
    }
def setup_logging(verbose):
    """Install the sdev logging configuration via ``dictConfig``.

    :param verbose: use DEBUG level when True, INFO otherwise
    """
    logging.config.dictConfig(log_config(verbose))
| 31.820809 | 101 | 0.547502 |
12e86cadd6eb11b7a84bc77642dccfd6d3f1bfb4 | 1,893 | py | Python | rest_fhir/mixins/conditional_read.py | weynelucas/django-rest-fhir | 560a0aadd0cfa43b6dc58f995c86015f6eefb768 | [
"MIT"
] | 2 | 2021-05-07T12:16:27.000Z | 2021-12-16T20:45:36.000Z | rest_fhir/mixins/conditional_read.py | weynelucas/django-rest-fhir | 560a0aadd0cfa43b6dc58f995c86015f6eefb768 | [
"MIT"
] | 3 | 2021-05-10T19:40:33.000Z | 2021-06-27T14:24:47.000Z | rest_fhir/mixins/conditional_read.py | weynelucas/django-rest-fhir | 560a0aadd0cfa43b6dc58f995c86015f6eefb768 | [
"MIT"
] | 1 | 2021-08-09T22:00:22.000Z | 2021-08-09T22:00:22.000Z | import calendar
from typing import Union
import dateutil.parser
from rest_framework import status
from rest_framework.response import Response
from django.utils.cache import get_conditional_response
from django.utils.http import http_date
from ..models import Resource, ResourceVersion
FhirResource = Union[Resource, ResourceVersion]
| 29.578125 | 73 | 0.663497 |
12e90bbcd25c813026449118e104295e2d5b4d7b | 803 | py | Python | code_week27_1026_111/sort_colors.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week27_1026_111/sort_colors.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week27_1026_111/sort_colors.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | '''
n
01 2
:
:
: [2,0,2,1,1,0]
: [0,0,1,1,2,2]
01 2 012
LeetCode
https://leetcode-cn.com/problems/sort-colors
'''
| 21.131579 | 65 | 0.554172 |
12ea0884e04ad5410800ee3a274f85dcb7596112 | 363 | py | Python | solutions/lowest_common_ancestor_deepest_leaves/__main__.py | ansonmiu0214/dsa-worked-solutions | 88801d268b78506edd77e771c29b4c9f4ae0f59a | [
"MIT"
] | null | null | null | solutions/lowest_common_ancestor_deepest_leaves/__main__.py | ansonmiu0214/dsa-worked-solutions | 88801d268b78506edd77e771c29b4c9f4ae0f59a | [
"MIT"
] | null | null | null | solutions/lowest_common_ancestor_deepest_leaves/__main__.py | ansonmiu0214/dsa-worked-solutions | 88801d268b78506edd77e771c29b4c9f4ae0f59a | [
"MIT"
] | null | null | null | from .solution import lcaDeepestLeaves
from ..utils import TreeNode
print('Enter tree, e.g. [2,3,1,3,1,null,1]:', end=' ')
nodes = [int(node) if node != 'null' else None for node in input().strip().split(',')]
root = TreeNode.fromList(nodes)
lowestCommonAncestor = lcaDeepestLeaves(root)
print(f'The lowest common ancestor is: {lowestCommonAncestor.toList()}') | 36.3 | 86 | 0.721763 |
12ea961825e76ebc83c3a72ff0731af4a86af12d | 2,472 | py | Python | code/python3/index_values_with_geo.py | jaylett/xapian-docsprint | 2e8fdffecf71f7042c0abe49924ba48c11818b7e | [
"MIT"
] | 47 | 2015-01-20T15:38:41.000Z | 2022-02-15T21:03:50.000Z | code/python3/index_values_with_geo.py | jaylett/xapian-docsprint | 2e8fdffecf71f7042c0abe49924ba48c11818b7e | [
"MIT"
] | 16 | 2015-06-09T16:12:50.000Z | 2020-02-05T06:40:18.000Z | code/python3/index_values_with_geo.py | jaylett/xapian-docsprint | 2e8fdffecf71f7042c0abe49924ba48c11818b7e | [
"MIT"
] | 56 | 2015-01-20T15:38:44.000Z | 2022-03-03T18:13:39.000Z | #!/usr/bin/env python
import json
from support import parse_states
import sys
import xapian
if len(sys.argv) != 3:
print("Usage: %s DATAPATH DBPATH" % sys.argv[0])
sys.exit(1)
index(datapath = sys.argv[1], dbpath = sys.argv[2])
| 35.314286 | 74 | 0.644013 |
12eab71a1efede1b96f0100790956e17f9d9393a | 1,265 | py | Python | logger.py | drewstone/dynamic-governanceq | 924317800db7bca6308ff912b16c7b834ab30e32 | [
"MIT"
] | null | null | null | logger.py | drewstone/dynamic-governanceq | 924317800db7bca6308ff912b16c7b834ab30e32 | [
"MIT"
] | null | null | null | logger.py | drewstone/dynamic-governanceq | 924317800db7bca6308ff912b16c7b834ab30e32 | [
"MIT"
] | null | null | null | import constants
| 36.142857 | 74 | 0.554941 |
12ec838b4e6e3d1f8f2bea5549297c2e3c075ade | 2,484 | py | Python | test/core/bad_ssl/gen_build_yaml.py | Akrog/grpc | 14800b0c1acc2d10d4fd0826731ecae2cb448143 | [
"Apache-2.0"
] | 3 | 2020-10-07T14:20:21.000Z | 2021-10-08T14:49:17.000Z | test/core/bad_ssl/gen_build_yaml.py | Akrog/grpc | 14800b0c1acc2d10d4fd0826731ecae2cb448143 | [
"Apache-2.0"
] | 1 | 2021-03-04T02:33:56.000Z | 2021-03-04T02:33:56.000Z | test/core/bad_ssl/gen_build_yaml.py | Akrog/grpc | 14800b0c1acc2d10d4fd0826731ecae2cb448143 | [
"Apache-2.0"
] | 5 | 2021-02-19T09:46:00.000Z | 2022-03-13T17:33:34.000Z | #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the end2end tests."""
import collections
import yaml
TestOptions = collections.namedtuple('TestOptions', 'flaky cpu_cost')
default_test_options = TestOptions(False, 1.0)
# maps test names to options
BAD_CLIENT_TESTS = {
'cert': default_test_options._replace(cpu_cost=0.1),
# Disabling this test because it does not link correctly as written
# 'alpn': default_test_options._replace(cpu_cost=0.1),
}
if __name__ == '__main__':
main()
| 35.485714 | 76 | 0.595813 |
12ed7f2619866ebbd758994ab5e6290f518e72e4 | 6,608 | py | Python | tests/test_providers.py | thejoeejoee/django-allauth-cas | 5db34b546eb32524a3a1a4b90f411e370ac7ad9b | [
"MIT"
] | null | null | null | tests/test_providers.py | thejoeejoee/django-allauth-cas | 5db34b546eb32524a3a1a4b90f411e370ac7ad9b | [
"MIT"
] | null | null | null | tests/test_providers.py | thejoeejoee/django-allauth-cas | 5db34b546eb32524a3a1a4b90f411e370ac7ad9b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from six.moves.urllib.parse import urlencode
from django.contrib import messages
from django.contrib.messages.api import get_messages
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.messages.storage.base import Message
from django.contrib.sessions.middleware import SessionMiddleware
from django.test import RequestFactory, TestCase, override_settings
from allauth.socialaccount.providers import registry
from allauth_cas.views import AuthAction
from .example.provider import ExampleCASProvider
| 32.875622 | 79 | 0.623033 |
12ed9629940a31dc96db1b6d58b951b990da8233 | 3,723 | py | Python | infoblox_netmri/api/remote/models/device_password_log_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/device_password_log_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/device_password_log_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
| 31.285714 | 130 | 0.608649 |
12ee13303b7604822dba3ba0cf7479d1d2caaf67 | 4,477 | py | Python | selenium_utils/element.py | defactto/selenium-utils | d3a71f3baaaa0da29e3b1ab869f8c685ea5d1b42 | [
"Apache-2.0"
] | 7 | 2016-08-24T20:29:47.000Z | 2020-01-29T13:59:03.000Z | selenium_utils/element.py | defactto/selenium-utils | d3a71f3baaaa0da29e3b1ab869f8c685ea5d1b42 | [
"Apache-2.0"
] | null | null | null | selenium_utils/element.py | defactto/selenium-utils | d3a71f3baaaa0da29e3b1ab869f8c685ea5d1b42 | [
"Apache-2.0"
] | 1 | 2020-01-06T18:41:15.000Z | 2020-01-06T18:41:15.000Z | import logging
import time
from selenium.common import exceptions
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common import action_chains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium_utils import exception
logger = logging.getLogger(__name__)
def hover_over_element(driver: WebDriver, element):
    """Hover the mouse cursor over *element* via an action chain."""
    chain = action_chains.ActionChains(driver)
    chain.move_to_element(element)
    chain.perform()
def wait_until_stops_moving(element, wait_seconds=1):
"""Waits until the element stops moving
Args:
selenium.webdriver.remote.webelement.WebElement
"""
prev_location = None
timer_begin = time.time()
while prev_location != element.location:
prev_location = element.location
time.sleep(0.1)
if time.time() - timer_begin > wait_seconds:
raise exception.ElementMovingTimeout
def get_when_visible(driver: WebDriver, locator, wait_seconds=1):
    """Wait for the element at *locator* to be present in the DOM.

    Args:
        driver (base.CustomDriver)
        locator (tuple)
    Returns:
        selenium.webdriver.remote.webelement.WebElement
    """
    present = EC.presence_of_element_located(locator)
    return WebDriverWait(driver, wait_seconds).until(present)
def wait_until_condition(driver: WebDriver, condition, wait_seconds=1):
    """Block until *condition* is met or *wait_seconds* elapses."""
    waiter = WebDriverWait(driver, wait_seconds)
    waiter.until(condition)
def wait_until_not_present(driver: WebDriver, locator):
    """Wait until the DOM contains no element matching *locator*."""
    def _all_gone(browser):
        return not browser.find_elements(*locator)
    wait_until_condition(driver, _all_gone)
def get_when_all_visible(driver: WebDriver, locator, wait_seconds=1):
    """Return the WebElements for *locator* once at least one is visible.

    Note: despite the function name, the underlying condition is
    ``visibility_of_any_elements_located`` — it succeeds as soon as any
    matching element is visible.

    Args:
        locator (tuple)
    Returns:
        selenium.webdriver.remote.webelement.WebElements
    """
    any_visible = EC.visibility_of_any_elements_located(locator)
    return WebDriverWait(driver, wait_seconds).until(any_visible)
def get_when_clickable(driver: WebDriver, locator, wait_seconds=1):
    """Return the element at *locator* once it becomes clickable.

    Args:
        driver (base.CustomDriver)
        locator (tuple)
    Returns:
        selenium.webdriver.remote.webelement.WebElement
    """
    clickable = EC.element_to_be_clickable(locator)
    return WebDriverWait(driver, wait_seconds).until(clickable)
def get_when_invisible(driver: WebDriver, locator, wait_seconds=1):
    """Wait until the element at *locator* is invisible or absent.

    Args:
        driver (base.CustomDriver)
        locator (tuple)
    Returns:
        selenium.webdriver.remote.webelement.WebElement
    """
    hidden = EC.invisibility_of_element_located(locator)
    return WebDriverWait(driver, wait_seconds).until(hidden)
def wait_for_element_text(driver: WebDriver, locator, text, wait_seconds=1):
    """Wait until *text* appears in the element at *locator*.

    Args:
        driver (base.CustomDriver)
        locator (tuple)
        text (str)
    """
    has_text = EC.text_to_be_present_in_element(locator, text)
    return WebDriverWait(driver, wait_seconds).until(has_text)
def is_value_in_attr(element, attr="class", value="active"):
    """Tell whether *value* is a whitespace-separated token of *attr*.

    Args:
        element (selenium.webdriver.remote.webelement.WebElement)
        attr (basestring): attribute name, e.g. "class"
        value (basestring): token to look for, e.g. "active"
    Returns:
        bool
    """
    tokens = element.get_attribute(attr).split()
    return value in tokens
def click_on_staleable_element(driver: WebDriver, el_locator, wait_seconds=1):
    """Click the element at *el_locator*, retrying on staleness.

    The element may be replaced in the DOM between lookup and click, so a
    stale reference is logged and the click retried every 0.1 s.

    Args:
        driver (base.CustomDriver)
        el_locator (tuple): locator understood by ``driver.find_element``.
        wait_seconds (int | float): how long to keep retrying.

    Raises:
        exception.ElementNotFound: if no click succeeded before the
            deadline (the while loop ended without ``break``).
    """
    # Monotonic clock: wall-clock adjustments must not shrink or stretch
    # the retry window (time.time() can jump).
    time_start = time.monotonic()
    while time.monotonic() - time_start < wait_seconds:
        try:
            driver.find_element(*el_locator).click()
            break
        except exceptions.StaleElementReferenceException as e:
            logger.error(str(e))
            time.sleep(0.1)
    else:
        raise exception.ElementNotFound(el_locator)
def scroll_into_view(driver: WebDriver, element, offset_pixels=0):
    """Scroll the page so *element* is visible, then shift up by
    *offset_pixels* to compensate for a fixed header.

    Returns the element for call chaining.
    """
    scroll_script = "return arguments[0].scrollIntoView();"
    nudge_script = "window.scrollBy(0, -{});".format(offset_pixels)
    driver.execute_script(scroll_script, element)
    driver.execute_script(nudge_script)
    return element
| 29.071429 | 100 | 0.691311 |
12ee5dcab405211321c77a37855a79013c17587c | 1,421 | py | Python | modules/iib_applications.py | satbel/ib-metrics-pyclient | 1670df55684a7182884fcfc777fde5ae44095f8f | [
"MIT"
] | null | null | null | modules/iib_applications.py | satbel/ib-metrics-pyclient | 1670df55684a7182884fcfc777fde5ae44095f8f | [
"MIT"
] | null | null | null | modules/iib_applications.py | satbel/ib-metrics-pyclient | 1670df55684a7182884fcfc777fde5ae44095f8f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Various functions for ib applications."""
from modules.iib_api import get_status
def get_metric_name(metric_label):
    """Return the pushgateway metric name for *metric_label*.

    e.g. ``status`` -> ``ib_application_status``.
    """
    return 'ib_application_%s' % metric_label
def get_metric_annotation():
    """Returns dictionary with annotations 'HELP' and 'TYPE' for metrics."""
    # The backslash continuation keeps the "# TYPE" line at column 0
    # inside the literal: Prometheus exposition format requires HELP/TYPE
    # comment lines to begin at the start of the line.
    annotations = {
        'status': '# HELP {0} Current status of IB application.\n\
# TYPE {0} gauge\n'.format(get_metric_name('status'))}
    return annotations
def format_applications(applications, broker_name):
    """Build the pushgateway payload for every IB application.

    Each entry in *applications* is a whitespace-separated line from the
    broker listing; fields 6, 2 and 8 carry the execution-group name,
    the application name and its textual status (dots stripped).

    Args:
        applications: iterable of raw listing lines.
        broker_name: name of the broker the applications belong to.
    Returns:
        str: the HELP/TYPE annotation followed by one metric line per
        application.
    """
    annotation = get_metric_annotation()['status']
    metric_name = get_metric_name(metric_label='status')
    lines = []
    for raw_line in applications:
        fields = raw_line.split()
        egname = fields[6].replace("'", "")
        appname = fields[2].replace("'", "")
        status = fields[8].replace(".", "")
        labels = 'egname="{0}", brokername="{1}", appname="{2}"'.format(
            egname, broker_name, appname)
        lines.append('{0}{{{1}}} {2}\n'.format(
            metric_name, labels, get_status(status=status)))
    return '{0}{1}'.format(annotation, ''.join(lines))
| 36.435897 | 98 | 0.64532 |
12f0e1426999717b706caac8906a3500e72dc344 | 1,366 | py | Python | clock.py | hcjk/kitchen-bot | 5122101ed840b6bdf0b56d3c154de083cb793eda | [
"MIT"
] | null | null | null | clock.py | hcjk/kitchen-bot | 5122101ed840b6bdf0b56d3c154de083cb793eda | [
"MIT"
] | null | null | null | clock.py | hcjk/kitchen-bot | 5122101ed840b6bdf0b56d3c154de083cb793eda | [
"MIT"
] | 1 | 2019-06-10T01:25:49.000Z | 2019-06-10T01:25:49.000Z | import os
import requests
import psycopg2
import db_lib as db
from app import send_message, log
from apscheduler.schedulers.blocking import BlockingScheduler
DATABASE_URL = os.environ['DATABASE_URL']
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
sched = BlockingScheduler()
sched.add_job(kitchen_reminder, 'cron', hour=0, minute=0)
sched.add_job(rent_reminder, 'cron', day=1)
sched.start()
| 23.964912 | 82 | 0.732064 |
12f18b28f2f44fef548bff40a3b625b2e4be86b9 | 2,940 | py | Python | ucf_sub_catkin_ros/src/sub_states/src/qual/test.py | RoboticsClubatUCF/RoboSub | 47304c620f963a8762db57a7ed248d1df90190fb | [
"MIT"
] | null | null | null | ucf_sub_catkin_ros/src/sub_states/src/qual/test.py | RoboticsClubatUCF/RoboSub | 47304c620f963a8762db57a7ed248d1df90190fb | [
"MIT"
] | 19 | 2016-09-16T19:52:57.000Z | 2018-04-14T18:16:17.000Z | ucf_sub_catkin_ros/src/sub_states/src/qual/test.py | RoboticsClubatUCF/RoboSub | 47304c620f963a8762db57a7ed248d1df90190fb | [
"MIT"
] | 8 | 2016-01-06T20:56:45.000Z | 2017-02-26T02:49:17.000Z | #!/usr/bin/env python
import rospy
import smach
import gate
import pole
if __name__ == '__main__':
rospy.init_node('hippo_sm')
sm = SubStates()
outcome = sm.tasks.execute()
rospy.spin()
| 49.830508 | 116 | 0.343197 |
12f1cb8ec52078b29ac8f9e2d9706191446e64ae | 3,271 | py | Python | networkapi/plugins/SDN/ODL/tests/test_send_flows_with_tcp_flags.py | vinicius-marinho/GloboNetworkAPI | 94651d3b4dd180769bc40ec966814f3427ccfb5b | [
"Apache-2.0"
] | 73 | 2015-04-13T17:56:11.000Z | 2022-03-24T06:13:07.000Z | networkapi/plugins/SDN/ODL/tests/test_send_flows_with_tcp_flags.py | leopoldomauricio/GloboNetworkAPI | 3b5b2e336d9eb53b2c113977bfe466b23a50aa29 | [
"Apache-2.0"
] | 99 | 2015-04-03T01:04:46.000Z | 2021-10-03T23:24:48.000Z | networkapi/plugins/SDN/ODL/tests/test_send_flows_with_tcp_flags.py | shildenbrand/GloboNetworkAPI | 515d5e961456cee657c08c275faa1b69b7452719 | [
"Apache-2.0"
] | 64 | 2015-08-05T21:26:29.000Z | 2022-03-22T01:06:28.000Z |
from networkapi.test.test_case import NetworkApiTestCase
from networkapi.plugins.SDN.ODL.flows.acl import AclFlowBuilder
| 33.377551 | 75 | 0.496484 |
12f2139184aea177b546923cc78d7b43a26b2e26 | 4,940 | py | Python | Old/OpenCV Scripts/red_filtered_detector.py | multirotorsociety/SAFMC-19-D2-Autonomous-Drone | fd9f0fae5d7cbf618b327224e06a7f459612b4ca | [
"MIT"
] | 6 | 2019-04-01T02:38:40.000Z | 2021-06-05T18:23:06.000Z | Old/OpenCV Scripts/red_filtered_detector.py | multirotorsociety/SAFMC-19-D2-Autonomous-Drone | fd9f0fae5d7cbf618b327224e06a7f459612b4ca | [
"MIT"
] | null | null | null | Old/OpenCV Scripts/red_filtered_detector.py | multirotorsociety/SAFMC-19-D2-Autonomous-Drone | fd9f0fae5d7cbf618b327224e06a7f459612b4ca | [
"MIT"
] | 1 | 2019-09-01T08:58:28.000Z | 2019-09-01T08:58:28.000Z |
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import numpy as np
import time
from fractions import Fraction
from PIL import Image
#cap = cv2.VideoCapture(0)
camera = PiCamera()
camera.resolution = (426, 240)
camera.framerate = 24
camera.exposure_mode = 'off'
camera.exposure_compensation = -3
camera.drc_strength = 'off'
camera.still_stats = False
camera.awb_mode = 'off'
camera.awb_gains = (Fraction(25, 16), Fraction(25,16))
rawCapture = PiRGBArray(camera, size=(426, 240))
# allow the camera to warmup
time.sleep(0.1)
# lower = [135, 130, 50]
# upper = [180, 200, 255]
# lower = [160, 100, 100]
# upper = [180, 255, 255]
# lower2 = [0, 100, 100]
# upper2 = [10, 255, 255]
#lower1 = [0, 50, 50]
#upper1 = [5, 255, 255]
out = cv2.VideoWriter(str(time.time()) + ".avi",cv2.VideoWriter_fourcc('M','J','P','G'), 10, (426, 240))
# lower = np.array(lower, dtype = "uint8")
# upper = np.array(upper, dtype = "uint8")
# lower2 = np.array(lower2, dtype = "uint8")
# upper2 = np.array(upper2, dtype = "uint8")
#lower1 = np.array(lower1, dtype = "uint8")
#upper1 = np.array(upper1, dtype = "uint8")
for img in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
#print(camera.awb_gains)
#r, frame = cap.read()
for i in range(5): # Clears the 5 frame buffer
frame = img.array
height, width = frame.shape[:2]
centre = (int(width/2), int(height/2))
#frame = cv2.GaussianBlur(frame, (9, 9), 0)
#frame = cv2.medianBlur(frame,3)
#frame = cv2.GaussianBlur(frame, (9, 9), 0)
#mask = cv2.inRange(frame, lower, upper)
#mask2 = cv2.inRange(frame, lower2, upper2)
#mask2 = cv2.inRange(frame, lower1, upper1)
#mask = mask1 + mask2
#img_rec_red = cv2.bitwise_and(frame, frame, mask = mask)
#img_rec_redo = cv2.bitwise_and(frame, frame, mask = mask2)
#cv2.imshow("pre or1", img_rec_red)
#cv2.imshow("pre or2", img_rec_redo)
#img_rec_red = cv2.bitwise_or(img_rec_red, img_rec_redo)
b_channel = np.array(frame[:,:,0]).astype('float')
g_channel = np.array(frame[:,:,1]).astype('float')
r_channel = np.array(frame[:,:,2]).astype('float')
# #cv2.imshow('b_chan', b_channel)
# # cv2.imshow('g_chan', g_channel)
# # cv2.imshow('r_chan', r_channel)
bgr_channel = np.add((np.add(b_channel, g_channel)), r_channel)
img_rec_red2 = np.subtract(r_channel,((b_channel + g_channel)/ 2))
#img_rec_red2 = np.divide(r_channel, 255)
img_rec_red2 = np.divide(img_rec_red2,255)
#img_rec_red2 = np.square(img_rec_red2)
img_rec_red2[img_rec_red2 < 0.3] = 0
img_rec_red2 = img_rec_red2 * 255
img_rec_red2 = np.floor(img_rec_red2).astype('uint8')
#img_rec_red = cv2.cvtColor(img_rec_red, cv2.COLOR_BGR2GRAY)
#cv2.imshow('recred2', img_rec_red2)
ret, th = cv2.threshold(img_rec_red2,10,255,cv2.THRESH_BINARY)
#ret, th = cv2.threshold(r_channel.astype('uint8'),110,255,cv2.THRESH_BINARY)
#th = cv2.bitwise_not(th, th)
kernel = np.ones((5,5),np.uint8)
#th = cv2.erode(th, kernel)
th = cv2.dilate(th, kernel)
th = cv2.GaussianBlur(th, (5,5), 0)
try:
M = cv2.moments(th)
# calculate x,y coordinate of center
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
# put text and highlight the center
cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1)
#cv2.putText(frame, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.line(frame, centre, (cX, cY), (255,0,0), 2)
dX = cX - centre[0]
dY = centre[1] - cY
cv2.putText(frame, ("(" + str(dX) + ", " + str(dY) + " )"), (centre[0] - 20, centre[1] - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
print('Velocities: ' + str(dX) + "," + str(dY))
except:
print("No centre detected")
#kernel2 = np.ones((15,15),np.uint8)
#eroded_th = cv2.erode(dilated_th, kernel2)
#blurred_th = cv2.GaussianBlur(eroded_th.copy(), (9, 9), 0)
#eroded_th = cv2.bitwise_not(eroded_th,eroded_th)
#dilated_th = cv2.bitwise_not(dilated_th, dilated_th)
# circles = cv2.HoughCircles(th,cv2.HOUGH_GRADIENT, 1,1000,
# param1=40,param2=23,minRadius=20,maxRadius=0)
# try:
# circles = np.uint16(np.around(circles))
# for i in circles[0,:]:
# # draw the outer circle
# cv2.circle(frame,(i[0],i[1]),i[2],(0,255,0),2)
# # draw the center of the circle
# cv2.circle(frame,(i[0],i[1]),2,(0,0,255),3)
# except:
# pass
cv2.imshow('original', frame)
#cv2.imshow('rec_red',img_rec_red)
cv2.imshow('detected circles',th)
out.write(frame)
k = cv2.waitKey(1)
rawCapture.truncate(0)
if k == 0xFF & ord("q"):
break
#cv2.destroyAllWindows()
#cap.release()
out.release() | 33.154362 | 109 | 0.618421 |
12f21abc71f1092fae63b143257827c5624eebdf | 2,965 | py | Python | setup.py | Maven85/plugin.video.magenta-sport | e05eeea629295d79de7467d495eb0c20b3adb60b | [
"MIT"
] | null | null | null | setup.py | Maven85/plugin.video.magenta-sport | e05eeea629295d79de7467d495eb0c20b3adb60b | [
"MIT"
] | null | null | null | setup.py | Maven85/plugin.video.magenta-sport | e05eeea629295d79de7467d495eb0c20b3adb60b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Module: default
# Author: asciidisco
# Created on: 24.07.2017
# License: MIT https://goo.gl/WA1kby
"""Setup"""
from __future__ import unicode_literals
from os.path import abspath, dirname, join
from re import search
from sys import exit, version, version_info
from setuptools import find_packages, setup
REQUIRED_PYTHON_VERSION = (2, 7)
PACKAGES = find_packages()
INSTALL_DEPENDENCIES = []
SETUP_DEPENDENCIES = []
TEST_DEPENDENCIES = [
'nose',
'Kodistubs',
'httpretty',
'mock',
]
EXTRA_DEPENDENCIES = {
'dev': [
'nose',
'flake8',
'codeclimate-test-reporter',
'pylint',
'mccabe',
'pycodestyle',
'pyflakes',
'Kodistubs',
'httpretty',
'mock',
'requests',
'beautifulsoup4',
'pyDes',
'radon',
'Sphinx',
'sphinx_rtd_theme',
'm2r',
'kodi-release-helper',
'dennis',
'blessings',
'demjson',
'restructuredtext_lint',
'yamllint',
]
}
def get_addon_data():
    """Load the Kodi plugin metadata from the ``addon.xml`` next to this file.

    Returns:
        dict: keys ``id``, ``author``, ``name``, ``version``, ``desc``,
        ``email`` and ``source`` extracted from the add-on manifest.
    """
    root_dir = dirname(abspath(__file__))
    pathname = join(root_dir, 'addon.xml')
    # Open in text mode: matching the str regex patterns below against
    # bytes raises TypeError on Python 3 (the old 'rb' mode only worked
    # on Python 2).
    with open(pathname) as addon_xml:
        addon_xml_contents = addon_xml.read()
    # "(?<!xml )" keeps each pattern from matching the attributes of the
    # "<?xml version=...?>" declaration.
    _id = search(
        r'(?<!xml )id="(.+?)"',
        addon_xml_contents).group(1)
    author = search(
        r'(?<!xml )provider-name="(.+?)"',
        addon_xml_contents).group(1)
    name = search(
        r'(?<!xml )name="(.+?)"',
        addon_xml_contents).group(1)
    version = search(
        r'(?<!xml )version="(.+?)"',
        addon_xml_contents).group(1)
    desc = search(
        r'(?<!xml )description lang="en_GB">(.+?)<',
        addon_xml_contents).group(1)
    email = search(
        r'(?<!xml )email>(.+?)<',
        addon_xml_contents).group(1)
    # Fixed copy-paste bug: this previously reused the <email> pattern,
    # so 'source' silently duplicated the e-mail address.
    source = search(
        r'(?<!xml )source>(.+?)<',
        addon_xml_contents).group(1)
    return {
        'id': _id,
        'author': author,
        'name': name,
        'version': version,
        'desc': desc,
        'email': email,
        'source': source,
    }
if version_info < REQUIRED_PYTHON_VERSION:
exit('Python >= 2.7 is required. Your version:\n{0}'.format(version))
if __name__ == '__main__':
    # Single source of truth: all package metadata comes from the Kodi
    # add-on manifest (addon.xml) so setup.py cannot drift from it.
    ADDON_DATA = get_addon_data()
    setup(
        name=ADDON_DATA.get('name'),
        version=ADDON_DATA.get('version'),
        author=ADDON_DATA.get('author'),
        author_email=ADDON_DATA.get('email'),
        description=ADDON_DATA.get('desc'),
        packages=PACKAGES,
        include_package_data=True,
        install_requires=INSTALL_DEPENDENCIES,
        setup_requires=SETUP_DEPENDENCIES,
        tests_require=TEST_DEPENDENCIES,
        extras_require=EXTRA_DEPENDENCIES,
        # Collect and run the test suite with nose (see TEST_DEPENDENCIES).
        test_suite='nose.collector',
    )
| 26.711712 | 73 | 0.551771 |
12f2a17d10d6e7d8016a1adfcae38305fb8b1df9 | 2,386 | py | Python | franka_lcas_experiments/script/load_model_rtp.py | arsh09/franka_ros_lcas | b6211125436849d5c7def8ad96a384cc34f2f121 | [
"Apache-2.0"
] | 2 | 2021-11-09T00:50:43.000Z | 2021-11-15T09:50:47.000Z | franka_lcas_experiments/script/load_model_rtp.py | arsh09/franka_ros_lcas | b6211125436849d5c7def8ad96a384cc34f2f121 | [
"Apache-2.0"
] | null | null | null | franka_lcas_experiments/script/load_model_rtp.py | arsh09/franka_ros_lcas | b6211125436849d5c7def8ad96a384cc34f2f121 | [
"Apache-2.0"
] | 1 | 2021-11-17T13:24:23.000Z | 2021-11-17T13:24:23.000Z | import numpy as np
import os, sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.keras.models import Model
import tensorflow as tf
from PIL import Image
from utils_rtp import ProMP
if __name__ == "__main__":
ENCODED_MODEL_PATH = "/home/arshad/Documents/reach_to_palpate_validation_models/encoded_model_regions"
PREDICTOR_MODEL = "/home/arshad/Documents/reach_to_palpate_validation_models/model_cnn_rgb_1"
image = np.load( "/home/arshad/catkin_ws/image_xy_rtp.npy" )
predictor = Predictor(ENCODED_MODEL_PATH, PREDICTOR_MODEL)
traj = predictor.predict(image)
np.save("/home/arshad/catkin_ws/predicted_joints_values_rtp.npy", traj)
print ("\n Predicted ProMPs weights for RTP task. Joint trajectory is saved in the file. \n Press 'p' to display the trajectory...")
| 40.440678 | 138 | 0.690696 |
12f2a97e43141a9ad0fb868815aad72bb1ff0352 | 5,648 | py | Python | sdv/tabular/ctgan.py | joanvaquer/SDV | 83e4fdf0ff72e6c5b72cfc8c6ec9584dbd34de28 | [
"MIT"
] | null | null | null | sdv/tabular/ctgan.py | joanvaquer/SDV | 83e4fdf0ff72e6c5b72cfc8c6ec9584dbd34de28 | [
"MIT"
] | null | null | null | sdv/tabular/ctgan.py | joanvaquer/SDV | 83e4fdf0ff72e6c5b72cfc8c6ec9584dbd34de28 | [
"MIT"
] | null | null | null | """Wrapper around CTGAN model."""
from sdv.tabular.base import BaseTabularModel
| 40.056738 | 97 | 0.599681 |
12f479c7b7668c843b94467ffeb73f441443785b | 1,130 | py | Python | cointrader/config.py | 3con/cointrader | abb3d13d1105e11db0070a9052c45cb8a87f168c | [
"MIT"
] | 103 | 2017-03-10T07:23:12.000Z | 2021-08-24T17:39:22.000Z | cointrader/config.py | altfund/cointrader-1 | abb3d13d1105e11db0070a9052c45cb8a87f168c | [
"MIT"
] | 91 | 2017-03-11T06:23:09.000Z | 2021-11-15T17:47:06.000Z | cointrader/config.py | fwolfst/cointrader | abb3d13d1105e11db0070a9052c45cb8a87f168c | [
"MIT"
] | 36 | 2017-03-23T17:48:08.000Z | 2020-02-21T23:42:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import logging
import logging.config
if (sys.version_info > (3, 0)):
# Python 3 code in this block
import configparser
else:
# Python 2 code in this block
import ConfigParser as configparser
DEFAULT_CONFIG = ".cointrader.ini"
| 25.681818 | 64 | 0.642478 |
12f51cb5ac4eefb8f57e6dbd0a326e1ca9a0b225 | 712 | py | Python | src/snakeoil/descriptors.py | Arusekk/snakeoil | aad28a50118223766e5308452b369f2c72b971b2 | [
"BSD-3-Clause"
] | null | null | null | src/snakeoil/descriptors.py | Arusekk/snakeoil | aad28a50118223766e5308452b369f2c72b971b2 | [
"BSD-3-Clause"
] | null | null | null | src/snakeoil/descriptors.py | Arusekk/snakeoil | aad28a50118223766e5308452b369f2c72b971b2 | [
"BSD-3-Clause"
] | null | null | null | """Classes implementing the descriptor protocol."""
__all__ = ("classproperty",)
| 20.342857 | 75 | 0.605337 |
12f6a69fd0573ee6b9b0a6c81a158a82f44d6769 | 6,480 | py | Python | playground/pets_dubins.py | pecey/mbrl-lib | ebca518b35a1370dbaede2a1c96fcde714bc5489 | [
"MIT"
] | null | null | null | playground/pets_dubins.py | pecey/mbrl-lib | ebca518b35a1370dbaede2a1c96fcde714bc5489 | [
"MIT"
] | null | null | null | playground/pets_dubins.py | pecey/mbrl-lib | ebca518b35a1370dbaede2a1c96fcde714bc5489 | [
"MIT"
] | null | null | null | import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import torch
import omegaconf
import mbrl.env.continuous_dubins as dubins_env
import mbrl.env.reward_fns as reward_fns
import mbrl.env.termination_fns as termination_fns
import mbrl.models as models
import mbrl.planning as planning
import mbrl.util.common as common_util
import mbrl.util as util
if __name__ == "__main__":
mpl.rcParams.update({"font.size": 16})
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
noisy = False
seed = 0
env = dubins_env.ContinuousDubinsEnv(noisy)
env.seed(seed)
rng = np.random.default_rng(seed=seed)
generator = torch.Generator(device=device)
generator.manual_seed(seed)
obs_shape = env.observation_space.shape
act_shape = env.action_space.shape
# This functions allows the model to evaluate the true rewards given an observation
reward_fn = reward_fns.continuous_dubins
# This function allows the model to know if an observation should make the episode end
term_fn = termination_fns.continuous_dubins
trial_length = 200
num_trials = 10
ensemble_size = 5
# Everything with "???" indicates an option with a missing value.
# Our utility functions will fill in these details using the
# environment information
cfg_dict = {
# dynamics model configuration
"dynamics_model": {
"model": {
"_target_": "mbrl.models.GaussianMLP",
"device": device,
"num_layers": 3,
"ensemble_size": ensemble_size,
"hid_size": 200,
"use_silu": True,
"in_size": "???",
"out_size": "???",
"deterministic": False,
"propagation_method": "fixed_model"
}
},
# options for training the dynamics model
"algorithm": {
"learned_rewards": False,
"target_is_delta": True,
"normalize": True,
},
# these are experiment specific options
"overrides": {
"trial_length": trial_length,
"num_steps": num_trials * trial_length,
"model_batch_size": 32,
"validation_ratio": 0.05
}
}
cfg = omegaconf.OmegaConf.create(cfg_dict)
# Create a 1-D dynamics model for this environment
dynamics_model = common_util.create_one_dim_tr_model(cfg, obs_shape, act_shape)
# Create a gym-like environment to encapsulate the model
model_env = models.ModelEnv(env, dynamics_model, term_fn, reward_fn, generator=generator)
replay_buffer = common_util.create_replay_buffer(cfg, obs_shape, act_shape, rng=rng)
common_util.rollout_agent_trajectories(
env,
trial_length, # initial exploration steps
planning.RandomAgent(env),
{}, # keyword arguments to pass to agent.act()
replay_buffer=replay_buffer,
trial_length=trial_length
)
print("# samples stored", replay_buffer.num_stored)
agent_cfg = omegaconf.OmegaConf.create({
# this class evaluates many trajectories and picks the best one
"_target_": "mbrl.planning.TrajectoryOptimizerAgent",
"planning_horizon": 15,
"replan_freq": 1,
"verbose": False,
"action_lb": "???",
"action_ub": "???",
# this is the optimizer to generate and choose a trajectory
"optimizer_cfg": {
"_target_": "mbrl.planning.CEMOptimizer",
"num_iterations": 5,
"elite_ratio": 0.1,
"population_size": 500,
"alpha": 0.1,
"device": device,
"lower_bound": "???",
"upper_bound": "???",
"return_mean_elites": True
}
})
agent = planning.create_trajectory_optim_agent_for_model(
model_env,
agent_cfg,
num_particles=20
)
train_losses = []
val_scores = []
# Create a trainer for the model
model_trainer = models.ModelTrainer(dynamics_model, optim_lr=1e-3, weight_decay=5e-5)
# Create visualization objects
fig, axs = plt.subplots(1, 1, figsize=(14, 3.75))
ax_text = axs.text(300, 50, "")
# Main PETS loop
all_rewards = [0]
for trial in range(num_trials):
obs = env.reset()
agent.reset()
done = False
total_reward = 0.0
steps_trial = 0
while not done:
# --------------- Model Training -----------------
if steps_trial == 0:
dynamics_model.update_normalizer(replay_buffer.get_all()) # update normalizer stats
dataset_train, dataset_val = replay_buffer.get_iterators(
batch_size=cfg.overrides.model_batch_size,
val_ratio=cfg.overrides.validation_ratio,
train_ensemble=True,
ensemble_size=ensemble_size,
shuffle_each_epoch=True,
bootstrap_permutes=False, # build bootstrap dataset using sampling with replacement
)
model_trainer.train(
dataset_train, dataset_val=dataset_val, num_epochs=50, patience=50, callback=train_callback)
# --- Doing env step using the agent and adding to model dataset ---
next_obs, reward, done, _ = common_util.step_env_and_add_to_buffer(env, obs, agent, {}, replay_buffer)
obs = next_obs
total_reward += reward
steps_trial += 1
if steps_trial == trial_length:
break
all_rewards.append(total_reward)
env.save_trajectory(f"dubins_{trial}.png")
print(all_rewards)
plot_graph(axs, None, ax_text, trial, steps_trial, all_rewards, force_update=True)
# fig.savefig("dubins.png")
| 34.105263 | 114 | 0.622531 |
12f7704aea2bda946e46a42c6fdb1b32ab8e104a | 39 | py | Python | pixiv_spider/__init__.py | Uzukidd/Pixiv-spider | 10d21bf8f1e0ec0b0792383ae9e8ae55e77efd17 | [
"MIT"
] | 1 | 2021-11-12T19:16:56.000Z | 2021-11-12T19:16:56.000Z | pixiv_spider/__init__.py | Uzukidd/Pixiv-web-crawler | 10d21bf8f1e0ec0b0792383ae9e8ae55e77efd17 | [
"MIT"
] | null | null | null | pixiv_spider/__init__.py | Uzukidd/Pixiv-web-crawler | 10d21bf8f1e0ec0b0792383ae9e8ae55e77efd17 | [
"MIT"
] | null | null | null | # from pixiv_web_crawler import Getters | 39 | 39 | 0.871795 |
12f80c5f985c410a5af8bdf06f87e46b6aa396c4 | 1,241 | py | Python | parsers/parsers_base.py | xm4dn355x/async_test | 92e7ec6a693ff4850ed603c0f4f0fa83e63b4e49 | [
"MIT"
] | null | null | null | parsers/parsers_base.py | xm4dn355x/async_test | 92e7ec6a693ff4850ed603c0f4f0fa83e63b4e49 | [
"MIT"
] | null | null | null | parsers/parsers_base.py | xm4dn355x/async_test | 92e7ec6a693ff4850ed603c0f4f0fa83e63b4e49 | [
"MIT"
] | null | null | null | #
#
#
# :
# : MIT License
#
from time import sleep
import requests
def get_htmls(urls):
"""
URL-
HTML
:param urls: URL-
:type urls: list
:return: HTML-
"""
htmls = [] #
for url in urls: # URL
html = get_html(url) # HTML URL
htmls.append(html) # HTML
sleep(1)
return htmls # - HTML
def get_html(url):
"""
URL-
HTML
:param url: URL-
:type url: str
:return: HTML-
"""
print(f"""get_html url={url}""")
r = requests.get(url, headers={'User-Agent': 'Custom'}) # web- url
print(r) # <Response [200]>
return r.text # HTML
if __name__ == '__main__':
pass | 27.577778 | 109 | 0.654311 |
12f867945891bf95b1fd61c639ac565c8cecb9f9 | 16,303 | py | Python | smbspider/smbspider.py | vonahi/pentesting_scripts | 233b07a13e631cd121985465c083327f2fe372b6 | [
"MIT"
] | 13 | 2019-09-18T17:15:22.000Z | 2022-02-20T00:28:35.000Z | smbspider/smbspider.py | vonahi/pentesting_scripts | 233b07a13e631cd121985465c083327f2fe372b6 | [
"MIT"
] | null | null | null | smbspider/smbspider.py | vonahi/pentesting_scripts | 233b07a13e631cd121985465c083327f2fe372b6 | [
"MIT"
] | 4 | 2019-07-24T10:03:41.000Z | 2021-11-22T06:19:54.000Z | #!/usr/bin/python
#
# This post-exploitation script can be used to spider numerous systems
# to identify sensitive and/or confidential data. A good scenario to
# use this script is when you have admin credentials to tons of
# Windows systems, and you want to look for files containing data such
# as PII, network password documents, etc. For the most part,
# this script uses smbclient, parses the results, and prints
# out the results in a nice format for you.
#
# Author: Alton Johnson <alton@vonahi.io
# Version: 2.4
# Updated: 01/23/2014
#
import commands, time, getopt, re, os
from sys import argv
start_time = time.time()
banner = "\n " + "*" * 56
banner += "\n * _ *"
banner += "\n * | | // \\\\ *"
banner += "\n * ___ _ __ ___ | |__ _\\\\()//_ *"
banner += "\n * / __| '_ ` _ \| '_ \ / // \\\\ \ *"
banner += "\n * \__ \ | | | | | |_) | |\__/| *"
banner += "\n * |___/_| |_| |_|_.__/ *"
banner += "\n * *"
banner += "\n * SMB Spider v2.4, Alton Johnson (alton@vonahi.io) *"
banner += "\n " + "*" * 56 + "\n"
if __name__ == "__main__":
try:
start(argv[1:])
except KeyboardInterrupt:
print "\nExiting. Interrupted by user (ctrl-c)."
exit()
except Exception, err:
print err
exit()
print "\n-----"
print "Completed in: %.1fs" % (time.time() - start_time)
| 38.541371 | 157 | 0.626326 |
12fad400aa5ee6c8bf4a6f0d061c8bf3df14fbb1 | 1,675 | py | Python | api-inference-community/docker_images/spacy/app/pipelines/text_classification.py | mlonaws/huggingface_hub | 588f74b98fbcab2cd7e61a74cc6d9649a92e0ef2 | [
"Apache-2.0"
] | 362 | 2020-12-22T10:24:06.000Z | 2022-03-30T22:47:25.000Z | api-inference-community/docker_images/spacy/app/pipelines/text_classification.py | mlonaws/huggingface_hub | 588f74b98fbcab2cd7e61a74cc6d9649a92e0ef2 | [
"Apache-2.0"
] | 547 | 2020-12-24T13:35:57.000Z | 2022-03-31T17:32:42.000Z | api-inference-community/docker_images/spacy/app/pipelines/text_classification.py | mlonaws/huggingface_hub | 588f74b98fbcab2cd7e61a74cc6d9649a92e0ef2 | [
"Apache-2.0"
] | 98 | 2021-01-06T17:37:09.000Z | 2022-03-29T07:20:08.000Z | import os
import subprocess
import sys
from typing import Dict, List
from app.pipelines import Pipeline
| 34.895833 | 128 | 0.601791 |
12fb3b1f1de02a4bb72cea078775fd6a9b6cb1ac | 4,867 | py | Python | aws/logs_monitoring/tests/test_cloudtrail_s3.py | rkitron/datadog-serverless-functions | d69fe6fdb489c262ffa76a529b22f2a81ae6deba | [
"Apache-2.0"
] | 232 | 2018-11-20T16:57:04.000Z | 2022-03-23T14:38:11.000Z | aws/logs_monitoring/tests/test_cloudtrail_s3.py | rkitron/datadog-serverless-functions | d69fe6fdb489c262ffa76a529b22f2a81ae6deba | [
"Apache-2.0"
] | 207 | 2018-10-25T11:48:20.000Z | 2022-03-23T00:21:10.000Z | aws/logs_monitoring/tests/test_cloudtrail_s3.py | rkitron/datadog-serverless-functions | d69fe6fdb489c262ffa76a529b22f2a81ae6deba | [
"Apache-2.0"
] | 308 | 2018-10-24T13:36:05.000Z | 2022-03-21T21:17:02.000Z | from unittest.mock import MagicMock, patch
import os
import sys
import unittest
import json
import copy
import io
import gzip
sys.modules["trace_forwarder.connection"] = MagicMock()
sys.modules["datadog_lambda.wrapper"] = MagicMock()
sys.modules["datadog_lambda.metric"] = MagicMock()
sys.modules["datadog"] = MagicMock()
sys.modules["requests"] = MagicMock()
sys.modules["requests_futures.sessions"] = MagicMock()
env_patch = patch.dict(
os.environ,
{
"DD_API_KEY": "11111111111111111111111111111111",
"DD_ADDITIONAL_TARGET_LAMBDAS": "ironmaiden,megadeth",
},
)
env_patch.start()
import lambda_function
import parsing
env_patch.stop()
test_data = {
"Records": [
{
"eventVersion": "1.08",
"userIdentity": {
"type": "AssumedRole",
"principalId": "AROAYYB64AB3HGPQO2EPR:DatadogAWSIntegration",
"arn": "arn:aws:sts::601427279990:assumed-role/Siti_DatadogAWSIntegrationRole/i-08014e4f62ccf762d",
"accountId": "601427279990",
"accessKeyId": "ASIAYYB64AB3DWOY7JNT",
"sessionContext": {
"sessionIssuer": {
"type": "Role",
"principalId": "AROAYYB64AB3HGPQO2EPR",
"arn": "arn:aws:iam::601427279990:role/Siti_DatadogAWSIntegrationRole",
"accountId": "601427279990",
"userName": "Siti_DatadogAWSIntegrationRole",
},
"attributes": {
"creationDate": "2021-05-02T23:49:01Z",
"mfaAuthenticated": "false",
},
},
},
"eventTime": "2021-05-02T23:53:28Z",
"eventSource": "dynamodb.amazonaws.com",
"eventName": "DescribeTable",
"awsRegion": "us-east-1",
"sourceIPAddress": "54.162.201.161",
"userAgent": "Datadog",
"requestParameters": {"tableName": "KinesisClientLibraryLocal"},
"responseElements": None,
"requestID": "A9K7562IBO4MPDQE4O5G9QETRFVV4KQNSO5AEMVJF66Q9ASUAAJG",
"eventID": "a5dd11f9-f616-4ea8-8030-0b3eef554352",
"readOnly": True,
"resources": [
{
"accountId": "601427279990",
"type": "AWS::DynamoDB::Table",
"ARN": "arn:aws:dynamodb:us-east-1:601427279990:table/KinesisClientLibraryLocal",
}
],
"eventType": "AwsApiCall",
"apiVersion": "2012-08-10",
"managementEvent": True,
"recipientAccountId": "601427279990",
"eventCategory": "Management",
}
]
}
if __name__ == "__main__":
unittest.main()
| 32.231788 | 115 | 0.543661 |
12fc144c5d332d1edd841f8f777a22d5c30bf0b9 | 487 | py | Python | ch_06/tests/test_lookup_mapping.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | [
"MIT"
] | 43 | 2021-06-03T18:39:09.000Z | 2022-03-29T20:32:13.000Z | ch_06/tests/test_lookup_mapping.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | [
"MIT"
] | 16 | 2022-02-08T22:41:30.000Z | 2022-03-25T22:48:28.000Z | ch_06/tests/test_lookup_mapping.py | real-slim-chadi/Python-Object-Oriented-Programming---4th-edition | 7c486866171786b620795fa33a79ec9ac9a8ba1b | [
"MIT"
] | 36 | 2021-06-19T07:14:09.000Z | 2022-03-12T22:17:09.000Z | """
Python 3 Object-Oriented Programming
Chapter 6, Abstract Base Classes and Operator Overloading
"""
from lookup_mapping import Lookup
| 20.291667 | 57 | 0.486653 |
12fd58577de1528a698dc2d572273da89af94b00 | 217 | py | Python | serempre_todo/utils/choices.py | pygabo/Serempre | 6b29e337abd8d1b3f71ee889d318a2d473d6c744 | [
"MIT"
] | null | null | null | serempre_todo/utils/choices.py | pygabo/Serempre | 6b29e337abd8d1b3f71ee889d318a2d473d6c744 | [
"MIT"
] | null | null | null | serempre_todo/utils/choices.py | pygabo/Serempre | 6b29e337abd8d1b3f71ee889d318a2d473d6c744 | [
"MIT"
] | null | null | null | TASK_STATUS = [
('TD', 'To Do'),
('IP', 'In Progress'),
('QA', 'Testing'),
('DO', 'Done'),
]
TASK_PRIORITY = [
('ME', 'Medium'),
('HI', 'Highest'),
('HG', 'High'),
('LO', 'Lowest'),
]
| 15.5 | 26 | 0.40553 |
12fda5a81fde9ab3c46b39a497e89d5ab29b6639 | 17,673 | py | Python | symbols/block.py | zerofo/sdu-face-alignment | f4b57fde0576d2327369884fd5d5e9a7765a0790 | [
"MIT"
] | 192 | 2019-03-27T02:40:41.000Z | 2022-03-18T15:35:17.000Z | symbols/block.py | zerofo/sdu-face-alignment | f4b57fde0576d2327369884fd5d5e9a7765a0790 | [
"MIT"
] | 4 | 2019-04-01T14:51:22.000Z | 2020-11-25T08:22:04.000Z | symbols/block.py | zerofo/sdu-face-alignment | f4b57fde0576d2327369884fd5d5e9a7765a0790 | [
"MIT"
] | 38 | 2019-03-30T05:33:48.000Z | 2021-10-01T06:08:17.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
import numpy as np
from config import config
#def lin(data, num_filter, workspace, name, binarize, dcn):
# bit = 1
# ACT_BIT = config.ACT_BIT
# bn_mom = config.bn_mom
# workspace = config.workspace
# if not binarize:
# if not dcn:
# conv1 = Conv(data=data, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv')
# bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# return act1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = act1,
# num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
# conv1 = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=act1, offset=conv1_offset,
# num_filter=num_filter, pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=False)
# #conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
# # no_bias=False, workspace=workspace, name=name + '_conv')
# return conv1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv', act_bit=ACT_BIT, weight_bit=bit)
# conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
# return conv1
| 54.378462 | 148 | 0.62106 |
12fde371c750b67c435196b6031afbfb913cf73d | 9,198 | py | Python | train/metric.py | alexandrosstergiou/Squeeze-and-Recursion-Temporal-Gates | 1641b59b73c951a5b05d17b5528894ae73a014b8 | [
"MIT"
] | 54 | 2020-06-16T08:11:21.000Z | 2022-03-18T14:54:52.000Z | train/metric.py | alexandrosstergiou/Squeeze-and-Recursion-Temporal-Gates | 1641b59b73c951a5b05d17b5528894ae73a014b8 | [
"MIT"
] | 10 | 2020-06-23T07:57:45.000Z | 2021-12-16T04:18:03.000Z | train/metric.py | alexandrosstergiou/Squeeze-and-Recursion-Temporal-Gates | 1641b59b73c951a5b05d17b5528894ae73a014b8 | [
"MIT"
] | 7 | 2020-09-12T12:46:54.000Z | 2021-11-15T09:00:55.000Z | '''
--- I M P O R T S T A T E M E N T S ---
'''
import coloredlogs, logging
coloredlogs.install()
import numpy as np
'''
=== S T A R T O F C L A S S E V A L M E T R I C ===
[About]
Object class for calculating average values.
[Init Args]
- name: String for the variable name to calculate average value for.
[Methods]
- __init__ : Class initialiser
- update : Function to be implemented by the children sub-classes.
- reset : Function for resetting the number of instances and the sum of the metric.
- get : Calculation of the average value based on the number of instances and the provided sum.
- get_name_value : Function for returning the name(s) and the value(s).
- check_label_shapes : Function responsible for type and shape checking.
'''
'''
=== E N D O F C L A S S E V A L M E T R I C ===
'''
'''
=== S T A R T O F C L A S S M E T R I C L I S T ===
[About]
EvalMetric class for creating a list containing Evalmetric objects.
[Init Args]
- name: String for the variable name.
[Methods]
- __init__ : Class initialiser
- update : Function to update the list of EvalMetric objects.
- reset : Function for resetting the list.
- get : Function for getting each of the EvalMetric objects in the list.
- get_name_value : Function for getting the name of the list items.
'''
'''
=== E N D O F C L A S S M E T R I C L I S T ===
'''
'''
=== S T A R T O F C L A S S A C C U R A C Y ===
[About]
EvalMetric class for creating an accuracy estimate.
[Init Args]
- name: String for the variable name. Defaults to `accuracy`.
- topk: Number of top predictions to be used of the score (top-1, top-5 etc.).
Defaults to 1.
[Methods]
- __init__ : Class initialiser
- update : Function to update scores.
'''
'''
=== E N D O F C L A S S A C C U R A C Y ===
'''
'''
=== S T A R T O F C L A S S L O S S ===
[About]
EvalMetric class for creating a loss score. The class acts a a `dummy estimate`
as no further calculations are required for the loss. Instead it is primarily
used to easily/directly print the loss.
[Init Args]
- name: String for the variable name. Defaults to `loss`.
[Methods]
- __init__ : Class initialiser
- update : Function to update scores.
'''
'''
=== E N D O F C L A S S L O S S ===
'''
'''
=== S T A R T O F C L A S S L O S S ===
[About]
EvalMetric class for batch-size used. The class acts a a `dummy estimate`
as no further calculations are required for the size of the batch. Instead it is primarily
used to easily/directly print the batch size.
[Init Args]
- name: String for the variable name. Defaults to `batch-size`.
[Methods]
- __init__ : Class initialiser
- update : Function used for updates.
'''
'''
=== E N D O F C L A S S L O S S ===
'''
'''
=== S T A R T O F C L A S S L E A R N I N G R A T E ===
[About]
EvalMetric class for learning rate used. The class acts a a `dummy estimate`
as no further calculations are required for the size of the lr. Instead it is primarily
used to easily/directly print the learning rate.
[Init Args]
- name: String for the variable name. Defaults to `lr`.
[Methods]
- __init__ : Class initialiser
- update : Function used for updates.
'''
'''
=== E N D O F C L A S S L E A R N I N G R A T E ===
'''
if __name__ == "__main__":
import torch
# Test Accuracy
predicts = [torch.from_numpy(np.array([[0.7, 0.3], [0, 1.], [0.4, 0.6]]))]
labels = [torch.from_numpy(np.array([ 0, 1, 1 ]))]
losses = [torch.from_numpy(np.array([ 0.3, 0.4, 0.5 ]))]
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("input pred: {}".format(predicts))
logging.debug("input label: {}".format(labels))
logging.debug("input loss: {}".format(labels))
acc = Accuracy()
acc.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1)
logging.info(acc.get())
# Test MetricList
metrics = MetricList(Loss(name="ce-loss"),
Accuracy(topk=1, name="acc-top1"),
Accuracy(topk=2, name="acc-top2"),
)
metrics.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1)
logging.info("------------")
logging.info(metrics.get())
acc.get_name_value()
| 30.356436 | 103 | 0.593064 |
12fe867458db015f3b4f5fd16c3634fc1b9c4dae | 3,018 | py | Python | poly/repl.py | jdanford/poly | 4f3a242dbb54fb68375a310af943be759588f459 | [
"0BSD"
] | null | null | null | poly/repl.py | jdanford/poly | 4f3a242dbb54fb68375a310af943be759588f459 | [
"0BSD"
] | null | null | null | poly/repl.py | jdanford/poly | 4f3a242dbb54fb68375a310af943be759588f459 | [
"0BSD"
] | null | null | null | import sys
from string import whitespace
from clint.textui import puts, indent, colored
from poly.common import *
from poly.node import *
def empty_space(s):
if len(s) == 0:
return True
for c in s:
if s in whitespace:
return True
return False
if __name__ == "__main__":
repl_main(sys.argv[1:])
| 23.578125 | 63 | 0.503313 |
12fea94d07f9c12bbbce2e89b9de91f96defafac | 1,330 | py | Python | resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/Pmw/Pmw_1_3/demos/SelectionDialog.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | 3 | 2017-09-26T03:09:14.000Z | 2022-03-20T11:12:34.000Z | resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/Pmw/Pmw_1_3/demos/SelectionDialog.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | null | null | null | resources/mgltools_x86_64Linux2_1.5.6/lib/python2.5/site-packages/Pmw/Pmw_1_3/demos/SelectionDialog.py | J-E-J-S/aaRS-Pipeline | 43f59f28ab06e4b16328c3bc405cdddc6e69ac44 | [
"MIT"
] | 2 | 2019-10-05T23:02:41.000Z | 2020-06-25T20:21:02.000Z | title = 'Pmw.SelectionDialog demonstration'
# Import Pmw from this directory tree.
import sys
sys.path[:0] = ['../../..']
import Tkinter
import Pmw
######################################################################
# Create demo in root window for testing.
if __name__ == '__main__':
root = Tkinter.Tk()
Pmw.initialise(root)
root.title(title)
exitButton = Tkinter.Button(root, text = 'Exit', command = root.destroy)
exitButton.pack(side = 'bottom')
widget = Demo(root)
root.mainloop()
| 27.708333 | 76 | 0.619549 |
12ff9748e2c126e4060dc274380a9e865c327195 | 778 | py | Python | py3plex/algorithms/infomap/examples/python/example-simple.py | awesome-archive/Py3plex | a099acb992441c1630208ba13694acb8e2a38895 | [
"BSD-3-Clause"
] | 1 | 2020-02-20T07:37:02.000Z | 2020-02-20T07:37:02.000Z | py3plex/algorithms/infomap/examples/python/example-simple.py | awesome-archive/Py3plex | a099acb992441c1630208ba13694acb8e2a38895 | [
"BSD-3-Clause"
] | null | null | null | py3plex/algorithms/infomap/examples/python/example-simple.py | awesome-archive/Py3plex | a099acb992441c1630208ba13694acb8e2a38895 | [
"BSD-3-Clause"
] | null | null | null | from infomap import infomap
infomapWrapper = infomap.Infomap("--two-level")
# Add weight as an optional third argument
infomapWrapper.addLink(0, 1)
infomapWrapper.addLink(0, 2)
infomapWrapper.addLink(0, 3)
infomapWrapper.addLink(1, 0)
infomapWrapper.addLink(1, 2)
infomapWrapper.addLink(2, 1)
infomapWrapper.addLink(2, 0)
infomapWrapper.addLink(3, 0)
infomapWrapper.addLink(3, 4)
infomapWrapper.addLink(3, 5)
infomapWrapper.addLink(4, 3)
infomapWrapper.addLink(4, 5)
infomapWrapper.addLink(5, 4)
infomapWrapper.addLink(5, 3)
infomapWrapper.run()
tree = infomapWrapper.tree
print("Found %d modules with codelength: %f" % (tree.numTopModules(), tree.codelength()))
print("\n#node module")
for node in tree.leafIter():
print("%d %d" % (node.physIndex, node.moduleIndex()))
| 25.096774 | 89 | 0.75964 |
12ffa5ef886269b64400e6ff0dbf8d65f1d35e0b | 305 | py | Python | api/tests.py | everett-toews/metaslacker | ec4bf3c4b39aa16b5ae46a0c3e732b8b9cb2cf72 | [
"MIT"
] | 90 | 2015-09-17T00:38:59.000Z | 2021-05-29T02:36:42.000Z | api/tests.py | everett-toews/metaslacker | ec4bf3c4b39aa16b5ae46a0c3e732b8b9cb2cf72 | [
"MIT"
] | null | null | null | api/tests.py | everett-toews/metaslacker | ec4bf3c4b39aa16b5ae46a0c3e732b8b9cb2cf72 | [
"MIT"
] | 10 | 2016-02-23T16:28:32.000Z | 2021-06-01T20:24:31.000Z | import unittest
if __name__ == '__main__':
unittest.main()
| 20.333333 | 38 | 0.642623 |
12ffe639dabbddd0482e5d8aa0dc1908fa825881 | 18,741 | py | Python | tools/modules/verify.py | andscha/containerization-for-sap-s4hana | 337df7b3b515dad9c243eae6b58ee95bf749782a | [
"Apache-2.0"
] | 6 | 2020-12-16T13:12:42.000Z | 2022-02-09T17:38:47.000Z | tools/modules/verify.py | andscha/containerization-for-sap-s4hana | 337df7b3b515dad9c243eae6b58ee95bf749782a | [
"Apache-2.0"
] | 5 | 2021-04-07T07:19:02.000Z | 2022-03-31T08:40:01.000Z | tools/modules/verify.py | andscha/containerization-for-sap-s4hana | 337df7b3b515dad9c243eae6b58ee95bf749782a | [
"Apache-2.0"
] | 7 | 2021-05-21T04:36:44.000Z | 2022-03-31T07:36:48.000Z | # ------------------------------------------------------------------------
# Copyright 2020, 2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
""" Verify settings in configuration YAML file (helper functions) """
# Global modules
# None
# Local modules
from modules.command import (
CmdShell,
CmdSsh
)
from modules.constants import getConstants
from modules.exceptions import RpmFileNotFoundException
from modules.ocp import ocLogin
from modules.tools import (
refSystemIsStandard,
areContainerMemResourcesValid,
getRpmFileForPackage,
strBold,
getHdbCopySshCommand
)
# Functions for formatting the output
def showMsgOk(text):
""" print text with header """
print("[Ok ] " + text)
def showMsgErr(text):
""" print text with header """
print('[' + strBold('Error') + '] ' + text)
def showMsgInd(text):
""" print text with header """
print("[.....] " + text)
# Classes
| 38.561728 | 100 | 0.566138 |
4200fb28b1b5da3ed4576b7e698fb2853d8ef02a | 1,060 | py | Python | rainbow/rainbow.py | jaxzin/adafruit-voice-docker | 8932e2432f56e795c4160dfeef8f61aa5a3da15a | [
"MIT"
] | null | null | null | rainbow/rainbow.py | jaxzin/adafruit-voice-docker | 8932e2432f56e795c4160dfeef8f61aa5a3da15a | [
"MIT"
] | null | null | null | rainbow/rainbow.py | jaxzin/adafruit-voice-docker | 8932e2432f56e795c4160dfeef8f61aa5a3da15a | [
"MIT"
] | null | null | null | import time
import board
import adafruit_dotstar
import atexit
import signal
kill_now = False
DOTSTAR_DATA = board.D5
DOTSTAR_CLOCK = board.D6
dots = adafruit_dotstar.DotStar(DOTSTAR_CLOCK, DOTSTAR_DATA, 3, brightness=0.5)
atexit.register(exit_handler)
signal.signal(signal.SIGINT, exit_handler)
signal.signal(signal.SIGTERM, exit_handler)
while not kill_now:
for j in range(255):
for i in range(3):
rc_index = (i * 256 // 3) + j * 5
dots[i] = wheel(rc_index & 255)
dots.show()
time.sleep(0.01)
| 24.090909 | 79 | 0.6 |
4201d4e01f67d6a8af781c7b4dac4cc684c59e89 | 117 | py | Python | src/iranlowo/corpus/__init__.py | Niger-Volta-LTI/iranlowo | 0046b61105ffadfff21dd8b37754b9d95177fbf8 | [
"MIT"
] | 17 | 2019-07-05T20:30:35.000Z | 2022-02-28T10:00:24.000Z | src/iranlowo/corpus/__init__.py | Olamyy/iranlowo | 1feb123988a8afac3ac53c7acfb72df862c4bc18 | [
"MIT"
] | 17 | 2019-07-06T09:10:10.000Z | 2020-11-13T08:30:37.000Z | src/iranlowo/corpus/__init__.py | ruohoruotsi/iranlowo | 0046b61105ffadfff21dd8b37754b9d95177fbf8 | [
"MIT"
] | 7 | 2019-07-01T01:59:07.000Z | 2020-11-27T17:12:46.000Z | from .corpus import Corpus, DirectoryCorpus
from .loaders import OweLoader, YorubaBlogCorpus, BBCCorpus, BibeliCorpus | 58.5 | 73 | 0.854701 |
4203e2556562a439641ccfc38f8f880faffaf2ad | 6,054 | py | Python | seq2seq.py | frozen86/SeqLite | 7f83e6a4716d756a45b2801085ac6628379fbea2 | [
"Apache-2.0"
] | 1 | 2018-05-10T01:40:55.000Z | 2018-05-10T01:40:55.000Z | seq2seq.py | frozen86/SeqLite | 7f83e6a4716d756a45b2801085ac6628379fbea2 | [
"Apache-2.0"
] | null | null | null | seq2seq.py | frozen86/SeqLite | 7f83e6a4716d756a45b2801085ac6628379fbea2 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from masked_cross_entropy import *
from preprocess import *
from parameter import *
import time
# # Training
# # Evaluating the network
# def evaluate(input_seq, max_length=MAX_LENGTH):
| 34.20339 | 163 | 0.675421 |
4204040a123c3ac5e25851793e3ded084eda1953 | 41 | py | Python | ANNarchy_future/__init__.py | vitay/ANNarchy_future | 2c2a43c67f4201cf72175793aaa51189d208436b | [
"MIT"
] | 2 | 2021-03-11T18:11:30.000Z | 2021-05-12T09:15:17.000Z | ANNarchy_future/__init__.py | vitay/ANNarchy_future | 2c2a43c67f4201cf72175793aaa51189d208436b | [
"MIT"
] | null | null | null | ANNarchy_future/__init__.py | vitay/ANNarchy_future | 2c2a43c67f4201cf72175793aaa51189d208436b | [
"MIT"
] | null | null | null | from .api import *
__version__ = "5.0.0" | 13.666667 | 21 | 0.658537 |
4204eade92e97699c25ced0425caa0cabd5da0e0 | 1,881 | py | Python | pycqed/tests/analysis_v2/test_simple_analysis.py | nuttamas/PycQED_py3 | 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | [
"MIT"
] | 60 | 2016-08-03T10:00:18.000Z | 2021-11-10T11:46:16.000Z | pycqed/tests/analysis_v2/test_simple_analysis.py | nuttamas/PycQED_py3 | 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | [
"MIT"
] | 512 | 2016-08-03T17:10:02.000Z | 2022-03-31T14:03:43.000Z | pycqed/tests/analysis_v2/test_simple_analysis.py | nuttamas/PycQED_py3 | 1ee35c7428d36ed42ba4afb5d4bda98140b2283e | [
"MIT"
] | 34 | 2016-10-19T12:00:52.000Z | 2022-03-19T04:43:26.000Z | import unittest
import pycqed as pq
import os
import matplotlib.pyplot as plt
from pycqed.analysis_v2 import measurement_analysis as ma
| 36.882353 | 73 | 0.637959 |
42064154fe3a3a9a5966ee89da5b64cd37de9197 | 781 | py | Python | CS2/1275_turtle_recursion/2499_koch_snowflake/alternate_snowflake.py | nealholt/python_programming_curricula | eda4432dab97178b4a5712b160f5b1da74c068cb | [
"MIT"
] | 7 | 2020-10-14T03:23:12.000Z | 2022-03-09T23:16:13.000Z | CS2/1275_turtle_recursion/2499_koch_snowflake/alternate_snowflake.py | nealholt/python_programming_curricula | eda4432dab97178b4a5712b160f5b1da74c068cb | [
"MIT"
] | null | null | null | CS2/1275_turtle_recursion/2499_koch_snowflake/alternate_snowflake.py | nealholt/python_programming_curricula | eda4432dab97178b4a5712b160f5b1da74c068cb | [
"MIT"
] | 11 | 2021-02-21T20:50:56.000Z | 2022-01-29T07:01:28.000Z | import turtle
'''http://www.algorithm.co.il/blogs/computer-science/fractals-in-10-minutes-no-6-turtle-snowflake/
This would be a good introduction to recursion. I don't see how students
would invent this on their own, but they could modify it and see what
other fractals they could generate.
'''
pen = turtle.Turtle()
pen.penup()
pen.goto(-200,0)
pen.pendown()
pen.speed(0)
#Draw the fractal
fractal(pen, 500, 4)
turtle.done()
| 25.193548 | 98 | 0.663252 |
4206719b66d7095a812ba8babe145ead4c49882e | 1,325 | py | Python | test/test_edge.py | jbschwartz/spatial | 04dc619ae024ebb4f516cd6483f835421c7d84b1 | [
"MIT"
] | 1 | 2022-01-02T22:03:09.000Z | 2022-01-02T22:03:09.000Z | test/test_edge.py | jbschwartz/spatial | 04dc619ae024ebb4f516cd6483f835421c7d84b1 | [
"MIT"
] | null | null | null | test/test_edge.py | jbschwartz/spatial | 04dc619ae024ebb4f516cd6483f835421c7d84b1 | [
"MIT"
] | null | null | null | import unittest
from spatial import Edge, Vector3
| 36.805556 | 81 | 0.695849 |
4206df5fe7ed10541de178c4f224f75754304f2c | 324 | py | Python | wdae/wdae/user_queries/urls.py | iossifovlab/gpf | e556243d29666179dbcb72859845b4d6c011af2b | [
"MIT"
] | null | null | null | wdae/wdae/user_queries/urls.py | iossifovlab/gpf | e556243d29666179dbcb72859845b4d6c011af2b | [
"MIT"
] | 82 | 2019-07-22T11:44:23.000Z | 2022-01-13T15:27:33.000Z | wdae/wdae/user_queries/urls.py | iossifovlab/gpf | e556243d29666179dbcb72859845b4d6c011af2b | [
"MIT"
] | null | null | null | from django.urls import re_path
from user_queries.views import UserQuerySaveView, UserQueryCollectView
# URL routes for persisting and retrieving a user's saved queries.
urlpatterns = [
    # Save a single query for the current user (handled by UserQuerySaveView).
    re_path(r"^/save/?$", UserQuerySaveView.as_view(), name="user-save-query"),
    # Return all queries previously saved by the current user.
    re_path(
        r"^/collect/?$",
        UserQueryCollectView.as_view(),
        name="user-collect-queries",
    ),
]
4207202cb690f62fcf73ad7c61a82a12bebf477d | 419 | py | Python | src/login/migrations/0017_auto_20191006_1716.py | vandana0608/Pharmacy-Managament | f99bdec11c24027a432858daa19247a21cecc092 | [
"bzip2-1.0.6"
] | null | null | null | src/login/migrations/0017_auto_20191006_1716.py | vandana0608/Pharmacy-Managament | f99bdec11c24027a432858daa19247a21cecc092 | [
"bzip2-1.0.6"
] | null | null | null | src/login/migrations/0017_auto_20191006_1716.py | vandana0608/Pharmacy-Managament | f99bdec11c24027a432858daa19247a21cecc092 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.0.7 on 2019-10-06 11:46
import datetime
from django.db import migrations, models
| 20.95 | 70 | 0.620525 |
4208848cd73eaf4015f90f42e112e861d94326ec | 1,846 | py | Python | InfoGain.py | gsndr/AIDA | 538caf3ddb5aec8ec8904dc313eb7e31759f5154 | [
"MIT"
] | 4 | 2021-05-10T11:35:51.000Z | 2021-12-29T00:56:35.000Z | InfoGain.py | gsndr/AIDA | 538caf3ddb5aec8ec8904dc313eb7e31759f5154 | [
"MIT"
] | null | null | null | InfoGain.py | gsndr/AIDA | 538caf3ddb5aec8ec8904dc313eb7e31759f5154 | [
"MIT"
] | 1 | 2021-12-25T13:55:29.000Z | 2021-12-25T13:55:29.000Z | import pandas as pd
from math import log
| 29.301587 | 98 | 0.531419 |
4208c41522c79409c03ff3e274e65ad419a2c482 | 4,473 | py | Python | bot/localization.py | Supportiii/telegram-report-bot | 6a050caafb1c205c0fd58f91be9264f1190ea706 | [
"MIT"
] | null | null | null | bot/localization.py | Supportiii/telegram-report-bot | 6a050caafb1c205c0fd58f91be9264f1190ea706 | [
"MIT"
] | null | null | null | bot/localization.py | Supportiii/telegram-report-bot | 6a050caafb1c205c0fd58f91be9264f1190ea706 | [
"MIT"
] | null | null | null | strings = {
"en": {
"error_no_reply": "This command must be sent as a reply to one's message!",
"error_report_admin": "Whoa! Don't report admins ",
"error_restrict_admin": "You cannot restrict an admin.",
"report_date_format": "%d.%m.%Y at %H:%M",
"report_message": ' Sent {time} (server time)\n'
'<a href="{msg_url}">Go to message</a>',
"report_note": "\n\nNote: {note}",
"report_sent": "<i>Report sent</i>",
"action_del_msg": "Delete message",
"action_del_and_ban": "Delete and ban",
"action_deleted": "\n\n <b>Deleted</b>",
"action_deleted_banned": "\n\n <b>Deleted, user banned</b>",
"action_deleted_partially": "Some messages couldn't be found or deleted",
"readonly_forever": " <i>User set to read-only mode forever</i>",
"readonly_temporary": " <i>User set to read-only mode until {time} (server time)</i>",
"nomedia_forever": " <i>User set to text-only mode forever</i>",
"nomedia_temporary": " <i>User set to text-only mode until {time} (server time)</i>",
"need_admins_attention": 'Dear admins, your presence in chat is needed!\n\n'
'<a href="{msg_url}">Go to chat</a>',
},
"ru": {
"error_no_reply": " - !",
"error_report_admin": " ? -- ",
"error_restrict_admin": " .",
"report_date_format": "%d.%m.%Y %H:%M",
"report_message": ' {time} ( )\n'
'<a href="{msg_url}"> </a>',
"report_note": "\n\n: {note}",
"report_sent": "<i> </i>",
"action_del_msg": " ",
"action_del_and_ban": " ",
"action_deleted": "\n\n <b></b>",
"action_deleted_banned": "\n\n <b>, </b>",
"action_deleted_partially": " ",
"readonly_forever": " <i> </i>",
"readonly_temporary": " <i> {time} ( )</i>",
"nomedia_forever": " <i> </i>",
"nomedia_temporary": " <i> {time} ( )</i>",
"need_admins_attention": ' , !\n\n'
'<a href="{msg_url}"> </a>',
},
"de": {
"error_no_reply": "Dieser Befehl kann nur als Antwort gesendet werden!",
"error_report_admin": "Whoa! Du kannst Admins nicht melden ",
"error_restrict_admin": "Du kannst keine Admins einschrnken.",
"report_date_format": "%d.%m.%Y um %H:%M Uhr",
"report_message": ' Gesendet {time} (server time)\n'
'<a href="{msg_url}">Zur Nachricht</a>',
"report_note": "\n\nNotiz: {note}",
"report_sent": "<i>Gemeldet</i>",
"action_del_msg": "Nachricht lschen",
"action_del_and_ban": "Lschen und Sperren",
"action_deleted": "\n\n <b>Lschen</b>",
"action_deleted_banned": "\n\n <b>Gelscht, Nutzer gesperrt!</b>",
"action_deleted_partially": "Einige Nachrichten wurden nicht gefunden zum lschen",
"readonly_forever": " <i>Nutzer ist fr immer stumm</i>",
"readonly_temporary": " <i>Nutzer bis {time} stumm. (server time)</i>",
"nomedia_forever": " <i>Nutzer fr immer im Nur-Text-Modus.</i>",
"nomedia_temporary": " <i>Nutzer bis {time} im nur Text-Modus. (server time)</i>",
"need_admins_attention": 'Liebe Admins, ich sehne euch herbei!\n\n'
'<a href="{msg_url}">Zum Chat</a>',
}
@@ -64,7 +89,7 @@ def get_string(lang: str, key: str):
lang = strings.get(lang)
if not lang:
if not strings.get("en"):
raise KeyError(f'Neither "{lang}" nor "en" locales found')
raise KeyError(f'Weder "{lang}" noch "en" gefunden.')
else:
lang = strings.get("en")
try:
return lang[key]
except KeyError:
return strings.get("en").get(key, "ERR_NO_STRING")
| 47.585106 | 116 | 0.591549 |
4209d56bec0f4b46b06778591fc9cb1f2f7511a5 | 3,140 | py | Python | swagger_server/models/linecode_r_matrix.py | garagonc/simulation-engine | c129f0bf601e0d56d924c9e5fa2cf94f7e31a356 | [
"Apache-2.0"
] | 3 | 2019-06-24T09:02:21.000Z | 2020-01-30T10:37:46.000Z | swagger_server/models/linecode_r_matrix.py | linksmart/simulation-engine | c129f0bf601e0d56d924c9e5fa2cf94f7e31a356 | [
"Apache-2.0"
] | null | null | null | swagger_server/models/linecode_r_matrix.py | linksmart/simulation-engine | c129f0bf601e0d56d924c9e5fa2cf94f7e31a356 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.impedance import Impedance # noqa: F401,E501
from swagger_server import util
| 26.610169 | 112 | 0.62293 |
420b2687d1f426ed1eefef8109dac3c6ae18bab7 | 261 | py | Python | workshop/serializers.py | shivammaniharsahu/django_api | 6ffb3d9f70f30f5fd3ae06ec00a6dd7c7783a797 | [
"bzip2-1.0.6"
] | null | null | null | workshop/serializers.py | shivammaniharsahu/django_api | 6ffb3d9f70f30f5fd3ae06ec00a6dd7c7783a797 | [
"bzip2-1.0.6"
] | null | null | null | workshop/serializers.py | shivammaniharsahu/django_api | 6ffb3d9f70f30f5fd3ae06ec00a6dd7c7783a797 | [
"bzip2-1.0.6"
] | null | null | null | from rest_framework import serializers
from .models import Register
| 29 | 83 | 0.724138 |
420ceb4ff961d4330b357c01567c2e654e43d336 | 5,303 | py | Python | experiments/twitter_event_data_2019/evaluation/groundtruth_processor.py | HHansi/WhatsUp | 87c3eb90570d2f997d8f1abc300a3553f8ef7ca9 | [
"Apache-2.0"
] | null | null | null | experiments/twitter_event_data_2019/evaluation/groundtruth_processor.py | HHansi/WhatsUp | 87c3eb90570d2f997d8f1abc300a3553f8ef7ca9 | [
"Apache-2.0"
] | null | null | null | experiments/twitter_event_data_2019/evaluation/groundtruth_processor.py | HHansi/WhatsUp | 87c3eb90570d2f997d8f1abc300a3553f8ef7ca9 | [
"Apache-2.0"
] | null | null | null | # Created by Hansi at 3/16/2020
import os
from algo.data_process.data_preprocessor import data_cleaning_flow
from algo.utils.file_utils import delete_create_folder
def extract_gt_tokens(text):
    """Parse one GT line (Twitter-Event-Data-2019 format) into label lists.

    '|' separates duplicate definitions of the same event; each '[...]'
    holds one comma-separated label.

    parameters
    -----------
    :param text: str
    :return: list
        One entry per duplicate definition, each a list of token lists.
    """
    parsed = []
    for definition in text.split("|"):
        # Each '[' opens a label; strip the closing ']' and any newline,
        # then break the label into its comma-separated tokens.
        labels = [
            piece.replace("\n", "").replace("]", "").split(",")
            for piece in definition.split("[")
            if piece
        ]
        parsed.append(labels)
    return parsed
def load_gt(folder_path):
    """Read every ground-truth file under *folder_path* into a dictionary.

    Each file name (without extension) becomes the time-window key; every
    line of the file is parsed with extract_gt_tokens.

    parameters
    -----------
    :param folder_path: str
        Path to folder which contains GT data
    :return: object
        Dictionary formatted as {time-window: labels}
    """
    ground_truth = dict()
    for root, dirs, files in os.walk(folder_path):
        for file_name in files:
            window_key = os.path.splitext(file_name)[0]
            # NOTE(review): paths are joined with folder_path rather than
            # root, so this assumes a flat GT folder with no sub-directories
            # (same behaviour as before) — confirm against the data layout.
            with open(os.path.join(folder_path, file_name), 'r', encoding='utf-8') as handle:
                ground_truth[window_key] = [extract_gt_tokens(line) for line in handle]
    return ground_truth
def generate_gt_string(tokens):
    """Serialize the GT labels of a single event back into the
    Twitter-Event-Data-2019 ground-truth string format.

    Example: [[['a', 'b'], ['c']], [['d']]] -> "[a,b][c]|[d]"
    ('|' separates duplicate definitions; each label is wrapped in '[...]').

    Fixes over the previous version: the accumulator no longer shadows the
    built-in ``str``, and label tokens are joined with ``','.join`` so an
    empty leading token round-trips correctly (previously ['', 'a']
    serialized as "[a]" instead of "[,a]").

    parameters
    -----------
    :param tokens: list
        List of duplicate definitions, each a list of labels (lists of str).
    :return: str
    """
    result = ""
    for duplicate in tokens:
        # Separate consecutive duplicate definitions with '|'.
        if result and result[-1] == "]":
            result += "|"
        for label in duplicate:
            result += "[" + ",".join(label) + "]"
    return result
def get_combined_gt(gt):
    """Merge the GT labels of all events in each time frame into one event.

    The duplicate definitions of successive events are cross-combined, so
    each time frame ends up with a single event whose duplicates are all
    possible combinations of the original events' duplicates.

    parameters
    -----------
    :param gt: object
        Dictionary of GT returned by load_gt
    :return: object
        Dictionary of combined GT
    """
    combined_gt = dict()
    for time_frame, gt_events in gt.items():
        # Fold the remaining events into the first one, taking the cross
        # product of duplicate definitions at each step.
        merged = gt_events[0]
        for event in gt_events[1:]:
            merged = [
                combined_event + duplicate
                for duplicate in event
                for combined_event in merged
            ]
        # Wrapped in a list to stay consistent with the general evaluation
        # methods, which expect a list of events per time frame.
        combined_gt[time_frame] = [merged]
    return combined_gt
def preprocess_gt(input_filepath, output_filepath):
    """Preprocess ground truth data in input_file and append to output_file.

    Every token of every label is cleaned with ``data_cleaning_flow`` and the
    cleaned events are re-serialized with generate_gt_string, one event per
    output line.

    Fix: both files are now opened via ``with`` so their handles are always
    closed — the previous version leaked the input file handle.

    parameters
    -----------
    :param input_filepath: str (.txt file path)
        Ground truth file formatted as Twitter-Event-Data-2019
    :param output_filepath: str (.txt file path)
        Opened in append mode; callers (e.g. preprocess_gt_bulk) are expected
        to start from a fresh output folder.
    :return:
    """
    with open(input_filepath, 'r') as input_file:
        events = [extract_gt_tokens(line) for line in input_file]

    # Clean every token while preserving the event/duplicate/label nesting.
    new_events = [
        [
            [[data_cleaning_flow(element) for element in label] for label in duplicate]
            for duplicate in event
        ]
        for event in events
    ]

    with open(output_filepath, 'a', encoding='utf-8') as output_file:
        for event in new_events:
            output_file.write(generate_gt_string(event))
            output_file.write("\n")
def preprocess_gt_bulk(input_folder_path, output_folder_path):
    """Preprocess every GT file in *input_folder_path* into *output_folder_path*.

    The output folder is deleted and recreated first so each run starts from
    a clean slate (important because preprocess_gt appends to its output).

    parameters
    -----------
    :param input_folder_path: str
        Path to folder which contains GT data files
    :param output_folder_path: str
        Path to folder to save preprocessed GT data
    :return:
    """
    delete_create_folder(output_folder_path)
    for root, dirs, files in os.walk(input_folder_path):
        for file_name in files:
            preprocess_gt(
                os.path.join(input_folder_path, file_name),
                os.path.join(output_folder_path, file_name),
            )
420d148bc469105cd3d8585bbbb8f38f1d6ec875 | 2,058 | py | Python | metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | [
"Apache-2.0"
] | 5,821 | 2019-12-03T17:57:52.000Z | 2022-03-31T22:55:12.000Z | metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | [
"Apache-2.0"
] | 605 | 2019-12-03T23:09:32.000Z | 2022-03-31T16:15:05.000Z | metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py | RobBlumberg/metaflow | 9f737e6026eee250c1593a2cb1d1c4b19a00adf4 | [
"Apache-2.0"
] | 539 | 2019-12-03T18:25:53.000Z | 2022-03-29T18:22:33.000Z | import functools
def test_func(*_args, **_kwargs):
    """Return a constant marker string; any arguments are accepted and ignored."""
    return "In test func"
test_value = 1
| 20.376238 | 65 | 0.623907 |
420d3d5356dc0a6fa2f8ece54ea58e9f77d14058 | 38,124 | py | Python | venv/Lib/site-packages/aniso8601/tests/test_interval.py | GabrielSilva2y3d/api_atividade-sqlalchemy | 4a06e37fcb733d4185daf1de6bce415b4de28444 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/aniso8601/tests/test_interval.py | GabrielSilva2y3d/api_atividade-sqlalchemy | 4a06e37fcb733d4185daf1de6bce415b4de28444 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/aniso8601/tests/test_interval.py | GabrielSilva2y3d/api_atividade-sqlalchemy | 4a06e37fcb733d4185daf1de6bce415b4de28444 | [
"MIT"
] | 1 | 2022-01-13T10:05:55.000Z | 2022-01-13T10:05:55.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
from aniso8601.exceptions import ISOFormatError
from aniso8601.interval import (_parse_interval, parse_interval,
parse_repeating_interval)
from aniso8601.tests.compat import mock
| 50.629482 | 115 | 0.387027 |
420d64c40f09249f80d51908d10b8e6dab472942 | 3,008 | py | Python | cool/core/utils.py | 007gzs/django-cool | 3b4ed1a8ca020e6f798ca47e20169e5a854b4f24 | [
"BSD-3-Clause"
] | 11 | 2020-05-19T09:52:35.000Z | 2022-02-25T10:39:56.000Z | cool/core/utils.py | 007gzs/django-cool | 3b4ed1a8ca020e6f798ca47e20169e5a854b4f24 | [
"BSD-3-Clause"
] | null | null | null | cool/core/utils.py | 007gzs/django-cool | 3b4ed1a8ca020e6f798ca47e20169e5a854b4f24 | [
"BSD-3-Clause"
] | 1 | 2020-12-24T08:14:58.000Z | 2020-12-24T08:14:58.000Z | # encoding: utf-8
import operator
from functools import reduce
from django.core.exceptions import FieldDoesNotExist
from django.db.models import Q
from django.db.models.constants import LOOKUP_SEP
def split_camel_name(name, fall=False):
    """Split a camel-case identifier into its word parts.

    GenerateURLs => [Generate, URLs]
    generateURLsLite => [generate, URLs, Lite]

    With ``fall=True`` a split point is also placed just before an
    upper-to-lower ("falling") transition.
    """
    if not name:
        return []

    # Collect split positions at case transitions; the first character never
    # produces an edge because the initial state matches it.
    boundaries = []
    prev_upper = name[0].isupper()
    for pos, char in enumerate(name):
        is_upper = char.isupper()
        if is_upper and not prev_upper:
            # rising edge: lower -> upper
            boundaries.append(pos)
        elif fall and prev_upper and not is_upper:
            # falling edge: upper -> lower (split one character earlier)
            boundaries.append(pos - 1)
        prev_upper = is_upper

    # Slice the name at consecutive boundary pairs, dropping empty pieces.
    cuts = [0] + boundaries + [len(name)]
    return [name[a:b] for a, b in zip(cuts, cuts[1:]) if name[a:b]]
def construct_search(queryset, field_name):
    """Translate an admin-style search field spec into an ORM lookup string.

    '^name' -> 'name__istartswith' and '=name' -> 'name__iexact'. Otherwise
    the field path is walked through the model's relations: if it already
    ends in a valid lookup it is returned untouched, else '__icontains' is
    appended.
    """
    if field_name.startswith('^'):
        return f"{field_name[1:]}__istartswith"
    if field_name.startswith('='):
        return f"{field_name[1:]}__iexact"

    opts = queryset.model._meta
    prev_field = None
    # Follow each path segment through the model graph.
    for part in field_name.split(LOOKUP_SEP):
        if part == 'pk':
            part = opts.pk.name
        try:
            prev_field = opts.get_field(part)
        except FieldDoesNotExist:
            # Not a field: accept the name as-is when the previous field
            # recognises this segment as a valid query lookup.
            if prev_field and prev_field.get_lookup(part):
                return field_name
        else:
            if hasattr(prev_field, 'get_path_info'):
                # Relation: continue resolving on the related model's meta.
                opts = prev_field.get_path_info()[-1].to_opts
    # Plain field path: default to a case-insensitive containment search.
    return f"{field_name}__icontains"
def get_search_results(queryset, search_term, search_fields, model):
    """Filter *queryset* by *search_term* across *search_fields*.

    Return a tuple (queryset, use_distinct); use_distinct indicates the
    applied lookups may produce duplicate rows.
    """
    # Django renamed this helper; support both old and new names.
    try:
        from django.contrib.admin.utils import (
            lookup_needs_distinct as lookup_spawns_duplicates,
        )
    except ImportError:
        from django.contrib.admin.utils import lookup_spawns_duplicates

    use_distinct = False
    if search_term and search_fields:
        orm_lookups = [construct_search(queryset, str(spec)) for spec in search_fields]
        # Every whitespace-separated word must match at least one field.
        for word in search_term.split():
            queryset = queryset.filter(
                reduce(operator.or_, (Q(**{lookup: word}) for lookup in orm_lookups))
            )
        use_distinct = any(
            lookup_spawns_duplicates(model._meta, lookup) for lookup in orm_lookups
        )
    return queryset, use_distinct
| 31.663158 | 110 | 0.648936 |
420dab6ca09e09f7cbafe716ac539156b5dcaa62 | 773 | py | Python | setup.py | atait/klayout-gadgets | a8d9655e547fc4531982bbe55e632009bad39096 | [
"MIT"
] | 13 | 2018-12-02T23:32:29.000Z | 2022-02-11T19:28:49.000Z | setup.py | atait/klayout-gadgets | a8d9655e547fc4531982bbe55e632009bad39096 | [
"MIT"
] | 3 | 2019-01-15T23:59:59.000Z | 2020-12-04T16:30:48.000Z | setup.py | atait/klayout-gadgets | a8d9655e547fc4531982bbe55e632009bad39096 | [
"MIT"
] | 1 | 2020-12-01T22:56:03.000Z | 2020-12-01T22:56:03.000Z | from setuptools import setup
# Packaging metadata for the lygadgets distribution.
setup(name='lygadgets',
      version='0.1.31',
      description='Tools to make klayout, the standalone, and python environments work better together',
      # readme() is presumably defined earlier in this file and returns the
      # README text — TODO confirm.
      long_description=readme(),
      long_description_content_type='text/markdown',
      author='Alex Tait',
      author_email='alexander.tait@nist.gov',
      license='MIT',
      packages=['lygadgets'],
      install_requires=['future', 'xmltodict'],
      # Ship any *.lym (KLayout macro) files found in the packages.
      package_data={'': ['*.lym']},
      include_package_data=True,
      # Console commands exposed after installation.
      entry_points={'console_scripts': [
          'lygadgets_link=lygadgets.command_line:cm_link_any',
          'lygadgets_unlink=lygadgets.command_line:cm_unlink_any',
      ]},
      )
| 29.730769 | 104 | 0.654592 |
420db9bdde8897b05f3ac2a8bb469ed44754dbb4 | 1,748 | py | Python | Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/16_CannonballTarget/inputDialog.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/16_CannonballTarget/inputDialog.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/16_CannonballTarget/inputDialog.py | jeffvswanson/CodingPractice | 9ea8e0dd504230cea0e8684b31ef22c3ed90d2fb | [
"MIT"
] | null | null | null | # inputDialog.py
""" Provides a window to get input values
from the user to animate a cannonball."""
from graphics import GraphWin, Entry, Text, Point
from button import Button | 30.137931 | 71 | 0.568078 |
420e4e16ca0ab83a3724fd3b5d5775cec3e14b0e | 979 | py | Python | gym_envs/envs/reacher_done.py | gautams3/reacher-done | 6420f4ea3e0f6e47a3ebe25dbe170a9030b03b01 | [
"MIT"
] | 1 | 2021-11-13T13:51:37.000Z | 2021-11-13T13:51:37.000Z | gym_envs/envs/reacher_done.py | gautams3/reacher-done | 6420f4ea3e0f6e47a3ebe25dbe170a9030b03b01 | [
"MIT"
] | null | null | null | gym_envs/envs/reacher_done.py | gautams3/reacher-done | 6420f4ea3e0f6e47a3ebe25dbe170a9030b03b01 | [
"MIT"
] | 2 | 2021-04-08T12:48:29.000Z | 2021-05-09T02:04:33.000Z | import gym
from gym import error, spaces, utils
from gym.utils import seeding
from gym.envs.mujoco.reacher import ReacherEnv
import numpy as np
# def reset(self):
# super().reset()
# def render(self, mode='human'):
# ...
# def close(self):
# ... | 31.580645 | 76 | 0.668029 |
420ed2750c333b6a9c2bf33a7391b56504549e6c | 4,639 | py | Python | stackalytics/get_metric.py | yaoice/python_demo | 024f42f9cfce757bdaddf24202d8547801f0e8f6 | [
"Apache-2.0"
] | null | null | null | stackalytics/get_metric.py | yaoice/python_demo | 024f42f9cfce757bdaddf24202d8547801f0e8f6 | [
"Apache-2.0"
] | 2 | 2021-02-08T20:17:39.000Z | 2021-06-01T21:49:12.000Z | stackalytics/get_metric.py | yaoice/python_demo | 024f42f9cfce757bdaddf24202d8547801f0e8f6 | [
"Apache-2.0"
] | null | null | null | #/usr/bin/env python
import httplib2
import json
import sys
from prettytable import PrettyTable
from config import field
def main():
    """Collect Stackalytics metrics per company and per engineer for the
    configured release, then print one PrettyTable per project type.

    Python 2 code (uses print statements). Relies on the module-level
    ``field`` dict from config and on the ``Stackalytics`` class, which is
    defined elsewhere in this file — presumably a thin HTTP client whose
    ``get_metrics(url)`` returns an (response, body) pair; confirm.
    """
    company_statistics = {}
    engineer_statistics = {}
    stackalytics = Stackalytics("http://stackalytics.com")
    # Per-company stats: {project_type: {company: {metric: stats_dict}}}.
    for project_type in field['project_type']:
        company_statistics[project_type] = {}
        for company in field['company']:
            company_statistics[project_type][company] = {}
            for metric in field['metric']:
                company_statistics[project_type][company][metric] = {}
                url = "/api/1.0/stats/companies?release={}&metric={}&project_type={}&company={}".format(field['release'],
                                                                                                        metric,
                                                                                                        project_type,
                                                                                                        company)
                resp, content = stackalytics.get_metrics(url)
                stats = json.loads(content)['stats']
                try:
                    metric_dict = stats[0]
                except IndexError:
                    # No data for this combination: record a zero metric.
                    metric_dict = {'id': company, 'metric': 0}
                company_statistics[project_type][company][metric] = metric_dict
    # Per-engineer stats for the configured company's engineer ids.
    for project_type in field['project_type']:
        engineer_statistics[project_type] = {}
        for engineer in field['engineers']['ids']:
            engineer_statistics[project_type][engineer] = {}
            for metric in field['metric']:
                engineer_statistics[project_type][engineer][metric] = {}
                engineers_url = "/api/1.0/stats/engineers?&release={}&metric={}"\
                                "&project_type={}&company={}&user_id={}".format(field['release'],
                                                                                metric,
                                                                                project_type,
                                                                                field['engineers']['owercompany'],
                                                                                engineer)
                engineers_resp, engineers_content = stackalytics.get_metrics(engineers_url)
                engineers_stats = json.loads(engineers_content)['stats']
                try:
                    engineers_metric_dict = engineers_stats[0]
                except IndexError:
                    # No data for this engineer/metric: record a zero metric.
                    engineers_metric_dict = {'id': engineer, 'metric': 0}
                engineer_statistics[project_type][engineer][metric] = engineers_metric_dict
    # One row per metric, one column per engineer.
    engineer_table_field = ['metric'] + [engineer for engineer in field['engineers']['ids']]
    for project_type in field['project_type']:
        print "{} {} project by tencent individual:".format(field['release'], project_type)
        table = PrettyTable(engineer_table_field)
        for metric in field['metric']:
            table.add_row([metric] + [engineer_statistics[project_type][engineer][metric]['metric'] for engineer in field['engineers']['ids']])
        print table
    # Company names are URL-encoded in config; undo '%20' for display.
    table_field = ['metric'] + [company.replace('%20', ' ') for company in field['company']]
    for project_type in field['project_type']:
        print "{} {} project by company:".format(field['release'], project_type)
        table = PrettyTable(table_field)
        for metric in field['metric']:
            table.add_row([metric] + [company_statistics[project_type][company][metric]['metric'] for company in field['company']])
        print table
    # print company_statistics
if __name__ == '__main__':
sys.exit(main())
| 43.764151 | 143 | 0.527053 |