hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0b5048a8c70006e924308165169ee5c4fabe48fa | 934 | py | Python | asar_pi_applications/asar_vision/robot_distance_incorrect.py | ssnover/msd-p18542 | 32bef466f9d5ba55429da2119a14081b3e411d0b | [
"MIT"
] | 3 | 2021-01-07T07:46:50.000Z | 2021-11-17T10:48:39.000Z | asar_pi_applications/asar_vision/robot_distance_incorrect.py | ssnover/msd-p18542 | 32bef466f9d5ba55429da2119a14081b3e411d0b | [
"MIT"
] | 3 | 2018-02-19T20:30:30.000Z | 2018-04-20T23:25:29.000Z | asar_pi_applications/asar_vision/robot_distance_incorrect.py | ssnover95/msd-p18542 | 32bef466f9d5ba55429da2119a14081b3e411d0b | [
"MIT"
] | 1 | 2021-01-07T07:46:52.000Z | 2021-01-07T07:46:52.000Z | import numpy as np
from math import sqrt
| 44.47619 | 115 | 0.723769 |
0b519f8596f5bf7ee53103adc8d550ce1fb62540 | 68,172 | py | Python | tests/test_generate_unique_id_function.py | ssensalo/fastapi | 146f57b8f70c5757dc20edc716dba1b96936a8d6 | [
"MIT"
] | 1 | 2022-01-08T16:39:28.000Z | 2022-01-08T16:39:28.000Z | tests/test_generate_unique_id_function.py | ssensalo/fastapi | 146f57b8f70c5757dc20edc716dba1b96936a8d6 | [
"MIT"
] | 1 | 2022-01-07T21:04:04.000Z | 2022-01-07T21:04:04.000Z | tests/test_generate_unique_id_function.py | ssensalo/fastapi | 146f57b8f70c5757dc20edc716dba1b96936a8d6 | [
"MIT"
] | null | null | null | import warnings
from typing import List
from fastapi import APIRouter, FastAPI
from fastapi.routing import APIRoute
from fastapi.testclient import TestClient
from pydantic import BaseModel
def test_top_level_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter()
app.include_router(router)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "foo_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "foo_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_foo_post_root": {
"title": "Body_foo_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_router": {
"title": "Body_foo_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_router_overrides_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
app.include_router(router)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "foo_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "bar_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_bar_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Bar Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Bar Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_bar_post_router": {
"title": "Body_bar_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_root": {
"title": "Body_foo_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_router_include_overrides_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
app.include_router(router, generate_unique_id_function=custom_generate_unique_id3)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "foo_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "bar_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_bar_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Bar Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Bar Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_bar_post_router": {
"title": "Body_bar_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_root": {
"title": "Body_foo_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_subrouter_top_level_include_overrides_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter()
sub_router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
router.include_router(sub_router)
app.include_router(router, generate_unique_id_function=custom_generate_unique_id3)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "foo_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "baz_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_baz_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Baz Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Baz Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/subrouter": {
"post": {
"summary": "Post Subrouter",
"operationId": "bar_post_subrouter",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_bar_post_subrouter"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Bar Post Subrouter",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Bar Post Subrouter",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_bar_post_subrouter": {
"title": "Body_bar_post_subrouter",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_baz_post_router": {
"title": "Body_baz_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_root": {
"title": "Body_foo_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_router_path_operation_overrides_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
app.include_router(router)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "foo_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "baz_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_baz_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Baz Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Baz Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_baz_post_router": {
"title": "Body_baz_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_root": {
"title": "Body_foo_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_app_path_operation_overrides_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
app.include_router(router)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "baz_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_baz_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Baz Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Baz Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "bar_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_bar_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Bar Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Bar Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_bar_post_router": {
"title": "Body_bar_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_baz_post_root": {
"title": "Body_baz_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_callback_override_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
callback_router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "baz_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_baz_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Baz Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Baz Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"callbacks": {
"post_callback": {
"/post-callback": {
"post": {
"summary": "Post Callback",
"operationId": "baz_post_callback",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_baz_post_callback"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Baz Post Callback",
"type": "array",
"items": {
"$ref": "#/components/schemas/Item"
},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Baz Post Callback",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
}
},
}
},
"/tocallback": {
"post": {
"summary": "Post With Callback",
"operationId": "foo_post_with_callback",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_with_callback"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post With Callback",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post With Callback",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_baz_post_callback": {
"title": "Body_baz_post_callback",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_baz_post_root": {
"title": "Body_baz_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_with_callback": {
"title": "Body_foo_post_with_callback",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_warn_duplicate_operation_id():
app = FastAPI(generate_unique_id_function=broken_operation_id)
client = TestClient(app)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
client.get("/openapi.json")
assert len(w) == 2
assert issubclass(w[-1].category, UserWarning)
assert "Duplicate Operation ID" in str(w[-1].message)
| 41.772059 | 106 | 0.285161 |
0b5218f0be7a06f3e5bb1ddae6a9fce7c35741e8 | 11,880 | py | Python | hbruraldoctor/hbvirtual/lib/python3.7/site-packages/Naked/app.py | hallohubo/DjangoDocterAPI | 2d86d17c718affa968c0b2d4f9590aa08d43716e | [
"Apache-2.0"
] | 89 | 2015-04-10T14:34:05.000Z | 2021-11-08T09:17:09.000Z | hbruraldoctor/hbvirtual/lib/python3.7/site-packages/Naked/app.py | hallohubo/DjangoDocterAPI | 2d86d17c718affa968c0b2d4f9590aa08d43716e | [
"Apache-2.0"
] | 13 | 2015-03-17T15:44:41.000Z | 2020-11-19T03:07:13.000Z | hbruraldoctor/hbvirtual/lib/python3.7/site-packages/Naked/app.py | hallohubo/DjangoDocterAPI | 2d86d17c718affa968c0b2d4f9590aa08d43716e | [
"Apache-2.0"
] | 19 | 2015-05-13T09:18:12.000Z | 2021-04-28T10:35:39.000Z | #!/usr/bin/env python
# encoding: utf-8
#------------------------------------------------------------------------------
# Naked | A Python command line application framework
# Copyright 2014 Christopher Simpkins
# MIT License
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
# c.cmd = Primary command (<executable> <primary command>)
# c.cmd2 = Secondary command (<executable> <primary command> <secondary command>)
#
# c.option(option_string, [bool argument_required]) = test for option with optional test for positional arg to the option
# c.option_with_arg(option_string) = test for option and mandatory positional argument to option test
# c.flag(flag_string) = test for presence of a "--option=argument" style flag
#
# c.arg(arg_string) = returns the next positional argument to the arg_string argument
# c.flag_arg(flag_string) = returns the flag assignment for a "--option=argument" style flag
#------------------------------------------------------------------------------------
# Application start
if __name__ == '__main__':
main()
| 51.652174 | 135 | 0.448485 |
0b57844b6fc847c94e6d69c32ba1624b13f6dfa7 | 366 | py | Python | codes/day7_task1.py | tayyrov/AdventOfCode | 69003407fd345ea76f8125b4b132e5b5d5ea33ab | [
"MIT"
] | 1 | 2021-12-07T10:54:48.000Z | 2021-12-07T10:54:48.000Z | codes/day7_task1.py | tayyrov/AdventOfCode | 69003407fd345ea76f8125b4b132e5b5d5ea33ab | [
"MIT"
] | null | null | null | codes/day7_task1.py | tayyrov/AdventOfCode | 69003407fd345ea76f8125b4b132e5b5d5ea33ab | [
"MIT"
] | null | null | null | """
Advent Of Code 2021
Day 7
Date: 07-12-2021
Site: https://adventofcode.com/2021/day/7
Author: Tayyrov
"""
import sys  # kept from the original; currently unused


def solve(positions):
    """Return the minimum total fuel to align all crabs on one position.

    With a per-crab cost of |p - target|, the optimal target is a median of
    the positions, so sort, take the (upper) median and sum the distances.
    """
    ordered = sorted(positions)
    median = ordered[len(ordered) // 2]
    return sum(abs(median - p) for p in ordered)


def main():
    """Read the comma-separated puzzle input and print the part-1 answer."""
    # Bug fix: the original left the file handle open; `with` closes it.
    with open('../input_files/day7_input', 'r') as handle:
        positions = [int(tok) for tok in handle.readline().split(",")]
    print(solve(positions))


if __name__ == '__main__':
    main()
0b57f9e75344dd34d7fe38dc10faa58dd476ec48 | 4,270 | py | Python | events/utils.py | ewjoachim/pythondotorg | 382741cc6208fc56aa827cdd1da41983fb7e6ba8 | [
"Apache-2.0"
] | null | null | null | events/utils.py | ewjoachim/pythondotorg | 382741cc6208fc56aa827cdd1da41983fb7e6ba8 | [
"Apache-2.0"
] | null | null | null | events/utils.py | ewjoachim/pythondotorg | 382741cc6208fc56aa827cdd1da41983fb7e6ba8 | [
"Apache-2.0"
] | null | null | null | import datetime
import re
import pytz
from django.utils.timezone import make_aware, is_aware
def timedelta_nice_repr(timedelta, display='long', sep=', '):
    """
    Turn a datetime.timedelta into a human readable string.

    ``display`` selects the unit labels: 'minimal', 'short' or 'long'
    (default).  Any other value is treated as a django-template-style
    format string built from the directives d, g, G, h, H, i, s.
    Taken from bitbucket.org/schinckel/django-timedelta-field.
    'sql' and 'iso8601' support have been removed.
    """
    if not isinstance(timedelta, datetime.timedelta):
        raise TypeError('First argument must be a timedelta.')
    weeks = int(timedelta.days / 7)
    days = timedelta.days % 7
    hours, leftover = divmod(timedelta.seconds, 3600)
    minutes, seconds = divmod(leftover, 60)
    unit_words = {
        'minimal': ['w', 'd', 'h', 'm', 's'],
        'short': [' wks', ' days', ' hrs', ' min', ' sec'],
        'long': [' weeks', ' days', ' hours', ' minutes', ' seconds'],
    }
    if display not in unit_words:
        # django-template-style formatting; valid directives: d, g, G, h, H, i, s.
        template = re.sub(r'([dgGhHis])', lambda x: '%%(%s)s' % x.group(), display)
        return template % {
            'd': days,
            'g': hours,
            'G': hours if hours > 9 else '0%s' % hours,
            'h': hours,
            'H': hours if hours > 9 else '0%s' % hours,
            'i': minutes if minutes > 9 else '0%s' % minutes,
            's': seconds if seconds > 9 else '0%s' % seconds
        }
    words = unit_words[display]
    chunks = []
    for amount, unit in zip((weeks, days, hours, minutes, seconds), words):
        if not amount:
            continue
        if amount == 1 and len(unit) > 1:
            # Singular value: drop the trailing 's' from multi-char labels.
            chunks.append('%i%s' % (amount, unit.rstrip('s')))
        else:
            chunks.append('%i%s' % (amount, unit))
    if not chunks:
        # Deltas below one second are considered zero: show 0 of the smallest unit.
        chunks.append('0%s' % words[-1])
    return sep.join(chunks)
def timedelta_parse(string):
    """
    Parse a string into a datetime.timedelta object.

    Accepts both the "[D day(s),] [+-]HH:MM[:SS[.ffffff]]" form produced by
    PostgreSQL/sqlite/serialization and a free-form
    "N weeks N days N hours N minutes N seconds" style.
    Taken from bitbucket.org/schinckel/django-timedelta-field.
    """
    string = string.strip()
    if not string:
        raise TypeError(f'{string!r} is not a valid time interval')
    # Try the strict clock-style form first.
    match = re.match(
        r'^((?P<days>[-+]?\d+) days?,? )?(?P<sign>[-+]?)(?P<hours>\d+):'
        r'(?P<minutes>\d+)(:(?P<seconds>\d+(\.\d+)?))?$',
        string
    )
    if match:
        parts = match.groupdict(0)
        if parts['sign'] == '-':
            # A leading minus applies to the whole clock portion.
            for unit in ('hours', 'minutes', 'seconds'):
                parts[unit] = '-' + parts[unit]
        parts.pop('sign', None)
    else:
        # Fall back to the free-form "1 week, 2 days ..." style.
        match = re.match(
            r'^((?P<weeks>-?((\d*\.\d+)|\d+))\W*w((ee)?(k(s)?)?)(,)?\W*)?'
            r'((?P<days>-?((\d*\.\d+)|\d+))\W*d(ay(s)?)?(,)?\W*)?'
            r'((?P<hours>-?((\d*\.\d+)|\d+))\W*h(ou)?(r(s)?)?(,)?\W*)?'
            r'((?P<minutes>-?((\d*\.\d+)|\d+))\W*m(in(ute)?(s)?)?(,)?\W*)?'
            r'((?P<seconds>-?((\d*\.\d+)|\d+))\W*s(ec(ond)?(s)?)?)?\W*$',
            string
        )
        if not match:
            raise TypeError(f'{string!r} is not a valid time interval')
        parts = match.groupdict(0)
    return datetime.timedelta(**{unit: float(value) for unit, value in parts.items()})
| 34.16 | 102 | 0.545902 |
0b59207603dace13de5bad24d570481b2383557b | 4,282 | py | Python | frontend/main.py | loukwn/klougle | 45432841c594ced36437566f416e9c71017f83a5 | [
"MIT"
] | 2 | 2018-10-26T11:06:51.000Z | 2020-04-29T13:38:13.000Z | frontend/main.py | loukwn/klougle | 45432841c594ced36437566f416e9c71017f83a5 | [
"MIT"
] | null | null | null | frontend/main.py | loukwn/klougle | 45432841c594ced36437566f416e9c71017f83a5 | [
"MIT"
] | null | null | null | import json
import operator
import os
import webbrowser
from timeit import default_timer as timer
from kivy.app import App
from kivy.config import Config
from kivy.properties import ObjectProperty
from kivy.uix.stacklayout import StackLayout
from nltk.stem.wordnet import WordNetLemmatizer
# Desktop-style mouse input: disable Kivy's multitouch-emulation circles
# that otherwise appear on right click.
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
# File name of the serialized inverted index used by the search app.
INV_IDX_NAME = 'inv_index.json'
# Warm up WordNet: the first lemmatize() call lazily loads the corpus, so
# trigger it once at import time instead of on the first user query.
_wordnet_lemmatizer = WordNetLemmatizer()
_wordnet_lemmatizer.lemmatize('asd')
# -------------------------- UI -------------------------- #
# top layout of UI
# kivy app
# starting point
# NOTE(review): `SearchApp` (and the UI classes the markers above refer to)
# is not defined in this excerpt -- confirm it exists elsewhere in the file.
if __name__ == '__main__':
    SearchApp().run()
| 34.532258 | 115 | 0.609295 |
0b5a05b2b3ff689eda558db7efd7ba2b693f4a50 | 1,244 | py | Python | test.py | richisusiljacob/VideoTo360VR | 14c176cfbe90fd7cf113cbdd2d4edf447c001894 | [
"MIT"
] | 5 | 2021-08-06T11:26:56.000Z | 2022-03-17T09:06:07.000Z | test.py | richisusiljacob/VideoTo360VR | 14c176cfbe90fd7cf113cbdd2d4edf447c001894 | [
"MIT"
] | 8 | 2021-07-03T08:08:00.000Z | 2021-07-09T06:59:34.000Z | test.py | richisusiljacob/VideoTo360VR | 14c176cfbe90fd7cf113cbdd2d4edf447c001894 | [
"MIT"
] | 2 | 2021-07-02T09:19:09.000Z | 2021-07-04T13:34:30.000Z | from tkinter import *
import tkinter.ttk as ttk
from PIL import ImageTk,Image
""" root = Tk()
canvas = Canvas(root, width = 300, height = 300)
canvas.pack()
img = ImageTk.PhotoImage(Image.open("output/collage1/FinalCollage.jpg"))
canvas.create_image(0,0,anchor=NW, image=img)
root.mainloop() """
root = Tk()
root.title("Tab Widget")
tabControl = ttk.Notebook(root)
tab1 = ttk.Frame(tabControl)
tab2 = ttk.Frame(tabControl)
tabControl.add(tab1, text ='Tab 1')
tabControl.add(tab2, text ='Tab 2')
tabControl.pack(expand = 1, fill ="both")
ttk.Label(tab1,
text ="Welcome to \
GeeksForGeeks").grid(column = 0,
row = 0,
padx = 30,
pady = 30)
canvas = Canvas(tab1, width = 300, height = 300)
canvas.grid(column= 1, row =0)
img = ImageTk.PhotoImage(Image.open("output/collage1/FinalCollage.jpg"))
canvas.create_image(0,0,anchor=NW, image=img)
ttk.Label(tab2,
text ="Lets dive into the\
world of computers").grid(column = 0,
row = 0,
padx = 30,
pady = 30)
root.mainloop()
| 31.1 | 74 | 0.553859 |
0b5a82c329031fc6f172ed423012d36ab20bca44 | 10,817 | py | Python | testscripts/RDKB/component/WEBCONFIG/TS_WEBCONFIG_DisableRFC_QuerySyncParams.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/WEBCONFIG/TS_WEBCONFIG_DisableRFC_QuerySyncParams.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/WEBCONFIG/TS_WEBCONFIG_DisableRFC_QuerySyncParams.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2021 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>1</version>
<name>TS_WEBCONFIG_DisableRFC_QuerySyncParams</name>
<primitive_test_id/>
<primitive_test_name>Webconfig_DoNothing</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>To disable the Webconfig RFC and check if a get operation on Force Sync parameters logs DB failure in WebConfig.log file</synopsis>
<groups_id/>
<execution_time>10</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_WEBCONFIG_02</test_case_id>
<test_objective>This test is case is to disable the RFC and check if a get operation on Force Sync parameters logs DB failure in WebConfig.log file</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components in DUT should be in a running state that includes component under test Cable Modem
2.TDK Agent should be in running state or invoke it through StartTdk.sh script
3.Webconfig distro should be enabled else enable with custom image</pre_requisite>
<api_or_interface_used>pam_GetParameterValues
pam_SetParameterValues</api_or_interface_used>
<input_parameters>Device.X_RDK_WebConfig.RfcEnable
Device.X_RDK_WebConfig.ConfigFile.1.ForceSyncCheck
Device.X_RDK_WebConfig.ConfigFile.1.SyncCheckOK"</input_parameters>
<automation_approch>1.Load the module
2.Get the current webconfig RFC enable status and disable the RFC
3.Do a get operation on Force Sync check and Force Sync Check Ok parameters
4.Check if DB failed message specific to the parameter is logged in WebConfig.log File
5.Revert the RFC status to previous
6.Unload the module</automation_approch>
<expected_output>When webconfig RFC is disabled and get operation done on Force Sync parameters should log Db failed message specific to the parameter in webConfig.log file</expected_output>
<priority>High</priority>
<test_stub_interface>WEBCONFIG</test_stub_interface>
<test_script>TS_WEBCONFIG_DisableRFC_QuerySyncParams</test_script>
<skipped>No</skipped>
<release_version>M86</release_version>
<remarks>None</remarks>
</test_cases>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from tdkbVariables import *;
import tdkutility
from tdkutility import *
from time import sleep;
#Test component to be tested
sysobj = tdklib.TDKScriptingLibrary("sysutil","1");
pamobj = tdklib.TDKScriptingLibrary("pam","1");
#IP and Port of box, No need to change,
#This will be replaced with correspoing Box Ip and port while executing script
ip = <ipaddress>
port = <port>
pamobj.configureTestCase(ip,port,'TS_WEBCONFIG_DisableRFC_QuerySyncParams');
sysobj.configureTestCase(ip,port,'TS_WEBCONFIG_DisableRFC_QuerySyncParams');
#Get the result of connection with test component and DUT
pamloadmodulestatus =pamobj.getLoadModuleResult();
sysloadmodulestatus =sysobj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %pamloadmodulestatus ;
print "[LIB LOAD STATUS] : %s" %sysloadmodulestatus ;
revert = 0;
if "SUCCESS" in pamloadmodulestatus.upper() and "SUCCESS" in sysloadmodulestatus.upper():
#Set the result status of execution
pamobj.setLoadModuleStatus("SUCCESS");
sysobj.setLoadModuleStatus("SUCCESS");
tdkTestObj = pamobj.createTestStep('pam_GetParameterValues');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WebConfig.RfcEnable");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
initial_value = tdkTestObj.getResultDetails().strip();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get current value of Web Config Enable"
print "EXPECTED RESULT 1: Should get current value of Web Config Enable"
print "ACTUAL RESULT 1: current value is %s" %initial_value;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = pamobj.createTestStep('pam_SetParameterValues');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WebConfig.RfcEnable");
tdkTestObj.addParameter("ParamValue","false");
tdkTestObj.addParameter("Type","boolean");
expectedresult="SUCCESS";
#Execute testcase on DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
result = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
revert =1;
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Set Web Config Enable status to false";
print "EXPECTED RESULT 2: Should set Web Config Enable status to false";
print "ACTUAL RESULT 2: %s" %result;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
paramlist = ["Device.X_RDK_WebConfig.ConfigFile.1.ForceSyncCheck","Device.X_RDK_WebConfig.ConfigFile.1.SyncCheckOK"];
logMsgs = ["ForceSyncCheck GET from DB failed","SyncCheckOK GET from DB failed"];
i=0;
for item in paramlist:
tdkTestObj = pamobj.createTestStep('pam_GetParameterValues');
tdkTestObj.addParameter("ParamName",item);
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip();
if expectedresult in actualresult:
print "Querying %s parameter is sucessfull" %item;
print "Check if DB failed message is seen on querying this specific parameter";
sleep(5);
tdkTestObj = sysobj.createTestStep('ExecuteCmd');
expectedresult="SUCCESS";
cmd= "cat /rdklogs/logs/WebConfig.log | grep -rn \"%s\" " %logMsgs[i];
print cmd;
expectedresult="SUCCESS";
tdkTestObj.addParameter("command", cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
i= i+1;
if expectedresult in actualresult and details:
tdkTestObj.setResultStatus("SUCCESS");
print"%s" %details;
print"The expected log message is present when Queried";
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "The expected log message is not present: %s" %logMsgs[i];
print "[TEST EXECUTION RESULT] : FAILURE";
break;
else:
revert =0;
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Set Web Config Enable status to false";
print "EXPECTED RESULT 2: Should set Web Config Enable status to false";
print "ACTUAL RESULT 2: %s" %result;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
if revert ==1 :
tdkTestObj = pamobj.createTestStep('pam_SetParameterValues');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WebConfig.RfcEnable");
tdkTestObj.addParameter("ParamValue",initial_value);
tdkTestObj.addParameter("Type","boolean");
expectedresult="SUCCESS";
#Execute testcase on DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
result = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Revert the Web Config Enable status to previous"
print "EXPECTED RESULT 3: Should revert Web Config status to previous"
print "ACTUAL RESULT 3: %s" %result;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS"
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Revert Web Config Enable status to previous"
print "EXPECTED RESULT 3: Should revert Web Config Enable status to previous"
print "ACTUAL RESULT 3: %s" %result;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE"
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get current value of Web Config Enable"
print "EXPECTED RESULT 1: Should get current value of Web Config Enable"
print "ACTUAL RESULT 1: current value is %s" %initial_value;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
pamobj.unloadModule("pam");
sysobj.unloadModule("sysutil");
else:
print "Failed to load pam/sysutil module";
pamobj.setLoadModuleStatus("FAILURE");
sysobj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| 49.168182 | 194 | 0.665249 |
0b5a9a6d564a0a48f6482c88a286d5b324351dbc | 3,283 | py | Python | xappt_qt/plugins/tools/examples/auto_advance.py | cmontesano/xappt_qt | 74f8c62e0104a67b4b4eb65382df851221bf0bab | [
"MIT"
] | null | null | null | xappt_qt/plugins/tools/examples/auto_advance.py | cmontesano/xappt_qt | 74f8c62e0104a67b4b4eb65382df851221bf0bab | [
"MIT"
] | 12 | 2020-10-11T22:42:12.000Z | 2021-10-04T19:38:51.000Z | xappt_qt/plugins/tools/examples/auto_advance.py | cmontesano/xappt_qt | 74f8c62e0104a67b4b4eb65382df851221bf0bab | [
"MIT"
] | 1 | 2021-09-29T23:53:34.000Z | 2021-09-29T23:53:34.000Z | import time
import xappt
| 43.773333 | 110 | 0.616205 |
0b5adb9041b96e89affef15661e25d3114bd15aa | 962 | py | Python | play-1.2.4/python/Lib/site-packages/Rpyc/Utils/Discovery.py | AppSecAI-TEST/restcommander | a2523f31356938f5c7fc6d379b7678da0b1e077a | [
"Apache-2.0"
] | 550 | 2015-01-05T16:59:00.000Z | 2022-03-20T16:55:25.000Z | framework/python/Lib/site-packages/Rpyc/Utils/Discovery.py | lafayette/JBTT | 94bde9d90abbb274d29ecd82e632d43a4320876e | [
"MIT"
] | 15 | 2015-02-05T06:00:47.000Z | 2018-07-07T14:34:04.000Z | framework/python/Lib/site-packages/Rpyc/Utils/Discovery.py | lafayette/JBTT | 94bde9d90abbb274d29ecd82e632d43a4320876e | [
"MIT"
] | 119 | 2015-01-08T00:48:24.000Z | 2022-01-27T14:13:15.000Z | """
Discovery: broadcasts a query, attempting to discover all running RPyC servers
over the local network/specific subnet.
"""
import socket
import select
import struct
__all__ = ["discover_servers"]
UDP_DISCOVERY_PORT = 18813
QUERY_MAGIC = "RPYC_QUERY"
MAX_DGRAM_SIZE = 100
def discover_servers(subnet = "255.255.255.255", timeout = 1):
"""broadcasts a query and returns a list of (addr, port) of running servers"""
# broadcast
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(QUERY_MAGIC, (subnet, UDP_DISCOVERY_PORT))
# wait for replies
replies = []
while True:
rlist, dummy, dummy = select.select([s], [], [], timeout)
if not rlist:
break
data, (addr, port) = s.recvfrom(MAX_DGRAM_SIZE)
rpyc_port, = struct.unpack("<H", data)
replies.append((addr, rpyc_port))
return list(set(replies))
| 24.666667 | 82 | 0.672557 |
0b5cea9d906ea2c35bda5ccee23fdca482e7e9b4 | 335 | py | Python | atest/testresources/testlibs/objecttoreturn.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 7 | 2015-02-25T10:55:02.000Z | 2015-11-04T03:20:05.000Z | atest/testresources/testlibs/objecttoreturn.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 12 | 2015-02-24T17:00:06.000Z | 2015-07-31T08:32:07.000Z | atest/testresources/testlibs/objecttoreturn.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 2 | 2015-12-15T11:00:35.000Z | 2018-02-24T18:11:24.000Z | try:
import exceptions
except ImportError: # Python 3
import builtins as exceptions
| 19.705882 | 45 | 0.647761 |
0b5db17336f788ad1d51e0ebfedab480c4c72a7e | 2,068 | py | Python | quiz/models.py | jzi040941/django_quiz | 465d29c74e3ff6814f686296d225f18a50c99b9a | [
"MIT"
] | 1 | 2018-03-14T16:43:00.000Z | 2018-03-14T16:43:00.000Z | quiz/models.py | jzi040941/django_quiz | 465d29c74e3ff6814f686296d225f18a50c99b9a | [
"MIT"
] | null | null | null | quiz/models.py | jzi040941/django_quiz | 465d29c74e3ff6814f686296d225f18a50c99b9a | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
# NOTE(review): the block below is dead code -- a module-level string
# literal, not a real model definition.  It sketches a `quiz_one` model
# (a question with one right and three wrong answers, keyed to an
# Assignment); confirm whether it should be deleted or reinstated.
'''
class quiz_one(models.Model):
    AssignNum = models.ForeignKey('teacher.Assignment', on_delete=models.CASCADE)
    Question = models.TextField()
    Answer = models.TextField()
    Wrong_1 = models.TextField()
    Wrong_2 = models.TextField()
    Wrong_3 = models.TextField()
    def __str__(self):
        return "AssignNum : %s, question: %s Answer: %s" % (self.AssignNum, self.Question, self.Answer)
'''
| 35.655172 | 113 | 0.696325 |
0b5e2ce14cd1b7d0c4bdab1dbcbd6268fb51f4f1 | 165 | py | Python | benchmark/VAR/GG/common.py | victor-estrade/SystGradDescent | 822e7094290301ec47a99433381a8d6406798aff | [
"MIT"
] | 2 | 2019-03-20T09:05:02.000Z | 2019-03-20T15:23:44.000Z | benchmark/VAR/GG/common.py | victor-estrade/SystGradDescent | 822e7094290301ec47a99433381a8d6406798aff | [
"MIT"
] | null | null | null | benchmark/VAR/GG/common.py | victor-estrade/SystGradDescent | 822e7094290301ec47a99433381a8d6406798aff | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
| 27.5 | 39 | 0.866667 |
0b610800704e8c840fbc0a2a516adbeed8570f93 | 3,479 | py | Python | problems/problem3.py | JakobHavtorn/euler | b5ca0b4393dc9a6d6e0623e0df5b96f803e116ab | [
"MIT"
] | null | null | null | problems/problem3.py | JakobHavtorn/euler | b5ca0b4393dc9a6d6e0623e0df5b96f803e116ab | [
"MIT"
] | null | null | null | problems/problem3.py | JakobHavtorn/euler | b5ca0b4393dc9a6d6e0623e0df5b96f803e116ab | [
"MIT"
] | null | null | null | """Largest prime factor
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143?
"""
import math
import numpy as np
def largest_prime_factor_naive(number):
    """
    Trial division: for k = 2, 3, 4, ... divide out every power of k from
    the running value.  Whenever k divides the value it is necessarily
    prime (all smaller factors were already removed), so collecting those
    k values yields the distinct prime factors in increasing order.
    """
    primes = []
    remaining = number
    candidate = 2
    while remaining > 1:
        if remaining % candidate == 0:
            primes.append(candidate)
            # Strip out the entire power of this prime.
            while remaining % candidate == 0:
                remaining //= candidate
        candidate += 1
    return primes
def largest_prime_factor_even_optimized(number):
    """
    Trial division that handles 2 separately and then tests only odd
    candidates, since 2 is the only even prime.
    """
    primes = []
    remaining = number
    # Divide out the single even prime first.
    if remaining % 2 == 0:
        primes.append(2)
        while remaining % 2 == 0:
            remaining //= 2
    # Only odd candidates remain possible.
    candidate = 3
    while remaining > 1:
        divided = False
        while remaining % candidate == 0:
            remaining //= candidate
            divided = True
        if divided:
            primes.append(candidate)
        candidate += 2
    return primes
def largest_prime_factor_square_optimized(number):
    """
    Trial division bounded by the square root: a number n has at most one
    prime factor greater than sqrt(n), so once the candidate exceeds the
    square root of what is left, the remainder (if > 1) is itself prime.

    Bug fix: the original version never recorded that final prime, so e.g.
    13195 -> [5, 7, 13] instead of [5, 7, 13, 29] -- it silently dropped
    the *largest* prime factor, defeating the purpose of the function.
    """
    factors = []
    factor = 2
    if number % factor == 0:
        number = number // factor
        factors.append(factor)
        while number % factor == 0:
            number = number // factor
    factor = 3
    max_factor = math.sqrt(number)
    while number > 1 and factor <= max_factor:
        if number % factor == 0:
            factors.append(factor)
            number = number // factor
            while number % factor == 0:
                number = number // factor
            # The remaining value shrank, so the search bound shrinks too.
            max_factor = math.sqrt(number)
        factor += 2
    # Whatever survives all candidates up to sqrt(number) is prime.
    if number > 1:
        factors.append(number)
    return factors
def idx_sieve(length):
    """Static length sieve-based prime generator.

    Sieve of Eratosthenes: returns every prime p with 2 <= p < length, in
    increasing order ([] when length < 3).

    The original implementation marked each prime i itself as composite and
    relied on the now-stale flag plus an else-branch to advance the index --
    correct, but fragile and needlessly slow.  This version keeps primes
    marked, only crosses off true composites, and vectorizes the collection.
    """
    if length < 3:
        return []
    is_prime = np.ones(length, dtype=bool)
    is_prime[:2] = False  # 0 and 1 are not prime
    for i in range(2, int(length ** 0.5) + 1):
        if is_prime[i]:
            # Starting at i*i suffices: smaller multiples of i were already
            # crossed off by a smaller prime factor.
            is_prime[i * i::i] = False
    return np.flatnonzero(is_prime).tolist()
if __name__ == '__main__':
    # Project Euler #3: largest prime factor of 600851475143.
    number = 600851475143
    print(largest_prime_factor_naive(number))
    print(largest_prime_factor_even_optimized(number))
    print(largest_prime_factor_square_optimized(number))
    primes = idx_sieve(20000)
    # Bug fix: the original called max(prime_factor(number, primes)), but no
    # `prime_factor` function exists in this module, so the script died with
    # a NameError.  The intent was to find the largest prime factor via the
    # sieve, so filter the sieved primes by divisibility instead.
    print(max(p for p in primes if number % p == 0))
| 28.516393 | 122 | 0.603334 |
0b61d6924578e04d8bbfa01176c73eece0bd32ef | 2,484 | py | Python | nova/tests/test_hooks.py | bopopescu/zknova | 8dd09199f5678697be228ffceeaf2c16f6d7319d | [
"Apache-2.0"
] | null | null | null | nova/tests/test_hooks.py | bopopescu/zknova | 8dd09199f5678697be228ffceeaf2c16f6d7319d | [
"Apache-2.0"
] | null | null | null | nova/tests/test_hooks.py | bopopescu/zknova | 8dd09199f5678697be228ffceeaf2c16f6d7319d | [
"Apache-2.0"
] | 1 | 2020-07-24T08:25:25.000Z | 2020-07-24T08:25:25.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for hook customization."""
import stevedore
from nova import hooks
from nova import test
| 28.227273 | 78 | 0.654187 |
0b6439e111fde6d2d72ca7b4f1a3a62557d36d00 | 8,850 | py | Python | code/reasoningtool/kg-construction/QueryUniprot.py | andrewsu/RTX | dd1de262d0817f7e6d2f64e5bec7d5009a3a2740 | [
"MIT"
] | 31 | 2018-03-05T20:01:10.000Z | 2022-02-01T03:31:22.000Z | code/reasoningtool/kg-construction/QueryUniprot.py | andrewsu/RTX | dd1de262d0817f7e6d2f64e5bec7d5009a3a2740 | [
"MIT"
] | 1,774 | 2018-03-06T01:55:03.000Z | 2022-03-31T03:09:04.000Z | code/reasoningtool/kg-construction/QueryUniprot.py | andrewsu/RTX | dd1de262d0817f7e6d2f64e5bec7d5009a3a2740 | [
"MIT"
] | 19 | 2018-05-10T00:43:19.000Z | 2022-03-08T19:26:16.000Z | """ This module defines the class QueryUniprot which connects to APIs at
http://www.uniprot.org/uploadlists/, querying reactome pathways from uniprot id.
* map_enzyme_commission_id_to_uniprot_ids(ec_id)
Description:
map enzyme commission id to UniProt ids
Args:
ec_id (str): enzyme commission id, e.g., "ec:1.4.1.17"
Returns:
ids (set): a set of the enzyme commission ids, or empty set if no UniProt id can be obtained or the response
status code is not 200.
"""
__author__ = ""
__copyright__ = ""
__credits__ = []
__license__ = ""
__version__ = ""
__maintainer__ = ""
__email__ = ""
__status__ = "Prototype"
# import requests
# import requests_cache
from cache_control_helper import CacheControlHelper
import CachedMethods
import sys
import urllib.parse
import xmltodict
# Ad-hoc smoke test: exercises QueryUniprot (defined earlier in this file)
# and CachedMethods against the live UniProt web service.  Requires network
# access, so results are printed rather than asserted.
if __name__ == '__main__':
    print(QueryUniprot.get_citeable_accession_for_accession("P35354"))
    print(QueryUniprot.get_citeable_accession_for_accession("A8K802"))
    print(QueryUniprot.get_citeable_accession_for_accession("Q16876"))
    # print(QueryUniprot.uniprot_id_to_reactome_pathways("P68871"))
    # print(QueryUniprot.uniprot_id_to_reactome_pathways("Q16621"))
    # print(QueryUniprot.uniprot_id_to_reactome_pathways("P09601"))
    print(CachedMethods.cache_info())
    print(QueryUniprot.map_enzyme_commission_id_to_uniprot_ids("ec:1.4.1.17"))  # small results
    print(QueryUniprot.map_enzyme_commission_id_to_uniprot_ids("ec:1.3.1.110"))  # empty result
    print(QueryUniprot.map_enzyme_commission_id_to_uniprot_ids("ec:1.2.1.22"))  # large results
    print(QueryUniprot.map_enzyme_commission_id_to_uniprot_ids("ec:4.4.1.xx"))  # fake id
    print(QueryUniprot.map_enzyme_commission_id_to_uniprot_ids("R-HSA-1912422"))  # wrong id
    print(QueryUniprot.get_protein_gene_symbol('UniProtKB:P20848'))
    print(QueryUniprot.get_protein_gene_symbol("UniProtKB:P01358"))
    print(QueryUniprot.get_protein_gene_symbol("UniProtKB:Q96P88"))
    print(QueryUniprot.get_protein_name('UniProtKB:P01358'))
    print(QueryUniprot.get_protein_name('UniProtKB:P20848'))
    print(QueryUniprot.get_protein_name('UniProtKB:Q9Y471'))
    print(QueryUniprot.get_protein_name('UniProtKB:O60397'))
    print(QueryUniprot.get_protein_name('UniProtKB:Q8IZJ3'))
    print(QueryUniprot.get_protein_name('UniProtKB:Q7Z2Y8'))
    print(QueryUniprot.get_protein_name('UniProtKB:Q8IWN7'))
    print(QueryUniprot.get_protein_name('UniProtKB:Q156A1'))
| 40.410959 | 116 | 0.63209 |
0b649e46fb5914bfe7b320bbcd19fe8e80f42ef7 | 1,624 | py | Python | code_trunk/emb.py | chris4540/DD2430-ds-proj | b876efabe949392b27a7ebd4afb2be623174e287 | [
"MIT"
] | null | null | null | code_trunk/emb.py | chris4540/DD2430-ds-proj | b876efabe949392b27a7ebd4afb2be623174e287 | [
"MIT"
] | null | null | null | code_trunk/emb.py | chris4540/DD2430-ds-proj | b876efabe949392b27a7ebd4afb2be623174e287 | [
"MIT"
] | null | null | null | import torch
from network.siamese import SiameseNet
from network.resnet import ResidualEmbNetwork
import os
import numpy as np
from utils.datasets import DeepFashionDataset
from torchvision.transforms import Compose
from torchvision.transforms import Resize
from torchvision.transforms import ToTensor
from torchvision.transforms import Normalize
from torch.utils.data import Subset
from torch.utils.data import DataLoader
from utils import extract_embeddings
import pickle
from cuml.manifold import TSNE
# Rebuild the training-time model so the checkpoint's parameter names line
# up: the ResNet embedding backbone wrapped in a SiameseNet.
emb_net = ResidualEmbNetwork()
model = SiameseNet(emb_net)
# Preprocessing pipeline: resize, tensorize, normalize with per-channel
# mean/std (presumably the training-set statistics -- TODO confirm).
trans = Compose(
    [
        Resize((224, 224)),
        ToTensor(),
        Normalize([0.7511, 0.7189, 0.7069], [0.2554, 0.2679, 0.2715]),
    ])
model.load_state_dict(torch.load('siamese_resnet18.pth'))
deep_fashion_root_dir = "./deepfashion_data"
train_ds = DeepFashionDataset(
    deep_fashion_root_dir, 'train', transform=trans)
# Only the embedding branch is needed for inference; move it to the GPU.
emb_net = model.emb_net
emb_net.cuda()
# subset: draw 25k distinct training samples to keep t-SNE tractable.
n_samples = 25000
sel_idx = np.random.choice(
    list(range(len(train_ds))),
    n_samples, replace=False)
assert len(set(sel_idx)) == n_samples  # sanity: sampled without replacement
ds = Subset(train_ds, sel_idx)
loader = DataLoader(
    ds, batch_size=100, pin_memory=True, num_workers=os.cpu_count())
print("extracting...")
embeddings, labels = extract_embeddings(emb_net, loader)
# Project the embeddings with GPU t-SNE (cuml default output dimensionality
# -- presumably 2-D; confirm) and persist projection + labels for plotting.
tsne = TSNE(n_iter=400, metric="euclidean")
projected_emb = tsne.fit_transform(embeddings)
with open('projected_emb.pkl', 'wb') as handle:
    pickle.dump(projected_emb, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('labels.pkl', 'wb') as handle:
    pickle.dump(labels, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 28.491228 | 72 | 0.76601 |
0b67517486e91d69f9ba0a1be6a90a8c7366f494 | 507 | py | Python | src/stations/datastructures.py | cwerner/st-folium-demo | 31bfc3184e7e90d1901ab48fd0d4ee6026f97fe6 | [
"Apache-2.0"
] | 1 | 2021-03-19T11:10:04.000Z | 2021-03-19T11:10:04.000Z | src/stations/datastructures.py | cwerner/st-folium-demo | 31bfc3184e7e90d1901ab48fd0d4ee6026f97fe6 | [
"Apache-2.0"
] | null | null | null | src/stations/datastructures.py | cwerner/st-folium-demo | 31bfc3184e7e90d1901ab48fd0d4ee6026f97fe6 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
# IFU home site coordinates (decimal degrees -- presumably WGS84; confirm).
ifu = {"name": "IFU", "geo_lat": 47.476180, "geo_lon": 11.063350}
# TERENO station coordinates, same format as above.
tereno_stations = [
    {"name": "Fendth", "geo_lat": 47.83243, "geo_lon": 11.06111},
    {"name": "Grasswang", "geo_lat": 47.57026, "geo_lon": 11.03189},
    # NOTE(review): Rottenbuch has the exact same longitude as Grasswang
    # (11.03189) -- looks like a copy-paste value; verify against a map.
    {"name": "Rottenbuch", "geo_lat": 47.73032, "geo_lon": 11.03189},
]
| 23.045455 | 69 | 0.601578 |
0b6861770f6d11f0e6e5144b7f72620064b17922 | 2,217 | py | Python | Tools/Scripts/Python/module_Basemap_RegCM_domain.py | taobrienlbl/RegCM | bda1c78790f0a1501916d0979b843216a08b2cef | [
"AFL-1.1"
] | 27 | 2019-04-23T08:36:25.000Z | 2021-11-15T08:55:01.000Z | Tools/Scripts/Python/module_Basemap_RegCM_domain.py | taobrienlbl/RegCM | bda1c78790f0a1501916d0979b843216a08b2cef | [
"AFL-1.1"
] | 9 | 2020-02-20T06:43:03.000Z | 2021-09-24T11:26:46.000Z | Tools/Scripts/Python/module_Basemap_RegCM_domain.py | taobrienlbl/RegCM | bda1c78790f0a1501916d0979b843216a08b2cef | [
"AFL-1.1"
] | 17 | 2019-06-10T12:49:05.000Z | 2021-11-14T06:55:20.000Z | #!/usr/bin/python2.6
""" Here a comment starts, with 3 quotation marks. In the same way, the comment ends ...
Purpose: Draw a base map of the CORDEX domain
Selected projection: Lambert Conformal Projection
Date: Sept. 26, 2018
Author: S. STRADA
REFERENCES:
Basemap Tool
http://basemaptutorial.readthedocs.org/en/latest/index.html
https://matplotlib.org/basemap/
"""
######################################################
# Import modules you need
#-----------------------------------------------------
from mpl_toolkits.basemap import Basemap, cm
import matplotlib.pyplot as plt
import numpy as np
######################################################
### Python fuction to build a map using a specific projection
#-----------------------------------------------------
def map_RegCMdomain(ax, lat_start, lat_end, lon_start, lon_end, lon0, lat0, fontsize, dparall, dmerid):
"""
How to call the function in a script to create a basemap object :
1. Import function to create the domain
from module_RegCM_domain import basemap_RegCMdomain
2. Call the function and pass to it all needed variables
map = map_RegCMdomain(ax, lat_start, lat_end, lon_start, lon_end, lon_end, lon_0, lat0, fontsize))
Setup Miller Cyclindrical Projection
--> llcrnrlat,llcrnrlon,urcrnrlat,urcrnrlon are the lat/lon values of the lower left and upper right corners of the map
--> resolution = 'i' means intermediate coastline resolution
--> area_thresh=1000 means don't plot coastline features less than 1000 km^2 in area (pay attention to this if you need to plot small islands!)
"""
m = Basemap(ax=ax, llcrnrlon=lon_start, llcrnrlat=lat_start, urcrnrlon=lon_end, urcrnrlat=lat_end,
resolution='i', area_thresh=1000., projection='mill', lon_0=lon0, lat_0=lat0, lat_ts=0)
m.drawcoastlines(color='k',linewidth=1, zorder=10)
m.drawcountries(color='k',linewidth=0.5, zorder=11)
m.drawparallels(range(-90, 90, dparall), labels=[1,0,0,0], fontsize=fontsize, dashes=[1, 2],linewidth=1, color='k', zorder=12)
m.drawmeridians(range(-180, 180, dmerid),labels=[0,0,0,1], fontsize=fontsize, dashes=[1, 2],linewidth=1, color='k', zorder=12)
return m
| 41.830189 | 145 | 0.656292 |
0b691cc681e4265eeba5b9e50b719f23cdd77315 | 24,369 | py | Python | old_game/combat.py | jwvhewitt/dmeternal | bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb | [
"Apache-2.0"
] | 53 | 2015-07-03T21:25:36.000Z | 2022-02-18T23:08:38.000Z | old_game/combat.py | jwvhewitt/dmeternal | bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb | [
"Apache-2.0"
] | 5 | 2015-07-03T21:27:12.000Z | 2016-12-08T14:40:38.000Z | old_game/combat.py | jwvhewitt/dmeternal | bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb | [
"Apache-2.0"
] | 14 | 2016-02-02T06:49:51.000Z | 2022-02-24T13:24:35.000Z | from . import characters
from . import teams
from . import hotmaps
from . import pygwrap
import pygame
from . import maps
import collections
from . import image
from . import pfov
import random
from . import stats
from . import rpgmenu
from . import animobs
from . import effects
from . import enchantments
from . import aibrain
from . import services
# This is a complex effect- Check if target is undead. If so, first apply an
# enchantment. Then, make skill roll to cause 20-120 solar damage and paralysis.
# If that skill roll fails, make an easier skill roll to just cause paralysis.
HOLY_SIGN_EFFECT = effects.TargetIs( effects.UNDEAD, on_true = ( \
effects.Enchant(enchantments.HolySignMark,anim=animobs.YellowSparkle,children=( \
effects.OpposedRoll(stats.HOLY_SIGN, stats.CHARISMA, -70, stats.MAGIC_DEFENSE, stats.PIETY, on_success = (
effects.HealthDamage( (20,6,0), stats.PIETY, element=stats.RESIST_SOLAR, anim=animobs.YellowExplosion, on_success= (effects.Paralyze(max_duration=6),) )
,), on_failure = (
effects.OpposedRoll(stats.HOLY_SIGN, stats.CHARISMA, 5, stats.MAGIC_DEFENSE, stats.PIETY, on_success = (
effects.Paralyze(max_duration=8)
# Is there an obfuscated Python competition?
,)),)),)),))
# I do not intend to create one more boring derivative fantasy RPG. I intend to create all of the boring derivative fantasy RPGs.
| 43.207447 | 232 | 0.57249 |
0b6a970c6ea0942a3a8927c5faff7c9dff07c309 | 4,096 | py | Python | tests/testJobQueue.py | hartloff/Tango | 9dd867a596441e0e2ba1069017781dddb9c79bdb | [
"Apache-2.0"
] | 2 | 2020-10-30T03:01:55.000Z | 2021-03-25T03:18:12.000Z | tests/testJobQueue.py | hartloff/Tango | 9dd867a596441e0e2ba1069017781dddb9c79bdb | [
"Apache-2.0"
] | 7 | 2018-06-26T02:48:09.000Z | 2021-01-21T03:12:19.000Z | tests/testJobQueue.py | hartloff/Tango | 9dd867a596441e0e2ba1069017781dddb9c79bdb | [
"Apache-2.0"
] | 9 | 2018-09-28T23:48:48.000Z | 2021-10-03T20:29:48.000Z | import unittest
import redis
from jobQueue import JobQueue
from tangoObjects import TangoIntValue, TangoJob
from config import Config
if __name__ == '__main__':
unittest.main()
| 29.681159 | 63 | 0.619141 |
0b6b9493f9b4caffc3dc8d7eb74ffd39200333e1 | 6,891 | py | Python | hybmc/products/Swap.py | sschlenkrich/HybridMonteCarlo | 72f54aa4bcd742430462b27b72d70369c01f9ac4 | [
"MIT"
] | 3 | 2021-08-18T18:34:41.000Z | 2021-12-24T07:05:19.000Z | hybmc/products/Swap.py | sschlenkrich/HybridMonteCarlo | 72f54aa4bcd742430462b27b72d70369c01f9ac4 | [
"MIT"
] | null | null | null | hybmc/products/Swap.py | sschlenkrich/HybridMonteCarlo | 72f54aa4bcd742430462b27b72d70369c01f9ac4 | [
"MIT"
] | 3 | 2021-01-31T11:41:19.000Z | 2022-03-25T19:51:20.000Z | #!/usr/bin/python
import sys
sys.path.append('./')
import QuantLib as ql
from hybmc.simulations.Payoffs import Payoff, Fixed, ZeroBond, LiborRate, Cache, Asset
from hybmc.simulations.AmcPayoffs import AmcSum
from hybmc.products.Product import Product
| 43.06875 | 110 | 0.628646 |
0b6dc7197643d4c8de27269ff87d6ea19785b867 | 1,571 | py | Python | src/streamlink/plugins/tamago.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | [
"BSD-2-Clause"
] | 5 | 2019-07-26T17:03:26.000Z | 2020-10-17T23:23:43.000Z | src/streamlink/plugins/tamago.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | [
"BSD-2-Clause"
] | 9 | 2018-01-14T15:20:23.000Z | 2021-03-08T20:29:51.000Z | src/streamlink/plugins/tamago.py | bumplzz69/streamlink | 34abc43875d7663ebafa241573dece272e93d88b | [
"BSD-2-Clause"
] | 4 | 2018-01-14T13:27:25.000Z | 2021-11-15T22:28:30.000Z | import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HTTPStream
from streamlink import NoStreamsError
__plugin__ = Tamago
| 29.641509 | 112 | 0.623806 |
0b6e713eceaaae29df8407fca294483723c28e41 | 17,811 | py | Python | models/misc/modules.py | zgjslc/Film-Recovery-master1 | 4497a9930398c9e826ac364056a79e5bcbf6c953 | [
"Apache-2.0"
] | null | null | null | models/misc/modules.py | zgjslc/Film-Recovery-master1 | 4497a9930398c9e826ac364056a79e5bcbf6c953 | [
"Apache-2.0"
] | null | null | null | models/misc/modules.py | zgjslc/Film-Recovery-master1 | 4497a9930398c9e826ac364056a79e5bcbf6c953 | [
"Apache-2.0"
] | null | null | null | """
Name: modules.py
Desc: This script defines some base module for building networks.
"""
from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
class UNetDepth(nn.Module):
def __init__(self):
super(UNetDepth, self).__init__()
self.down_block1 = UNet_down_block(3, 16, False)
self.down_block2 = UNet_down_block(16, 32, True)
self.down_block3 = UNet_down_block(32, 64, True)
self.down_block4 = UNet_down_block(64, 128, True)
self.down_block5 = UNet_down_block(128, 256, True)
self.down_block6 = UNet_down_block(256, 512, True)
self.down_block7 = UNet_down_block(512, 1024, False)
self.mid_conv1 = nn.Conv2d(1024, 1024, 3, padding=1)
self.bn1 = nn.GroupNorm(8, 1024)
self.mid_conv2 = nn.Conv2d(1024, 1024, 3, padding=1)
self.bn2 = nn.GroupNorm(8, 1024)
self.mid_conv3 = torch.nn.Conv2d(1024, 1024, 3, padding=1)
self.bn3 = torch.nn.GroupNorm(8, 1024)
self.up_block1 = UNet_up_block(512, 1024, 512, False)
self.up_block2 = UNet_up_block(256, 512, 256, True)
self.up_block3 = UNet_up_block(128, 256, 128, True)
self.up_block4 = UNet_up_block(64, 128, 64, True)
self.up_block5 = UNet_up_block(32, 64, 32, True)
self.up_block6 = UNet_up_block(16, 32, 16, True)
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, 1, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, x):
x = self.x1 = self.down_block1(x)
x = self.x2 = self.down_block2(self.x1)
x = self.x3 = self.down_block3(self.x2)
x = self.x4 = self.down_block4(self.x3)
x = self.x5 = self.down_block5(self.x4)
x = self.x6 = self.down_block6(self.x5)
x = self.x7 = self.down_block7(self.x6)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
x = self.up_block1(self.x6, x)
x = self.up_block2(self.x5, x)
x = self.up_block3(self.x4, x)
x = self.up_block4(self.x3, x)
x = self.up_block5(self.x2, x)
x = self.up_block6(self.x1, x)
x = self.relu(self.last_bn(self.last_conv1(x)))
x = self.last_conv2(x)
return x
'''
| 41.133949 | 117 | 0.60311 |
0b6eaa68175183e78cc2a72bb734ce612395335a | 341 | py | Python | flask_webpack_bundle/config.py | briancappello/flask-webpack-bundle | 67896e6ade345e34721a8f9da156b65fc0646984 | [
"MIT"
] | null | null | null | flask_webpack_bundle/config.py | briancappello/flask-webpack-bundle | 67896e6ade345e34721a8f9da156b65fc0646984 | [
"MIT"
] | null | null | null | flask_webpack_bundle/config.py | briancappello/flask-webpack-bundle | 67896e6ade345e34721a8f9da156b65fc0646984 | [
"MIT"
] | null | null | null | import os
from flask_unchained import AppConfig
| 18.944444 | 69 | 0.730205 |
0b6fda84960a8cf5a23f750128dc700eaee71d2f | 2,458 | py | Python | touchdown/aws/elasticache/replication_group.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 14 | 2015-01-05T18:18:04.000Z | 2022-02-07T19:35:12.000Z | touchdown/aws/elasticache/replication_group.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 106 | 2015-01-06T00:17:13.000Z | 2019-09-07T00:35:32.000Z | touchdown/aws/elasticache/replication_group.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 5 | 2015-01-30T10:18:24.000Z | 2022-02-07T19:35:13.000Z | # Copyright 2014 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument, output, serializers
from touchdown.core.plan import Plan
from ..common import SimpleApply, SimpleDescribe, SimpleDestroy
from .cache import BaseCacheCluster
| 32.773333 | 83 | 0.746542 |
0b6ffbf766a563164a019a52f34be9e1263ae173 | 4,197 | py | Python | core/env.py | ayyuriss/EigenFunctions | 8cb6c22871fcddb633392c0a12691e960dad5143 | [
"MIT"
] | null | null | null | core/env.py | ayyuriss/EigenFunctions | 8cb6c22871fcddb633392c0a12691e960dad5143 | [
"MIT"
] | null | null | null | core/env.py | ayyuriss/EigenFunctions | 8cb6c22871fcddb633392c0a12691e960dad5143 | [
"MIT"
] | null | null | null | import xxhash
import numpy as np
from base.grid import SimpleGRID
import scipy.sparse as SP
h = xxhash.xxh64()
s_to_i = lambda x,size : size*x[0]+x[1]
i_to_s = lambda x,size : (x%size,x//size)
def get_graph(size):
env = SimpleGRID(grid_size=size,max_time=5000)
input_shape = env.observation_space.shape
min_batch = size**2-size
indexer = Indexer()
W = np.zeros((min_batch,min_batch))
states = np.zeros(min_batch).astype(int)
data = np.zeros((min_batch,)+input_shape)
while indexer.total<min_batch:
done = False
s = env.reset()
#s = s.transpose(2,0,1)#np.expand_dims(s,axis=0)
i = indexer.get(s_to_i(env.get_cat(),size))
states[i] = s_to_i(env.get_cat(),size)
data[states[i]] = s
while not done:
s,r,done = env.step(np.random.randint(4))
#s = np.expand_dims(s,axis=0)
#s = s.transpose(-1,0,1)
j = indexer.get(s_to_i(env.get_cat(),size))
states[j] = s_to_i(env.get_cat(),size)
data[states[j]] = s
W[states[i],states[j]] = W[states[j],states[i]] = 1
if r==1:
print(s_to_i(env.get_cat(),size),indexer.total)
i = j
return data, W
| 26.732484 | 109 | 0.510841 |
0b710ba6108771869cc4dcfa0e46001cedd35936 | 14,324 | py | Python | tests/test_properties.py | jmborr/ipdflex | 9537247c78373f740873703448d948e7a7ada9fb | [
"MIT"
] | 3 | 2020-01-15T08:23:48.000Z | 2022-03-28T22:14:05.000Z | tests/test_properties.py | jmborr/idpflex | 9537247c78373f740873703448d948e7a7ada9fb | [
"MIT"
] | 46 | 2018-01-10T22:51:46.000Z | 2021-11-15T17:47:32.000Z | tests/test_properties.py | jmborr/ipdflex | 9537247c78373f740873703448d948e7a7ada9fb | [
"MIT"
] | 5 | 2018-01-27T15:27:45.000Z | 2020-01-15T08:23:50.000Z | import random
import numpy as np
import pytest
import tempfile
import shutil
from idpflex import properties as ps
from idpflex.properties import SecondaryStructureProperty as SSP
if __name__ == '__main__':
pytest.main()
| 39.569061 | 79 | 0.645909 |
0b71623dce279f8394f45396a0c88a69e51e39e7 | 272 | py | Python | 3d_cnn/src/constants/particles.py | mrmattuschka/DeePiCt | ef3e81ea25705076f340175d97ccff98f8d11799 | [
"Apache-2.0"
] | null | null | null | 3d_cnn/src/constants/particles.py | mrmattuschka/DeePiCt | ef3e81ea25705076f340175d97ccff98f8d11799 | [
"Apache-2.0"
] | 2 | 2022-03-08T09:22:23.000Z | 2022-03-20T21:13:07.000Z | 3d_cnn/src/constants/particles.py | ZauggGroup/DeePiCt | 0bdf1cd845cc306e66e30face1010c12ca3a38d0 | [
"Apache-2.0"
] | null | null | null | from os.path import join
| 34 | 74 | 0.636029 |
0b72374ff1f0c05184c363dcfc881dd0ee234e7e | 13,807 | py | Python | unn/models/heads/utils/loss.py | zongdaoming/TinyTransformer | 8e64f8816117048c388b4b20e3a56760ce149fe3 | [
"Apache-2.0"
] | 2 | 2021-08-08T11:23:14.000Z | 2021-09-16T04:05:23.000Z | unn/models/heads/utils/loss.py | zongdaoming/TinyTransformer | 8e64f8816117048c388b4b20e3a56760ce149fe3 | [
"Apache-2.0"
] | 1 | 2021-08-08T11:25:47.000Z | 2021-08-08T11:26:15.000Z | unn/models/heads/utils/loss.py | zongdaoming/TinyTransformer | 8e64f8816117048c388b4b20e3a56760ce149fe3 | [
"Apache-2.0"
] | null | null | null | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from .... import extensions as E
from . import accuracy as A
logger = logging.getLogger('global')
def ohem_loss(batch_size, cls_pred, cls_target, loc_pred, loc_target, cls_type='softmax', smooth_l1_sigma=1.0):
"""
Arguments:
batch_size (int): number of sampled rois for bbox head training
loc_pred (FloatTensor): [R, 4], location of positive rois
loc_target (FloatTensor): [R, 4], location of positive rois
pos_mask (FloatTensor): [R], binary mask for sampled positive rois
cls_pred (FloatTensor): [R, C]
cls_target (LongTensor): [R]
Returns:
cls_loss, loc_loss (FloatTensor)
"""
if cls_type == 'softmax':
ohem_cls_loss = F.cross_entropy(cls_pred, cls_target, reduction='none', ignore_index=-1)
else:
ohem_cls_loss = F.binary_cross_entropy_with_logits(cls_pred, cls_target, reduction='none')
if loc_pred is None:
ohem_loc_loss = torch.zeros_like(ohem_cls_loss)
else:
ohem_loc_loss = smooth_l1_loss(loc_pred, loc_target, sigma=smooth_l1_sigma, reduce=False)
loss = ohem_cls_loss + ohem_loc_loss
sorted_ohem_loss, idx = torch.sort(loss, descending=True)
keep_num = min(sorted_ohem_loss.size()[0], batch_size)
if keep_num <= sorted_ohem_loss.size()[0]:
keep_idx_cuda = idx[:keep_num]
ohem_cls_loss = ohem_cls_loss[keep_idx_cuda]
ohem_loc_loss = ohem_loc_loss[keep_idx_cuda]
cls_loss = ohem_cls_loss.sum() / keep_num
loc_loss = ohem_loc_loss.sum() / keep_num
return cls_loss, loc_loss, keep_idx_cuda
def get_rpn_cls_loss(cls_pred, cls_target, sample_cls_mask, loss_type):
"""
Arguments:
cls_pred (FloatTensor): [B*K, C]
cls_target (LongTensor): [B*K]
sample_cls_mask (ByteTensor): [B, K], binary mask for sampled rois
loss_type (str): sigmoid or softmax
Returns:
cls_loss, acc (FloatTensor)
"""
sample_cls_mask = sample_cls_mask.reshape(-1)
if loss_type == "softmax":
cls_pred = cls_pred.reshape(cls_target.numel(), -1)
cls_target = cls_target.reshape(-1)
cls_loss = F.cross_entropy(cls_pred, cls_target.long(), ignore_index=-1)
acc = A.accuracy(cls_pred, cls_target)[0]
elif loss_type == "sigmoid":
cls_pred = cls_pred.reshape(-1)
cls_target = cls_target.reshape(-1)
normalizer = (sample_cls_mask > 0).float().sum()
normalizer = max(1, normalizer.item())
cls_loss = F.binary_cross_entropy_with_logits(cls_pred, cls_target.float(), reduction='none')
cls_loss = (cls_loss * sample_cls_mask.float()).sum() / normalizer
# acc = torch.tensor([0]).cuda().float() # for sigmoid, there is a bug in A.accuracy
acc = A.binary_accuracy(cls_pred, cls_target)[0]
return cls_loss, acc
def get_rpn_loc_loss(loc_pred, loc_target, sample_loc_mask, sigma, normalizer):
"""
Arguments:
loc_pred (FloatTensor): [B*K, 4]
loc_target (LongTensor): [B*K, 4]
sample_loc_mask (ByteTensor): [B, K], binary mask for sampled poitive rois
Returns:
loc_loss (FloatTensor)
"""
sample_loc_mask = sample_loc_mask.reshape(-1)
loc_target = loc_target.reshape(-1, 4)[sample_loc_mask]
loc_pred = loc_pred.reshape(-1, 4)[sample_loc_mask]
loc_loss = smooth_l1_loss(loc_pred, loc_target, sigma, normalizer=normalizer)
return loc_loss
def get_focal_loss(cls_pred, cls_target, normalizer, num_classes, cfg_loss):
"""
Arguments:
cls_pred (FloatTensor): [B*K, C]
cls_target (LongTensor): [B*K]
cfg_loss: config for focal loss
Returns:
cls_loss, acc (FloatTensor)
"""
alpha = cfg_loss['alpha']
gamma = cfg_loss['gamma']
loss_type = cfg_loss['type']
C = {'sigmoid': -1, 'softmax': 0}[loss_type] + num_classes
cls_pred = cls_pred.float().view(-1, C)
cls_target = cls_target.int().view(-1)
normalizer = torch.cuda.FloatTensor([normalizer])
loss_fn = {'sigmoid': E.SigmoidFocalLossFunction, 'softmax': E.SoftmaxFocalLossFunction}[loss_type]
loss_fn = loss_fn(gamma, alpha, C)
cls_loss = loss_fn(cls_pred, cls_target, normalizer)
if loss_type == 'softmax':
acc = A.accuracy(cls_pred, cls_target.long())[0]
elif loss_type == 'sigmoid':
acc = A.accuracy(cls_pred, cls_target.long() - 1, ignore_indices=[-1, -2])[0]
else:
raise NotImplementedError('{} is not supported for focal loss'.format(loss_type))
return cls_loss, acc
| 35.043147 | 127 | 0.599768 |
0b729c6af7d440093ab2706bff962e7602e418a9 | 1,327 | py | Python | integration/phore/tests/shardsynctest.py | phoreproject/synapse | 77d10ca2eb7828ca9f7c8e29b72a73cf2c07f954 | [
"MIT"
] | 9 | 2018-09-30T18:56:26.000Z | 2019-10-30T23:09:07.000Z | integration/phore/tests/shardsynctest.py | phoreproject/synapse | 77d10ca2eb7828ca9f7c8e29b72a73cf2c07f954 | [
"MIT"
] | 102 | 2018-11-09T16:17:59.000Z | 2020-11-04T19:06:01.000Z | integration/phore/tests/shardsynctest.py | phoreproject/graphene | 77d10ca2eb7828ca9f7c8e29b72a73cf2c07f954 | [
"MIT"
] | 5 | 2018-11-05T14:29:24.000Z | 2020-06-08T19:26:05.000Z | import logging
from phore.framework import tester, validatornode, shardnode
from phore.pb import common_pb2
ex = ShardSyncTest()
ex.run()
| 26.54 | 105 | 0.667671 |
0b72b6b59c7098297806590340d0f99c8c866547 | 426 | py | Python | chartconvert/mpp.py | e-sailing/avnav | b3e8df4d6fa122b05309eee09197c716e29b64ec | [
"MIT"
] | null | null | null | chartconvert/mpp.py | e-sailing/avnav | b3e8df4d6fa122b05309eee09197c716e29b64ec | [
"MIT"
] | null | null | null | chartconvert/mpp.py | e-sailing/avnav | b3e8df4d6fa122b05309eee09197c716e29b64ec | [
"MIT"
] | null | null | null | #! /usr/bin/env python
#
# vim: ts=2 sw=2 et
#
import sys
#from wx.py.crust import Display
inchpm=39.3700
dpi=100
if len(sys.argv) >1:
dpi=int(sys.argv[1])
displaympp=1/(float(dpi)*inchpm)
print "display mpp=%f"%(displaympp)
mpp= 20037508.342789244 * 2 / 256
print "Level : mpp \t\t: scale"
for i in range(0,31):
scale=mpp/displaympp
print "level(%02d):%07.4f:\t\t1:%5.2f"%(i,mpp,scale)
mpp=mpp/2
| 16.384615 | 54 | 0.638498 |
0b7345be2719bd8b7fdccdbf2d4ec3d3cec346b7 | 314 | py | Python | Lower_Upper_Counter/Lower_Upper_Counter.py | GracjanBuczek/Python | 655801ae58ed7ef21f7da2f69f649c556b20aaee | [
"MIT"
] | null | null | null | Lower_Upper_Counter/Lower_Upper_Counter.py | GracjanBuczek/Python | 655801ae58ed7ef21f7da2f69f649c556b20aaee | [
"MIT"
] | null | null | null | Lower_Upper_Counter/Lower_Upper_Counter.py | GracjanBuczek/Python | 655801ae58ed7ef21f7da2f69f649c556b20aaee | [
"MIT"
] | null | null | null | x = input("Enter sentence: ")
count={"Uppercase":0, "Lowercase":0}
for i in x:
if i.isupper():
count["Uppercase"]+=1
elif i.islower():
count["Lowercase"]+=1
else:
pass
print ("There is:", count["Uppercase"], "uppercases.")
print ("There is:", count["Lowercase"], "lowercases.")
| 26.166667 | 54 | 0.582803 |
0b740ea892a08bb96379c733e82f7e4324d439a4 | 684 | py | Python | examples/driving_in_traffic/scenarios/loop/scenario.py | zbzhu99/SMARTS | 652aa23e71bd4e2732e2742140cfcd0ec082a7da | [
"MIT"
] | 2 | 2021-12-13T12:41:54.000Z | 2021-12-16T03:10:24.000Z | examples/driving_in_traffic/scenarios/loop/scenario.py | zbzhu99/SMARTS | 652aa23e71bd4e2732e2742140cfcd0ec082a7da | [
"MIT"
] | null | null | null | examples/driving_in_traffic/scenarios/loop/scenario.py | zbzhu99/SMARTS | 652aa23e71bd4e2732e2742140cfcd0ec082a7da | [
"MIT"
] | null | null | null | from pathlib import Path
from smarts.sstudio import gen_scenario
from smarts.sstudio import types as t
traffic = t.Traffic(
flows=[
t.Flow(
route=t.RandomRoute(),
rate=60 * 60,
actors={t.TrafficActor(name="car", vehicle_type=vehicle_type): 1},
)
for vehicle_type in [
"passenger",
"bus",
"coach",
"truck",
"trailer",
"passenger",
"bus",
"coach",
"truck",
"trailer",
]
]
)
gen_scenario(
t.Scenario(
traffic={"basic": traffic},
),
output_dir=Path(__file__).parent,
)
| 20.117647 | 78 | 0.483918 |
0b74a2d6dbfc76ec355ef8ff8e62599cfa40e389 | 13,196 | py | Python | asyncorm/models/models.py | kejkz/asyncorm | 6342e2d5fbaa22fb368aead772ac4f255df7562a | [
"Apache-2.0"
] | 1 | 2017-02-27T05:37:39.000Z | 2017-02-27T05:37:39.000Z | asyncorm/models/models.py | kejkz/asyncorm | 6342e2d5fbaa22fb368aead772ac4f255df7562a | [
"Apache-2.0"
] | null | null | null | asyncorm/models/models.py | kejkz/asyncorm | 6342e2d5fbaa22fb368aead772ac4f255df7562a | [
"Apache-2.0"
] | null | null | null | import inspect
import os
from collections import Callable
from asyncorm.application.configure import get_model
from asyncorm.exceptions import AsyncOrmFieldError, AsyncOrmModelDoesNotExist, AsyncOrmModelError
from asyncorm.manager import ModelManager
from asyncorm.models.fields import AutoField, Field, ForeignKey, ManyToManyField
from asyncorm.serializers import ModelSerializer, SerializerMethod
__all__ = ["Model", "ModelSerializer", "SerializerMethod"]
def migration_queries(self):
migration_queries = [self.objects.create_table_builder()]
for f in self.fields.values():
if isinstance(f, ForeignKey):
migration_queries.append(self.objects.add_fk_field_builder(f))
for f in self.fields.values():
if isinstance(f, ManyToManyField):
migration_queries.append(self.objects.add_m2m_columns_builder(f))
migration_queries.append(self.objects.unique_together_builder())
return migration_queries
def __repr__(self):
return self.__str__()
| 35.761518 | 118 | 0.572674 |
0b7531882bc3693d78e18104d816fb7003ff5f35 | 74,222 | py | Python | pyseq/main.py | nygctech/PySeq2500 | 6969f178a5f5837ce2f41887d59624bf4cc39433 | [
"MIT"
] | 9 | 2019-09-25T16:41:42.000Z | 2021-11-15T08:49:48.000Z | pyseq/main.py | nygctech/PySeq2500 | 6969f178a5f5837ce2f41887d59624bf4cc39433 | [
"MIT"
] | 8 | 2020-07-18T09:50:33.000Z | 2022-03-12T01:01:21.000Z | pyseq/main.py | nygctech/PySeq2500 | 6969f178a5f5837ce2f41887d59624bf4cc39433 | [
"MIT"
] | 5 | 2020-08-02T09:51:12.000Z | 2022-01-04T15:54:32.000Z | """
TODO:
"""
import time
import logging
import os
from os.path import join
import sys
import configparser
import threading
import argparse
from . import methods
from . import args
from . import focus
# Global int to track # of errors during start up
def error(*args):
"""Keep count of errors and print to logger and/or console."""
global n_errors
i = 0
if isinstance(args[0], logging.Logger):
logger = args[0]
i = 1
msg = 'ERROR::'
for a in args[i:]:
msg = msg + str(a) + ' '
if i is 0:
print(msg)
else:
logger.log(21, msg)
n_errors += 1
return n_errors
##########################################################
## Flowcell Class ########################################
##########################################################
##########################################################
## Setup Flowcells #######################################
##########################################################
def setup_flowcells(first_line, IMAG_counter):
"""Read configuration file and create flowcells.
**Parameters:**
- first_line (int): Line number for the recipe to start from on the
initial cycle.
**Returns:**
- dict: Dictionary of flowcell position keys with flowcell object values.
"""
err_msg = 'ConfigFile::sections::'
experiment = config['experiment']
method = experiment['method']
method = config[method]
flowcells = {}
for sect_name in config['sections']:
f_sect_name = sect_name.replace('_','') #remove underscores
position = config['sections'][sect_name]
AorB, coord = position.split(':')
# Create flowcell if it doesn't exist
if AorB not in flowcells.keys():
fc = Flowcell(AorB)
fc.recipe_path = experiment['recipe path']
fc.first_line = first_line
fc.volume['main'] = int(method.get('main prime volume', fallback=500))
fc.volume['side'] = int(method.get('side prime volume', fallback=350))
fc.volume['sample'] = int(method.get('sample prime volume', fallback=250))
fc.volume['flush'] = int(method.get('flush volume', fallback=1000))
fs = int(method.get('flush flowrate',fallback=700))
fc.pump_speed['flush'] = fs
ps = int(method.get('prime flowrate',fallback=100))
fc.pump_speed['prime'] = ps
rs = int(method.get('reagent flowrate', fallback=40))
fc.pump_speed['reagent'] = rs
fc.total_cycles = int(config.get('experiment','cycles'))
fc.temp_interval = float(method.get('temperature interval', fallback=5))*60
z_planes = int(method.get('z planes', fallback=0))
if z_planes > 0:
fc.z_planes = z_planes
if IMAG_counter > 1:
fc.IMAG_counter = 0
fc.prerecipe_path = method.get('pre recipe', fallback = None)
flowcells[AorB] = fc
# Add section to flowcell
if sect_name in flowcells[AorB].sections:
error(err_msg, sect_name, 'duplicated on flowcell', AorB)
else:
coord = coord.split(',')
flowcells[AorB].sections[f_sect_name] = [] # List to store coordinates of section on flowcell
flowcells[AorB].stage[f_sect_name] = {} # Dictionary to store stage position of section on flowcell
if float(coord[0]) < float(coord[2]):
error(err_msg,'Invalid x coordinates for', sect_name)
if float(coord[1]) < float(coord[3]):
error(err_msg, 'Invalid y coordinates for', sect_name)
for i in range(4):
try:
flowcells[AorB].sections[f_sect_name].append(float(coord[i]))
except:
error(err_msg,' No position for', sect_name)
# if runnning mulitiple flowcells...
# Define first flowcell
# Define prior flowcell signals to next flowcell
if len(flowcells) > 1:
flowcell_list = [*flowcells]
for fc in flowcells.keys():
flowcells[fc].waits_for = flowcell_list[
flowcell_list.index(fc)-1]
if experiment['first flowcell'] not in flowcells:
error('ConfigFile::First flowcell does not exist')
if isinstance(IMAG_counter, int):
error('Recipe::Need WAIT before IMAG with 2 flowcells.')
# table = {}
# for fc in flowcells:
# table[fc] = flowcells[fc].sections.keys()
# print('Flowcell section summary')
# print(tabulate.tabulate(table, headers = 'keys', tablefmt = 'presto'))
#
# userYN('Confirm flowcell(s)')
return flowcells
##########################################################
## Parse lines from recipe ###############################
##########################################################
def parse_line(line):
"""Parse line and return event (str) and command (str).
If line starts with the comment character, #, then None is return for
both event and command.
"""
comment_character = '#'
#delimiter = '\t'
no_comment = line.split(comment_character)[0] # remove comment
sections = no_comment.split(':')
if len(sections) == 2:
event = sections[0].strip() # first section is event
event = event[0:4] # event identified by first 4 characters
command = sections[1] # second section is command
command = command.strip() # remove space
else:
event = None
command = None
return event, command
##########################################################
## Setup Logging #########################################
##########################################################
def setup_logger():
"""Create a logger and return the handle."""
# Get experiment info from config file
experiment = config['experiment']
experiment_name = experiment['experiment name']
# Make directory to save data
save_path = join(experiment['save path'],experiment_name)
if not os.path.exists(save_path):
os.mkdir(save_path)
# Make directory to save logs
log_path = join(save_path, experiment['log path'])
if not os.path.exists(log_path):
os.mkdir(log_path)
# Create a custom logger
logger = logging.getLogger(__name__)
logger.setLevel(10)
# Create console handler
c_handler = logging.StreamHandler()
c_handler.setLevel(21)
# Create file handler
f_log_name = join(log_path,experiment_name + '.log')
f_handler = logging.FileHandler(f_log_name)
f_handler.setLevel(logging.INFO)
# Create formatters and add it to handlers
c_format = logging.Formatter('%(asctime)s - %(message)s', datefmt = '%Y-%m-%d %H:%M')
f_format = logging.Formatter('%(asctime)s - %(message)s')
c_handler.setFormatter(c_format)
f_handler.setFormatter(f_format)
# Add handlers to the logger
logger.addHandler(c_handler)
logger.addHandler(f_handler)
# Save copy of config with log
config_path = join(log_path,'config.cfg')
with open(config_path, 'w') as configfile:
config.write(configfile)
return logger
def configure_instrument(IMAG_counter, port_dict):
"""Configure and check HiSeq settings."""
global n_errors
model, name = methods.get_machine_info(args_['virtual'])
if model is not None:
config['experiment']['machine'] = model+'::'+name
experiment = config['experiment']
method = experiment['method']
method = config[method]
try:
total_cycles = int(experiment.get('cycles'))
except:
error('ConfigFile:: Cycles not specified')
# Creat HiSeq Object
if model == 'HiSeq2500':
if args_['virtual']:
from . import virtualHiSeq
hs = virtualHiSeq.HiSeq(name, logger)
hs.speed_up = int(method.get('speed up', fallback = 5000))
else:
import pyseq
com_ports = pyseq.get_com_ports()
hs = pyseq.HiSeq(name, logger)
else:
sys.exit()
# Check side ports
try:
side_ports = method.get('side ports', fallback = '9,21,22,23,24')
side_ports = side_ports.split(',')
side_ports = list(map(int, side_ports))
except:
error('ConfigFile:: Side ports not valid')
# Check sample port
try:
sample_port = int(method.get('sample port', fallback = 20))
except:
error('ConfigFile:: Sample port not valid')
# Check barrels per lane make sense:
n_barrels = int(method.get('barrels per lane', fallback = 1)) # Get method specific pump barrels per lane, fallback to 1
if n_barrels not in [1,2,4,8]:
error('ConfigFile:: Barrels per lane must be 1, 2, 4 or 8')
# Check inlet ports, note switch inlet ports in initialize_hs
inlet_ports = int(method.get('inlet ports', fallback = 2))
if inlet_ports not in [2,8]:
error('MethodFile:: inlet ports must be 2 or 8.')
variable_ports = method.get('variable reagents', fallback = None)
hs.z.image_step = int(method.get('z position', fallback = 21500))
hs.overlap = abs(int(method.get('overlap', fallback = 0)))
hs.overlap_dir = method.get('overlap direction', fallback = 'left').lower()
if hs.overlap_dir not in ['left', 'right']:
error('MethodFile:: overlap direction must be left or right')
for fc in flowcells.values():
AorB = fc.position
hs.v24[AorB].side_ports = side_ports
hs.v24[AorB].sample_port = sample_port
hs.v24[AorB].port_dict = port_dict # Assign ports on HiSeq
if variable_ports is not None:
v_ports = variable_ports.split(',')
for v in v_ports: # Assign variable ports
hs.v24[AorB].variable_ports.append(v.strip())
hs.p[AorB].update_limits(n_barrels) # Assign barrels per lane to pump
for section in fc.sections: # Convert coordinate sections on flowcell to stage info
pos = hs.position(AorB, fc.sections[section])
fc.stage[section] = pos
fc.stage[section]['z_pos'] = [hs.z.image_step]*3
## TODO: Changing laser color unecessary for now, revist if upgrading HiSeq
# Configure laser color & filters
# colors = [method.get('laser color 1', fallback = 'green'),
# method.get('laser color 2', fallback = 'red')]
# for i, color in enumerate(default_colors):
# if color is not colors[i]:
# laser = hs.lasers.pop(color) # Remove default laser color
# hs.lasers[colors[i]] = laser # Add new laser
# hs.lasers[colors[i]].color = colors[i] # Update laser color
# hs.optics.colors[i] = colors[i] # Update laser line color
# Check laser power
for color in hs.lasers.keys():
lp = int(method.get(color+' laser power', fallback = 10))
if hs.lasers[color].min_power <= lp <= hs.lasers[color].max_power:
hs.lasers[color].set_point = lp
else:
error('MethodFile:: Invalid '+color+' laser power')
#Check filters for laser at each cycle are valid
hs.optics.cycle_dict = check_filters(hs.optics.cycle_dict, hs.optics.ex_dict)
focus_filters = [method.get('green focus filter', fallback = 2.0),
method.get('red focus filter', fallback = 2.4)]
for i, f in enumerate(focus_filters):
try:
f = float(f)
except:
pass
if f not in hs.optics.ex_dict[hs.optics.colors[i]]:
error('ConfigFile:: Focus filter not valid.')
else:
hs.optics.focus_filters[i] = f
# Check Autofocus Settings
hs.AF = method.get('autofocus', fallback = 'partial once')
if hs.AF.lower() in ['','none']: hs.AF = None
if hs.AF not in ['partial', 'partial once', 'full', 'full once', 'manual', None]:
# Skip autofocus and set objective position in config file
try:
if hs.obj.min_z <= int(hs.AF) <= hs.obj.max_z:
hs.AF = int(hs.AF)
except:
error('ConfigFile:: Auto focus method not valid.')
#Enable/Disable z stage
hs.z.active = method.getboolean('enable z stage', fallback = True)
# Get focus Tolerance
hs.focus_tol = float(method.get('focus tolerance', fallback = 0))
# Get focus range
range = float(method.get('focus range', fallback = 90))
spacing = float(method.get('focus spacing', fallback = 4.1))
hs.obj.update_focus_limits(range=range, spacing=spacing) # estimate, get actual value in hs.obj_stack()
hs.stack_split = float(method.get('stack split', fallback = 2/3))
hs.bundle_height = int(method.get('bundle height', fallback = 128))
# Assign output directory
save_path = experiment['save path']
experiment_name = experiment['experiment name']
save_path = join(experiment['save path'], experiment['experiment name'])
if not os.path.exists(save_path):
try:
os.mkdir(save_path)
except:
error('ConfigFile:: Save path not valid.')
# Assign image directory
image_path = join(save_path, experiment['image path'])
if not os.path.exists(image_path):
os.mkdir(image_path)
with open(join(image_path,'machine_name.txt'),'w') as file:
file.write(hs.name)
hs.image_path = image_path
# Assign log directory
log_path = join(save_path, experiment['log path'])
if not os.path.exists(log_path):
os.mkdir(log_path)
hs.log_path = log_path
return hs
def confirm_settings(recipe_z_planes = None):
    """Have user confirm the HiSeq settings before experiment.

    Prints summaries of the experiment, flowcells, valve assignment,
    pump settings, cycles, imaging settings, and any previously saved
    autofocus positions, asking the user to confirm each section.
    Calls ``sys.exit()`` if the user rejects any section (or if errors
    were counted and the user declines to keep checking).

    **Parameters:**
     - recipe_z_planes (list): z planes per IMAG step parsed from the
       recipe, shown when the method file does not fix 'z planes'.
    """
    # Fixed mutable default argument: bind a fresh list per call instead
    # of one shared [] created at definition time.
    if recipe_z_planes is None:
        recipe_z_planes = []
    experiment = config['experiment']
    method = experiment['method']
    method = config[method]
    total_cycles = int(experiment['cycles'])
    # Print settings to screen (tabulate is optional; fall back to plain print)
    try:
        import tabulate
        print_table = True
    except:
        print_table = False
    if n_errors > 0:
        print()
        if not userYN('Continue checking experiment before exiting'):
            sys.exit()
    # Experiment summary
    print()
    print('-'*80)
    print()
    print(experiment['experiment name'], 'summary')
    print()
    print('method:', experiment['method'])
    print('recipe:', method['recipe'])
    print('cycles:', experiment['cycles'])
    pre_recipe = method.get('pre recipe', fallback = None)
    if pre_recipe is not None:
        print('pre recipe:', pre_recipe)
    first_port = method.get('first port', fallback = None)
    if first_port is not None:
        print('first_port:', first_port)
    print('save path:', experiment['save path'])
    print('enable z stage:', hs.z.active)
    print('machine:', experiment['machine'])
    print()
    if not userYN('Confirm experiment'):
        sys.exit()
    print()
    # Flowcell summary
    table = {}
    for fc in flowcells:
        table[fc] = flowcells[fc].sections.keys()
    print('-'*80)
    print()
    print('Flowcells:')
    print()
    if print_table:
        print(tabulate.tabulate(table, headers = 'keys', tablefmt = 'presto'))
    else:
        print(table)
    print()
    if not userYN('Confirm flowcells'):
        sys.exit()
    print()
    # Valve summary (variable reagents are dicts and listed separately)
    table = []
    ports = []
    for port in port_dict:
        if not isinstance(port_dict[port], dict):
            ports.append(int(port_dict[port]))
            table.append([port_dict[port], port])
    print('-'*80)
    print()
    print('Valve:')
    print()
    if print_table:
        print(tabulate.tabulate(table, headers=['port', 'reagent'], tablefmt = 'presto'))
    else:
        print(table)
    print()
    if not userYN('Confirm valve assignment'):
        sys.exit()
    print()
    # Pump summary:
    AorB = [*flowcells.keys()][0]
    fc = flowcells[AorB]
    print('-'*80)
    print()
    print('Pump Settings:')
    print()
    inlet_ports = int(method.get('inlet ports', fallback = 2))
    print('Reagents pumped through row with ', inlet_ports, 'inlet ports')
    print(hs.p[AorB].n_barrels, 'syringe pump barrels per lane')
    print('Flush volume:',fc.volume['flush'], 'L')
    # Ports 1-8 and 10-19 are main ports; 9, 21-24 are side ports; 20 is sample
    if any([True for port in ports if port in [*range(1,9),*range(10,20)]]):
        print('Main prime volume:', fc.volume['main'], 'L')
    if any([True for port in ports if port in [9,21,22,23,24]]):
        print('Side prime volume:', fc.volume['side'], 'L')
    if 20 in ports:
        print('Sample prime volume:', fc.volume['sample'], 'L')
    print('Flush flowrate:',fc.pump_speed['flush'], 'L/min')
    print('Prime flowrate:',fc.pump_speed['prime'], 'L/min')
    print('Reagent flowrate:',fc.pump_speed['reagent'], 'L/min')
    print('Max volume:', hs.p[AorB].max_volume, 'L')
    print('Min flow:', hs.p[AorB].min_flow, 'L/min')
    print()
    if not userYN('Confirm pump settings'):
        sys.exit()
    # Cycle summary:
    variable_ports = hs.v24[AorB].variable_ports
    start_cycle = 1
    if method.get('pre recipe', fallback = None) is not None:
        start_cycle = 0
    table = []
    for cycle in range(start_cycle,total_cycles+1):
        row = []
        row.append(cycle)
        if len(variable_ports) > 0:
            for vp in variable_ports:
                if cycle > 0:
                    row.append(port_dict[vp][cycle])
                else:
                    row.append(None)
        if IMAG_counter > 0:
            colors = [*hs.optics.cycle_dict.keys()]
            for color in colors:
                row.append(hs.optics.cycle_dict[color][cycle])
        else:
            colors = []
        table.append(row)
    print('-'*80)
    print()
    print('Cycles:')
    print()
    if len(variable_ports) + len(colors) > 0:
        headers = ['cycle', *variable_ports, *colors]
        if print_table:
            print(tabulate.tabulate(table, headers, tablefmt='presto'))
        else:
            print(headers)
            print(table)
        print()
        stop_experiment = not userYN('Confirm cycles')
    else:
        if total_cycles == 1:
            stop_experiment = not userYN('Confirm only 1 cycle')
        else:
            stop_experiment = not userYN('Confirm all', total_cycles, 'cycles are the same')
    if stop_experiment:
        sys.exit()
    print()
    if IMAG_counter > 0:
        print('-'*80)
        print()
        print('Imaging settings:')
        print()
        laser_power = [hs.lasers['green'].set_point,
                       hs.lasers['red'].set_point]
        print('green laser power:', laser_power[0], 'mW')
        print('red laser power:',laser_power[1], 'mW')
        print('autofocus:', hs.AF)
        if hs.AF is not None:
            print('focus spacing', hs.obj.focus_spacing,'um')
            print('focus range', hs.obj.focus_range, '%')
            if hs.focus_tol > 0 and hs.AF != 'manual':
                print('focus tolerance:', hs.focus_tol, 'um')
            elif hs.AF != 'manual':
                print('focus tolerance: None')
                print('WARNING::Out of focus image risk increased')
            # Estimated focus laser power after excitation filter attenuation
            for i, filter in enumerate(hs.optics.focus_filters):
                if filter == 'home':
                    focus_laser_power = 0
                elif filter == 'open':
                    focus_laser_power = laser_power[i]
                else:
                    focus_laser_power = laser_power[i]*10**(-float(filter))
                # NOTE(review): colors[i+1] assumes colors[0] is the 'em'
                # channel and lasers follow -- confirm against cycle_dict order
                print(colors[i+1], 'focus laser power ~', focus_laser_power, 'mW')
        print('z position when imaging:', hs.z.image_step)
        if hs.overlap > 0:
            print('pixel overlap:', hs.overlap)
            print('overlap direction:', hs.overlap_dir)
        z_planes = int(method.get('z planes', fallback = 0))
        if z_planes > 0:
            print('z planes:', z_planes)
        else:
            print('z planes:', *recipe_z_planes)
        if z_planes > 1 or any(recipe_z_planes):
            print('stack split:', hs.stack_split)
        if not userYN('Confirm imaging settings'):
            sys.exit()
    # Check if previous focus positions have been found, and confirm to use
    if os.path.exists(join(hs.log_path, 'focus_config.cfg')):
        focus_config = configparser.ConfigParser()
        focus_config.read(join(hs.log_path, 'focus_config.cfg'))
        cycles = 0
        sections = []
        for section in config.options('sections'):
            if focus_config.has_section(section):
                sections.append(section)
                n_focus_cycles = len(focus_config.options(section))
                if n_focus_cycles > cycles:
                    cycles = n_focus_cycles
        table = []
        for section in sections:
            row = []
            row.append(section)
            for c in range(1,cycles+1):
                if focus_config.has_option(section, str(c)):
                    row.append(focus_config[section][str(c)])
                else:
                    row.append(None)
            table.append(row)
        if len(sections) > 0 and cycles > 0:
            print('-'*80)
            print()
            print('Previous Autofocus Objective Positions:')
            print()
            headers = ['section', *['cycle'+str(c) for c in range(1,cycles+1)]]
            if print_table:
                print(tabulate.tabulate(table, headers, tablefmt='presto'))
            else:
                print(headers)
                print(table)
            print()
            if not userYN('Confirm using previous autofocus positions'):
                sys.exit()
    print()
##########################################################
## Setup HiSeq ###########################################
##########################################################
def initialize_hs(IMAG_counter):
    """Initialize the HiSeq and return the handle.

    Initializes cameras and instruments, moves the inlet, sets laser
    powers, and turns off / sets the front LEDs. Skipped entirely when
    configuration errors were counted.

    **Parameters:**
     - IMAG_counter (int): Number of imaging steps in the recipe; laser
       on-state is only verified when the recipe actually images.

    **Returns:**
     - HiSeq: The (possibly initialized) HiSeq handle.
    """
    global n_errors
    experiment = config['experiment']
    method = experiment['method']
    method = config[method]
    # Use == for the integer comparison; "is 0" relies on CPython int
    # interning and emits a SyntaxWarning on modern Python.
    if n_errors == 0:
        if not userYN('Initialize HiSeq'):
            sys.exit()
        hs.initializeCams(logger)
        x_homed = hs.initializeInstruments()
        if not x_homed:
            error('HiSeq:: X-Stage did not home correctly')
        # HiSeq Settings
        inlet_ports = int(method.get('inlet ports', fallback = 2))
        hs.move_inlet(inlet_ports) # Move to 2 or 8 port inlet
        # Set laser power
        for color in hs.lasers.keys():
            laser_power = int(method.get(color+' laser power', fallback = 10))
            hs.lasers[color].set_power(laser_power)
            if IMAG_counter > 0:
                if not hs.lasers[color].on:
                    error('HiSeq:: Lasers did not turn on.')
        hs.f.LED('A', 'off')
        hs.f.LED('B', 'off')
        LED('all', 'startup')
        hs.move_stage_out()
    return hs
##########################################################
## Check Instructions ####################################
##########################################################
def check_instructions():
    """Check the instructions for errors.

    Validates every line of the recipe (and pre recipe, if any):
    PORT commands must name configured reagents, PUMP/IMAG/HOLD/TEMP
    arguments must be numeric, and WAIT targets must be known events.

    **Returns:**
     - first_line (int): Line number for the recipe to start from on the
       initial cycle.
     - IMAG_counter (int): The number of imaging steps (kept as a float
       when a WAIT precedes each IMAG, flagging 2-flowcell syncing).
     - z_planes (list): z plane counts given by each IMAG step.
    """
    method = config.get('experiment', 'method')
    method = config[method]
    first_port = method.get('first port', fallback = None) # Get first reagent to use in recipe
    # Backdoor to input line number for first step in recipe
    try:
        first_port = int(first_port)
        first_line = first_port
        first_port = None
    except:
        first_line = 0
    variable_ports = method.get('variable reagents', fallback = None)
    ports = []
    for port in config['reagents'].items():
        ports.append(port[1])
    if variable_ports is not None:
        variable_ports = variable_ports.split(',')
        for port in variable_ports:
            ports.append(port.strip())
    # Copy the reagent list: the original aliased valid_wait to ports, so
    # appending the WAIT keywords below also made 'IMAG'/'STOP'/'TEMP'
    # pass as valid PORT reagents.
    valid_wait = list(ports)
    valid_wait.append('IMAG')
    valid_wait.append('STOP')
    valid_wait.append('TEMP')
    recipes = {}
    recipes['Recipe'] = config['experiment']['recipe path']
    pre_recipe = method.get('pre recipe',fallback= None)
    if pre_recipe is not None:
        recipes['Pre Recipe'] = pre_recipe
    # Sorted order checks 'Pre Recipe' first and 'Recipe' last, so the
    # returned counters describe the main recipe.
    for recipe in sorted([*recipes.keys()]):
        try:
            f = open(recipes[recipe])
        except:
            error(recipe,'::Unable to open', recipes[recipe])
        #Remove blank lines
        f_ = [line for line in f if line.strip()]
        f.close()
        IMAG_counter = 0.0
        wait_counter = 0
        z_planes = []
        for line_num, line in enumerate(f_):
            instrument, command = parse_line(line)
            if instrument == 'PORT':
                # Make sure ports in instruction files exist in port dictionary in config file
                if command not in ports:
                    error(recipe,'::', command, 'on line', line_num,
                          'is not listed as a reagent')
                #Find line to start at for first cycle
                # (== not "is": identity comparison of str literals is unreliable)
                if first_line == 0 and first_port is not None and recipe == 'Recipe':
                    if command.find(first_port) != -1:
                        first_line = line_num
            # Make sure pump volume is a number
            elif instrument == 'PUMP':
                if command.isdigit() == False:
                    error(recipe,'::Invalid volume on line', line_num)
            # Make sure wait command is valid
            elif instrument == 'WAIT':
                wait_counter += 1
                if command not in valid_wait:
                    error(recipe,'::Invalid wait command on line', line_num)
            # Make sure z planes is a number
            elif instrument == 'IMAG':
                IMAG_counter = int(IMAG_counter + 1)
                # Flag to make check WAIT is used before IMAG for 2 flowcells
                if wait_counter >= IMAG_counter:
                    IMAG_counter = float(IMAG_counter)
                if command.isdigit() == False:
                    error(recipe,'::Invalid number of z planes on line', line_num)
                else:
                    z_planes.append(command)
            # Make sure hold time (minutes) is a number
            elif instrument == 'HOLD':
                if command.isdigit() == False:
                    if command != 'STOP':
                        error(recipe,'::Invalid time on line', line_num)
                    else:
                        print(recipe,'::WARNING::HiSeq will stop until user input at line',
                              line_num)
            elif instrument == 'TEMP':
                if not command.isdigit():
                    error(recipe,'::Invalid temperature on line', line_num)
            # Make sure the instrument name is valid
            else:
                error(recipe,'::Bad instrument name on line',line_num)
                print(line)
    return first_line, IMAG_counter, z_planes
##########################################################
## Check Ports ###########################################
##########################################################
def check_ports():
    """Check for port errors and return a port dictionary.
    """
    method_cfg = config[config.get('experiment', 'method')]
    total_cycles = int(config.get('experiment', 'cycles'))
    # Valve section maps port number -> reagent name
    valve = config['reagents']
    # Recipe port names whose reagent changes every cycle
    cycle_variables = method_cfg.get('variable reagents', fallback = None )
    # Per-cycle reagent assignments for the variable ports
    cycle_reagents = config['cycles'].items()
    port_dict = {}
    # Reagent names must be unique so name -> port is unambiguous
    if len(valve.values()) != len(set(valve.values())):
        error('ConfigFile: Reagent names are not unique')
        #TODO: PRINT DUPLICATES
    if len(valve) > 0:
        # Invert the valve section: reagent name -> integer port number
        for number, name in valve.items():
            try:
                port_dict[name] = int(number)
            except:
                error('ConfigFile:List reagents as n (int) = name (str) ')
        if cycle_variables is not None:
            # Each variable reagent gets its own {cycle: reagent} table
            names = [v.replace(' ','') for v in cycle_variables.split(',')]
            for variable in names:
                if variable in port_dict:
                    error('ConfigFile::Variable', variable, 'can not be a reagent')
                else:
                    port_dict[variable] = {}
            # Fill each table from the [cycles] section
            for key, reagent in cycle_reagents:
                variable, cyc_number = key.split(' ')
                if reagent in valve.values():
                    if variable in port_dict:
                        port_dict[variable][int(cyc_number)] = reagent
                    else:
                        error('ConfigFile::', variable, 'not listed as variable reagent')
                else:
                    error('ConfigFiles::Cycle reagent:', reagent, 'does not exist on valve')
            # Every variable reagent needs one entry per experiment cycle
            for variable in names:
                if len(port_dict[variable]) != total_cycles:
                    error('ConfigFile::Number of', variable, 'reagents does not match experiment cycles')
    else:
        print('WARNING::No ports are specified')
    return port_dict
def check_filters(cycle_dict, ex_dict):
    """Check filter section of config file.

    Parses the [filters] section into *cycle_dict* and fills cycles
    without an explicit filter with the method's defaults.

    **Parameters:**
     - cycle_dict (dict): laser color -> {cycle: filter} to populate.
     - ex_dict (dict): configured excitation filters per laser color.

    **Returns:**
     - dict: The populated cycle_dict.

    **Errors:**
     - Invalid Filter: System exits when a listed filter does not match
       configured filters on the HiSeq.
     - Duplicate Cycle: System exists when a filter for a laser is listed for
       the same cycle more than once.
     - Invalid laser: System exits when a listed laser color does not match
       configured laser colors on the HiSeq.
    """
    colors = [*cycle_dict.keys()]
    # Check laser, cycle, and filter are valid
    cycle_filters = config['filters'].items()
    for item in cycle_filters:
        # Get laser cycle = filter
        filter = item[1]
        # filters are floats, except for home and open,
        # and emission (True/False)
        if filter.lower() in ['true', 'yes', '1', 't', 'y']:
            filter = True
        elif filter.lower() in ['false', 'no', '0', 'f', 'n']:
            filter = False
        elif filter not in ['home','open']:
            filter = float(filter)
        laser, cycle = item[0].split()
        cycle = int(cycle)
        # Check if laser is valid, can use partial match ie, g or G for green
        if laser in colors:
            laser = [laser]
        else:
            laser = [colors[i] for i, c in enumerate(colors) if laser.lower() in c[0]]
        if len(laser) > 0:
            laser = laser[0]
        if laser in ex_dict.keys():
            if filter in ex_dict[laser]:
                if cycle not in cycle_dict[laser]:
                    cycle_dict[laser][cycle] = filter
                else:
                    error('ConfigFile::Duplicated cycle for', laser, 'laser')
        elif laser == 'em':
            if isinstance(filter, bool):
                if cycle not in cycle_dict[laser]:
                    cycle_dict[laser][cycle] = filter
                else:
                    error('ConfigFile::Duplicated emission filter cycle')
            else:
                error('ConfigFile::Invalid filter for', laser, 'laser')
        else:
            error('ConfigFile:Invalid laser')
    # Add default/home to cycles with out filters specified
    method = config.get('experiment', 'method')
    method = config[method]
    start_cycle = 1
    if method.get('pre recipe', fallback = None):
        start_cycle = 0
    last_cycle = int(config.get('experiment','cycles'))+1
    # Get/check default filters
    default_filters = {}
    fallbacks = {'red':'home', 'green':'home', 'em':'True'}
    for laser in colors:
        filter = method.get('default '+laser+' filter', fallback = fallbacks[laser])
        try:
            filter = float(filter)
        except:
            pass
        if laser in ex_dict.keys():
            if filter in ex_dict[laser].keys():
                default_filters[laser] = filter
        elif laser == 'em':
            if filter in ['True', 'False']:
                # Store a real boolean, matching the bool values parsed
                # from [filters] above. Previously the string was stored,
                # and the string 'False' is truthy, so a default of False
                # left the emission filter effectively always in.
                default_filters[laser] = filter == 'True'
    # Assign default filters to missing cycles
    for cycle in range(start_cycle,last_cycle):
        for laser in colors:
            if cycle not in cycle_dict[laser]:
                cycle_dict[laser][cycle] = default_filters[laser]
    return cycle_dict
def LED(AorB, indicate):
    """Control front LEDs to communicate what the HiSeq is doing.

    **Parameters:**
     - AorB (str): Flowcell position (A or B), or 'all'.
     - indicate (str): Current action of the HiSeq or state of the
       flowcell. One of: off (not in use), error (yellow), startup
       (green), user (pulse green, input needed), sleep (blue,
       holding/waiting), awake (pulse blue, valve/pump/temperature
       action), imaging (sweep blue).

    **Returns:**
     - bool: Always True.
    """
    # Map each state to the LED mode string understood by the FPGA
    modes = {'startup': 'green',
             'user':    'pulse green',
             'error':   'yellow',
             'sleep':   'blue',
             'awake':   'pulse blue',
             'imaging': 'sweep blue',
             'off':     'off'}
    if AorB in flowcells.keys():
        positions = [AorB]
    elif AorB == 'all':
        positions = [*flowcells.keys()]
    else:
        positions = []
    # Unknown indicate values are silently ignored (same as original)
    if indicate in modes:
        for position in positions:
            hs.f.LED(position, modes[indicate])
    return True
def userYN(*args):
    """Ask a user a Yes/No question and return True if Yes, False if No.

    All positional arguments are joined (space separated) to form the
    question; the prompt repeats until the user enters Y or N
    (case-insensitive).
    """
    question = ''.join(str(a) + ' ' for a in args)
    while True:
        reply = input(question + '? Y/N = ').upper().strip()
        if reply == 'Y':
            return True
        if reply == 'N':
            return False
def do_flush():
    """Flush all, some, or none of lines.

    Interactively asks which valve lines to flush (by reagent name or
    port number), confirms temporary flowcells are loaded and inputs are
    in water, then for each selected port moves both flowcells' valves
    and pumps the flush volume, running both flowcells in parallel.
    """
    AorB_ = [*flowcells.keys()][0]
    port_dict = hs.v24[AorB_].port_dict
    # Select lines to flush
    LED('all', 'user')
    confirm = False
    while not confirm:
        flush_ports = input("Flush all, some, or none of the lines? ")
        if flush_ports.strip().lower() == 'all':
            # NOTE(review): flush_all is set but never read
            flush_all = True
            flush_ports = [*port_dict.keys()]
            # Variable reagents are expanded to their per-cycle ports later
            for vp in hs.v24[AorB_].variable_ports:
                if vp in flush_ports:
                    flush_ports.remove(vp)
            confirm = userYN('Confirm flush all lines')
        elif flush_ports.strip().lower() in ['none', 'N', 'n', '']:
            # NOTE(review): input is lowercased, so 'N'/'n' here are
            # redundant with 'none'/'' -- harmless, never matched
            flush_ports = []
            confirm = userYN('Confirm skip flushing lines')
        else:
            # Parse a comma separated list of reagent names or port numbers
            good =[]
            bad = []
            for fp in flush_ports.split(','):
                fp = fp.strip()
                if fp in port_dict.keys():
                    good.append(fp)
                else:
                    try:
                        fp = int(fp)
                        if fp in range(1,hs.v24[AorB_].n_ports+1):
                            good.append(fp)
                        else:
                            bad.append(fp)
                    except:
                        bad.append(fp)
            if len(bad) > 0:
                print('Valid ports:', *good)
                print('Invalid ports:', *bad)
                confirm = not userYN('Re-enter lines to flush')
            else:
                confirm = userYN('Confirm only flushing',*good)
            if confirm:
                flush_ports = good
    if len(flush_ports) > 0:
        while not userYN('Temporary flowcell(s) locked on to stage'): pass
        while not userYN('All valve input lines in water'): pass
        while not userYN('Ready to flush'): pass
        LED('all', 'startup')
        # Flush ports
        speed = flowcells[AorB_].pump_speed['flush']
        volume = flowcells[AorB_].volume['flush']
        for port in flush_ports:
            if port in hs.v24[AorB_].variable_ports:
                # Expand a variable reagent into its per-cycle ports;
                # appending while iterating is intentional so the new
                # entries are flushed later in this same loop.
                flush_ports.append(*hs.v24[AorB_].port_dict[port].values())
            else:
                hs.message('Flushing ' + str(port))
                # Move both flowcells' valves to the port in parallel
                for fc in flowcells.values():
                    AorB = fc.position
                    fc.thread = threading.Thread(target=hs.v24[AorB].move,
                                                 args=(port,))
                    fc.thread.start()
                # Busy-wait until every valve move finishes
                alive = True
                while alive:
                    alive_ = []
                    for fc in flowcells.values():
                        alive_.append(fc.thread.is_alive())
                    alive = any(alive_)
                # Pump the flush volume through both flowcells in parallel
                for fc in flowcells.values():
                    AorB = fc.position
                    fc.thread = threading.Thread(target=hs.p[AorB].pump,
                                                 args=(volume, speed,))
                    fc.thread.start()
                # Busy-wait until every pump finishes
                alive = True
                while alive:
                    alive_ = []
                    for fc in flowcells.values():
                        alive_.append(fc.thread.is_alive())
                    alive = any(alive_)
##########################################################
## Flush Lines ###########################################
##########################################################
def do_prime(flush_YorN):
    """Prime lines with all reagents in config if prompted.

    After (optionally) priming every fixed port on both flowcells in
    parallel, offers to rinse the flowcells, then walks the user through
    swapping temporary flowcells for the experiment flowcells.

    **Parameters:**
     - flush_YorN (bool): True if lines were just flushed, so the
       temporary flowcell(s) are already locked onto the stage.
    """
    LED('all', 'user')
    ## Prime lines
    confirm = False
    while not confirm:
        prime_YorN = userYN("Prime lines")
        if prime_YorN:
            confirm = userYN("Confirm prime lines")
        else:
            confirm = userYN("Confirm skip priming lines")
    # LED('all', 'startup')
    # hs.z.move([0,0,0])
    # hs.move_stage_out()
    #LED('all', 'user')
    if prime_YorN:
        if flush_YorN:
            while not userYN('Temporary flowcell(s) locked on to stage'): pass
        while not userYN('Valve input lines in reagents'): pass
        while not userYN('Ready to prime lines'): pass
        #Flush all lines
        LED('all', 'startup')
        # Single-pass loop (ends in break); leaves `port`, `AorB` bound
        # for the rinse logic below
        while True:
            AorB_ = [*flowcells.keys()][0]
            port_dict = hs.v24[AorB_].port_dict
            speed = flowcells[AorB_].pump_speed['prime']
            for port in port_dict.keys():
                # Only fixed (integer) ports are primed; variable
                # reagents map to dicts and are skipped
                if isinstance(port_dict[port], int):
                    hs.message('Priming ' + str(port))
                    # Move all flowcell valves to the port in parallel
                    for fc in flowcells.values():
                        port_num = port_dict[port]
                        AorB = fc.position
                        fc.thread = threading.Thread(target=hs.v24[AorB].move,
                                                     args=(port,))
                        fc.thread.start()
                    # Busy-wait for the valve moves to finish
                    alive = True
                    while alive:
                        alive_ = []
                        for fc in flowcells.values():
                            alive_.append(fc.thread.is_alive())
                        alive = any(alive_)
                    # Pump the prime volume; volume depends on port type
                    for fc in flowcells.values():
                        if port_num in hs.v24[AorB].side_ports:
                            volume = fc.volume['side']
                        elif port_num == hs.v24[AorB].sample_port:
                            volume = fc.volume['sample']
                        else:
                            volume = fc.volume['main']
                        AorB = fc.position
                        fc.thread = threading.Thread(target=hs.p[AorB].pump,
                                                     args=(volume, speed,))
                        fc.thread.start()
                    # Busy-wait for the pumps to finish
                    alive = True
                    while alive:
                        alive_ = []
                        for fc in flowcells.values():
                            alive_.append(fc.thread.is_alive())
                        alive = any(alive_)
            break
        # Rinse flowcells
        method = config.get('experiment', 'method') # Read method specific info
        method = config[method]
        rinse_port = method.get('rinse', fallback = None)
        rinse = rinse_port in hs.v24[AorB].port_dict
        if rinse_port == port: # Option to skip rinse if last reagent pump was rinse reagent
            rinse = False
        # Get rinse reagents
        if not rinse:
            LED('all', 'user')
            print('Last reagent pumped was', port)
            if userYN('Rinse flowcell'):
                # Prompt until a valid rinse reagent is given
                while not rinse:
                    if rinse_port not in hs.v24[AorB].port_dict:
                        rinse_port = input('Specify rinse reagent: ')
                        rinse = rinse_port in hs.v24[AorB].port_dict
                    if not rinse:
                        print('ERROR::Invalid rinse reagent')
                        print('Choose from:', *list(hs.v24[AorB].port_dict.keys()))
        if rinse:
            # Simultaneously Rinse Flowcells
            for fc in flowcells.values():
                fc.thread = threading.Thread(target=do_rinse,
                                             args=(fc,rinse_port,))
                fc.thread.start()
            alive = True
            # Wait for rinsing to complete
            while alive:
                alive_ = []
                for fc in flowcells.values():
                    alive_.append(fc.thread.is_alive())
                alive = any(alive_)
    # Swap temporary flowcells for the experiment flowcells
    LED('all', 'user')
    while not userYN('Temporary flowcell(s) removed'): pass
    while not userYN('Experiment flowcell(s) locked on to stage'): pass
    if not prime_YorN:
        while not userYN('Valve input lines in reagents'): pass
    while not userYN('Door closed'): pass
##########################################################
def do_nothing():
    """Placeholder action that intentionally does nothing (used as a
    no-op thread target)."""
    return None
##########################################################
## iterate over lines, send to pump, and print response ##
##########################################################
def do_recipe(fc):
    """Do the next event in the recipe.

    Reads the next instruction line from the flowcell's recipe, builds
    the matching action as a thread on ``fc.thread`` (valve move, pump,
    hold timer, wait, image, or temperature change), signals the other
    flowcell if it was waiting on this event, then starts the thread.
    Restarts the recipe when the file is exhausted.

    **Parameters:**
     - fc (flowcell): The current flowcell.
    """
    AorB = fc.position
    fc.thread = None
    # Skip to first line of recipe on initial cycle
    if fc.cycle == 1 and fc.first_line is not None:
        for i in range(fc.first_line):
            line = fc.recipe.readline()
        fc.first_line = None
    #get instrument and command
    # (skips lines parse_line rejects; stops at end of file)
    instrument = None
    while instrument is None:
        line = fc.recipe.readline()
        if line:
            instrument, command = parse_line(line)
        else:
            break
    if line:
        # Move reagent valve
        if instrument == 'PORT':
            #Move to cycle specific reagent if it is variable a reagent
            if fc.cycle <= fc.total_cycles:
                if command in hs.v24[AorB].variable_ports:
                    command = hs.v24[AorB].port_dict[command][fc.cycle]
            log_message = 'Move to ' + command
            fc.thread = threading.Thread(target = hs.v24[AorB].move,
                                         args = (command,))
            if fc.cycle <= fc.total_cycles:
                LED(AorB, 'awake')
        # Pump reagent into flowcell
        elif instrument == 'PUMP':
            volume = int(command)
            speed = fc.pump_speed['reagent']
            log_message = 'Pumping ' + str(volume) + ' uL'
            fc.thread = threading.Thread(target = hs.p[AorB].pump,
                                         args = (volume, speed,))
            if fc.cycle <= fc.total_cycles:
                LED(AorB, 'awake')
        # Incubate flowcell in reagent for set time
        elif instrument == 'HOLD':
            if command.isdigit():
                holdTime = float(command)*60
                log_message = 'Flowcell holding for ' + str(command) + ' min.'
                if hs.virtual:
                    # Virtual runs compress hold times by hs.speed_up
                    fc.thread = threading.Timer(holdTime/hs.speed_up, fc.endHOLD)
                    #fc.thread = threading.Timer(holdTime, fc.endHOLD)
                else:
                    fc.thread = threading.Timer(holdTime, fc.endHOLD)
            elif command == 'STOP':
                # Pause everything until the user continues
                hs.message('PySeq::Paused')
                LED(AorB, 'user')
                input("Press enter to continue...")
                log_message = ('Continuing...')
                fc.thread = threading.Thread(target = do_nothing)
            if fc.cycle <= fc.total_cycles:
                LED(AorB, 'sleep')
        # Wait for other flowcell to finish event before continuing with current flowcell
        elif instrument == 'WAIT':
            if command == 'TEMP':
                fc.thread = threading.Thread(target = hs.T.wait_fc_T,
                                             args=(AorB, fc.temperature,))
                log_message = ('Waiting to reach '+str(fc.temperature)+'C')
            elif fc.waits_for is not None:
                # If the awaited event already happened, don't block
                if command in flowcells[fc.waits_for].events_since_IMAG:
                    log_message = command + ' has occurred, skipping WAIT'
                    fc.thread = threading.Thread(target = do_nothing)
                else:
                    log_message = 'Waiting for ' + command
                    fc.thread = threading.Thread(target = WAIT,
                                                 args = (AorB, command,))
            else:
                log_message = 'Skip waiting for ' + command
                fc.thread = threading.Thread(target = do_nothing)
            if fc.cycle <= fc.total_cycles:
                LED(AorB, 'sleep')
        # Image the flowcell
        elif instrument == 'IMAG':
            # Only one flowcell may use the cameras at a time
            if hs.scan_flag and fc.cycle <= fc.total_cycles:
                hs.message('PySeq::'+AorB+'::Waiting for camera')
                while hs.scan_flag:
                    pass
            #hs.scan_flag = True
            fc.events_since_IMAG = []
            log_message = 'Imaging flowcell'
            fc.thread = threading.Thread(target = IMAG,
                                         args = (fc,int(command),))
            if fc.cycle <= fc.total_cycles:
                LED(AorB, 'imaging')
        elif instrument == 'TEMP':
            log_message = 'Setting temperature to ' + command + ' C'
            command = float(command)
            fc.thread = threading.Thread(target = hs.T.set_fc_T,
                                         args = (AorB,command,))
            fc.temperature = command
        # Block all further processes until user input
        # elif instrument == 'STOP':
        #     hs.message('PySeq::Paused')
        #     LED(AorB, 'user')
        #     input("Press enter to continue...")
        #     hs.message('PySeq::Continuing...')
        #Signal to other flowcell that current flowcell reached signal event
        if fc.signal_event == instrument or fc.signal_event == command:
            fc.wait_thread.set()
            fc.signal_event = None
        # Start new action on current flowcell
        if fc.thread is not None and fc.cycle <= fc.total_cycles:
            fc.addEvent(instrument, command)
            hs.message('PySeq::'+AorB+'::cycle'+str(fc.cycle)+'::'+log_message)
            # Thread.start() returns None; the assignment is vestigial
            thread_id = fc.thread.start()
        elif fc.thread is not None and fc.cycle > fc.total_cycles:
            # NOTE(review): this idle thread is created but never started
            # here -- presumably the main loop treats a non-alive thread
            # as "done"; confirm against the caller
            fc.thread = threading.Thread(target = time.sleep, args = (10,))
    else:
        # End of recipe
        fc.restart_recipe()
##########################################################
## Image flowcell ########################################
##########################################################
def IMAG(fc, n_Zplanes):
    """Image the flowcell at a number of z planes.

    For each section on the flowcell, the stage is first positioned
    to the center of the section to find the optimal focus. Then if no
    optical settings are listed, the optimal filter sets are found.
    Next, the stage is repositioned to scan the entire section and
    image the specified number of z planes.

    **Parameters:**
     - fc: The flowcell to image.
     - n_Zplanes: The number of z planes to image.
    """
    # Claim the cameras; do_recipe blocks on this flag for the other flowcell
    hs.scan_flag = True
    AorB = fc.position
    cycle = str(fc.cycle)
    start = time.time()
    # Manual focus ALL sections across flowcells
    if hs.AF == 'manual':
        focus.manual_focus(hs, flowcells)
        hs.AF = 'partial once'
    #Image sections on flowcell
    for section in fc.sections:
        pos = fc.stage[section]
        hs.y.move(pos['y_initial'])
        hs.x.move(pos['x_initial'])
        hs.z.move(pos['z_pos'])
        hs.obj.move(hs.obj.focus_rough)
        # Autofocus
        msg = 'PySeq::' + AorB + '::cycle' + cycle+ '::' + str(section) + '::'
        if hs.AF and not isinstance(hs.AF, int):
            # Reuse a previously saved objective position when available
            obj_pos = focus.get_obj_pos(hs, section, cycle)
            if obj_pos is None:
                # Move to focus filters
                for i, color in enumerate(hs.optics.colors):
                    hs.optics.move_ex(color,hs.optics.focus_filters[i])
                hs.message(msg + 'Start Autofocus')
                try:
                    if hs.autofocus(pos): # Moves to optimal objective position
                        hs.message(msg + 'Autofocus complete')
                        pos['obj_pos'] = hs.obj.position
                    else: # Moves to rough focus objective position
                        hs.message(msg + 'Autofocus failed')
                        pos['obj_pos'] = None
                except:
                    hs.message(msg + 'Autofocus failed')
                    print(sys.exc_info()[0])
                    pos['obj_pos'] = None
            else:
                hs.obj.move(obj_pos)
                pos['obj_pos'] = hs.obj.position
            focus.write_obj_pos(hs, section, cycle)
        #Override recipe number of z planes
        if fc.z_planes is not None: n_Zplanes = fc.z_planes
        # Calculate objective positions to image
        if n_Zplanes > 1 and not isinstance(hs.AF, int):
            obj_start = int(hs.obj.position - hs.nyquist_obj*n_Zplanes*hs.stack_split) # (Default) 2/3 of planes below opt_ob_pos and 1/3 of planes above
        elif isinstance(hs.AF, int):
            # Fixed objective position supplied in the config file
            obj_start = hs.AF
        else:
            obj_start = hs.obj.position
        image_name = AorB
        image_name += '_s' + str(section)
        image_name += '_r' + cycle
        if fc.IMAG_counter is not None:
            image_name += '_' + str(fc.IMAG_counter)
        # Scan section on flowcell
        hs.y.move(pos['y_initial'])
        hs.x.move(pos['x_initial'])
        hs.obj.move(obj_start)
        n_tiles = pos['n_tiles']
        n_frames = pos['n_frames']
        # Set filters
        for color in hs.optics.cycle_dict.keys():
            filter = hs.optics.cycle_dict[color][fc.cycle]
            # == instead of "is": identity comparison with a str literal
            # is unreliable (and a SyntaxWarning on modern Python)
            if color == 'em':
                hs.optics.move_em_in(filter)
            else:
                hs.optics.move_ex(color, filter)
        hs.message(msg + 'Start Imaging')
        try:
            scan_time = hs.scan(n_tiles, n_Zplanes, n_frames, image_name)
            scan_time = str(int(scan_time/60))
            hs.message(msg + 'Imaging completed in', scan_time, 'minutes')
        except:
            error('Imaging failed.')
        # Reset filters
        for color in hs.optics.cycle_dict.keys():
            if color == 'em':
                hs.optics.move_em_in(True)
            else:
                hs.optics.move_ex(color, 'home')
    if fc.IMAG_counter is not None:
        fc.IMAG_counter += 1
    # Release the cameras for the other flowcell
    hs.scan_flag = False
def WAIT(AorB, event):
    """Hold the flowcell *AorB* until the specified event in the other flowcell.

    **Parameters:**
    AorB (str): Flowcell position, A or B, to be held.
    event: Event in the other flowcell that releases the held flowcell.

    **Returns:**
    int: Time in seconds the current flowcell was held.
    """
    held = flowcells[AorB]
    signaler = flowcells[held.waits_for]
    # Capture the cycle label before blocking; it is only used for the log line.
    cycle_label = str(held.cycle)
    t_start = time.time()
    # Arm the release condition on the signaling flowcell, then block until it fires.
    signaler.signal_event = event
    signaler.wait_thread.wait()
    hs.message('PySeq::'+AorB+'::cycle'+cycle_label+'::Flowcell ready to continue')
    signaler.wait_thread.clear()  # Reset the event for the next wait.
    return time.time() - t_start
def do_rinse(fc, port=None):
    """Rinse flowcell with reagent specified in config file.

    **Parameters:**
    fc (flowcell): The flowcell to rinse.
    port (str): Optional reagent port name to rinse with; when None, the
        method's 'rinse' setting from the config file is used.
    """
    method = config.get('experiment', 'method')  # Read method specific info
    method = config[method]
    if port is None:
        port = method.get('rinse', fallback = None)
    AorB = fc.position
    # Only rinse when the requested port actually exists on this valve.
    rinse = port in hs.v24[AorB].port_dict
    if rinse:
        LED(fc.position, 'awake')
        # Move valve
        hs.message('PySeq::'+AorB+'::Rinsing flowcell with', port)
        fc.thread = threading.Thread(target = hs.v24[AorB].move, args = (port,))
        fc.thread.start()
        # Pump volume depends on which class of port the reagent is plumbed to.
        port_num = hs.v24[AorB].port_dict[port]
        if port_num in hs.v24[AorB].side_ports:
            volume = fc.volume['side']
        elif port_num == hs.v24[AorB].sample_port:
            volume = fc.volume['sample']
        else:
            volume = fc.volume['main']
        speed = fc.pump_speed['reagent']
        while fc.thread.is_alive(): # Busy-wait till valve has moved
            pass
        # NOTE(review): the pump thread is created but not started here --
        # presumably the recipe loop starts/joins fc.thread; confirm.
        fc.thread = threading.Thread(target = hs.p[AorB].pump,
                                     args = (volume, speed,))
    else:
        # No valid rinse port: park a no-op thread so callers can still
        # check/join fc.thread uniformly.
        fc.thread = threading.Thread(target = do_nothing)
##########################################################
## Shut down system ######################################
##########################################################
def do_shutdown():
    """Shutdown the HiSeq and flush all reagent lines if prompted."""
    # Let every flowcell worker thread wind down, nudging its wait event so a
    # blocked thread can exit.
    for fc in flowcells.values():
        while fc.thread.is_alive():
            fc.wait_thread.set()
            time.sleep(10)
    LED('all', 'startup')
    hs.message('PySeq::Shutting down...')
    hs.z.move([0, 0, 0])
    hs.move_stage_out()
    do_flush()
    ##Flush all lines##
    # LED('all', 'user')
    #
    # # flush_YorN = userYN("Flush lines")
    # if flush_YorN:
    #     hs.message('Lock temporary flowcell on stage')
    #     hs.message('Place all valve input lines in PBS/water')
    #     input('Press enter to continue...')
    #
    #     LED('all', 'startup')
    #     for fc in flowcells.keys():
    #         volume = flowcells[fc].volume['main']
    #         speed = flowcells[fc].pump_speed['flush']
    #         for port in hs.v24[fc].port_dict.keys():
    #             if isinstance(port_dict[port], int):
    #                 hs.v24[fc].move(port)
    #                 hs.p[fc].pump(volume, speed)
    #         ##Return pump to top and NO port##
    #         hs.p[fc].command('OA0R')
    #         hs.p[fc].command('IR')
    # else:
    #     LED('all', 'user')
    hs.message('Retrieve experiment flowcells')
    input('Press any key to finish shutting down')
    # Dump per-flowcell history logs: one space-separated row per recorded event.
    for fc in flowcells.values():
        fc_log_path = join(hs.log_path, 'Flowcell' + fc.position + '.log')
        with open(fc_log_path, 'w') as fc_file:
            for row in zip(fc.history[0], fc.history[1], fc.history[2]):
                fc_file.write(' '.join(str(field) for field in row) + '\n')
    # Turn off y stage motor
    hs.y.move(0)
    hs.y.command('OFF')
    LED('all', 'off')
##########################################################
## Free Flowcells ########################################
##########################################################
def free_fc():
    """Release the first flowcell if flowcells are waiting on each other.

    **Returns:**
    str: Position (A or B) of the flowcell that was released.
    """
    # Get which flowcell is to be first
    experiment = config['experiment']
    # FIX(review): previously this read the 'first flowcell' key and passed it
    # to int(), which raises ValueError for the string fallback 'A'. The
    # comparison against fc.total_cycles below needs a cycle count, so read
    # 'cycles' instead -- confirm the intended config key name.
    cycles = int(experiment.get('cycles', fallback=0))
    first_fc = experiment.get('first flowcell', fallback = 'A')
    if len(flowcells) == 1:
        # Only one flowcell: just release it.
        fc = next(iter(flowcells.values()))
        try:
            fc.wait_thread.set()
        except:
            pass
        fc.signal_event = None
    else:
        # FIX: keep flowcell *objects* (the original kept positions/strings,
        # which broke fc.waits_for / fc.position below).
        candidates = [fc for fc in flowcells.values() if fc.total_cycles <= cycles]
        if len(candidates) == 1:
            fc = candidates[0]
        else:
            fc = flowcells[first_fc]
        flowcells[fc.waits_for].wait_thread.set()
        flowcells[fc.waits_for].signal_event = None
    hs.message('PySeq::Flowcells are waiting on each other starting flowcell',
               fc.position)
    return fc.position
def get_config(args):
    """Return the experiment config appended with the method config.

    **Parameters:**
    - args (dict): Dictionary with the config path, the experiment name and
      the output path to store images and logs.

    **Returns:**
    - config: The experiment config appended with the method config.
    """
    # Create config parser
    config = configparser.ConfigParser()
    # Defaults that can be overridden by the user's config file
    config.read_dict({'experiment' : {'log path': 'logs',
                                      'image path': 'images'}
                     })
    # Open config file
    if os.path.isfile(args['config']):
        config.read(args['config'])
    else:
        error('ConfigFile::Does not exist')
        sys.exit()
    # Set output path
    config['experiment']['save path'] = args['output']
    # Set experiment name
    config['experiment']['experiment name'] = args['name']
    # Remember whether the user supplied their own valve/reagent mapping so it
    # is not clobbered by the method defaults read below.
    USERVALVE = False
    if config.has_section('reagents'):
        valve = config['reagents'].items()
        if len(valve) > 0:
            USERVALVE = True
    # Get method specific configuration: either a built-in method name, a path
    # to a method config file, or a section embedded in the user config.
    method = config['experiment']['method']
    if method in methods.get_methods():
        config_path, recipe_path = methods.return_method(method)
        config.read(config_path)
    elif os.path.isfile(method):
        config.read(method)
        recipe_path = None
    elif config.has_section(method):
        recipe_path = None
    else:
        error('ConfigFile::Error reading method configuration')
        sys.exit()
    # Check method keys
    if not methods.check_settings(config[method]):
        go = userYN('Proceed with experiment')
        if not go:
            sys.exit()
    # Get recipe
    recipe_name = config[method]['recipe']
    if recipe_path is not None:
        pass
    elif os.path.isfile(recipe_name):
        recipe_path = recipe_name
    else:
        error('ConfigFile::Error reading recipe')
        # NOTE(review): recipe_path is still None here; the assignment below
        # will then fail inside configparser -- confirm error() exits or that
        # this path is unreachable.
    config['experiment']['recipe path'] = recipe_path
    # Don't override user defined valve
    user_config = configparser.ConfigParser()
    user_config.read(args['config'])
    if USERVALVE:
        config.read_dict({'reagents':dict(user_config['reagents'])})
    if user_config.has_section(method):
        config.read_dict({method:dict(user_config[method])})
    return config
def check_fc_temp(fc):
    """Check temperature of flowcell.

    Polls the actual flowcell temperature at most once per `fc.temp_interval`
    seconds (throttled with a timer) and warns when the reading deviates from
    the set point by more than 5 C.

    **Parameters:**
    fc (flowcell): The flowcell whose temperature is checked.

    **Returns:**
    float or None: Measured temperature in C, or None when no reading was
        taken this call (no set point, or still inside the throttle window).
    """
    # FIX: T was previously unbound on the no-reading paths, so `return T`
    # raised UnboundLocalError; initialize it explicitly.
    T = None
    if fc.temperature is not None:
        if fc.temp_timer is None:
            # Start a throttle timer; do_nothing is just a placeholder target.
            fc.temp_timer = threading.Timer(fc.temp_interval, do_nothing)
            fc.temp_timer.start()
        if not fc.temp_timer.is_alive():
            # Throttle window elapsed: take a reading.
            T = hs.T.get_fc_T(fc.position)
            hs.message(False, 'PySeq::'+fc.position+'::Temperature::',T,'C')
            fc.temp_timer = None
            if abs(fc.temperature - T) > 5:
                msg =  'PySeq::'+fc.position+'::WARNING::Set Temperature '
                msg += str(fc.temperature) + ' C'
                hs.message(msg)
                msg =  'PySeq::'+fc.position+'::WARNING::Actual Temperature '
                msg += str(T) + ' C'
                hs.message(msg)
    return T
###################################
## Run System #####################
###################################
args_ = args.get_arguments()                    # Get config path, experiment name, & output path

if __name__ == 'pyseq.main':
    n_errors = 0
    config = get_config(args_)                  # Get config file
    logger = setup_logger()                     # Create logfiles
    port_dict = check_ports()                   # Check ports in configuration file
    first_line, IMAG_counter, z_planes = check_instructions()  # Checks instruction file is correct and makes sense
    flowcells = setup_flowcells(first_line, IMAG_counter)      # Create flowcells
    hs = configure_instrument(IMAG_counter, port_dict)
    confirm_settings(z_planes)
    hs = initialize_hs(IMAG_counter)            # Initialize HiSeq, takes a few minutes
    # FIX: compare integers with ==; `n_errors is 0` relies on CPython small-int
    # caching and emits a SyntaxWarning on modern Python.
    if n_errors == 0:
        flush_YorN = do_flush()                 # Ask to flush out lines
        do_prime(flush_YorN)                    # Ask to prime lines
        if not userYN('Start experiment'):
            sys.exit()
        # Do prerecipe or Initialize Flowcells
        for fc in flowcells.values():
            if fc.prerecipe_path:
                fc.pre_recipe()
            else:
                fc.restart_recipe()
        cycles_complete = False
        while not cycles_complete:
            stuck = 0
            complete = 0
            for fc in flowcells.values():
                if not fc.thread.is_alive():    # flowcell not busy, do next step in recipe
                    do_recipe(fc)
                if fc.signal_event:             # check if flowcells are waiting on each other
                    stuck += 1
                if fc.cycle > fc.total_cycles:  # check if all cycles are complete on flowcell
                    complete += 1
                check_fc_temp(fc)
            if stuck == len(flowcells):         # Start the first flowcell if they are waiting on each other
                free_fc()
            if complete == len(flowcells):      # Exit while loop
                cycles_complete = True
            if hs.current_view is not None:     # Show latest images in napari, WILL BLOCK
                hs.current_view.show()
                hs.current_view = None
        do_shutdown()                           # Shutdown HiSeq
    else:
        error('Total number of errors =', n_errors)
| 38.437079 | 159 | 0.52832 |
0b77f76b149075d4d3817aa9211f7115e499a12a | 273 | py | Python | tests/parser/rewriting.projection.4.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | tests/parser/rewriting.projection.4.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | tests/parser/rewriting.projection.4.test.py | veltri/DLV2 | 944aaef803aa75e7ec51d7e0c2b0d964687fdd0e | [
"Apache-2.0"
] | null | null | null | input = """
f(X,1) :- a(X,Y),
g(A,X),g(B,X),
not f(1,X).
a(X,Y) :- g(X,0),g(Y,0).
g(x1,0).
g(x2,0).
"""
output = """
f(X,1) :- a(X,Y),
g(A,X),g(B,X),
not f(1,X).
a(X,Y) :- g(X,0),g(Y,0).
g(x1,0).
g(x2,0).
"""
| 11.869565 | 25 | 0.296703 |
0b78677adaa1ddcbacf884f29508f3b4ea829e33 | 5,100 | py | Python | focus/receiver.py | frederikhermans/focus | 6228ba5fc8b41c74f2e22d5c2de20040b206d70a | [
"BSD-3-Clause"
] | 6 | 2016-04-18T09:40:16.000Z | 2021-01-05T22:03:54.000Z | focus/receiver.py | horizon00/focus | 6228ba5fc8b41c74f2e22d5c2de20040b206d70a | [
"BSD-3-Clause"
] | 1 | 2017-12-10T14:13:50.000Z | 2017-12-10T14:13:50.000Z | focus/receiver.py | horizon00/focus | 6228ba5fc8b41c74f2e22d5c2de20040b206d70a | [
"BSD-3-Clause"
] | 5 | 2018-01-04T14:59:50.000Z | 2018-10-20T14:40:21.000Z | # Copyright (c) 2016, Frederik Hermans, Liam McNamara
#
# This file is part of FOCUS and is licensed under the 3-clause BSD license.
# The full license can be found in the file COPYING.
import cPickle as pickle
import sys
import click
import imageframer
import numpy as np
import rscode
import focus
if __name__ == '__main__':
main()
| 36.428571 | 81 | 0.610196 |
0b7aa19dc4e53889b36908ba53b351bf9cbef5d2 | 6,444 | py | Python | calc/bond.py | RaphaelOneRepublic/financial-calculator | 2451b35a4cb52a6c254ae9fdae462dfebdc51e65 | [
"MIT"
] | 2 | 2020-12-10T13:00:43.000Z | 2020-12-19T16:59:48.000Z | calc/bond.py | RaphaelOneRepublic/financial-calculator | 2451b35a4cb52a6c254ae9fdae462dfebdc51e65 | [
"MIT"
] | null | null | null | calc/bond.py | RaphaelOneRepublic/financial-calculator | 2451b35a4cb52a6c254ae9fdae462dfebdc51e65 | [
"MIT"
] | null | null | null | import logging
from typing import Sequence
import numpy as np
from calc.optimize import root
def bootstrap(bonds: Sequence[Bond], overnight: float, epsilon: float = 10e-10):
    """
    Bootstrap a zero rate curve from the given bonds and bond values.
    Note that the bonds must have equal coupon payment periods (equal <m>s).
    Zero rates at times for which we do not have a bond are calculated
    by a linear line connecting the two nearest rates at times for which we do have a bond.

    :param bonds: bonds to bootstrap from; sorted by maturity ``T`` internally.
    :param overnight: overnight rate, used as the first point of the curve.
    :param epsilon: convergence tolerance. NOTE(review): unused in this body --
        presumably consumed by ``find_curve``; confirm. Also note the default
        ``10e-10`` equals 1e-9; confirm 1e-10 was not intended.
    :return: list of bootstrapped zero rates, starting with ``overnight``.
    """
    # Process bonds in order of increasing maturity so each solve can rely on
    # the rates already bootstrapped for shorter maturities.
    bonds = sorted(bonds, key=lambda x: x.T)
    known = [overnight]
    for bond in bonds:
        # find_curve (defined elsewhere in this module) extends the known zero
        # rates with the rate implied by this bond.
        known = find_curve(bond, known)
    return known
| 24.689655 | 113 | 0.542675 |
0b7ab6dccc22b64a51e866ea9c844d792babb7c7 | 9,074 | py | Python | jasy/build/Script.py | sebastian-software/jasy | 9740ed33f0836ab2dd3e00ab4fae4049f9908072 | [
"MIT"
] | 2 | 2015-05-27T19:30:49.000Z | 2015-12-10T16:55:14.000Z | jasy/build/Script.py | sebastian-software/jasy | 9740ed33f0836ab2dd3e00ab4fae4049f9908072 | [
"MIT"
] | 2 | 2015-03-16T09:15:58.000Z | 2015-04-07T19:05:47.000Z | jasy/build/Script.py | sebastian-software/jasy | 9740ed33f0836ab2dd3e00ab4fae4049f9908072 | [
"MIT"
] | 2 | 2017-07-18T20:08:05.000Z | 2021-01-04T10:46:14.000Z | #
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 Sebastian Werner
#
import os
import jasy
import jasy.core.Console as Console
from jasy.item.Script import ScriptError
from jasy.item.Script import ScriptItem
import jasy.script.Resolver as ScriptResolver
from jasy.script.Resolver import Resolver
import jasy.script.output.Optimization as ScriptOptimization
import jasy.script.output.Formatting as ScriptFormatting
| 33.116788 | 168 | 0.601278 |
0b7b1e425f8017f791073b532d42d48a2786d924 | 171 | py | Python | 13.py | kwoshvick/project-euler | d27370b0f22b51ad9ccb15afa912983d8fd8be5c | [
"MIT"
] | null | null | null | 13.py | kwoshvick/project-euler | d27370b0f22b51ad9ccb15afa912983d8fd8be5c | [
"MIT"
] | null | null | null | 13.py | kwoshvick/project-euler | d27370b0f22b51ad9ccb15afa912983d8fd8be5c | [
"MIT"
] | null | null | null | file = open("13")
sum = 0
for numbers in file:
#print(numbers.rstrip())
numbers = int(numbers)
sum += numbers;
print(sum)
sum = str(sum)
print(sum[:10])
| 10.6875 | 28 | 0.596491 |
0b7d1c464ba5f7b2f25f469546bc0725ef4ae2f0 | 25,033 | py | Python | snlds/model_cavi_snlds.py | egonrian/google-research | 8177adbe9ca0d7e5a9463b54581fe6dd27be0974 | [
"Apache-2.0"
] | 3 | 2021-01-18T04:46:49.000Z | 2021-03-05T09:21:40.000Z | snlds/model_cavi_snlds.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 25 | 2020-07-25T08:53:09.000Z | 2022-03-12T00:43:02.000Z | snlds/model_cavi_snlds.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 4 | 2021-02-08T10:25:45.000Z | 2021-04-17T14:46:26.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collapsed Amortized Variational Inference for SNLDS.
This is a reasonable baseline model for switching non-linear dynamical system
with the following architecture:
1. an inference network, with Bidirectional-RNN for input embedding, and a
forward RNN to get the posterior distribution of `q(z[1:T] | x[1:T])`.
2. a continuous state transition network, `p(z[t] | z[t-1], s[t])`.
3. a discrete state transition network that conditioned on the input,
`p(s[t] | s[t-1], x[t-1])`.
4. an emission network conditioned on the continuous hidden dynamics,
`p(x[t] | z[t])`.
It also contains a function, `create_model()`, to help to create the SNLDS
model discribed in ``Collapsed Amortized Variational Inference for Switching
Nonlinear Dynamical Systems``. 2019. https://arxiv.org/abs/1910.09588.
All the networks are configurable through function arguments `network_*`.
"""
import collections
import tensorflow as tf
import tensorflow_probability as tfp
from snlds import model_base
from snlds import utils
namedtuple = collections.namedtuple
layers = tf.keras.layers
tfd = tfp.distributions
tfpl = tfp.layers
RANDOM_SEED = 131
def construct_initial_state_distribution(
    latent_dim,
    num_categ,
    use_trainable_cov=False,
    use_triangular_cov=False,
    raw_sigma_bias=0.0,
    sigma_min=1e-5,
    sigma_scale=0.05,
    dtype=tf.float32,
    name="z0"):
  """Construct the initial state distribution, `p(z[0])`.

  Args:
    latent_dim: an `int` scalar for dimension of continuous hidden states, `z`.
    num_categ: an `int` scalar for number of discrete states, `s`.
    use_trainable_cov: a `bool` scalar indicating whether the scale of `p(z[0])`
      is trainable. Default to False.
    use_triangular_cov: a `bool` scalar indicating whether to use triangular
      covariance matrices and `tfp.distributions.MultivariateNormalTriL` for
      distribution. Otherwise, a diagonal covariance matrices and
      `tfp.distributions.MultivariateNormalDiag` will be used.
    raw_sigma_bias: a `float` scalar to be added to the raw sigma, which is
      standard deviation of the distribution. Default to `0.`.
    sigma_min: a `float` scalar for minimal level of sigma to prevent
      underflow. Default to `1e-5`.
    sigma_scale: a `float` scalar for scaling the sigma. Default to `0.05`.
      The above three arguments are used as
      `sigma_scale * max(softmax(raw_sigma + raw_sigma_bias), sigma_min))`.
    dtype: data type for variables within the scope. Default to `tf.float32`.
    name: a `str` to construct names of variables.

  Returns:
    return_dist: a `tfp.distributions` instance for the initial state
      distribution, `p(z[0])`.
  """
  glorot_initializer = tf.keras.initializers.GlorotUniform()
  # One mean vector per discrete state.
  z0_mean = tf.Variable(
      initial_value=glorot_initializer(shape=[num_categ, latent_dim],
                                       dtype=dtype),
      name="{}_mean".format(name))

  def _finalize_scale(raw_scale):
    # Shared post-processing previously duplicated in both branches:
    # sigma_scale * max(softmax(raw_scale + raw_sigma_bias), sigma_min).
    # NOTE(review): softmax over scale entries is unusual (softplus is the
    # common choice); behavior kept as-is -- confirm intended.
    return (tf.maximum(tf.nn.softmax(raw_scale + raw_sigma_bias), sigma_min)
            * sigma_scale)

  if use_triangular_cov:
    # Parameterize the lower-triangular Cholesky factor of the covariance.
    raw_scale = tf.Variable(
        initial_value=glorot_initializer(
            shape=[int(latent_dim * (latent_dim + 1) / 2)],
            dtype=dtype),
        name="{}_scale".format(name),
        trainable=use_trainable_cov)
    z0_scale = _finalize_scale(tfp.math.fill_triangular(raw_scale))
    return_dist = tfd.Independent(
        distribution=tfd.MultivariateNormalTriL(
            loc=z0_mean, scale_tril=z0_scale),
        reinterpreted_batch_ndims=0)
  else:
    # Diagonal covariance parameterization.
    raw_scale = tf.Variable(
        initial_value=glorot_initializer(
            shape=[latent_dim],
            dtype=dtype),
        name="{}_scale".format(name),
        trainable=use_trainable_cov)
    z0_scale = _finalize_scale(raw_scale)
    return_dist = tfd.Independent(
        distribution=tfd.MultivariateNormalDiag(
            loc=z0_mean, scale_diag=z0_scale),
        reinterpreted_batch_ndims=0)

  return tfp.experimental.as_composite(return_dist)
def create_model(num_categ,
                 hidden_dim,
                 observation_dim,
                 config_emission,
                 config_inference,
                 config_z_initial,
                 config_z_transition,
                 network_emission,
                 network_input_embedding,
                 network_posterior_rnn,
                 network_s_transition,
                 networks_z_transition,
                 network_posterior_mlp=lambda x: x,
                 name="snlds"):
  """Construct SNLDS model.

  Args:
    num_categ: an `int` scalar for number of discrete states, `s`.
    hidden_dim: an `int` scalar for dimension of continuous hidden states, `z`.
    observation_dim: an `int` scalar for dimension of observations, `x`.
    config_emission: a `dict` for configuring emission distribution,
      `p(x[t] | z[t])`.
    config_inference: a `dict` for configuring the posterior distribution,
      `q(z[t]|h[t]=f_RNN(h[t-1], z[t-1], h[t]^b))`.
    config_z_initial: a `dict` for configuring the initial distribution of
      continuous hidden state, `p(z[0])`.
    config_z_transition: a `dict` for configuring the transition distribution
      `p(z[t] | z[t-1], s[t])`.
    network_emission: a `callable` network taking continuous hidden
      states, `z[t]`, and returning the mean of emission distribution,
      `p(x[t] | z[t])`.
    network_input_embedding: a `callable` network to embed the observations,
      `x[t]`. E.g. a bidirectional RNN to embedding `x[1:T]`.
    network_posterior_rnn: a RNN cell, `h[t]=f_RNN(h[t-1], z[t-1], input[t])`,
      which recursively takes previous step RNN states `h`, previous step
      sampled dynamical state `z[t-1]`, and conditioned input `input[t]`.
    network_s_transition: a `callable` network taking batch conditional inputs,
      `x[t-1]`, and returning the discrete state transition matrices,
      `log p(s[t] |s[t-1], x[t-1])`.
    networks_z_transition: a list of `callable` networks, with the length
      of list same as `num_categories`. Each one of the networks will take
      previous step hidden state, `z[t-1]`, and returns the mean of
      transition distribution, `p(z[t] | z[t-1], s[t]=i)` for each
      discrete state `i`.
    network_posterior_mlp: an optional network to embedding the output of
      inference RNN networks, before passing into the distribution as mean,
      `q(z[t] | mlp( h[t] ))`. Default to identity mapping.
    name: a `str` to construct names of variables.

  Returns:
    An instance of instantiated `model_base.SwitchingNLDS` model.
  """
  # Continuous dynamics p(z[t] | z[t-1], s[t]): one mean network per discrete
  # state, sharing the covariance configuration.
  z_transition = ContinuousStateTransition(
      transition_mean_networks=networks_z_transition,
      distribution_dim=hidden_dim,
      num_categories=num_categ,
      cov_mat=config_z_transition.cov_mat,
      use_triangular_cov=config_z_transition.use_triangular_cov,
      use_trainable_cov=config_z_transition.use_trainable_cov,
      raw_sigma_bias=config_z_transition.raw_sigma_bias,
      sigma_min=config_z_transition.sigma_min,
      sigma_scale=config_z_transition.sigma_scale,
      name=name+"_z_trans")

  # Discrete dynamics log p(s[t] | s[t-1], x[t-1]).
  s_transition = DiscreteStateTransition(
      transition_network=network_s_transition,
      num_categories=num_categ)

  # Emission p(x[t] | z[t]): Gaussian with network-predicted mean.
  emission_network = GaussianDistributionFromMean(
      emission_mean_network=network_emission,
      observation_dim=observation_dim,
      cov_mat=config_emission.cov_mat,
      use_triangular_cov=config_emission.use_triangular_cov,
      use_trainable_cov=config_emission.use_trainable_cov,
      raw_sigma_bias=config_emission.raw_sigma_bias,
      sigma_min=config_emission.sigma_min,
      sigma_scale=config_emission.sigma_scale,
      name=name+"_x_emit")

  # Posterior head q(z[t] | h[t]): Gaussian over the (optionally MLP-embedded)
  # inference RNN state.
  posterior_distribution = GaussianDistributionFromMean(
      emission_mean_network=network_posterior_mlp,
      observation_dim=hidden_dim,
      cov_mat=config_inference.cov_mat,
      use_triangular_cov=config_inference.use_triangular_cov,
      use_trainable_cov=config_inference.use_trainable_cov,
      raw_sigma_bias=config_inference.raw_sigma_bias,
      sigma_min=config_inference.sigma_min,
      sigma_scale=config_inference.sigma_scale,
      name=name+"_posterior")

  # Amortized inference network combining input embedding, forward RNN and
  # the posterior head above.
  posterior_network = RnnInferenceNetwork(
      posterior_rnn=network_posterior_rnn,
      posterior_dist=posterior_distribution,
      latent_dim=hidden_dim,
      embedding_network=network_input_embedding)

  # Initial continuous state distribution p(z[0]), one component per category.
  z_initial_distribution = construct_initial_state_distribution(
      latent_dim=hidden_dim,
      num_categ=num_categ,
      use_trainable_cov=config_z_initial.use_trainable_cov,
      use_triangular_cov=config_z_initial.use_triangular_cov,
      raw_sigma_bias=config_z_initial.raw_sigma_bias,
      sigma_min=config_z_initial.sigma_min,
      sigma_scale=config_z_initial.sigma_scale,
      name="init_dist")

  # Assemble all sub-networks into the full switching NLDS model.
  snlds_model = model_base.SwitchingNLDS(
      continuous_transition_network=z_transition,
      discrete_transition_network=s_transition,
      emission_network=emission_network,
      inference_network=posterior_network,
      initial_distribution=z_initial_distribution,
      continuous_state_dim=None,
      num_categories=None,
      discrete_state_prior=None)

  return snlds_model
| 40.770358 | 80 | 0.667878 |
0b7d1f1e2fd547f10391c4be9766498485799dc7 | 1,581 | py | Python | grid_search/mlp_gridsearch.py | RiboswitchClassifier/RiboswitchClassification | 4a4ab0590aa50aa765638b2bd8aa0cfd84054ac7 | [
"MIT"
] | 2 | 2019-12-16T13:08:28.000Z | 2021-02-23T03:03:18.000Z | grid_search/mlp_gridsearch.py | RiboswitchClassifier/RiboswitchClassification | 4a4ab0590aa50aa765638b2bd8aa0cfd84054ac7 | [
"MIT"
] | null | null | null | grid_search/mlp_gridsearch.py | RiboswitchClassifier/RiboswitchClassification | 4a4ab0590aa50aa765638b2bd8aa0cfd84054ac7 | [
"MIT"
] | 3 | 2019-01-01T06:00:20.000Z | 2020-01-28T13:57:49.000Z |
# Train a baseline MLP classifier on the riboswitch dataset, report its
# accuracy, then grid-search MLP hyperparameters with 10-fold CV.
from sklearn.model_selection import cross_val_score, GridSearchCV, cross_validate, train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.neural_network import MLPClassifier
import pandas as pd
import csv
from sklearn.preprocessing import label_binarize
from sklearn.preprocessing import StandardScaler
import numpy as np

data = pd.read_csv('processed_datasets/final_32classes.csv')
# Separate out the x_data and y_data: drop the label and raw-sequence columns.
x_data = data.loc[:, data.columns != "Type"]
x_data = x_data.loc[:,x_data.columns != "Sequence"]
y_data = data.loc[:, "Type"]
random_state = 100
# NOTE(review): test_size=0.7 holds out 70% of the data for testing -- confirm
# this split was intended (rather than 70% train).
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.7, random_state=100,stratify=y_data)
# Standardize features: fit on train only, then apply to both splits.
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# Baseline MLP with default hyperparameters.
mlp = MLPClassifier()
mlp.fit(x_train, y_train)
y_pred_train = mlp.predict(x_train)
y_pred_test = mlp.predict(x_test)
print("classifier", mlp)
print ("Accuracy on Train Set")
print (mlp.score(x_train, y_train))
print ("MLP Classifier")
print ("Accuracy on Test Set")
print (mlp.score(x_test, y_test))
print ("Report")
print (classification_report(y_test,mlp.predict(x_test)))
# Hyperparameter search space for the grid search.
param_grid = {
    'activation': ['tanh', 'relu'],
    'solver': ['sgd', 'adam'],
    'alpha': [0.0001,0.01, 0.05,0.1,1.0],
    'learning_rate': ['constant','adaptive'],
}
# (Leftover notes "2000" / "70" -- likely earlier max_iter / hidden-size
# candidates; kept for reference.)
grid_search = GridSearchCV(mlp, param_grid=param_grid,n_jobs=-1,cv=10)
grid_search.fit(x_train,y_train)
print(grid_search.best_params_)
print(grid_search.best_score_)
| 27.258621 | 116 | 0.759646 |
0b7e90a5769d5e45dae418f0e034fd90269bdc99 | 711 | py | Python | bermuda/demos/shape_options.py | glue-viz/bermuda | 0bc26bac376d4f08a4964481d1f737f6deb86270 | [
"BSD-3-Clause"
] | 1 | 2018-07-20T21:09:46.000Z | 2018-07-20T21:09:46.000Z | bermuda/demos/shape_options.py | glue-viz/bermuda | 0bc26bac376d4f08a4964481d1f737f6deb86270 | [
"BSD-3-Clause"
] | null | null | null | bermuda/demos/shape_options.py | glue-viz/bermuda | 0bc26bac376d4f08a4964481d1f737f6deb86270 | [
"BSD-3-Clause"
] | 1 | 2018-07-20T21:15:41.000Z | 2018-07-20T21:15:41.000Z | import matplotlib.pyplt as plt
from bermuda import ellipse, polygon, rectangle
plt.plot([1,2,3], [2,3,4])
ax = plg.gca()
# default choices for everything
e = ellipse(ax)
# custom position, genric interface for all shapes
e = ellipse(ax, bbox = (x, y, w, h, theta))
e = ellipse(ax, cen=(x, y), width=w, height=h, theta=theta)
# force square/circle?
e = ellipse(ax, aspect_equal=True)
# freeze properties?
e = ellipse(ax, width=1, height=2, aspect_frozen = True)
e = ellipse(ax, rotation_frozen=True)
e = ellipse(ax, center_frozen=True)
e = ellipse(ax, size_frozen=True)
# all of these kwargs should be settable properties as well
e.bbox = (x, y, w, h, theta)
e.aspect_equal = True
e.aspect_frozen = True
| 24.517241 | 59 | 0.707454 |
0b7fbd1451d21df02b8ac7806cf7eef5c4dcbb14 | 5,605 | py | Python | porthole/contact_management.py | speedyturkey/porthole | 5d47bb00d33d5aa93c3d2e84af993b5387b66be6 | [
"MIT"
] | 3 | 2017-06-22T01:52:10.000Z | 2019-09-25T22:52:56.000Z | porthole/contact_management.py | speedyturkey/porthole | 5d47bb00d33d5aa93c3d2e84af993b5387b66be6 | [
"MIT"
] | 48 | 2017-06-22T23:36:03.000Z | 2019-11-26T02:51:54.000Z | porthole/contact_management.py | speedyturkey/porthole | 5d47bb00d33d5aa93c3d2e84af993b5387b66be6 | [
"MIT"
] | 1 | 2019-02-27T13:59:07.000Z | 2019-02-27T13:59:07.000Z | from sqlalchemy.orm.exc import NoResultFound
from porthole.app import Session
from .logger import PortholeLogger
from porthole.models import AutomatedReport, AutomatedReportContact, AutomatedReportRecipient
| 49.60177 | 118 | 0.702587 |
0b8210f4f1d6486c1ca027ea81ba3795882b8a8f | 3,433 | py | Python | tests/python/benchmarks/two_neighborhood_bench.py | sid17/weaver | f9074397ca854a777a873eaf409621de679f9749 | [
"BSD-3-Clause"
] | 163 | 2015-01-02T03:51:38.000Z | 2022-03-21T23:06:39.000Z | tests/python/benchmarks/two_neighborhood_bench.py | sid17/weaver | f9074397ca854a777a873eaf409621de679f9749 | [
"BSD-3-Clause"
] | 1 | 2015-04-08T23:17:06.000Z | 2015-04-24T15:25:26.000Z | tests/python/benchmarks/two_neighborhood_bench.py | sid17/weaver | f9074397ca854a777a873eaf409621de679f9749 | [
"BSD-3-Clause"
] | 20 | 2015-02-17T19:24:05.000Z | 2020-10-29T01:59:18.000Z | #! /usr/bin/env python
#
# ===============================================================
# Description: Two neighborhood benchmark
#
# Created: 2014-03-21 13:39:06
#
# Author: Ayush Dubey, dubey@cs.cornell.edu
#
# Copyright (C) 2013-2014, Cornell University, see the LICENSE
# file for licensing agreement
# ===============================================================
#
import random
import sys
import time
import threading
import weaver.client as client
import simple_client
random.seed(42)
num_edges = 1768149
edge_sources = [None] * num_edges
if (len(sys.argv) != 2):
print "want single extra arg for file to open"
assert(False)
f = open(sys.argv[1])
i = 0
for line in f:
if (line[0] is '#'):
continue
edge_sources[i] = int(line.split(" ")[0])
i += 1
print "done loading file"
num_started = 0
num_finished = 0
cv = threading.Condition()
num_nodes = 81306 # snap twitter-combined
read_percent = 95
# node handles are range(0, num_nodes)
num_vts = 1
num_clients = 100
requests_per_client = 200
clients = []
simple_clients = []
for i in range(num_clients):
clients.append(client.Client(client._CLIENT_ID + i, i % num_vts))
simple_clients.append(simple_client.simple_client(clients[i]))
reqs = []
for i in range(num_clients):
cl_reqs = []
for _ in range(requests_per_client):
cl_reqs.append(choose_random_pair())
reqs.append(cl_reqs)
exec_time = [0] * num_clients
threads = []
print "starting writes"
for i in range(num_clients):
thr = threading.Thread(target=add_labels, args=(clients[i], i))
thr.start()
threads.append(thr)
for thr in threads:
thr.join()
print "starting requests"
for i in range(num_clients):
thr = threading.Thread(target=exec_reads, args=(reqs[i], simple_clients[i], clients[i], exec_time, i))
thr.start()
threads.append(thr)
start_time = time.time()
with cv:
num_started = num_clients
cv.notify_all()
while num_finished < num_clients:
cv.wait()
end_time = time.time()
total_time = end_time-start_time
for thr in threads:
thr.join()
print 'Total time for ' + str(num_clients * requests_per_client) + 'requests = ' + str(total_time)
throughput = (num_clients * requests_per_client) / total_time
print 'Throughput = ' + str(throughput)
| 26.206107 | 106 | 0.633265 |
0b829ab3570dda786322f71fd0c867093ba7b3dc | 1,705 | py | Python | composer/datasets/brats_hparams.py | growlix/composer | 27418a3c65dca26d90ac09c6ae67cbd5d0202ccf | [
"Apache-2.0"
] | null | null | null | composer/datasets/brats_hparams.py | growlix/composer | 27418a3c65dca26d90ac09c6ae67cbd5d0202ccf | [
"Apache-2.0"
] | null | null | null | composer/datasets/brats_hparams.py | growlix/composer | 27418a3c65dca26d90ac09c6ae67cbd5d0202ccf | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""BraTS (Brain Tumor Segmentation) dataset hyperparameters."""
from dataclasses import dataclass
import torch
import yahp as hp
from composer.datasets.brats import PytTrain, PytVal, get_data_split
from composer.datasets.dataset_hparams import DataLoaderHparams, DatasetHparams
from composer.utils import dist
def _my_collate(batch):
    """Custom collate function to handle images with different depths.

    Splits a list of (image, target) samples into two parallel lists and
    returns them as a ``[images_tensor, targets_tensor]`` pair.
    """
    images, targets = [], []
    for sample in batch:
        images.append(sample[0])
        targets.append(sample[1])
    return [torch.Tensor(images), torch.Tensor(targets)]
| 32.788462 | 101 | 0.71261 |
0b83bfc7e85aab893f830a54d4b1eb6b31224483 | 43 | py | Python | examples/getchar.py | scalabli/quo | 70b6d4129ee705930f1f8a792fc4c9247d973f9d | [
"MIT"
] | 3 | 2022-03-13T13:22:35.000Z | 2022-03-18T08:22:51.000Z | examples/getchar.py | scalabli/quo | 70b6d4129ee705930f1f8a792fc4c9247d973f9d | [
"MIT"
] | 1 | 2022-03-21T16:29:54.000Z | 2022-03-21T16:29:54.000Z | examples/getchar.py | scalabli/quo | 70b6d4129ee705930f1f8a792fc4c9247d973f9d | [
"MIT"
] | null | null | null | from quo.getchar import getchar
getchar()
| 10.75 | 31 | 0.790698 |
0b83f0ab273b13a1a169d3aa5355aab90ac31ca1 | 313 | py | Python | setup.py | cfbolz/syntaxerrors | 1c7ecc8fd0d05253d5c55dee39802cfb86fb69f7 | [
"Apache-2.0",
"OpenSSL"
] | 5 | 2018-04-11T15:19:53.000Z | 2020-10-27T15:23:18.000Z | setup.py | cfbolz/syntaxerrors | 1c7ecc8fd0d05253d5c55dee39802cfb86fb69f7 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | setup.py | cfbolz/syntaxerrors | 1c7ecc8fd0d05253d5c55dee39802cfb86fb69f7 | [
"Apache-2.0",
"OpenSSL"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='syntaxerrors',
version='0.0.1',
description='Report better SyntaxErrors',
author='Carl Friedrich Bolz-Tereick',
author_email='cfbolz@gmx.de',
packages=['syntaxerrors'],
package_dir={'': 'src'},
include_package_data=True,
)
| 24.076923 | 45 | 0.686901 |
0b843ec57c40e34a7b0ee2c71349c26723ef8771 | 1,492 | py | Python | unit/either_spec.py | tek/amino | 51b314933e047a45587a24ecff02c836706d27ff | [
"MIT"
] | 33 | 2016-12-21T07:05:46.000Z | 2020-04-29T04:26:46.000Z | unit/either_spec.py | tek/amino | 51b314933e047a45587a24ecff02c836706d27ff | [
"MIT"
] | 1 | 2019-04-19T17:15:52.000Z | 2019-04-20T18:28:23.000Z | unit/either_spec.py | tek/amino | 51b314933e047a45587a24ecff02c836706d27ff | [
"MIT"
] | 4 | 2017-09-04T18:46:23.000Z | 2021-11-02T04:18:13.000Z | import operator
from amino.either import Left, Right
from amino import Empty, Just, Maybe, List, Either, _
from amino.test.spec_spec import Spec
from amino.list import Lists
__all__ = ('EitherSpec',)
| 31.744681 | 73 | 0.586461 |
0b84b4636dfd6d734d772cca8a444833fce6d004 | 221 | py | Python | Modulo_1/semana2/variables_contantes/sentencia-global.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | null | null | null | Modulo_1/semana2/variables_contantes/sentencia-global.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | null | null | null | Modulo_1/semana2/variables_contantes/sentencia-global.py | rubens233/cocid_python | 492ebdf21817e693e5eb330ee006397272f2e0cc | [
"MIT"
] | 1 | 2022-03-04T00:57:18.000Z | 2022-03-04T00:57:18.000Z | variable1 = "variable original"
print(variable1)
#variable original
variable_global()
print(variable1)
#variable global modificada
| 20.090909 | 44 | 0.78733 |
0b84d29786d1202df0158d5a5b88910f8c8196a5 | 1,314 | py | Python | weather_alarm/main.py | Cs4r/weather_alarm | b78b6f11f91e3b81aa43a1bfaa55074a0626a036 | [
"MIT"
] | null | null | null | weather_alarm/main.py | Cs4r/weather_alarm | b78b6f11f91e3b81aa43a1bfaa55074a0626a036 | [
"MIT"
] | null | null | null | weather_alarm/main.py | Cs4r/weather_alarm | b78b6f11f91e3b81aa43a1bfaa55074a0626a036 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import os
from apscheduler.schedulers.blocking import BlockingScheduler
from weather_alarm.constants import *
from weather_alarm.forecaster import Forecaster
from weather_alarm.sender import NotificationSender
sender = NotificationSender(BOT_TOKEN, TELEGRAM_USER_ID)
forecaster = Forecaster(OWM_API_KEY)
now = datetime.datetime.now()
nightly_alarm_time = datetime.datetime(now.year, now.month, now.day, *NIGHTLY_ALARM_TIME)
daily_alarm_time = datetime.datetime(now.year, now.month, now.day, *DAILY_ALARM_TIME)
scheduler = BlockingScheduler()
scheduler.add_job(func=send_tomorrow_forecast, args=FORECAST_TIME, trigger='interval', next_run_time=nightly_alarm_time,
misfire_grace_time=30, days=1)
scheduler.add_job(func=send_current_observed_weather, trigger='interval', next_run_time=daily_alarm_time,
misfire_grace_time=30, days=1)
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass | 32.04878 | 120 | 0.780822 |
0b88cc0b918db3b0b9bc55668bf46c025033b785 | 2,237 | py | Python | authlib/oauth2/rfc6749/__init__.py | geoffwhittington/authlib | 096f2a41f4fb18f9850427f07d556d4b9ab97383 | [
"BSD-3-Clause"
] | null | null | null | authlib/oauth2/rfc6749/__init__.py | geoffwhittington/authlib | 096f2a41f4fb18f9850427f07d556d4b9ab97383 | [
"BSD-3-Clause"
] | null | null | null | authlib/oauth2/rfc6749/__init__.py | geoffwhittington/authlib | 096f2a41f4fb18f9850427f07d556d4b9ab97383 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
authlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~
This module represents a direct implementation of
The OAuth 2.0 Authorization Framework.
https://tools.ietf.org/html/rfc6749
"""
from .wrappers import OAuth2Request, OAuth2Token, HttpRequest
from .errors import (
OAuth2Error,
AccessDeniedError,
MissingAuthorizationError,
InvalidGrantError,
InvalidClientError,
InvalidRequestError,
InvalidScopeError,
InsecureTransportError,
UnauthorizedClientError,
UnsupportedResponseTypeError,
UnsupportedGrantTypeError,
UnsupportedTokenTypeError,
# exceptions for clients
MissingCodeException,
MissingTokenException,
MissingTokenTypeException,
MismatchingStateException,
)
from .models import ClientMixin, AuthorizationCodeMixin, TokenMixin
from .authenticate_client import ClientAuthentication
from .authorization_server import AuthorizationServer
from .resource_protector import ResourceProtector, TokenValidator
from .token_endpoint import TokenEndpoint
from .grants import (
BaseGrant,
AuthorizationEndpointMixin,
TokenEndpointMixin,
AuthorizationCodeGrant,
ImplicitGrant,
ResourceOwnerPasswordCredentialsGrant,
ClientCredentialsGrant,
RefreshTokenGrant,
)
__all__ = [
'OAuth2Request', 'OAuth2Token', 'HttpRequest',
'OAuth2Error',
'AccessDeniedError',
'MissingAuthorizationError',
'InvalidGrantError',
'InvalidClientError',
'InvalidRequestError',
'InvalidScopeError',
'InsecureTransportError',
'UnauthorizedClientError',
'UnsupportedResponseTypeError',
'UnsupportedGrantTypeError',
'UnsupportedTokenTypeError',
'MissingCodeException',
'MissingTokenException',
'MissingTokenTypeException',
'MismatchingStateException',
'ClientMixin', 'AuthorizationCodeMixin', 'TokenMixin',
'ClientAuthentication',
'AuthorizationServer',
'ResourceProtector',
'TokenValidator',
'TokenEndpoint',
'BaseGrant',
'AuthorizationEndpointMixin',
'TokenEndpointMixin',
'AuthorizationCodeGrant',
'ImplicitGrant',
'ResourceOwnerPasswordCredentialsGrant',
'ClientCredentialsGrant',
'RefreshTokenGrant',
]
| 27.617284 | 67 | 0.743406 |
0b88fa702aed7c893ac61d3d5a2bd66384c1a59d | 1,613 | py | Python | scripts/serial_command.py | philip-long/singletact-python-wrapper | 659796f614116db77f31d6b0cc1e0c963104948e | [
"MIT"
] | null | null | null | scripts/serial_command.py | philip-long/singletact-python-wrapper | 659796f614116db77f31d6b0cc1e0c963104948e | [
"MIT"
] | null | null | null | scripts/serial_command.py | philip-long/singletact-python-wrapper | 659796f614116db77f31d6b0cc1e0c963104948e | [
"MIT"
] | null | null | null | TIMEOUT=100
| 23.042857 | 66 | 0.49349 |
0b89d5110511e9a326a0adf1605527ae76c9199c | 1,220 | py | Python | 1SiteRanking/create_kernel_density_map_arcpy.py | HCH2CHO/EmotionMap | bc572b4182637dcdd65e9a13c92f2fa0d9a3d680 | [
"MIT"
] | 3 | 2021-07-15T15:58:52.000Z | 2021-07-16T13:22:47.000Z | 1SiteRanking/create_kernel_density_map_arcpy.py | HCH2CHO/EmotionMap | bc572b4182637dcdd65e9a13c92f2fa0d9a3d680 | [
"MIT"
] | null | null | null | 1SiteRanking/create_kernel_density_map_arcpy.py | HCH2CHO/EmotionMap | bc572b4182637dcdd65e9a13c92f2fa0d9a3d680 | [
"MIT"
] | 4 | 2017-08-04T12:41:06.000Z | 2019-01-31T14:55:10.000Z | # coding:utf-8
# version:python2.7.3
# author:kyh
# import x,y data from txt and create kernel density map
import arcpy
from arcpy.sa import *
from arcpy import env
if __name__ == '__main__':
arcpy.CheckOutExtension('Spatial')
env.workspace=("D:\Users\KYH\Documents\ArcGIS\FlickrPhoto")
for i in range(0,25):
if (i==5) or (i==22):
continue
read_point_data("D:\\Users\\KYH\\Desktop\\EmotionMap\\FlickrEmotionData\\3faces_emotion\\faceflickr{0}.txt".format(i))
# Kernel Density Analysis
out_kernel_density=KernelDensity("World_Flickr{0}.shp".format(i),"NONE")
out_kernel_density.save("D:\Users\KYH\Documents\ArcGIS\FlickrPhoto\kd_Face{0}".format(i))
| 33.888889 | 127 | 0.645902 |
0b8ab81b9a756ab917e11852711a0c75ca7514c2 | 6,676 | py | Python | src/explore.py | dngo13/enpm808x_inspection_robot | 41f598d97d6526d4e85d1b738cd0bf1bce781b08 | [
"MIT"
] | null | null | null | src/explore.py | dngo13/enpm808x_inspection_robot | 41f598d97d6526d4e85d1b738cd0bf1bce781b08 | [
"MIT"
] | null | null | null | src/explore.py | dngo13/enpm808x_inspection_robot | 41f598d97d6526d4e85d1b738cd0bf1bce781b08 | [
"MIT"
] | 2 | 2021-12-05T23:39:56.000Z | 2021-12-06T17:54:54.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""*******************************************************************************
* MIT License
* Copyright (c) Charu Sharma 2021
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
********************************************************************************"""
"""
* @file explore.py
* @author Charu Sharma
* @date 12/14/2021
* @version 1.0
*
* @brief Main source file
*
* @section DESCRIPTION
*
* file to let the bot explore to the target locations and sending confirmation flag when reached
*
"""
#!/usr/bin/env python
#importing Libraries
import rospy
import math
import tf
from geometry_msgs.msg import Twist, Point
from sensor_msgs.msg import LaserScan
from tf.transformations import euler_from_quaternion
from enpm808x_inspection_robot.msg import location, flag_array, flag
rospy.init_node("move_robot")
pub = rospy.Publisher("cmd_vel", Twist, queue_size=1)
# create another publisher for location
# rospy.init_node('location_node')
# loc_pub = rospy.Publisher('location', array, queue_size=1)
loc_pub = rospy.Publisher('/flag', flag_array, queue_size=1)
rate = rospy.Rate(1)
velocity_msg = Twist()
rate = rospy.Rate(4)
tf_listener = tf.TransformListener()
parent_frame = 'odom'
child_frame = 'base_footprint'
k_h_gain = 1
k_v_gain = 1
distance_to_goal = 0.0
# locations = location()
flagged = flag()
flagged_arrays = flag_array()
flagged.check = "false"
try:
tf_listener.waitForTransform(parent_frame, child_frame, rospy.Time(), rospy.Duration(1.0))
except (tf.Exception, tf.ConnectivityException, tf.LookupException):
rospy.loginfo("Cannot find transform between {p} and {c}".format(p=parent_frame, c=child_frame))
rospy.signal_shutdown("tf Exception")
# while not rospy.is_shutdown():
# loc_pub.publish(arrays.loc)
if __name__ == "__main__":
action = ""
go_to_goal(-3, -1)
flagged.check = 'true'
print("The robot has reached the Chiller")
print("Commencing the pressure Detection")
# print("locations.loc_x = ", locations.loc_x)
# print("locations.loc_y = ", locations.loc_y)
flagged.check = 'false'
go_to_goal(0, 3)
print("The robot has reached the Boiler")
print("Commencing the pressure Detection")
# locations.loc_x = 0.0
# locations.loc_y = 3.0
# send message for boiler
go_to_goal(1, 3)
print("The robot has reached the Air Handling Units")
print("Commencing the pressure Detection")
# send message to AHU
# arrays.id.insert(0,flag)
while not rospy.is_shutdown():
loc_pub.publish(flagged_arrays.id)
flagged_arrays.id.insert(0,flagged)
rate.sleep()
exit() | 34.061224 | 100 | 0.673457 |
0b8b1ecbeb3c81d6b86bae93b7d7c89aca388a29 | 893 | py | Python | mapclientplugins/loadcsvstep/utils/processCSV.py | mahyar-osn/mapclientplugins.loadcsvstep | 2d483d4054f4c30247303e8d4eba706b70364158 | [
"Apache-2.0"
] | null | null | null | mapclientplugins/loadcsvstep/utils/processCSV.py | mahyar-osn/mapclientplugins.loadcsvstep | 2d483d4054f4c30247303e8d4eba706b70364158 | [
"Apache-2.0"
] | null | null | null | mapclientplugins/loadcsvstep/utils/processCSV.py | mahyar-osn/mapclientplugins.loadcsvstep | 2d483d4054f4c30247303e8d4eba706b70364158 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
| 28.806452 | 106 | 0.631579 |
0b8bbb57f61438a9cdd497a599dabe456d4ca928 | 2,521 | py | Python | src/api/handlers/projects/tokens.py | sap-steffen/InfraBox | 36c8b626b517415e4363c99037c5d2c118966e56 | [
"Apache-2.0"
] | 50 | 2017-09-03T15:54:08.000Z | 2019-03-13T16:53:15.000Z | src/api/handlers/projects/tokens.py | sap-steffen/InfraBox | 36c8b626b517415e4363c99037c5d2c118966e56 | [
"Apache-2.0"
] | 241 | 2017-09-03T14:40:08.000Z | 2022-03-02T02:32:26.000Z | src/api/handlers/projects/tokens.py | sap-steffen/InfraBox | 36c8b626b517415e4363c99037c5d2c118966e56 | [
"Apache-2.0"
] | 17 | 2017-09-03T11:28:01.000Z | 2018-04-30T15:58:18.000Z | from flask import request, g, abort
from flask_restplus import Resource, fields
from pyinfrabox.utils import validate_uuid4
from pyinfraboxutils.ibflask import auth_required, OK
from pyinfraboxutils.ibrestplus import api
from pyinfraboxutils.token import encode_project_token
from api.namespaces import project as ns
project_token_model = api.model('ProjectToken', {
'description': fields.String(required=True),
'scope_push': fields.Boolean(required=True),
'scope_pull': fields.Boolean(required=True),
'id': fields.String(required=False)
})
| 31.911392 | 84 | 0.624752 |
0b8c281f2be1b5f006c8dd22b32012df4fb6d732 | 2,716 | py | Python | tigergraph/benchmark.py | yczhang1017/ldbc_snb_bi | 5b97da8b2596e88bc460d5568fc7b31587695b62 | [
"Apache-2.0"
] | null | null | null | tigergraph/benchmark.py | yczhang1017/ldbc_snb_bi | 5b97da8b2596e88bc460d5568fc7b31587695b62 | [
"Apache-2.0"
] | null | null | null | tigergraph/benchmark.py | yczhang1017/ldbc_snb_bi | 5b97da8b2596e88bc460d5568fc7b31587695b62 | [
"Apache-2.0"
] | null | null | null | import argparse
from pathlib import Path
from datetime import datetime, date, timedelta
from queries import run_queries, precompute, cleanup
from batches import run_batch_update
import os
import time
import re
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='LDBC TigerGraph BI workload Benchmark')
parser.add_argument('data_dir', type=Path, help='The directory to load data from')
parser.add_argument('--header', action='store_true', help='whether data has the header')
parser.add_argument('--cluster', action='store_true', help='load concurrently on cluster')
parser.add_argument('--skip', action='store_true', help='skip precompute')
parser.add_argument('--para', type=Path, default=Path('../parameters'), help='parameter folder')
parser.add_argument('--test', action='store_true', help='test mode only run one time')
parser.add_argument('--nruns', '-n', type=int, default=10, help='number of runs')
parser.add_argument('--endpoint', type=str, default = 'http://127.0.0.1:9000', help='tigergraph rest port')
args = parser.parse_args()
sf = os.environ.get("SF")
results_file = open('output/results.csv', 'w')
timings_file = open('output/timings.csv', 'w')
timings_file.write(f"tool|sf|q|parameters|time\n")
query_variants = ["1", "2a", "2b", "3", "4", "5", "6", "7", "8a", "8b", "9", "10a", "10b", "11", "12", "13", "14a", "14b", "15a", "15b", "16a", "16b", "17", "18", "19a", "19b", "20"]
query_nums = [int(re.sub("[^0-9]", "", query_variant)) for query_variant in query_variants]
start_date = date(2012, 11, 29)
end_date = date(2013, 1, 1)
batch_size = timedelta(days=1)
needClean = False
batch_date = start_date
while batch_date < end_date:
start = time.time()
duration = run_batch_update(batch_date, args)
# For SF-10k and larger, sleep time may be needed after batch update to release memory
# time.sleep(duration * 0.2)
if needClean:
for query_num in [19,20]:
if query_num in query_nums:
cleanup(query_num, args.endpoint)
needClean = False
for query_num in [4,6,19,20]:
if query_num in query_nums:
precompute(query_num, args.endpoint)
needClean = True
writes_time = time.time() - start
timings_file.write(f"TigerGraph|{sf}|writes|{batch_date}|{writes_time:.6f}\n")
reads_time = run_queries(query_variants, results_file, timings_file, args)
timings_file.write(f"TigerGraph|{sf}|reads|{batch_date}|{reads_time:.6f}\n")
batch_date = batch_date + batch_size
results_file.close()
timings_file.close()
| 48.5 | 186 | 0.654639 |
0b8cb9211db86e8a3c8e8b138c17ac41f7b2fae4 | 3,001 | py | Python | wgan/updater.py | Aixile/chainer-gan-experiments | 4371e8369d2805e8ace6d7aacc397aa6e62680a6 | [
"MIT"
] | 70 | 2017-06-24T10:55:57.000Z | 2021-11-23T22:52:37.000Z | wgan/updater.py | Aixile/chainer-gan-experiments | 4371e8369d2805e8ace6d7aacc397aa6e62680a6 | [
"MIT"
] | 1 | 2017-08-21T06:19:31.000Z | 2017-08-21T07:54:28.000Z | wgan/updater.py | Aixile/chainer-gan-experiments | 4371e8369d2805e8ace6d7aacc397aa6e62680a6 | [
"MIT"
] | 16 | 2017-08-22T07:00:16.000Z | 2018-11-18T16:15:21.000Z | import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda, optimizers, serializers, Variable
import sys
sys.path.insert(0, '../')
from common.loss_functions import *
| 31.589474 | 104 | 0.587471 |
0b8d785a697c0b3da778b64fbf2cc6f8d4ea2d37 | 19,032 | py | Python | tools/ig/definitions.py | grahamegrieve/vocab-poc | 9f8b6c29b32f15c9513f16f148fdf2a441ba3897 | [
"BSD-3-Clause"
] | 2 | 2017-06-25T22:15:18.000Z | 2017-09-15T05:12:50.000Z | tools/ig/definitions.py | grahamegrieve/vocab-poc | 9f8b6c29b32f15c9513f16f148fdf2a441ba3897 | [
"BSD-3-Clause"
] | null | null | null | tools/ig/definitions.py | grahamegrieve/vocab-poc | 9f8b6c29b32f15c9513f16f148fdf2a441ba3897 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3.
# create ig definition file with all value sets in the /resources directory
import json, os, sys, logging, re, csv
from lxml import etree
#logging.disable(logging.CRITICAL)
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s- %(message)s')
logging.info('Start of program')
logging.info('The logging module is working.')
# create the ig.json file template as dictoinary
logging.info('create the ig.json file template as dictionary')
# globals
dir = os.getcwd() + '/' # current dir
logging.info('cwd = ' + dir)
''' this is the definitions file skeleton you need to modify as needed see ig publisher documenentation at f http://wiki.hl7.org/index.php?title=IG_Publisher_Documentation or more information.'''
igpy = {
"broken-links": "warning",
"canonicalBase": "http://www.fhir.org/guides/ig-template",
"defaults": {
"Any": {
"template-base": "base.html",
"template-format": "format.html"
},
"CapabilityStatement": {
"template-base": "capst.html"
},
"CodeSystem": {
"template-base": "codesys.html"
},
"ConceptMap": {
"template-base": "cm.html"
},
"OperationDefinition": {
"template-base": "op.html"
},
"StructureDefinition": {
"template-base": "sd.html",
"template-defns": "sd-definitions.html",
"template-mappings": "sd-mappings.html"
},
"ValueSet": {
"template-base": "vs.html"
}
},
"dependencyList": [{}],
"do-transforms": "false",
"extraTemplates": [
"mappings"
],
"fixed-business-version": "0.0.0",
"gen-examples": "false",
"jurisdiction": "US",
"no-inactive-codes": "false",
"paths": {
"output": "output",
"pages": [],
"qa": "qa",
"resources": [],
"specification": "http://build.fhir.org",
"temp": "temp",
"txCache": "txCache"
},
"resources": {},
"sct-edition": "http://snomed.info/sct/731000124108",
"source": "ig.xml",
"special-urls": [],
"spreadsheets": [],
"tool": "jekyll",
"version": "3.1.0",
"working-dir": None,
"title": "Implementation Guide Template",
"status": "draft",
"publisher": "Health eData Inc",
"extensions": [],
"searches": [],
"codesystems": [],
"valuesets": [],
"structuremaps": []
}
logging.info('create the ig.xml file template as string')
''' this is the ig.xml file skeleton may need to modify as needed see ig publisher documenentation at f http://wiki.hl7.org/index.php?title=IG_Publisher_Documentation or more information. The Cap Case words are variables that are replaced by variables in the definitions file'''
igxml ='''<?xml version="1.0" encoding="UTF-8"?><!--Hidden IG for de facto IG publishing--><ImplementationGuide xmlns="http://hl7.org/fhir"><id value="ig"/><url value="BASE/ImplementationGuide/ig"/><name value="TITLE"/><status value="STATUS"/><experimental value="true"/><publisher value="PUBLISHER"/><package><name value="base"/></package><page><source value="index.html"/><title value="TITLE Homepage"/><kind value="page"/></page></ImplementationGuide>'''
# Function definitions here
#main
if __name__ == '__main__':
main()
logging.info('End of program')
| 49.5625 | 457 | 0.620954 |
0b8f097eaf823137c79c39a4fcd3c6e49316ae19 | 8,456 | py | Python | src/sadie/renumbering/clients/g3.py | jwillis0720/pybody | 2d7c68650ac1ef5f3003ccb67171898eac1f63eb | [
"MIT"
] | null | null | null | src/sadie/renumbering/clients/g3.py | jwillis0720/pybody | 2d7c68650ac1ef5f3003ccb67171898eac1f63eb | [
"MIT"
] | null | null | null | src/sadie/renumbering/clients/g3.py | jwillis0720/pybody | 2d7c68650ac1ef5f3003ccb67171898eac1f63eb | [
"MIT"
] | null | null | null | from functools import lru_cache
from itertools import product
from pathlib import Path
from typing import Optional, List, Tuple
from pydantic import validate_arguments
import pyhmmer
import requests as r
from yarl import URL
from sadie.typing import Species, Chain, Source
# def get_msa(
# self,
# source: Source = "imgt",
# species: Species = "human",
# chain: Chain = "H",
# limit: Optional[int] = None,
# ) -> str:
# stockholm_pairs = self.get_stockholm_pairs(source=source, chain=chain, species=species, limit=limit)
# sequences = []
# for name, align in stockholm_pairs:
# sequence = pyhmmer.easel.TextSequence(name=name.encode(), sequence=align)
# sequences.append(sequence)
# if not sequences:
# return None
# return pyhmmer.easel.TextMSA(name=f"{species}_{chain}".encode(), sequences=sequences).digitize(self.alphabet)
| 33.555556 | 119 | 0.561968 |
0b904a57fdd0e7c89f250e4ba02b11a25b8c89d4 | 1,683 | py | Python | projects/shadow/kmap-builder-jython27/MapReduce/mappers/__init__.py | zaqwes8811/smart-vocabulary-cards | abeab5c86b1c6f68d8796475cba80c4f2c6055ff | [
"Apache-2.0"
] | null | null | null | projects/shadow/kmap-builder-jython27/MapReduce/mappers/__init__.py | zaqwes8811/smart-vocabulary-cards | abeab5c86b1c6f68d8796475cba80c4f2c6055ff | [
"Apache-2.0"
] | 11 | 2015-01-25T14:22:52.000Z | 2015-09-08T09:59:38.000Z | projects/shadow/kmap-builder-jython27/MapReduce/mappers/__init__.py | zaqwes8811/vocabulary-cards | abeab5c86b1c6f68d8796475cba80c4f2c6055ff | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from nlp_components.content_items_processors import process_list_content_sentences
from nlp_components.content_items_processors import process_list_content_sentences_real
import dals.os_io.io_wrapper as dal
import json
# NO DRY!!
def mapper(job):
""" [node_name, index_word, [count_sent, summ_sent_len], url, lang]"""
url_tmp_file = job[1]
node_name = job[0]
#
file_content = read_utf_txt_file(url_tmp_file)
metadata = file_content[0]
settings = json.loads(metadata)
url = settings['url']
lang = settings['lang']
list_content_items = file_content[1:]
#
index, (count_sents, summ_sents_len) = process_list_content_sentences(
list_content_items, lang)
parallel_pkg = (node_name, index, [count_sents, summ_sents_len], (url, lang))
return parallel_pkg
def mapper_real(job):
""" [node_name, .., .., .., ]"""
url = job[0]
text_extractor = job[1]
tokenizer = job[2]
node_name = job[3]
#
text = text_extractor(url)
#
lits_content_items = []
if tokenizer:
lits_content_items = tokenizer(text)
else:
lits_content_items = [text]
#
index, (count_sents, summ_sents_len) = \
process_list_content_sentences_real(
lits_content_items,
tokenizer)
parallel_pkg = (node_name, index, [count_sents, summ_sents_len], url)
return parallel_pkg
| 24.75 | 87 | 0.673203 |
0b9068ae3299f03b04a8be28a4b732a299e46459 | 1,132 | py | Python | config.py | xXAligatorXx/repostAlert | 74f450b577fa0971632a57c7d1f599eea4808427 | [
"MIT"
] | 25 | 2018-10-18T15:16:39.000Z | 2019-06-06T04:33:20.000Z | config.py | xXAligatorXx/repostAlert | 74f450b577fa0971632a57c7d1f599eea4808427 | [
"MIT"
] | 6 | 2018-10-17T01:34:13.000Z | 2019-06-08T18:31:41.000Z | config.py | xXAligatorXx/repostAlert | 74f450b577fa0971632a57c7d1f599eea4808427 | [
"MIT"
] | 2 | 2018-10-25T19:42:45.000Z | 2018-12-05T23:09:06.000Z | import os
client_id = os.environ['BOT_CLIENT_ID']
client_secret = os.environ['BOT_CLIENT_SECRET']
user_agent = os.environ['BOT_USER_AGENT']
username = os.environ['BOT_USERNAME']
password = os.environ['BOT_PASSWORD']
num_subs = int(os.environ['BOT_SUB_COUNT'])
sub_settings = [[
os.environ['BOT_SUBREDDIT' + i],
int(os.environ['BOT_TOP_DAYS' + i]) if 'BOT_TOP_DAYS' + i in os.environ else None,
int(os.environ['BOT_HOT_DAYS' + i]) if 'BOT_HOT_DAYS' + i in os.environ else None,
int(os.environ['BOT_NEW_DAYS' + i]) if 'BOT_NEW_DAYS' + i in os.environ else None,
int(os.environ['BOT_TOP_NUM_POSTS' + i]) if 'BOT_TOP_NUM_POSTS' + i in os.environ else 1000,
int(os.environ['BOT_HOT_NUM_POSTS' + i]) if 'BOT_HOT_NUM_POSTS' + i in os.environ else 1000,
int(os.environ['BOT_NEW_NUM_POSTS' + i]) if 'BOT_NEW_NUM_POSTS' + i in os.environ else 1000,
int(os.environ['BOT_THRESH' +i]) if 'BOT_THRESH' + i in os.environ else 5,
bool(os.environ['BOT_TEXT_IN_IMAGE' + i]) if 'BOT_TEXT_IN_IMAGE' + i in os.environ else False,
] for i in [str(x) for x in range(num_subs)]]
| 56.6 | 102 | 0.682862 |
0b906bf27fc67aeba61a035efc941b80ca56e405 | 4,305 | py | Python | lib/aquilon/worker/formats/list.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | lib/aquilon/worker/formats/list.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | lib/aquilon/worker/formats/list.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List formatter."""
from operator import attrgetter
from six import string_types
from sqlalchemy.orm.collections import InstrumentedList
from sqlalchemy.orm.query import Query
from sqlalchemy.ext.associationproxy import _AssociationList
from aquilon.worker.formats.formatters import ObjectFormatter
ObjectFormatter.handlers[list] = ListFormatter()
ObjectFormatter.handlers[Query] = ListFormatter()
ObjectFormatter.handlers[InstrumentedList] = ListFormatter()
ObjectFormatter.handlers[_AssociationList] = ListFormatter()
ObjectFormatter.handlers[StringList] = StringListFormatter()
ObjectFormatter.handlers[StringAttributeList] = StringAttributeListFormatter()
| 38.783784 | 83 | 0.673635 |
0b9113a200832679e9fc55536bc662bb2d860b4c | 228 | py | Python | satyrus/sat/types/string.py | lucasvg/Satyrus3-FinalProject-EspTopsOTM | 024785752abdc46e3463d8c94df7c3da873c354d | [
"MIT"
] | null | null | null | satyrus/sat/types/string.py | lucasvg/Satyrus3-FinalProject-EspTopsOTM | 024785752abdc46e3463d8c94df7c3da873c354d | [
"MIT"
] | null | null | null | satyrus/sat/types/string.py | lucasvg/Satyrus3-FinalProject-EspTopsOTM | 024785752abdc46e3463d8c94df7c3da873c354d | [
"MIT"
] | null | null | null | from .main import SatType
| 25.333333 | 49 | 0.605263 |
0b944182e57c98d2c412133b9ff0a2ad81333fdb | 737 | py | Python | setup.py | ZeroCater/zerocaterpy | 824af8613db0c5f203c0b2f7cebd830ee80eea5d | [
"MIT"
] | null | null | null | setup.py | ZeroCater/zerocaterpy | 824af8613db0c5f203c0b2f7cebd830ee80eea5d | [
"MIT"
] | null | null | null | setup.py | ZeroCater/zerocaterpy | 824af8613db0c5f203c0b2f7cebd830ee80eea5d | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='zerocater',
version='0.0.1',
description="Python interface to ZeroCater",
long_description='',
keywords='zerocater food delivery meal planning catering lunch',
author='ZeroCater',
author_email='tech@zerocater.com',
url='https://github.com/ZeroCater/PyZeroCater',
download_url='https://github.com/ZeroCater/PyZeroCater/tarball/0.0.1',
license='MIT',
packages=['zerocater'],
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP",
]
) | 35.095238 | 76 | 0.63365 |
0b95ab4e62401288fe9f479867e2cab6f6c5d09c | 373 | py | Python | tests/conftest.py | scottmanderson/minerva | fe6a6857d892d9c7d881701c91990d9697bde00e | [
"MIT"
] | null | null | null | tests/conftest.py | scottmanderson/minerva | fe6a6857d892d9c7d881701c91990d9697bde00e | [
"MIT"
] | null | null | null | tests/conftest.py | scottmanderson/minerva | fe6a6857d892d9c7d881701c91990d9697bde00e | [
"MIT"
] | null | null | null | import pytest
from app import create_app, db
from config import TestConfig
| 18.65 | 51 | 0.72118 |
0b96aaa21f422ac0c7d22576279c69b61dd42c95 | 154 | py | Python | Test/two/payments/momo/urls.py | titan256/Python-Django-Assignment | 9f56f69ea7182456729116e27435231925d24d11 | [
"MIT"
] | null | null | null | Test/two/payments/momo/urls.py | titan256/Python-Django-Assignment | 9f56f69ea7182456729116e27435231925d24d11 | [
"MIT"
] | 9 | 2020-06-05T23:53:04.000Z | 2022-02-10T08:33:32.000Z | Test/two/payments/momo/urls.py | titan256/Python-Django-Assignment | 9f56f69ea7182456729116e27435231925d24d11 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path , include
from . import views
urlpatterns = [
path('',views.index,name='index')
] | 19.25 | 38 | 0.701299 |
0b975c6ddf1a134fa942ba06d2fe6a39b749365f | 6,435 | py | Python | pdsensorvis/sensors/models.py | mickeykkim/masters-project-sphere | 6dbe0be877058e647f5e3822932e5a70f181bb53 | [
"MIT"
] | 2 | 2019-10-05T20:59:41.000Z | 2019-11-01T20:25:39.000Z | pdsensorvis/sensors/models.py | mickeykkim/masters-project-sphere | 6dbe0be877058e647f5e3822932e5a70f181bb53 | [
"MIT"
] | 6 | 2019-10-24T12:28:02.000Z | 2021-08-09T09:56:26.000Z | pdsensorvis/sensors/models.py | mickeykkim/masters-project-sphere | 6dbe0be877058e647f5e3822932e5a70f181bb53 | [
"MIT"
] | null | null | null | from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
from django.utils import timezone
import uuid
ANNOTATION = (
('asm', 'Asymmetry'),
('dst', 'Dystonia'),
('dsk', 'Dyskensia'),
('ebt', 'En Bloc Turning'),
('str', 'Short Stride Length'),
('mov', 'Slow/Hesitant Movement'),
('pos', 'Stooped Posture'),
('trm', 'Tremor'),
('oth', 'Other/Activity')
)
FRAME_RATES = (
('NTSC_Film', 23.98),
('Film', 24),
('PAL', 25),
('NTSC', 29.97),
('Web', 30),
('PAL_HD', 50),
('NTSC_HD', 59.94),
('High', 60),
)
| 37.631579 | 116 | 0.685004 |
0b9809b2c18e28f3af61ecc6021ff494abd1e0f4 | 533 | py | Python | setup.py | soumyarani/mopac | 72f10fdd3ea3c9c61b6c808ca07ee9031b7d4aa8 | [
"MIT"
] | 20 | 2021-03-16T08:18:01.000Z | 2022-03-12T13:46:43.000Z | setup.py | soumyarani/mopac | 72f10fdd3ea3c9c61b6c808ca07ee9031b7d4aa8 | [
"MIT"
] | 1 | 2021-05-13T14:49:25.000Z | 2021-05-13T19:45:26.000Z | setup.py | soumyarani/mopac | 72f10fdd3ea3c9c61b6c808ca07ee9031b7d4aa8 | [
"MIT"
] | 5 | 2020-11-01T15:46:39.000Z | 2021-07-30T13:12:06.000Z | from distutils.core import setup
from setuptools import find_packages
# Packaging metadata for the ``mopac`` distribution.
setup(
    name='mopac',
    packages=find_packages(),
    version='0.1',
    description='Model-based policy optimization',
    # Read eagerly at build time, so README.md must exist next to this file.
    long_description=open('./README.md').read(),
    author='',
    author_email='',
    url='',
    entry_points={
        'console_scripts': (
            # NOTE(review): the ``mopac`` script targets the ``softlearning``
            # package, not ``mopac`` -- confirm this is intentional.
            'mopac=softlearning.scripts.console_scripts:main',
            'viskit=mopac.scripts.console_scripts:main'
        )
    },
    requires=(),
    zip_safe=True,
    license='MIT'
)
| 23.173913 | 62 | 0.626642 |
0b9851847b18a4b7b38e82d6bd87af07dc1c57a9 | 1,531 | py | Python | examples/imu.py | dan-stone/canal | 8a6b03a46102f7e5ca457538eb03ab9526eec095 | [
"MIT"
] | 2 | 2017-02-08T20:27:39.000Z | 2019-07-15T00:34:05.000Z | examples/imu.py | dan-stone/canal | 8a6b03a46102f7e5ca457538eb03ab9526eec095 | [
"MIT"
] | null | null | null | examples/imu.py | dan-stone/canal | 8a6b03a46102f7e5ca457538eb03ab9526eec095 | [
"MIT"
] | 1 | 2018-05-31T14:09:19.000Z | 2018-05-31T14:09:19.000Z | import datetime
import canal
from influxdb import InfluxDBClient
if __name__ == "__main__":
start_date = datetime.datetime.now(datetime.timezone.utc)
duration = datetime.timedelta(seconds=60)
user_id = 12345678
client = InfluxDBClient(
host="localhost",
port=8086,
database="canal"
)
# Write some dummy IMU data, sampled once per second
num_imu_samples = int(duration.total_seconds())
imu = IMU(
time=[start_date + datetime.timedelta(seconds=d) for d in
range(num_imu_samples)],
acc_x=range(0, 1 * num_imu_samples, 1),
acc_y=range(0, 2 * num_imu_samples, 2),
acc_z=range(0, 3 * num_imu_samples, 3),
gyro_x=range(0, 4 * num_imu_samples, 4),
gyro_y=range(0, 5 * num_imu_samples, 5),
gyro_z=range(0, 6 * num_imu_samples, 6),
user_id=user_id
)
client.write(
data=imu.to_line_protocol(),
params=dict(
db="canal"
)
)
# Read back the IMU data
imu_resp = client.query(IMU.make_query_string(
time__gte=start_date,
time__lte=start_date + duration,
user_id=user_id
))
assert imu == IMU.from_json(imu_resp.raw)
| 27.836364 | 65 | 0.640758 |
0b98688189c3ac958636f3a3393afa2872fb1f5c | 2,820 | py | Python | lib/ravstack/runtime.py | geertj/raviron | 7920c6b71757eddcca16b60051c1cf08706ae11b | [
"MIT"
] | 1 | 2015-05-11T21:39:35.000Z | 2015-05-11T21:39:35.000Z | lib/ravstack/runtime.py | geertj/raviron | 7920c6b71757eddcca16b60051c1cf08706ae11b | [
"MIT"
] | null | null | null | lib/ravstack/runtime.py | geertj/raviron | 7920c6b71757eddcca16b60051c1cf08706ae11b | [
"MIT"
] | null | null | null | #
# This file is part of ravstack. Ravstack is free software available under
# the terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2015 the ravstack authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import sys
import logging
from . import config, defaults, util
prog_name = __name__.split('.')[0]
LOG = logging.getLogger(prog_name)
CONF = config.Config()
DEBUG = util.EnvInt('DEBUG')
VERBOSE = util.EnvInt('VERBOSE')
LOG_STDERR = util.EnvInt('LOG_STDERR')
log_context = ''
log_datetime = '%(asctime)s '
log_template = '%(levelname)s [%(name)s] %(message)s'
log_ctx_template = '%(levelname)s [{}] [%(name)s] %(message)s'
def setup_config():
    """Initialize the module-global ``CONF`` object in place.

    Applies the schema, reads the config file, folds in environment
    overrides, fills in the Ravello application name from VM metadata when
    it was not configured explicitly, and finally exports the resulting
    settings back to the environment. Returns nothing.
    """
    CONF.set_schema(defaults.config_schema)
    CONF.read_file(defaults.config_file)
    CONF.update_from_env()
    meta = util.get_ravello_metadata()
    # '<None>' appears to be the "not set by the user" sentinel -- confirm
    # against defaults.config_schema.
    if 'appName' in meta and CONF['ravello']['application'] == '<None>':
        CONF['ravello']['application'] = meta['appName']
    CONF.update_to_env()
def setup_logging(context=None):
    """Set up or reconfigure logging.

    Args:
        context: optional string embedded in every log record; it is stored
            in the module-global ``log_context`` so later reconfigurations
            keep using it.
    """
    # Start from a clean slate: drop handlers installed by a previous call.
    root = logging.getLogger()
    if root.handlers:
        del root.handlers[:]
    global log_context
    if context is not None:
        log_context = context
    template = log_ctx_template.format(log_context) if log_context else log_template
    # Log to stderr?
    if LOG_STDERR:
        handler = logging.StreamHandler(sys.stderr)
        handler.setFormatter(logging.Formatter(template))
        root.addHandler(handler)
    else:
        # Keep the root logger quiet rather than letting logging complain
        # about "no handlers".
        root.addHandler(logging.NullHandler())
    # Available log file?
    logfile = defaults.log_file
    if util.can_open(logfile, 'a'):
        handler = logging.FileHandler(logfile)
        handler.setFormatter(logging.Formatter(log_datetime + template))
        root.addHandler(handler)
    # Verbosity escalates ERROR -> INFO -> DEBUG via the env-driven flags.
    root.setLevel(logging.DEBUG if DEBUG else logging.INFO if VERBOSE else logging.ERROR)
    # A little less verbosity for requests.
    logger = logging.getLogger('requests.packages.urllib3.connectionpool')
    logger.setLevel(logging.DEBUG if DEBUG else logging.ERROR)
    # Silence "insecure platform" warning for requests module on Py2.7.x under
    # default verbosity.
    logging.captureWarnings(True)
    logger = logging.getLogger('py.warnings')
    logger.setLevel(logging.DEBUG if DEBUG else logging.ERROR)
# Run a main function
def run_main(func):
    """Bootstrap configuration and logging, then invoke *func*.

    Uncaught exceptions are logged with a traceback; in DEBUG mode they are
    re-raised for a full crash, otherwise a short message is printed.
    """
    setup_config()
    setup_logging()
    try:
        func()
    except Exception as exc:
        LOG.error('Uncaught exception:', exc_info=True)
        if DEBUG:
            raise
        print('Error: {!s}'.format(exc))
| 30.989011 | 89 | 0.693617 |
0b9897a43237e684b6c66f4d6a3b18dc5aaad9da | 1,217 | py | Python | onetouch.py | kakoni/insulaudit | 18fe0802bafe5764882ac4e65e472fdc840baa45 | [
"MIT"
] | 1 | 2020-11-28T13:23:58.000Z | 2020-11-28T13:23:58.000Z | onetouch.py | kakoni/insulaudit | 18fe0802bafe5764882ac4e65e472fdc840baa45 | [
"MIT"
] | null | null | null | onetouch.py | kakoni/insulaudit | 18fe0802bafe5764882ac4e65e472fdc840baa45 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import user
import serial
from pprint import pprint, pformat
import insulaudit
from insulaudit.data import glucose
from insulaudit.log import io
from insulaudit.devices import onetouch2
import sys
# Serial device the OneTouch meter is expected on.
PORT = '/dev/ttyUSB0'
if __name__ == '__main__':
    # NOTE(review): ``init()`` is not defined in this excerpt -- presumably a
    # helper elsewhere in this file that opens/configures the port; confirm.
    port = init()
    io.info( port )
| 22.537037 | 89 | 0.67461 |
0b9a8528a7dd0b2f831662e5079ebdffe6991f3a | 3,743 | py | Python | src/graphdb_builder/databases/parsers/smpdbParser.py | hhefzi/CKG | e117502a05f870174372da275e22ac3f8735d65a | [
"MIT"
] | null | null | null | src/graphdb_builder/databases/parsers/smpdbParser.py | hhefzi/CKG | e117502a05f870174372da275e22ac3f8735d65a | [
"MIT"
] | 1 | 2020-06-11T11:59:42.000Z | 2020-07-01T11:13:51.000Z | src/graphdb_builder/databases/parsers/smpdbParser.py | vemonet/CKG | c9e15c4c8ec8d81ca05c67e9a6f346ca385d8fbe | [
"MIT"
] | 1 | 2022-02-02T10:56:32.000Z | 2022-02-02T10:56:32.000Z | import os.path
import zipfile
import pandas as pd
from collections import defaultdict
from graphdb_builder import builder_utils
#########################
# SMPDB database #
#########################
| 39.819149 | 165 | 0.608068 |
0b9b80c225b518a078b36396f1fbccc56916e124 | 738 | py | Python | server/waitFramerate.py | mboerwinkle/RingGame | 5a9b6a6ea394c1e88689fa062d4d348383ab406a | [
"MIT"
] | null | null | null | server/waitFramerate.py | mboerwinkle/RingGame | 5a9b6a6ea394c1e88689fa062d4d348383ab406a | [
"MIT"
] | null | null | null | server/waitFramerate.py | mboerwinkle/RingGame | 5a9b6a6ea394c1e88689fa062d4d348383ab406a | [
"MIT"
] | null | null | null | import time
#Timing stuff
lastTime = None
prevFrameTime = 0;
| 32.086957 | 114 | 0.720867 |
0b9de1232f56d34e55746d5d53165b3e1ae67c6c | 9,988 | py | Python | shellbot/spaces/local.py | bernard357/shellbot | daf64fbab4085d1591bf9a1aecd06b4fc615d132 | [
"Apache-2.0"
] | 11 | 2017-04-30T18:10:27.000Z | 2021-11-07T16:59:29.000Z | shellbot/spaces/local.py | DataCraft-AI/shellbot | daf64fbab4085d1591bf9a1aecd06b4fc615d132 | [
"Apache-2.0"
] | 38 | 2017-04-20T17:33:05.000Z | 2017-11-10T20:19:07.000Z | shellbot/spaces/local.py | DataCraft-AI/shellbot | daf64fbab4085d1591bf9a1aecd06b4fc615d132 | [
"Apache-2.0"
] | 3 | 2017-04-21T21:14:53.000Z | 2021-07-27T22:01:21.000Z | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from multiprocessing import Process, Queue
import os
from six import string_types
import sys
import time
from shellbot.channel import Channel
from shellbot.events import Message
from shellbot.i18n import _
from .base import Space
| 26.634667 | 80 | 0.580396 |
0b9e17c3c6711c5899263cca3e86df88aba125ad | 13,497 | py | Python | src/warp/yul/AstTools.py | sambarnes/warp | f841afa22e665d5554587eaa866c4790698bfc22 | [
"Apache-2.0"
] | 414 | 2021-07-17T13:06:55.000Z | 2022-03-31T14:57:10.000Z | src/warp/yul/AstTools.py | sambarnes/warp | f841afa22e665d5554587eaa866c4790698bfc22 | [
"Apache-2.0"
] | 78 | 2021-07-19T12:33:56.000Z | 2022-03-29T17:16:27.000Z | src/warp/yul/AstTools.py | sambarnes/warp | f841afa22e665d5554587eaa866c4790698bfc22 | [
"Apache-2.0"
] | 19 | 2021-08-18T03:55:54.000Z | 2022-03-29T15:29:48.000Z | from __future__ import annotations
import re
from typing import Union
import warp.yul.ast as ast
from warp.yul.AstVisitor import AstVisitor
from warp.yul.WarpException import WarpException
| 32.601449 | 88 | 0.572127 |
0b9fa6b8eac70139650145aa00e7cb7eb8455c1b | 5,911 | py | Python | srfnef/tools/doc_gen/doc_generator.py | twj2417/srf | 63365cfd75199d70eea2273214a4fa580a9fdf2a | [
"Apache-2.0"
] | null | null | null | srfnef/tools/doc_gen/doc_generator.py | twj2417/srf | 63365cfd75199d70eea2273214a4fa580a9fdf2a | [
"Apache-2.0"
] | null | null | null | srfnef/tools/doc_gen/doc_generator.py | twj2417/srf | 63365cfd75199d70eea2273214a4fa580a9fdf2a | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
'''
@author: Minghao Guo
@contact: mh.guo0111@gmail.com
@software: basenef
@file: doc_generator.py
@date: 4/13/2019
@desc:
'''
import os
import sys
import time
from getpass import getuser
import matplotlib
import numpy as np
import json
from srfnef import Image, MlemFull
matplotlib.use('Agg')
author = getuser()
| 31.110526 | 99 | 0.607511 |
0b9ff21662fde4d991d952e6a81287147181af9f | 760 | py | Python | prep_scripts/0_join_data.py | linas-p/EVDPEP | 2062e20ef784a76eebaf71ebbe4f9006cde5bbd5 | [
"CC0-1.0"
] | 5 | 2021-10-05T14:02:52.000Z | 2021-11-23T07:59:06.000Z | prep_scripts/0_join_data.py | patrickiswwgp/EVDPEP | 2062e20ef784a76eebaf71ebbe4f9006cde5bbd5 | [
"CC0-1.0"
] | 1 | 2021-10-31T14:41:48.000Z | 2021-10-31T16:23:45.000Z | prep_scripts/0_join_data.py | patrickiswwgp/EVDPEP | 2062e20ef784a76eebaf71ebbe4f9006cde5bbd5 | [
"CC0-1.0"
] | 3 | 2021-11-23T07:59:17.000Z | 2022-03-31T09:09:03.000Z | import pandas as pd
import numpy as np
# Location of the raw EV-consumption CSV exports.
DATA_PATH = "./data/EVconsumption/"
# Dimension tables: weather measurements and an OSM road-segment snapshot
# (both pipe-separated).
weather = pd.read_csv(DATA_PATH + "dimweathermeasure.csv", sep = "|")
osm = pd.read_csv(DATA_PATH + "osm_dk_20140101.csv", sep = "|")
# Trip data from three extracts (filenames suggest Aalborg Viterbi
# map-matching runs -- confirm).
data0 = pd.read_csv(DATA_PATH + "2020_11_25_aal_viterbi.csv", sep = ",")
data1 = pd.read_csv(DATA_PATH + "2021_04_06_aal_north_viterbi.csv", sep = ",")
data2 = pd.read_csv(DATA_PATH + "2021_04_06_aal_south_viterbi.csv", sep = ",")
# Stack the extracts and drop rows duplicated across overlapping exports.
data = pd.concat([data0, data1, data2], axis=0)
data = data.drop_duplicates()
# Left-join weather (by measure key + date) and road attributes (by segment).
result = pd.merge(data, weather, how="left", on=["weathermeasurekey", "datekey"])
result = pd.merge(result, osm, how="left", on=["segmentkey"])
result.to_csv(DATA_PATH + "data_0_joined_data.csv")
print("Results {}".format(result.shape)) | 38 | 81 | 0.711842 |
0ba17e31d084e4b9249ccb1a58a413d758400527 | 964 | py | Python | csuibot/utils/kbbi.py | chadmadna/CSUIBot | a6f54639c256a3c86a9aa7c3fc094e69ce96a1b8 | [
"Apache-2.0"
] | null | null | null | csuibot/utils/kbbi.py | chadmadna/CSUIBot | a6f54639c256a3c86a9aa7c3fc094e69ce96a1b8 | [
"Apache-2.0"
] | null | null | null | csuibot/utils/kbbi.py | chadmadna/CSUIBot | a6f54639c256a3c86a9aa7c3fc094e69ce96a1b8 | [
"Apache-2.0"
] | null | null | null | import requests
import json
| 28.352941 | 89 | 0.580913 |
0ba1dc47fec515daa7ce78ab6cbd344fd812af6f | 113,179 | py | Python | networking_vsphere/tests/unit/agent/test_ovsvapp_agent.py | Mirantis/vmware-dvs | 37b874f9bf40b47d0de231c640367275fb3afb9b | [
"Apache-2.0"
] | 8 | 2015-04-23T15:36:56.000Z | 2019-03-06T13:23:28.000Z | networking_vsphere/tests/unit/agent/test_ovsvapp_agent.py | Mirantis/vmware-dvs | 37b874f9bf40b47d0de231c640367275fb3afb9b | [
"Apache-2.0"
] | 1 | 2016-10-04T13:24:50.000Z | 2016-10-04T13:24:50.000Z | networking_vsphere/tests/unit/agent/test_ovsvapp_agent.py | Mirantis/vmware-dvs | 37b874f9bf40b47d0de231c640367275fb3afb9b | [
"Apache-2.0"
] | 19 | 2015-09-15T13:25:01.000Z | 2019-09-03T08:23:21.000Z | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import time
import logging
from oslo_config import cfg
from networking_vsphere.agent import ovsvapp_agent
from networking_vsphere.common import constants as ovsvapp_const
from networking_vsphere.common import error
from networking_vsphere.tests import base
from networking_vsphere.tests.unit.drivers import fake_manager
from networking_vsphere.utils import resource_util
from neutron.agent.common import ovs_lib
from neutron.common import utils as n_utils
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as p_utils
from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent as ovs_agent # noqa
from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager
NETWORK_ID = 'fake_net_id'
VNIC_ADDED = 'VNIC_ADDED'
FAKE_DEVICE_ID = 'fake_device_id'
FAKE_VM = 'fake_vm'
FAKE_HOST_1 = 'fake_host_1'
FAKE_HOST_2 = 'fake_host_2'
FAKE_CLUSTER_MOID = 'fake_cluster_moid'
FAKE_CLUSTER_1 = 'fake_cluster_1'
FAKE_CLUSTER_2 = 'fake_cluster_2'
FAKE_VCENTER = 'fake_vcenter'
FAKE_PORT_1 = 'fake_port_1'
FAKE_PORT_2 = 'fake_port_2'
FAKE_PORT_3 = 'fake_port_3'
FAKE_PORT_4 = 'fake_port_4'
MAC_ADDRESS = '01:02:03:04:05:06'
FAKE_CONTEXT = 'fake_context'
FAKE_SG = {'fake_sg': 'fake_sg_rule'}
FAKE_SG_RULE = {'security_group_source_groups': ['fake_rule_1',
'fake_rule_2',
'fake_rule_3'],
'security_group_rules': [
{'ethertype': 'IPv4',
'direction': 'egress',
'security_group_id': 'fake_id'
}],
'sg_provider_rules': [
{'ethertype': 'IPv4',
'direction': 'egress',
'source_port_range_min': 67,
'source_port_range_max': 67,
'port_range_min': 68,
'port_range_max': 68
}]
}
FAKE_SG_RULES = {FAKE_PORT_1: FAKE_SG_RULE}
FAKE_SG_RULES_MULTI_PORTS = {FAKE_PORT_1: FAKE_SG_RULE,
FAKE_PORT_2: FAKE_SG_RULE
}
FAKE_SG_RULES_MISSING = {FAKE_PORT_1: {'security_group_source_groups': [
'fake_rule_1',
'fake_rule_2',
'fake_rule_3'],
'sg_provider_rules': [],
'security_group_rules': [
{'ethertype': 'IPv4',
'direction': 'egress'
}]
}
}
FAKE_SG_RULES_PARTIAL = {FAKE_PORT_1: {'security_group_source_groups': [
'fake_rule_1',
'fake_rule_2',
'fake_rule_3'],
'sg_provider_rules': [],
'security_group_rules': [
{'ethertype': 'IPv4',
'direction': 'egress',
'port_range_min': 22,
'port_range_max': 22
}]
}
}
DEVICE = {'id': FAKE_DEVICE_ID,
'cluster_id': FAKE_CLUSTER_1,
'host': FAKE_HOST_1,
'vcenter': FAKE_VCENTER}
    def test_update_port_bindings(self):
        """A fully successful RPC update must empty ports_to_bind and log
        no exception."""
        self.agent.ports_to_bind.add("fake_port")
        with mock.patch.object(self.agent.ovsvapp_rpc,
                               "update_ports_binding",
                               return_value=set(["fake_port"])
                               ) as mock_update_ports_binding, \
                mock.patch.object(self.LOG, 'exception'
                                  ) as mock_log_exception:
            self.agent._update_port_bindings()
            self.assertTrue(mock_update_ports_binding.called)
            self.assertFalse(self.agent.ports_to_bind)
            self.assertFalse(mock_log_exception.called)
    def test_update_port_bindings_rpc_exception(self):
        """An RPC failure must raise OVSvAppNeutronAgentError, log the
        exception and leave the pending port queued for retry."""
        self.agent.ports_to_bind.add("fake_port")
        with mock.patch.object(self.agent.ovsvapp_rpc,
                               "update_ports_binding",
                               side_effect=Exception()
                               ) as mock_update_port_binding, \
                mock.patch.object(self.LOG, 'exception'
                                  ) as mock_log_exception:
            self.assertRaises(
                error.OVSvAppNeutronAgentError,
                self.agent._update_port_bindings)
            self.assertTrue(mock_update_port_binding.called)
            self.assertTrue(mock_log_exception.called)
            self.assertEqual(set(['fake_port']),
                             self.agent.ports_to_bind)
    def test_update_port_bindings_partial(self):
        """When the RPC confirms only some ports, the unconfirmed ones must
        remain in ports_to_bind."""
        self.agent.ports_to_bind.add("fake_port1")
        self.agent.ports_to_bind.add("fake_port2")
        self.agent.ports_to_bind.add("fake_port3")
        with mock.patch.object(self.agent.ovsvapp_rpc,
                               "update_ports_binding",
                               return_value=set(["fake_port1",
                                                 "fake_port2"])
                               ) as mock_update_port_binding, \
                mock.patch.object(self.LOG, 'exception'):
            self.agent._update_port_bindings()
            self.assertTrue(mock_update_port_binding.called)
            self.assertEqual(set(["fake_port3"]),
                             self.agent.ports_to_bind)
    def test_setup_ovs_bridges_vlan(self):
        """In VLAN tenant-network mode, setup_ovs_bridges must build the
        physical bridges and install the base OVS flows for the mapping."""
        cfg.CONF.set_override('tenant_network_types',
                              "vlan", 'OVSVAPP')
        cfg.CONF.set_override('bridge_mappings',
                              ["physnet1:br-eth1"], 'OVSVAPP')
        with mock.patch.object(self.agent, 'setup_physical_bridges'
                               ) as mock_phys_brs, \
                mock.patch.object(self.agent, '_init_ovs_flows'
                                  ) as mock_init_ovs_flows:
            self.agent.setup_ovs_bridges()
            mock_phys_brs.assert_called_with(self.agent.bridge_mappings)
            mock_init_ovs_flows.assert_called_with(self.agent.bridge_mappings)
| 52.54364 | 96 | 0.594227 |
0ba2dfd95ee79027d8c63a0c75d4bd279b8d3f02 | 30 | py | Python | yolov3/utils/__init__.py | hysts/pytorch_yolov3 | 6d4c7a1e42d366894effac8ca52f7116f891b5ab | [
"MIT"
] | 13 | 2019-03-22T15:22:22.000Z | 2021-09-30T21:15:37.000Z | yolov3/utils/__init__.py | hysts/pytorch_yolov3 | 6d4c7a1e42d366894effac8ca52f7116f891b5ab | [
"MIT"
] | null | null | null | yolov3/utils/__init__.py | hysts/pytorch_yolov3 | 6d4c7a1e42d366894effac8ca52f7116f891b5ab | [
"MIT"
] | null | null | null | from yolov3.utils import data
| 15 | 29 | 0.833333 |
0ba3c4d7d4d48cd32673696a0d4ce0dedcefcaca | 21,354 | py | Python | pootlestuff/watchables.py | pootle/pootles_utils | bb47103e71ccc4fa01269259b73ca1932184af84 | [
"UPL-1.0"
] | null | null | null | pootlestuff/watchables.py | pootle/pootles_utils | bb47103e71ccc4fa01269259b73ca1932184af84 | [
"UPL-1.0"
] | null | null | null | pootlestuff/watchables.py | pootle/pootles_utils | bb47103e71ccc4fa01269259b73ca1932184af84 | [
"UPL-1.0"
] | null | null | null | """
This module provides classes that support observers, smart value handling and debug functions
All changes to values nominate an agent, and observers nominate the agent making changes they
are interested in.
It supersedes the pvars module
"""
import logging, sys, threading, pathlib, math, json
from enum import Enum, auto as enumauto, Flag
def loadsettings(value):
    """Resolve application settings from *value*.

    *value* may be:
      * a str        - path to a JSON settings file (``~`` is expanded)
      * a mapping    - anything with ``keys``; used as-is
      * None         - no settings supplied

    Returns a 3-tuple ``(settings, message, filepath)``: *settings* is the
    loaded mapping (empty dict on failure), *message* describes what
    happened, and *filepath* is the ``pathlib.Path`` tried for a str
    *value*, else ``None``.
    """
    if isinstance(value, str):
        spath = pathlib.Path(value).expanduser()
        if not spath.is_file():
            return {}, 'app settings file %s not found - default values used' % str(spath), spath
        try:
            with spath.open('r') as spo:
                startsettings = json.load(spo)
        except (OSError, ValueError):
            # unreadable file or invalid JSON (JSONDecodeError is a
            # ValueError subclass) - fall back to defaults
            return {}, 'failed to load settings from %s - default values used' % spath, spath
        return startsettings, 'app settings loaded from file %s' % spath, spath
    elif hasattr(value, 'keys'):
        return value, 'using settings from passed object', None
    elif value is None:
        return {}, 'settings not specified, default values used', None
    else:
        # Bug fix: the original referenced the undefined name ``values``
        # here, raising NameError for any unsupported argument type.
        return {}, 'settings not processed from passed %s' % type(value).__name__, None
| 38.475676 | 177 | 0.594924 |
0ba40eb83c69821a416e50be4bddb8886aa2cb30 | 578 | py | Python | tests/test_codecs.py | reece/et | 41977444a95ac8b8af7a73706f1e18634914d37f | [
"MIT"
] | null | null | null | tests/test_codecs.py | reece/et | 41977444a95ac8b8af7a73706f1e18634914d37f | [
"MIT"
] | null | null | null | tests/test_codecs.py | reece/et | 41977444a95ac8b8af7a73706f1e18634914d37f | [
"MIT"
] | null | null | null | import et.codecs
tests = [
{
"data": 0,
"e_data": {
1: b'\x00\x010',
2: b'\x00\x02x\x9c3\x00\x00\x001\x001'
}
},
{
"data": {},
"e_data": {
1: b'\x00\x01{}',
2: b'\x00\x02x\x9c\xab\xae\x05\x00\x01u\x00\xf9'
}
},
]
| 19.931034 | 79 | 0.448097 |
0ba563c5f1a8e8092fcd4bece03a89610c759cd4 | 2,801 | py | Python | tests/test_sa.py | mariushelf/sa2django | 936b0a70b0ccc8faf3ca26ff241b0b6dac13f204 | [
"MIT"
] | null | null | null | tests/test_sa.py | mariushelf/sa2django | 936b0a70b0ccc8faf3ca26ff241b0b6dac13f204 | [
"MIT"
] | null | null | null | tests/test_sa.py | mariushelf/sa2django | 936b0a70b0ccc8faf3ca26ff241b0b6dac13f204 | [
"MIT"
] | null | null | null | import sqlite3
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import tests.testsite.testapp.models as dm
from tests.sa_models import Base, Car, Child, Dog, Parent
def test_data(mock_data_session):
    """The seeded fixture session holds exactly two Parent and two Child rows."""
    for model in (Parent, Child):
        assert len(mock_data_session.query(model).all()) == 2
| 27.194175 | 71 | 0.687612 |
0ba58112cd9b83adb66bbb157c35557326dccf99 | 10,409 | py | Python | src/github4/session.py | staticdev/github3.py | b9af598dcf1771c083dcc512a2aa8e5008bf4ea8 | [
"MIT"
] | null | null | null | src/github4/session.py | staticdev/github3.py | b9af598dcf1771c083dcc512a2aa8e5008bf4ea8 | [
"MIT"
] | 32 | 2021-02-17T19:46:21.000Z | 2021-05-12T05:56:03.000Z | src/github4/session.py | staticdev/github3.py | b9af598dcf1771c083dcc512a2aa8e5008bf4ea8 | [
"MIT"
] | null | null | null | """Module containing session and auth logic."""
import collections.abc as abc_collections
import datetime
from contextlib import contextmanager
from logging import getLogger
import dateutil.parser
import requests
from . import __version__
from . import exceptions as exc
__url_cache__ = {}
__logs__ = getLogger(__package__)
def requires_2fa(response):
    """Determine whether a response requires us to prompt the user for 2FA."""
    # GitHub signals a pending two-factor challenge with a 401 whose
    # X-GitHub-OTP header contains the word "required".
    if response.status_code != 401:
        return False
    if "X-GitHub-OTP" not in response.headers:
        return False
    return "required" in response.headers["X-GitHub-OTP"]
def _utcnow():
    """Return the current time as a timezone-aware datetime in UTC."""
    # NOTE(review): uses dateutil.tz although only dateutil.parser is imported
    # at module top -- importing the parser pulls in dateutil.tz in current
    # dateutil releases, but confirm on upgrades.
    return datetime.datetime.now(dateutil.tz.UTC)
| 33.149682 | 87 | 0.639639 |
0ba6ccc9869c36c54441983043be28e4255463c3 | 3,046 | py | Python | models/ffn_ace.py | MilesQLi/Theano-Lights | 59864f4a1b089c04ff0403a6036ee052078fcd7d | [
"MIT"
] | 313 | 2015-03-23T15:19:58.000Z | 2021-05-17T15:40:09.000Z | models/ffn_ace.py | MilesQLi/Theano-Lights | 59864f4a1b089c04ff0403a6036ee052078fcd7d | [
"MIT"
] | 2 | 2015-08-31T06:35:31.000Z | 2016-04-04T11:55:43.000Z | models/ffn_ace.py | Ivaylo-Popov/Theano-Lights | 3c9de807e42e3875b1e3f4c1e8d02ad1242ddc94 | [
"MIT"
] | 68 | 2015-05-16T03:26:17.000Z | 2018-08-19T08:40:18.000Z | import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv import conv2d
from theano.tensor.signal.downsample import max_pool_2d
from theano.tensor.shared_randomstreams import RandomStreams
import numpy as np
from toolbox import *
from modelbase import *
import itertools
| 34.613636 | 97 | 0.591924 |
0ba8551d5076fefbf31d48a58d0338bdc2763c0b | 181 | py | Python | src/misc/helpers.py | dnmarkon/kaggle_elo_merchant | 92ac552d72234455455c95f53e6091017f048504 | [
"MIT"
] | null | null | null | src/misc/helpers.py | dnmarkon/kaggle_elo_merchant | 92ac552d72234455455c95f53e6091017f048504 | [
"MIT"
] | null | null | null | src/misc/helpers.py | dnmarkon/kaggle_elo_merchant | 92ac552d72234455455c95f53e6091017f048504 | [
"MIT"
] | null | null | null | import pandas as pd
| 22.625 | 75 | 0.662983 |
0ba8bac551a05bebe5ab8cdbe7162fe74234100b | 1,019 | py | Python | 1306_Jump_Game_III.py | imguozr/LC-Solutions | 5e5e7098d2310c972314c9c9895aafd048047fe6 | [
"WTFPL"
] | null | null | null | 1306_Jump_Game_III.py | imguozr/LC-Solutions | 5e5e7098d2310c972314c9c9895aafd048047fe6 | [
"WTFPL"
] | null | null | null | 1306_Jump_Game_III.py | imguozr/LC-Solutions | 5e5e7098d2310c972314c9c9895aafd048047fe6 | [
"WTFPL"
] | null | null | null | from typing import List
| 24.261905 | 67 | 0.459274 |
0baa80eb9ba40c6d66b4f05785427f91545460d9 | 81 | py | Python | Extensions/BabaGUI/config.py | siva-msft/baba-is-auto | 3237b5b70167130558827979bde7dcee14ef39f3 | [
"MIT"
] | 108 | 2019-09-11T06:31:35.000Z | 2022-03-28T13:02:56.000Z | Extensions/BabaGUI/config.py | siva-msft/baba-is-auto | 3237b5b70167130558827979bde7dcee14ef39f3 | [
"MIT"
] | 29 | 2019-09-12T00:28:04.000Z | 2022-02-20T14:56:27.000Z | Extensions/BabaGUI/config.py | utilForever/baba-is-auto | 11937742e25b37b1e27de87fe74d10f09062d6ce | [
"MIT"
] | 14 | 2020-02-24T05:41:43.000Z | 2022-03-28T12:43:34.000Z | import pygame
# Frame-rate cap for the game loop (name suggests frames per second).
FPS = 60
# Edge length of one grid tile -- presumably pixels; confirm against sprites.
BLOCK_SIZE = 48
# Window clear colour (black).
COLOR_BACKGROUND = pygame.Color(0, 0, 0)
| 13.5 | 40 | 0.728395 |
0bae78fa6080de85a0feb221980172f577f30cf7 | 42,674 | py | Python | packstack/plugins/neutron_350.py | melroyr/havana-packstack | 72cdb0e5e29df4cccb81844ec8b365dfededf4f7 | [
"Apache-2.0"
] | null | null | null | packstack/plugins/neutron_350.py | melroyr/havana-packstack | 72cdb0e5e29df4cccb81844ec8b365dfededf4f7 | [
"Apache-2.0"
] | null | null | null | packstack/plugins/neutron_350.py | melroyr/havana-packstack | 72cdb0e5e29df4cccb81844ec8b365dfededf4f7 | [
"Apache-2.0"
] | null | null | null | """
Installs and configures neutron
"""
import logging
import os
import re
import uuid
from packstack.installer import utils
from packstack.installer import validators
from packstack.installer.utils import split_hosts
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-NEUTRON"
logging.debug("plugin %s loaded", __name__)
| 49.334104 | 200 | 0.552819 |
0baeb09b96866048e3277bdd11b177c6f437a60e | 1,217 | py | Python | 01-Exercicios/Aula001/Ex2.py | AmandaRH07/Python_Entra21 | 4084962508f1597c0498d8b329e0f45e2ac55302 | [
"MIT"
] | null | null | null | 01-Exercicios/Aula001/Ex2.py | AmandaRH07/Python_Entra21 | 4084962508f1597c0498d8b329e0f45e2ac55302 | [
"MIT"
] | null | null | null | 01-Exercicios/Aula001/Ex2.py | AmandaRH07/Python_Entra21 | 4084962508f1597c0498d8b329e0f45e2ac55302 | [
"MIT"
] | null | null | null | #--- Exerccio 2 - Variveis
#--- Crie um menu para um sistema de cadastro de funcionrios
#--- O menu deve ser impresso com a funo format()
#--- As opes devem ser variveis do tipo inteiro
#--- As descries das opes sero:
#--- Cadastrar funcionrio
#--- Listar funcionrios
#--- Editar funcionrio
#--- Deletar funcionrio
#--- Sair
#--- Alm das opes o menu deve conter um cabealho e um rodap
#--- Entre o cabealho e o menu e entre o menu e o rodap dever ter espaamento de 3 linhas
#--- Deve ser utilizado os caracteres especiais de quebra de linha e de tabulao
# Print the menu (option numbers substituted via format()) and read the
# user's integer choice.
# NOTE(review): accented characters in the strings below were corrupted by an
# encoding round-trip (e.g. "opo" was likely "opção") -- restore them from the
# original file rather than guessing; they are runtime output, so they are
# left byte-identical here.
opcao = int(input("""
SISTEMA DE CADASTRO DE FUNCIONARIO\n\n\n
{} - Cadastrar Funcionrio
{} - Listar Funcinrios
{} - Editar Funcionrio
{} - Deletar Funcionrio
{} - Sair\n\n\n
Escolha uma opo: """.format(1,2,3,4,5)))
# Echo which menu entry was selected.
if opcao == 1:
    print("A opo escolhida foi 'Cadastrar funcionrio'")
elif opcao == 2:
    print("A opo escolhida foi 'Listar funcionrios'")
elif opcao == 3:
    print("A opo escolhida foi 'Editar funcionrio'")
elif opcao == 4:
    print("A opo escolhida foi 'Deletar funcionrio'")
elif opcao == 5:
    print("A opo escolhida foi 'Sair'")
else:
    # Any other number is silently ignored.
    pass
| 32.891892 | 92 | 0.676253 |
0baf09cfe2bff0d7d8fbadcb0dcd9e76b3f75e76 | 284 | py | Python | venv/lib/python3.8/site-packages/crispy_forms/templates/uni_form/uni_form.html.py | Solurix/Flashcards-Django | 03c863f6722936093927785a2b20b6b668bb743d | [
"MIT"
] | 1 | 2021-05-16T03:20:23.000Z | 2021-05-16T03:20:23.000Z | venv/lib/python3.8/site-packages/crispy_forms/templates/uni_form/uni_form.html.py | Solurix/Flashcards-Django | 03c863f6722936093927785a2b20b6b668bb743d | [
"MIT"
] | 4 | 2021-03-30T14:06:09.000Z | 2021-09-22T19:26:31.000Z | venv/lib/python3.8/site-packages/crispy_forms/templates/uni_form/uni_form.html.py | Solurix/Flashcards-Django | 03c863f6722936093927785a2b20b6b668bb743d | [
"MIT"
] | null | null | null | BB BBBBBBBBBBBBBBBBBB
BB BBBBBBBBBBBBBBBB
BBBBBBB BBBBBBBBBBBBBBBBBBBBBB
BBBBB
BB BBBBBBBBBB BB BB BB BBBBBBBBBB
XXXXXXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXX
BBBBB
BBB BBBBB BB BBBB
BBBBBBB BBBBBBBBBBBBBB
BBBBBB
BB BBBBBBBBBB BB BB BB BBBBBBBBBB
XXXXXXXXXXX
BBBBB
| 15.777778 | 34 | 0.792254 |
0bb224e01ebd658b05fd1ae3164a24c7e6a95713 | 1,137 | py | Python | baybars/timber.py | dkanarek12/baybars | 72f4cff706c11d25ce537cf0fed61bc3ef89da30 | [
"Apache-2.0"
] | 9 | 2018-10-16T19:20:35.000Z | 2020-06-02T13:27:29.000Z | baybars/timber.py | dkanarek12/baybars | 72f4cff706c11d25ce537cf0fed61bc3ef89da30 | [
"Apache-2.0"
] | 10 | 2018-07-29T08:56:18.000Z | 2019-03-21T18:31:15.000Z | baybars/timber.py | dkanarek12/baybars | 72f4cff706c11d25ce537cf0fed61bc3ef89da30 | [
"Apache-2.0"
] | 9 | 2018-07-29T08:59:53.000Z | 2019-12-31T07:50:57.000Z | # Copyright 2018 Jet.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
| 33.441176 | 94 | 0.769569 |
0bb4673a2136b7bf006e51e515e0e3d35ea020dd | 417 | py | Python | nlu_hyperopt/space.py | JulianGerhard21/nlu-hyperopt | 3d16fda97fa7cf1337b19395a57780e6e2dc9bd3 | [
"Apache-2.0"
] | null | null | null | nlu_hyperopt/space.py | JulianGerhard21/nlu-hyperopt | 3d16fda97fa7cf1337b19395a57780e6e2dc9bd3 | [
"Apache-2.0"
] | null | null | null | nlu_hyperopt/space.py | JulianGerhard21/nlu-hyperopt | 3d16fda97fa7cf1337b19395a57780e6e2dc9bd3 | [
"Apache-2.0"
] | 1 | 2021-07-08T11:40:27.000Z | 2021-07-08T11:40:27.000Z | from hyperopt import hp
# Define the search space here, e.g.
# from hyperopt.pyll.base import scope
# search_space = {
# 'epochs': hp.qloguniform('epochs', 0, 4, 2),
# 'max_df': hp.uniform('max_df', 1, 2),
# 'max_ngrams': scope.int(hp.quniform('max_ngram', 3, 9, 1))
# }
# Default search space: Try different numbers of training epochs.
search_space = {"epochs": hp.qloguniform("epochs", 0, 4, 2)}
| 29.785714 | 65 | 0.654676 |
0bb51dc78ddd2967ca706bd880e3869f1feac056 | 4,633 | py | Python | lib/taurus/qt/qtgui/panel/report/basicreport.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | 1 | 2016-10-19T13:54:08.000Z | 2016-10-19T13:54:08.000Z | lib/taurus/qt/qtgui/panel/report/basicreport.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | 27 | 2016-05-25T08:56:58.000Z | 2019-01-21T09:18:08.000Z | lib/taurus/qt/qtgui/panel/report/basicreport.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | 8 | 2015-07-24T09:16:50.000Z | 2018-06-12T12:33:59.000Z | #!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
"""This module provides a panel to display taurus messages"""
__all__ = ["ClipboardReportHandler", "SMTPReportHandler"]
__docformat__ = 'restructuredtext'
from taurus.core.util.report import TaurusMessageReportHandler
from taurus.external.qt import Qt
from taurus.qt.qtgui.util.ui import UILoadable
def main():
app = Qt.QApplication([])
w = SendMailDialog()
w.exec_()
if __name__ == "__main__":
main()
| 29.890323 | 77 | 0.618821 |
0bb56b74527c4ab3380dff7d3851c648cd78de0c | 347 | py | Python | src/workflows/__init__.py | stufisher/python-workflows | f1f67bb56a0f8a6820762f68e2e59ade2da60a95 | [
"BSD-3-Clause"
] | null | null | null | src/workflows/__init__.py | stufisher/python-workflows | f1f67bb56a0f8a6820762f68e2e59ade2da60a95 | [
"BSD-3-Clause"
] | null | null | null | src/workflows/__init__.py | stufisher/python-workflows | f1f67bb56a0f8a6820762f68e2e59ade2da60a95 | [
"BSD-3-Clause"
] | null | null | null | __version__ = "2.18"
def version():
"""Returns the version number of the installed workflows package."""
return __version__
| 23.133333 | 79 | 0.723343 |
0bb5af4cb0e1469e03fc6ee0d14c4d8bfb035eff | 18,592 | py | Python | autoarray/structures/grids/grid_decorators.py | jonathanfrawley/PyAutoArray_copy | c21e8859bdb20737352147b9904797ac99985b73 | [
"MIT"
] | null | null | null | autoarray/structures/grids/grid_decorators.py | jonathanfrawley/PyAutoArray_copy | c21e8859bdb20737352147b9904797ac99985b73 | [
"MIT"
] | null | null | null | autoarray/structures/grids/grid_decorators.py | jonathanfrawley/PyAutoArray_copy | c21e8859bdb20737352147b9904797ac99985b73 | [
"MIT"
] | null | null | null | import numpy as np
from functools import wraps
from autoconf import conf
from autoarray.structures.grids.one_d import abstract_grid_1d
from autoarray.structures.grids.two_d import grid_2d
from autoarray.structures.grids.two_d import grid_2d_interpolate
from autoarray.structures.grids.two_d import grid_2d_iterate
from autoarray.structures.grids.two_d import grid_2d_irregular
from autoarray.structures.arrays.one_d import array_1d
from autoarray.structures.arrays import values
from autoarray import exc
from typing import Union
def grid_1d_to_structure(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates that return the results
as a NumPy array.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
A function that can except cartesian or transformed coordinates
"""
return wrapper
def grid_1d_output_structure(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates that return the results
as a NumPy array.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
A function that can except cartesian or transformed coordinates
"""
return wrapper
def grid_2d_to_structure(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates that return the results
as a NumPy array.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
A function that can except cartesian or transformed coordinates
"""
return wrapper
def grid_2d_to_structure_list(func):
"""
Homogenize the inputs and outputs of functions that take 2D grids of (y,x) coordinates and return the results as
a list of NumPy arrays.
Parameters
----------
func : (obj, grid, *args, **kwargs) -> Object
A function which computes a set of values from a 2D grid of (y,x) coordinates.
Returns
-------
A function that can except cartesian or transformed coordinates
"""
return wrapper
def transform(func):
"""Checks whether the input Grid2D of (y,x) coordinates have previously been transformed. If they have not \
been transformed then they are transformed.
Parameters
----------
func : (profile, grid *args, **kwargs) -> Object
A function where the input grid is the grid whose coordinates are transformed.
Returns
-------
A function that can except cartesian or transformed coordinates
"""
return wrapper
def relocate_to_radial_minimum(func):
""" Checks whether any coordinates in the grid are radially near (0.0, 0.0), which can lead to numerical faults in \
the evaluation of a function (e.g. numerical integration reaching a singularity at (0.0, 0.0)). If any coordinates
are radially within the the radial minimum threshold, their (y,x) coordinates are shifted to that value to ensure
they are evaluated at that coordinate.
The value the (y,x) coordinates are rounded to is set in the 'radial_min.ini' config.
Parameters
----------
func : (profile, *args, **kwargs) -> Object
A function that takes a grid of coordinates which may have a singularity as (0.0, 0.0)
Returns
-------
A function that can except cartesian or transformed coordinates
"""
return wrapper
| 42.254545 | 122 | 0.64119 |
0bb89e9bc4b11618566c516b525db418c9d0a1b7 | 742 | py | Python | 079_039_189/ngram_2/get_10_summary.py | Aditya-AS/Question-Answering-System | 22c3fe549c03a3b5ba1f86befef3c9f91278d3fc | [
"MIT"
] | null | null | null | 079_039_189/ngram_2/get_10_summary.py | Aditya-AS/Question-Answering-System | 22c3fe549c03a3b5ba1f86befef3c9f91278d3fc | [
"MIT"
] | null | null | null | 079_039_189/ngram_2/get_10_summary.py | Aditya-AS/Question-Answering-System | 22c3fe549c03a3b5ba1f86befef3c9f91278d3fc | [
"MIT"
] | null | null | null | """
Sanjay Reddy S-2013A7PS189P
Aditya Sarma -2013A7PS079P
Vamsi T -2013A7PS039P
Artificial Intelligence Term Project
"""
import pickle
import BeautifulSoup
import re
import boto
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from google import search
def get_10_summary(query, source="google"):
"""
This function returns the first ten (or less, if 10 are not present) summaries when the query (a string) is run on the source (here google).
The return type is a beautifulSoup module's object and is similar to a list
"""
result = search(query) #calls query on google
#print "---------------------------" + str(type(results)) + "---------------------------"
return result
| 25.586207 | 144 | 0.669811 |