max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
tests/test_cards.py | zergov/flashcards | 21 | 12764051 | <gh_stars>10-100
import unittest
from flashcards import cards
from flashcards.cards import StudyCard
class TestModulefunctions(unittest.TestCase):
    """Tests for the module-level helpers in flashcards.cards."""

    def test_create_card_from_dict(self):
        """create_from_dict maps the 'question'/'answer' keys onto card fields."""
        payload = {'question': '2+2=?', 'answer': '4'}
        card = cards.create_from_dict(payload)
        self.assertEqual('2+2=?', card.question)
        self.assertEqual('4', card.answer)
class TestQuestionCards(unittest.TestCase):
    """Behavioral tests for the StudyCard class."""

    def setUp(self):
        # Fresh card for every test.
        self.card = StudyCard('what is PI ?', '3.14159265359')

    def test_Card_class_exists(self):
        """StudyCard can be constructed from a question/answer pair."""
        self.assertIsNotNone(StudyCard('meaning of life?', '42'))

    def test_card_get_question(self):
        self.assertEqual('what is PI ?', self.card.question)

    def test_card_get_answer(self):
        self.assertEqual('3.14159265359', self.card.answer)

    def test_card_set_question(self):
        new_question = 'What is e ?'
        self.card.question = new_question
        self.assertEqual(new_question, self.card.question)

    def test_card_set_question_error(self):
        """Assigning a non-string question must raise TypeError.

        BUG FIX: the original passed ``self.card.question`` (a string) and the
        bad value as the only two arguments to ``setattr``, which always raises
        ``TypeError: setattr expected 3 arguments`` — the test passed without
        ever exercising StudyCard. ``setattr`` needs (object, name, value).
        """
        bad_question = 4123451
        self.assertRaises(TypeError, setattr, self.card, 'question', bad_question)

    def test_card_set_answer(self):
        new_answer = '2.71828'
        self.card.answer = new_answer
        self.assertEqual(new_answer, self.card.answer)

    def test_card_set_answer_error(self):
        """Assigning a non-string answer must raise TypeError (same setattr fix)."""
        bad_answer = 2.71828
        self.assertRaises(TypeError, setattr, self.card, 'answer', bad_answer)

    def test_to_dict(self):
        """to_dict serializes the card into a plain question/answer dict."""
        data = self.card.to_dict()
        expected = {'question': 'what is PI ?', 'answer': '3.14159265359'}
        self.assertEqual(expected, data)
| 3.1875 | 3 |
venv/lib/python3.7/site-packages/Xlib/keysymdef/greek.py | umr-bot/sliding-puzzle-solver-bot | 10 | 12764052 | <gh_stars>1-10
# X11 keysym constants for the Greek keyboard group (byte values 0x7a1-0x7f9,
# plus the group-switch key). Names mirror the XK_* macros from X11's
# keysymdef.h; do not change the values — they are part of the X protocol.

# Accented capital letters and related symbols (note: 0x7a6 and 0x7aa are
# unassigned in the X11 table, hence the gaps).
XK_Greek_ALPHAaccent = 0x7a1
XK_Greek_EPSILONaccent = 0x7a2
XK_Greek_ETAaccent = 0x7a3
XK_Greek_IOTAaccent = 0x7a4
XK_Greek_IOTAdiaeresis = 0x7a5
XK_Greek_OMICRONaccent = 0x7a7
XK_Greek_UPSILONaccent = 0x7a8
XK_Greek_UPSILONdieresis = 0x7a9
XK_Greek_OMEGAaccent = 0x7ab
XK_Greek_accentdieresis = 0x7ae
XK_Greek_horizbar = 0x7af
# Accented lowercase letters.
XK_Greek_alphaaccent = 0x7b1
XK_Greek_epsilonaccent = 0x7b2
XK_Greek_etaaccent = 0x7b3
XK_Greek_iotaaccent = 0x7b4
XK_Greek_iotadieresis = 0x7b5
XK_Greek_iotaaccentdieresis = 0x7b6
XK_Greek_omicronaccent = 0x7b7
XK_Greek_upsilonaccent = 0x7b8
XK_Greek_upsilondieresis = 0x7b9
XK_Greek_upsilonaccentdieresis = 0x7ba
XK_Greek_omegaaccent = 0x7bb
# Plain capital letters (0x7c1-0x7d9).
XK_Greek_ALPHA = 0x7c1
XK_Greek_BETA = 0x7c2
XK_Greek_GAMMA = 0x7c3
XK_Greek_DELTA = 0x7c4
XK_Greek_EPSILON = 0x7c5
XK_Greek_ZETA = 0x7c6
XK_Greek_ETA = 0x7c7
XK_Greek_THETA = 0x7c8
XK_Greek_IOTA = 0x7c9
XK_Greek_KAPPA = 0x7ca
# LAMDA/LAMBDA are deliberate aliases for the same keysym (both spellings
# exist in X11's keysymdef.h).
XK_Greek_LAMDA = 0x7cb
XK_Greek_LAMBDA = 0x7cb
XK_Greek_MU = 0x7cc
XK_Greek_NU = 0x7cd
XK_Greek_XI = 0x7ce
XK_Greek_OMICRON = 0x7cf
XK_Greek_PI = 0x7d0
XK_Greek_RHO = 0x7d1
XK_Greek_SIGMA = 0x7d2
XK_Greek_TAU = 0x7d4
XK_Greek_UPSILON = 0x7d5
XK_Greek_PHI = 0x7d6
XK_Greek_CHI = 0x7d7
XK_Greek_PSI = 0x7d8
XK_Greek_OMEGA = 0x7d9
# Plain lowercase letters (0x7e1-0x7f9).
XK_Greek_alpha = 0x7e1
XK_Greek_beta = 0x7e2
XK_Greek_gamma = 0x7e3
XK_Greek_delta = 0x7e4
XK_Greek_epsilon = 0x7e5
XK_Greek_zeta = 0x7e6
XK_Greek_eta = 0x7e7
XK_Greek_theta = 0x7e8
XK_Greek_iota = 0x7e9
XK_Greek_kappa = 0x7ea
# lamda/lambda aliases, as above.
XK_Greek_lamda = 0x7eb
XK_Greek_lambda = 0x7eb
XK_Greek_mu = 0x7ec
XK_Greek_nu = 0x7ed
XK_Greek_xi = 0x7ee
XK_Greek_omicron = 0x7ef
XK_Greek_pi = 0x7f0
XK_Greek_rho = 0x7f1
XK_Greek_sigma = 0x7f2
XK_Greek_finalsmallsigma = 0x7f3
XK_Greek_tau = 0x7f4
XK_Greek_upsilon = 0x7f5
XK_Greek_phi = 0x7f6
XK_Greek_chi = 0x7f7
XK_Greek_psi = 0x7f8
XK_Greek_omega = 0x7f9
# Keyboard group-switch key for the Greek layout.
XK_Greek_switch = 0xFF7E
| 1.078125 | 1 |
test_collision/test_bodies.py | Klumhru/boost-python-bullet | 2 | 12764053 | <reponame>Klumhru/boost-python-bullet
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_dynamics.test_bodies
"""
from __future__ import unicode_literals, print_function, absolute_import
| 1.015625 | 1 |
prebids/databasing/psydb.py | jokedurnez/Psychosis | 0 | 12764054 | <reponame>jokedurnez/Psychosis<filename>prebids/databasing/psydb.py
'''
File name: psydb.py
Author: <NAME>
Date created: 10/31/2017
Date last modified: 10/31/2017
Python Version: 2.7
Description: Script to clean up NIDB
Project: Psychosis
'''
from distutils.dir_util import copy_tree
from collections import Counter
from datetime import datetime
import pandas as pd
import numpy as np
import collections
import shutil
import json
import csv
import os
# Load the NIDB exclusion rules (typo corrections and subjects to drop) from
# the JSON file pointed to by the EXCLUSION_NIDB environment variable.
# NOTE(review): if EXCLUSION_NIDB is unset, os.environ.get returns None and
# open() fails with a TypeError at import time — confirm the variable is
# always set before this module is imported.
with open(os.environ.get("EXCLUSION_NIDB"), 'r') as fl:
    rules = json.load(fl)
def remove_old(DB):
    """Drop rows recorded before 2015 (subjects from the earlier study)."""
    print(" ...removing subjects from before 2015...")
    # Timestamps in ' study type' carry a leading space, hence the format.
    recent_rows = [
        idx
        for idx, stamp in enumerate(DB[' study type'])
        if datetime.strptime(stamp, ' %Y-%m-%d %H:%M').year >= 2015
    ]
    return DB.loc[recent_rows]
def unique_subs(DB):
    """Reduce the database to one row per subject (IDs, study date, sex)."""
    print(" ...extracting subjects...")
    # Column names in the raw NIDB export carry a leading space.
    subject_columns = [' UID', ' AltStudyID', ' Study date', ' Sex']
    return DB[subject_columns].drop_duplicates()
def remove_spaces(DB, protocols=False):
    """Strip the leading space from the ID columns (and Protocol if asked).

    Returns a new DataFrame with clean column names: 'UID', 'AltStudyID',
    and optionally 'Protocol'.
    """
    print(" ...removing spaces in names...")
    # Each raw value starts with a single space; drop the first character.
    cleaned = {
        "UID": [value[1:] for value in DB[' UID']],
        "AltStudyID": [value[1:] for value in DB[' AltStudyID']],
    }
    if protocols:
        cleaned["Protocol"] = [value[1:] for value in DB[' Protocol']]
    return pd.DataFrame(cleaned)
def database_exclude(DB):
    """Apply the module-level exclusion `rules` loaded from EXCLUSION_NIDB.

    Rules with reason 'typo' rewrite mistyped AltStudyID values in place;
    every other rule removes the listed UIDs from the database.
    """
    for rule in rules:
        if rule['reason'] == 'typo':
            # BUG FIX: dict.iteritems() is Python 2 only and raises
            # AttributeError on Python 3; .items() works on both.
            for wrong, correct in rule['pairs'].items():
                # .loc avoids pandas chained-assignment
                # (DB.AltStudyID[mask] = v), which may silently write to a
                # temporary copy instead of DB itself.
                DB.loc[DB.AltStudyID == wrong, 'AltStudyID'] = correct
        else:
            for sub in rule['remove']:
                DB = DB[DB.UID != sub]
    return DB
def clean_DB(DB):
    """Run the full NIDB cleaning pipeline and return the tidy database.

    Steps, in order: drop pre-2015 rows, keep one row per subject, strip
    leading spaces from IDs, apply the exclusion rules, then deduplicate
    and reset the index.
    """
    print("============= Cleaning NIDB database ==============")
    for step in (remove_old, unique_subs, remove_spaces, database_exclude):
        DB = step(DB)
    DB = DB.drop_duplicates()
    return DB.reset_index()
def nidb_merge(PSYDB, DB):
    """Append every NIDB entry to PSYDB, tagging each new row with in_nidb=True.

    Args:
        PSYDB: the accumulated project database (DataFrame).
        DB: the cleaned NIDB export; its AltStudyID column is renamed to
            scan_id before merging.

    Returns:
        PSYDB with one extra row per NIDB entry.
    """
    print("============= Merging NIDB in database ==============")
    DB = DB.rename(columns={"AltStudyID": "scan_id"})
    print(" ...looping over all entries...")
    new_rows = []
    for _, row in DB.iterrows():
        entry = dict(row)
        entry['in_nidb'] = True
        new_rows.append(entry)
    # BUG FIX: DataFrame.append() was deprecated in pandas 1.4 and removed in
    # pandas 2.0; build the rows once and concatenate instead (also O(n)
    # rather than O(n^2) reallocation per appended row).
    if new_rows:
        PSYDB = pd.concat([PSYDB, pd.DataFrame(new_rows)], ignore_index=True)
    # BUG FIX: the original printed len(PSYDB) (the total size), not the
    # number of subjects actually added in this merge.
    print("added %i new subjects" % len(new_rows))
    return PSYDB
| 2.4375 | 2 |
dashboard/models.py | FurSquared/dev-game | 1 | 12764055 | <gh_stars>1-10
import pytz
from django.utils import timezone
from django.db import models
from django.core.validators import RegexValidator
from django.conf import settings
from django.utils import timezone
# Restrict token codes to a URL-safe alphabet (letters, digits, '_', '-').
alphanumeric_validator = RegexValidator(r'^[0-9a-zA-Z_-]*$', 'Only alphanumeric, underscore, and dash are allowed.')
# Project-local time zone, used to render reward unlock timestamps.
TIME_ZONE_OBJ = pytz.timezone(settings.TIME_ZONE)
class Token(models.Model):
    """A redeemable code that grants reward text, optionally time-locked."""

    def __str__(self):
        return self.code

    # The code itself is the primary key; validated against [0-9a-zA-Z_-].
    code = models.CharField(max_length=25, primary_key=True, validators=[alphanumeric_validator])
    # Internal note for game masters; never shown to players.
    gm_note = models.TextField(blank=True)
    # Text revealed to the player once the token is valid.
    reward_text = models.TextField(blank=True)
    # If set, the token's reward stays locked until this moment; None/blank
    # means the token is always valid.
    valid_from = models.DateTimeField(blank=True, null=True)

    def is_valid(self):
        """Return (is_valid, valid_from): False until valid_from has passed."""
        if self.valid_from and self.valid_from > timezone.now():
            return False, self.valid_from
        return True, self.valid_from

    @property
    def user_reward(self):
        """The reward text if unlocked, else a message naming the unlock time."""
        is_valid, date = self.is_valid()
        if is_valid:
            return self.reward_text
        # Render the unlock time in the project's configured time zone.
        date_str = date.astimezone(TIME_ZONE_OBJ).strftime('%b %d, %Y @ %H:%M:%S')
        return f"This reward unlocks at {date_str}"

    def save(self, *args, **kwargs):
        # Normalize codes on save: uppercase, dashes folded to underscores,
        # so user-entered variants map to the same primary key.
        self.code = self.code.upper().replace("-", "_")
        super().save(*args, **kwargs)
class CollectedToken(models.Model):
    """Records that a user has redeemed a Token (unique per user/token pair)."""

    def __str__(self):
        return f"{self.user} | {self.token}"

    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.DO_NOTHING)
    token = models.ForeignKey(Token, on_delete=models.DO_NOTHING)
    # Whether the user has viewed the reward text yet.
    read = models.BooleanField(default=False)
    collected_at = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        # After persisting the collection, grant any rewards it unlocks.
        # Rewards are earned two ways:
        #   1) count-based: the user's total token count reaches Reward.count;
        #   2) set-based: the user has collected every token in
        #      Reward.required_tokens.
        # Rewards the user already holds are excluded from both queries.
        super().save(*args, **kwargs)
        user_token_count = CollectedToken.objects.filter(user=self.user).count()
        user_rewards = [cr.reward.id for cr in CollectedReward.objects.filter(user=self.user)]
        for reward in Reward.objects.exclude(id__in=user_rewards).filter(count__gt=0, count__lte=user_token_count):
            CollectedReward.objects.create(user=self.user, reward=reward)
        for reward in Reward.objects.exclude(id__in=user_rewards).filter(required_tokens__isnull=False).distinct():
            count = CollectedToken.objects.filter(user=self.user, token__in=reward.required_tokens.all()).count()
            if count == reward.required_tokens.count():
                CollectedReward.objects.create(user=self.user, reward=reward)

    class Meta:
        indexes = [
            models.Index(fields=['user', 'token']),
            models.Index(fields=['user']),
            models.Index(fields=['token']),
        ]
        # One collection record per (user, token).
        unique_together = [['user', 'token']]
class Reward(models.Model):
    """A prize granted either by total token count or by a specific token set."""

    def __str__(self):
        return self.name

    name = models.CharField(max_length=255)
    # save time
    # Count-based threshold: granted (in CollectedToken.save) once the user
    # holds this many tokens; 0 disables count-based granting.
    count = models.PositiveIntegerField(default=0)
    # Set-based requirement: granted once every listed token is collected.
    required_tokens = models.ManyToManyField(Token, blank=True)
    # page load time
    # Time lock evaluated when the reward text is displayed.
    valid_from = models.DateTimeField(blank=True, null=True)
    gm_note = models.TextField(blank=True)
    reward_text = models.TextField(blank=True)

    def is_valid(self):
        """Return (is_valid, unlock_time).

        Invalid while the reward's own valid_from is in the future, or while
        any required token is itself still locked (the first locked token's
        unlock time is returned).
        """
        if self.valid_from and self.valid_from > timezone.now():
            return False, self.valid_from
        for token in self.required_tokens.all():
            is_valid, date = token.is_valid()
            if not is_valid:
                return False, date
        return True, self.valid_from

    @property
    def user_reward(self):
        """The reward text if unlocked, else a message naming the unlock time."""
        is_valid, date = self.is_valid()
        if is_valid:
            return self.reward_text
        date_str = date.astimezone(TIME_ZONE_OBJ).strftime('%b %d, %Y @ %H:%M:%S')
        return f"This reward unlocks at {date_str}"

    class Meta:
        indexes = [
            models.Index(fields=['count']),
        ]
class CollectedReward(models.Model):
    """Records that a user has earned a Reward (unique per user/reward pair)."""

    def __str__(self):
        return f"{self.user} | {self.reward}"

    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.DO_NOTHING)
    reward = models.ForeignKey(Reward, on_delete=models.DO_NOTHING)
    # Whether the user has viewed the reward yet.
    read = models.BooleanField(default=False)
    collected_at = models.DateTimeField(default=timezone.now)

    class Meta:
        indexes = [
            models.Index(fields=['user']),
            models.Index(fields=['user', 'reward']),
            models.Index(fields=['reward']),
        ]
        # One grant record per (user, reward).
        unique_together = [['user', 'reward']]
| 2.375 | 2 |
meltingpot/python/utils/substrates/reaction_graph_utils.py | yunfanjiang/meltingpot | 0 | 12764056 | <gh_stars>0
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library of functions for defining chemical motifs."""
from typing import Any, Dict
from absl import logging # pylint: disable=unused-import
import networkx as nx # pylint: disable=unused-import
import numpy as np
from meltingpot.python.utils.substrates import colors
from meltingpot.python.utils.substrates import shapes
# RGBA colors used for the "x" (background) and "a" (highlight) palette slots.
EARTH_COLOR = (225, 169, 95, 255)  # An earth color.
WHITE_COLOR = (255, 255, 255, 255)  # A white color.

# 16x16 ASCII sprite overlaid on an avatar to show stomach contents;
# '*' cells take the compound's palette color, 'x' cells the background.
STOMACH = """
xxxx********xxxx
xxxx********xxxx
xxxx********xxxx
xxxx********xxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xx************xx
xx************xx
xx************xx
xx************xx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""

# 8x8 diamond sprite for ordinary compounds; a/b/c/d select palette entries.
DIAMOND_SHAPE = """
xxxabxxx
xxaabbxx
xaaabbbx
aaaabbbb
ddddcccc
xdddcccx
xxddccxx
xxxdcxxx
"""

# 8x8 solid square sprite, used for the "empty" and "activated" states.
SQUARE_SHAPE = """
bbbbbbbb
bbbbbbbb
bbbbbbbb
bbbbbbbb
bbbbbbbb
bbbbbbbb
bbbbbbbb
bbbbbbbb
"""
def graph_semantics(g):
    """Convert a networkx.DiGraph to compounds and reactions for grid_land.

    Nodes flagged with a truthy "reaction" attribute become reactions (their
    in-edges supply reactants, out-edges supply products); all other nodes
    become compounds.
    """
    compounds = {}
    reactions = {}
    for node, attributes in g.nodes.items():
        if attributes.get("reaction"):
            reactants = [edge[0] for edge in g.in_edges(node)]
            products = [edge[1] for edge in g.out_edges(node)]
            reactions[node] = create_reaction(reactants, products, attributes)
        else:
            compounds[node] = create_compound(attributes)
    return compounds, reactions
def create_reaction(reactants, products, attributes):
    """Build a reaction dict from reactant/product lists and node attributes.

    `fixedSwapOrder` defaults to True and `priority` to 1 when the node does
    not override them.
    """
    # TODO(b/192926758): support fixedSwapOrder = False, in that case, pass
    # reactants# and products as a dictionary mapping to the number required (not
    # a list with possibly repeated entries like the current version).
    reaction = {"reactants": reactants, "products": products}
    reaction["fixedSwapOrder"] = attributes.get("fixedSwapOrder", True)
    reaction["priority"] = attributes.get("priority", 1)
    return reaction
def create_compound(attributes):
    """Convert node attributes to dictionary structure needed for a compound.

    Starts from defaults (black color, (0, 0) structure) and then copies
    every node attribute over them, so explicit attributes always win.
    """
    compound = {
        # Black if the node specifies no color.
        "color": attributes.get("color", (0, 0, 0)),
        "properties": {
            # Trivial structure when none is provided.
            "structure": attributes.get("structure", (0, 0)),
        },
    }
    compound.update(attributes)
    return compound
def add_system_nodes(g):
    """Add several nodes that must always be present for the system to function.

    Args:
        g: (nx.DiGraph): directed graph representing the reaction system.
    """
    system_nodes = [
        # The "empty" compound fills unoccupied cells.
        ("empty", {"color": EARTH_COLOR, "reactivity": "low"}),
        # The "activated" compound marks triggered cells and cannot move.
        ("activated", {"color": WHITE_COLOR, "immovable": True}),
        # Unused placeholder nodes: they only make every standard reactivity
        # group non-empty so its corresponding updater can be created.
        ("_unused_a", {"reactivity": "low"}),
        ("_unused_b", {"reactivity": "medium"}),
        ("_unused_c", {"reactivity": "high"}),
    ]
    g.add_nodes_from(system_nodes)
def add_compounds_to_prefabs_dictionary(
    prefabs,
    compounds,
    reactivity_levels,
    sprites=False,
    default_reaction_radius=None,
    default_reaction_query_type=None,
    priority_mode=False,
):
    """Register one cell prefab per compound and return the prefabs dict."""
    for name in compounds:
        prefabs[name] = create_cell_prefab(
            name,
            compounds,
            reactivity_levels,
            sprites=sprites,
            default_reaction_radius=default_reaction_radius,
            default_reaction_query_type=default_reaction_query_type,
            priority_mode=priority_mode,
        )
    return prefabs
def multiply_tuple(color_tuple, factor):
    """Scale each channel of an RGB/RGBA tuple by `factor`, clamped to 255.

    Args:
        color_tuple: a 3-tuple (RGB) or 4-tuple (RGBA) of channel values.
        factor: multiplicative scale applied to every channel.

    Returns:
        A tuple of ints clamped to [.., 255], or None for other lengths
        (preserving the original behavior for unexpected input).
    """
    # BUG FIX: the 4-channel branch computed np.min([x * factor]) over a
    # single-element list, i.e. no clamp at all — scaled RGBA channels could
    # exceed 255. Both branches now clamp identically (numpy not needed).
    if len(color_tuple) in (3, 4):
        return tuple(min(int(channel * factor), 255) for channel in color_tuple)
def create_cell_prefab(
    compound_name,
    compounds,
    reactivity_levels,
    sprites=False,
    default_reaction_radius=None,
    default_reaction_query_type=None,
    priority_mode=False,
):
    """Create prefab for a cell object initially set to state=`compound_name`.

    The prefab carries one state per compound (so a cell can transmute into
    any compound at runtime); `compound_name` only selects the initial state.

    Args:
        compound_name: name of the compound the cell starts as.
        compounds: mapping of compound name -> attribute dict (color,
            properties, reactivity, immovable, query_config, ...).
        reactivity_levels: mapping of reactivity group name -> level value,
            passed through to the Reactant component.
        sprites: if True, render 8x8 ascii-shape sprites; otherwise flat RGB.
        default_reaction_radius: neighbor-search radius used each step.
        default_reaction_query_type: 'L1' (diamond) or 'L2' (disc) style norm.
        priority_mode: forwarded to the Reactant component.
    """
    state_configs = []
    states_to_properties = {}
    sprite_colors = []
    query_configs = {}
    # One engine state per compound; collect per-compound groups and colors.
    for compound, attributes in compounds.items():
        groups = []
        if "reactivity" in attributes:
            reactivity_group = attributes["reactivity"]
            groups.append(reactivity_group)
        if "immovable" in attributes and attributes["immovable"]:
            groups.append("immovables")
        if "query_config" in attributes:
            # Per-state override of the default neighbor query.
            query_configs[compound] = attributes["query_config"]
        state_config = {
            "state": compound,
            "sprite": compound,
            "layer": "lowerPhysical",
            # Every cell state is also a spawn point for avatars.
            "groups": groups + ["spawnPoints"],
        }
        state_configs.append(state_config)
        states_to_properties[compound] = attributes["properties"]
        sprite_colors.append(attributes["color"])
    # Configure the Reactant component.
    reactivities = {}
    for key, value in reactivity_levels.items():
        reactivities[key] = value
    if sprites:
        def get_palette(sprite_color):
            # Map palette slots: 'x' background, 'a' highlight, 'b'/'d' the
            # compound color, 'c' a darkened shade for depth.
            if len(sprite_color) == 3:
                x_color = EARTH_COLOR[0:3]
                a_color = (252, 252, 252)
            elif len(sprite_color) == 4:
                x_color = EARTH_COLOR
                a_color = (252, 252, 252, 255)
            return {
                "x": x_color,
                "a": a_color,
                "b": sprite_color,
                "c": multiply_tuple(sprite_color, 0.2),
                "d": sprite_color,
            }
        appearance_kwargs = {
            "renderMode": "ascii_shape",
            "spriteNames": list(compounds.keys()),
            "spriteShapes": [DIAMOND_SHAPE] * len(sprite_colors),
            "palettes": [get_palette(color) for color in sprite_colors],
            "noRotates": [True] * len(sprite_colors),
        }
        # Must ensure "empty" and "activated" are not given the diamond sprite.
        for i, compound in enumerate(appearance_kwargs["spriteNames"]):
            if compound in ["empty", "activated"]:
                appearance_kwargs["spriteShapes"][i] = SQUARE_SHAPE
    else:
        # Flat-color rendering: one RGB per compound, no shapes.
        appearance_kwargs = {
            "spriteNames": list(compounds.keys()),
            "spriteRGBColors": sprite_colors,
        }
    prefab = {
        "name": "cell",
        "components": [
            {
                "component": "StateManager",
                "kwargs": {
                    "initialState": compound_name,
                    "stateConfigs": state_configs,
                },
            },
            {
                "component": "Transform",
                "kwargs": {"position": (0, 0), "orientation": "N"},
            },
            {"component": "Appearance", "kwargs": appearance_kwargs},
            {
                "component": "Cell",
                "kwargs": {
                    "numCellStates": len(state_configs),
                    "statesToProperties": states_to_properties,
                    # The radius over which to search for neighbors on every step.
                    "radius": default_reaction_radius,
                    # Query according to L1 (diamond) or L2 (disc) norm.
                    "queryType": default_reaction_query_type,
                    # Layers on which to search for neighbors on every step.
                    "interactionLayers": ["lowerPhysical", "overlay"],
                    # You can override query properties on a per state basis.
                    "stateSpecificQueryConfig": query_configs,
                },
            },
            {
                "component": "Reactant",
                "kwargs": {
                    "name": "Reactant",
                    "reactivities": reactivities,
                    "priorityMode": priority_mode,
                },
            },
            {"component": "Product", "kwargs": {"name": "Product",}},
        ],
    }
    return prefab
def create_stomach(
    compounds,
    reactivity_levels,
    default_reaction_radius=None,
    default_reaction_query_type=None,
    priority_mode=False,
):
    """Construct prefab for an avatar's stomach object.

    The stomach mirrors the cell prefab but lives on the "overlay" layer and
    uses "stomach_"-prefixed reactivity groups, so stomach contents react
    under their own rules rather than the world's.

    Args:
        compounds: mapping of compound name -> attribute dict.
        reactivity_levels: mapping of reactivity group name -> level value;
            keys are re-prefixed with "stomach_".
        default_reaction_radius: neighbor-search radius used each step.
        default_reaction_query_type: 'L1' (diamond) or 'L2' (disc) style norm.
        priority_mode: forwarded to the Reactant component.
    """
    stomach_prefix = "stomach_"
    state_configs = []
    states_to_properties = {}
    sprite_colors = []
    query_configs = {}
    # One state per compound the stomach can hold.
    for compound, attributes in compounds.items():
        groups = []
        if "reactivity" in attributes:
            # Stomach contents use their own reactivity groups.
            reactivity_group = stomach_prefix + attributes["reactivity"]
            groups.append(reactivity_group)
        if "immovable" in attributes and attributes["immovable"]:
            groups.append("immovables")
        if "query_config" in attributes:
            query_configs[compound] = attributes["query_config"]
        state_config = {
            "state": compound,
            "sprite": compound + "_stomach",
            "layer": "overlay",
            "groups": groups,
        }
        state_configs.append(state_config)
        states_to_properties[compound] = attributes["properties"]
        sprite_colors.append(attributes["color"])
    # Configure the Reactant component.
    reactivities = {}
    for key, value in reactivity_levels.items():
        reactivities[stomach_prefix + key] = value
    prefab = {
        "name": "avatar_stomach",
        "components": [
            {
                "component": "StateManager",
                "kwargs": {
                    # "preInit" is a placeholder state until the stomach is
                    # bound to a player by AvatarStomach.
                    "initialState": "preInit",
                    "stateConfigs": state_configs + [{"state": "preInit"}],
                },
            },
            {
                "component": "Transform",
                "kwargs": {"position": (0, 0), "orientation": "N"},
            },
            {
                "component": "Appearance",
                "kwargs": {
                    "renderMode": "ascii_shape",
                    "spriteNames": [key + "_stomach" for key in compounds.keys()],
                    "spriteShapes": [STOMACH] * len(sprite_colors),
                    # color 0 is a dark navy blue. This is overwritten below.
                    "palettes": [
                        shapes.get_palette(sprite_colors[i])
                        for i in range(len(sprite_colors))
                    ],
                    "noRotates": [False] * len(sprite_colors),
                },
            },
            {
                "component": "AvatarStomach",
                "kwargs": {
                    "playerIndex": -1,  # player index to be overwritten.
                    "preInitState": "preInit",
                    "initialState": "empty",
                    "waitState": "stomachWait",
                },
            },
            {
                "component": "Cell",
                "kwargs": {
                    "numCellStates": len(state_configs),
                    "statesToProperties": states_to_properties,
                    # The radius over which to search for neighbors on every step.
                    "radius": default_reaction_radius,
                    # Query according to L1 (diamond) or L2 (disc) norm.
                    "queryType": default_reaction_query_type,
                    # Layers on which to search for neighbors on every step.
                    "interactionLayers": ["lowerPhysical", "overlay"],
                    # You can override query properties on a per state basis.
                    "stateSpecificQueryConfig": query_configs,
                },
            },
            {
                "component": "Reactant",
                "kwargs": {
                    "name": "Reactant",
                    "reactivities": reactivities,
                    "priorityMode": priority_mode,
                },
            },
            {"component": "Product", "kwargs": {"name": "Product",}},
        ],
    }
    return prefab
def create_avatar(rewarding_reactions):
    """Create an avatar prefab rewarded by reactions in `rewarding_reactions`.

    Args:
        rewarding_reactions: mapping from reaction name to reward value,
            passed to the ReactionsToRewards component.
    """
    prefab = {
        "name": "avatar",
        "components": [
            {
                "component": "StateManager",
                "kwargs": {
                    "initialState": "player",
                    "stateConfigs": [
                        {
                            "state": "player",
                            "layer": "upperPhysical",
                            "sprite": "Avatar",
                            "contact": "avatar",
                            "groups": ["players"],
                        },
                        # Inactive/respawning players sit in "playerWait".
                        {"state": "playerWait", "groups": ["playerWaits"]},
                    ],
                },
            },
            {
                "component": "Transform",
                "kwargs": {"position": (0, 0), "orientation": "N"},
            },
            {
                "component": "Appearance",
                "kwargs": {
                    "renderMode": "ascii_shape",
                    "spriteNames": ["Avatar"],
                    "spriteShapes": [shapes.AVATAR_DEFAULT],
                    # color 0 is a dark navy blue. This is overwritten below.
                    "palettes": [shapes.get_palette(colors.palette[0])],
                    "noRotates": [False],
                },
            },
            {
                "component": "Avatar",
                "kwargs": {
                    "index": -1,  # player index to be overwritten.
                    "spawnGroup": "spawnPoints",
                    "aliveState": "player",
                    "waitState": "playerWait",
                    # Action order and per-action discrete ranges.
                    "actionOrder": ["move", "turn", "ioAction"],
                    "actionSpec": {
                        "move": {"default": 0, "min": 0, "max": 4},
                        "turn": {"default": 0, "min": -1, "max": 1},
                        "ioAction": {"default": 0, "min": 0, "max": 1},
                    },
                    # Egocentric observation window (cells in each direction).
                    "view": {
                        "left": 5,
                        "right": 5,
                        "forward": 9,
                        "backward": 1,
                        "centered": False,
                    },
                },
            },
            {"component": "IOBeam", "kwargs": {"cooldownTime": 2,}},
            {
                "component": "ReactionsToRewards",
                "kwargs": {
                    # Specify rewards for specific reactions.
                    "rewardingReactions": rewarding_reactions
                },
            },
            {
                "component": "LocationObserver",
                "kwargs": {"objectIsAvatar": True, "alsoReportOrientation": True},
            },
        ],
    }
    return prefab
def create_avatar_constant_self_view(
    rewarding_reactions, player_idx: int, target_sprite_self: Dict[str, Any]
) -> Dict[str, Any]:
    """Create an avatar prefab rewarded by reactions in `rewarding_reactions`.

    Unlike `create_avatar`, this variant gives the player a constant sprite
    for themselves (via a self->target sprite remap), independent of the
    per-player palette other players see.

    Args:
        rewarding_reactions: mapping from reaction name to reward value.
        player_idx: zero-based player index; also selects the palette color.
        target_sprite_self: dict with "name", "shape", "palette", "noRotate"
            describing the sprite this player sees for themselves.
    """
    # Lua is 1-indexed.
    lua_index = player_idx + 1
    # Setup the self vs other sprite mapping.
    source_sprite_self = "Avatar" + str(lua_index)
    custom_sprite_map = {source_sprite_self: target_sprite_self["name"]}
    live_state_name = "player{}".format(lua_index)
    avatar_object = {
        "name": "avatar",
        "components": [
            {
                "component": "StateManager",
                "kwargs": {
                    "initialState": live_state_name,
                    "stateConfigs": [
                        {
                            "state": live_state_name,
                            "layer": "upperPhysical",
                            "sprite": source_sprite_self,
                            "contact": "avatar",
                            "groups": ["players"],
                        },
                        # Inactive/respawning players sit in "playerWait".
                        {"state": "playerWait", "groups": ["playerWaits"]},
                    ],
                },
            },
            {
                "component": "Transform",
                "kwargs": {"position": (0, 0), "orientation": "N"},
            },
            {
                "component": "Appearance",
                "kwargs": {
                    "renderMode": "ascii_shape",
                    "spriteNames": [source_sprite_self],
                    "spriteShapes": [shapes.AVATAR_DEFAULT],
                    "palettes": [shapes.get_palette(colors.palette[player_idx])],
                    "noRotates": [False],
                },
            },
            {
                # The constant sprite this player sees for themselves.
                "component": "AdditionalSprites",
                "kwargs": {
                    "renderMode": "ascii_shape",
                    "customSpriteNames": [target_sprite_self["name"]],
                    "customSpriteShapes": [target_sprite_self["shape"]],
                    "customPalettes": [target_sprite_self["palette"]],
                    "customNoRotates": [target_sprite_self["noRotate"]],
                },
            },
            {
                "component": "Avatar",
                "kwargs": {
                    "index": lua_index,
                    "spawnGroup": "spawnPoints",
                    "aliveState": live_state_name,
                    "waitState": "playerWait",
                    "actionOrder": ["move", "turn", "ioAction"],
                    "actionSpec": {
                        "move": {"default": 0, "min": 0, "max": 4},
                        "turn": {"default": 0, "min": -1, "max": 1},
                        "ioAction": {"default": 0, "min": 0, "max": 1},
                    },
                    # Egocentric observation window (cells in each direction).
                    "view": {
                        "left": 5,
                        "right": 5,
                        "forward": 9,
                        "backward": 1,
                        "centered": False,
                    },
                    # Remap own sprite to the constant self sprite.
                    "spriteMap": custom_sprite_map,
                },
            },
            {"component": "IOBeam", "kwargs": {"cooldownTime": 2,}},
            {
                "component": "ReactionsToRewards",
                "kwargs": {
                    # Specify rewards for specific reactions.
                    "rewardingReactions": rewarding_reactions
                },
            },
            {
                "component": "LocationObserver",
                "kwargs": {"objectIsAvatar": True, "alsoReportOrientation": True},
            },
        ],
    }
    return avatar_object
def create_scene(reactions, stochastic_episode_ending=False):
    """Construct the global scene prefab.

    Args:
        reactions: reaction definitions forwarded to the ReactionAlgebra
            component.
        stochastic_episode_ending: if True, append a component that ends the
            episode at a random interval boundary after a minimum length.
    """
    components = [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "scene",
                "stateConfigs": [{"state": "scene",}],
            },
        },
        {
            "component": "Transform",
            "kwargs": {"position": (0, 0), "orientation": "N"},
        },
        {"component": "ReactionAlgebra", "kwargs": {"reactions": reactions}},
        {
            "component": "GlobalMetricTracker",
            "kwargs": {"name": "GlobalMetricTracker",},
        },
    ]
    if stochastic_episode_ending:
        components.append(
            {
                "component": "StochasticIntervalEpisodeEnding",
                "kwargs": {
                    "minimumFramesPerEpisode": 1000,
                    "intervalLength": 100,  # Set equal to unroll length.
                    "probabilityTerminationPerInterval": 0.2,
                },
            }
        )
    return {"name": "scene", "components": components}
| 2.109375 | 2 |
nrfreverse.py | matthew5025/nrf5x-tools | 0 | 12764057 | <reponame>matthew5025/nrf5x-tools<filename>nrfreverse.py
#!/usr/bin/env python3.5
"""
NRF5 reverse tool using IDA-python
"""
import sqlite3
import idaapi
import idc
def launch_print():
    """Print the nRF5-tool startup banner."""
    banner = (
        "############################ nRF5-tool ############################ ",
        "##### ##### ",
        "##### IDA python-Reversing nRF5x binaries ##### ",
        "##### ##### ",
        "################################################################### ",
    )
    for line in banner:
        print(line)
    print("\n")
class NRF5xReverse(object):
    """
    nRF5x reverse class initiates objects with the softdevice's signature
    renames all syscalls in IDA.

    Loads the nRF.db SQLite database and the SoftDevice signature, imports
    the SoftDevice structures into IDA, then locates every `SVC 0xNN`
    instruction and renames/retypes the functions containing them.
    NOTE: relies on the IDAPython flat API (Segments, Heads, GetMnem, ...)
    being available as globals inside IDA.
    """
    def __init__(self, nRFv_path, nRF_db):
        """
        Database initialisation.

        Args:
            nRFv_path: path to the file holding the SoftDevice version
                signature (used as an SQL LIKE pattern).
            nRF_db: path to the nRF.db SQLite database.
        """
        self.nrf_db = nRF_db
        self.con = sqlite3.connect(self.nrf_db)
        self.cur = self.con.cursor()
        # Address of each SVC instruction -> its operand text (e.g. '0x60').
        self.svc_addr = dict()
        # Syscall operand -> number of call sites found in the binary.
        self.svc_count = dict()
        self.structs = []
        # C type names from the database mapped to IDA type spellings.
        self.types = {"int8_t":"__int8", "int16_t": "__int16", "int32_t": "__int32", "int64_t":"__int64", "int128_t":"__int128"}
        with open(nRFv_path, "r") as nrf_file:
            self.sign = nrf_file.read()
    def extract_syscalls(self):
        """
        Extracts SVC from ASM to self.svc_addr
        Condition met in ASM : SVC 0xnum
        Retrieves syscall number called (SVC 0xnum => 0xnum syscall) at svc_addr
        """
        # Walk every code head of every segment looking for SVC instructions.
        for segea in Segments():
            for head in Heads(segea, SegEnd(segea)):
                if isCode(GetFlags(head)):
                    mnem = GetMnem(head)
                    if mnem == "SVC":
                        syscall = GetOpnd(head, 0)
                        self.svc_addr[head] = syscall
    def count_svcs(self):
        """
        Detects same svcs in different sub_addresses
        (tallies how many call sites share each syscall number).
        """
        # k (the address) is unused; only the syscall operand v is counted.
        for k, v in self.svc_addr.items():
            self.svc_count[v] = self.svc_count.get(v, 0) + 1
    def resolve_svcs(self):
        """
        Resolves svcs in binary: looks each syscall up in the database and
        delegates renaming/retyping to an SVCALL instance.
        """
        for addr, syscall in self.svc_addr.items():
            req = "select distinct(svc) from SVCALL where LOWER(syscall)=LOWER(?) and softdev_signature LIKE ?"
            self.cur.execute(req, (syscall, self.sign))
            svc = self.cur.fetchall()
            if len(svc) != 1:
                # Ambiguous (or missing) match: the signature is too loose.
                print("No SVC identified or SoftDevice version must be specified\n", syscall, self.sign)
            else:
                svcall = SVCALL(self.sign, self.cur, addr, syscall, self.svc_count[syscall])
                svcall.rename(self.types)
    def get_structs(self):
        """
        Extracts structures from nRF.db into self.structs
        (struct name -> list of 1-tuples of member declarations).
        """
        self.structs = dict()
        req = "select distinct(name) from Structures where softdev_signature LIKE ?"
        self.cur.execute(req, (self.sign, ))
        structs = self.cur.fetchall()
        print(structs)
        for struct in structs:
            req1 = "select distinct(arg_name) from StructArgs where struct_name=? and softdev_signature LIKE ?"
            self.cur.execute(req1, (struct[0], self.sign))
            struct_args = self.cur.fetchall()
            self.structs[struct[0]] = struct_args
    def add_struc(self):
        """
        Adds structures to IDA
        """
        print("## Structures ##")
        # NOTE(review): `structs` is assigned but never used below.
        structs = dict()
        idx = idaapi.get_last_struc_idx()
        print(idx, self.structs)
        # NOTE(review): this adds the first structure once here and then the
        # loop below adds every structure (including the first) again at the
        # next indices — confirm the duplicate add is intentional.
        idaapi.add_struc(idx, str(next(iter(self.structs))))
        for structure, args in self.structs.items():
            idx = idaapi.get_next_struc_idx(idx)
            # NOTE(review): struct_id is retrieved but never used.
            struct_id = idaapi.get_struc_by_idx(idx)
            struct_name = str(structure)
            idaapi.add_struc(idx, struct_name)
            # Register the new struct so SetType can resolve it later.
            self.types[struct_name] = struct_name
    def add_strucmem(self):
        """
        Adds structures'members (including inline union members) to the IDA
        structures created by add_struc, and stores the full declaration list
        as a structure comment.
        """
        idx = idaapi.get_last_struc_idx()
        for structure, args in self.structs.items():
            struct_name = str(structure)
            sid = idc.GetStrucIdByName(struct_name)
            mem_cmt = ""
            for struct_arg in args:
                if "union" in struct_arg[0]:
                    # Declarations look like: "union name(type a,type b,...)".
                    print(struct_arg[0])
                    union = struct_arg[0].replace("union ", "")
                    union_name = union.split("(")[0]
                    union_members = union.split("(")[1].replace(")", "").rsplit(",")
                    # NOTE(review): uid is derived from idx, which is never
                    # advanced inside this loop — multiple unions would get
                    # the same index; confirm against IDA's add_struc API.
                    uid = idaapi.get_next_struc_idx(idx)
                    idaapi.add_struc(uid, str(union_name), 1)
                    for union_member in union_members:
                        if len(union_member.split(" ")) > 1 :
                            print(union_member)
                            member_name = str(union_member.split(" ")[1])
                            member_type = str(union_member.split(" ")[0])
                            member_id = idc.GetStrucIdByName(member_type)
                            member_size = idc.GetStrucSize(member_id)
                            print(uid, union_name, member_type, member_name, member_size)
                            idc.AddStrucMember(uid, member_name, -1, idc.FF_DWRD, -1, member_size)
                member = struct_arg[0].split(" ")[1].split("(")[0]
                member_type = struct_arg[0].split(" ")[0]
                mem_cmt += struct_arg[0] + "|"
                idc.AddStrucMember(sid, str(member), -1, idc.FF_DWRD, -1, 8)
            struct_cmt = "STRUCTURE " + struct_name + " contains " + mem_cmt
            idaapi.set_struc_cmt(sid, str(struct_cmt), False)
class SVCALL():
    """
    SVCALL class initiates svc object associated to an address in IDA
    sets function names and prototypes.

    Each instance represents one `SVC 0xNN` call site, resolved against the
    nRF.db database for the current SoftDevice signature.
    """

    def __init__(self, softdev_sign, cur, addr, syscall, syscall_cnt):
        """Resolve `syscall` at `addr` against the database.

        Args:
            softdev_sign: SoftDevice signature (SQL LIKE pattern).
            cur: sqlite3 cursor on nRF.db.
            addr: address of the SVC instruction in IDA.
            syscall: SVC operand text (e.g. '0x60').
            syscall_cnt: number of call sites sharing this syscall number.
        """
        self.cur = cur
        self.addr = addr
        self.syscall = syscall
        self.syscall_cnt = syscall_cnt
        req = "select distinct(svc), function, ret_type, arguments from SVCALL where LOWER(syscall)=LOWER(?) and softdev_signature LIKE ?"
        self.cur.execute(req, (syscall, softdev_sign))
        res1 = self.cur.fetchall()
        # Checking if syscall has same number of arguments for different
        # softdevices given the approximative signature.
        if len(res1) != 1:
            self.args_len = len(res1[0][3].rsplit(","))
            for row in res1:
                args_1 = row[3]
                if len(args_1.rsplit(",")) != self.args_len:
                    print('number of arguments is different for softdevices, SYSCALL:', self.syscall)
                    print(args_1, len(args_1.rsplit(",")))
                    print(res1[0][3], self.args_len)
        # Re-run the query to fetch the canonical (first) match, since the
        # cursor was exhausted by fetchall() above.
        self.cur.execute(req, (syscall, softdev_sign))
        res = self.cur.fetchone()
        self.svc = res[0]
        self.function = str(res[1])
        self.ret_type = res[2]
        self.args = res[3]

    def set_funcname(self):
        """Name the function at self.addr, disambiguating duplicate sites."""
        if self.syscall_cnt > 1:
            # Several call sites share this syscall: suffix the name with the
            # address so IDA names stay unique.
            comment = "contains " + str(self.function) + "'function code. Same declaration at "
            comment += str(self.syscall_cnt) + " different addresses"
            self.function += "_" + str(hex(self.addr)).replace("0x", "") + "_" + str(self.syscall_cnt)
        else:
            self.function = str(self.function)
            comment = str(self.function)
        MakeComm(self.addr, comment)
        MakeNameEx(self.addr, self.function, SN_NOWARN)

    def rename(self, types):
        """
        Sets prototypes and names to functions in IDA.

        Args:
            types: mapping of C type names (int8_t, structs, ...) to IDA
                type spellings, maintained by NRF5xReverse.
        """
        try:
            if "uint" in self.args:
                self.args = self.args.replace("uint", "unsigned int")
            args_an = self.args.rsplit(" ")
            for item in args_an:
                if "_t" in item or "IRQn_Type" in item:
                    if item in types:
                        self.args = self.args.replace(item, types[item])
                    elif "IRQn_Type" in item:
                        # int is a temporary value before getting structures
                        # from parsing SDKs.
                        unknown_type = "IRQn_Type"
                        self.args = self.args.replace(unknown_type, "int __struct_ptr")
                        print("IRQN", self.addr, self.function, self.args)
            self.args = "(" + self.args + ");"
            self.set_funcname()
            newtype = "unsigned __int32 " + str(self.function) + str(self.args)
            SetType(self.addr, str(newtype))
        except IOError:
            # Best-effort: keep going if the prototype cannot be applied.
            pass

    def apply_struct(self, sid):
        """
        Applies structures on arguments.
        """
        size = idc.GetStrucSize(sid)
        # BUG FIX: the original called idc.MaleUnknown, which does not exist
        # in the IDAPython idc module (AttributeError at runtime); the API
        # function is idc.MakeUnknown.
        idc.MakeUnknown(self.addr, size, idc.DOUNK_DELNAMES)
        idaapi.doStruct(self.addr, size, sid)
def main():
    """Entry point: load the signature and database, then rename all SVCs."""
    launch_print()
    signature_path = "./nRF_ver"
    database_path = "./nRF.db"
    tool = NRF5xReverse(signature_path, database_path)
    # Import SoftDevice structures into IDA first...
    tool.get_structs()
    tool.add_struc()
    tool.add_strucmem()
    # ...then locate, count, and resolve every SVC call site.
    tool.extract_syscalls()
    tool.count_svcs()
    tool.resolve_svcs()
    tool.con.close()
# Run only when executed as a script (inside IDA), not on import.
if __name__ == "__main__":
    main()
| 2.234375 | 2 |
tests/tests_manifests_workflow/test_component_opensearch_dashboards_min.py | VijayanB/opensearch-build | 0 | 12764058 | <gh_stars>0
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import unittest
from unittest.mock import MagicMock, patch
from git.git_repository import GitRepository
from manifests_workflow.component_opensearch_dashboards_min import \
ComponentOpenSearchDashboardsMin
from system.config_file import ConfigFile
class TestComponentOpenSearchDashboardsMin(unittest.TestCase):
    """Unit tests for ComponentOpenSearchDashboardsMin."""

    @patch("subprocess.check_output")
    def test_branches(self, check_output_mock):
        # Only "main" and version-like branch names survive the filter.
        remote_heads = ["main", "1.x", "1.21", "20.1", "something", "else"]
        check_output_mock.return_value = "\n".join(remote_heads).encode()
        branches = ComponentOpenSearchDashboardsMin.branches()
        self.assertEqual(branches, ["main", "1.x", "1.21", "20.1"])
        check_output_mock.assert_called_with(
            "git ls-remote https://github.com/opensearch-project/OpenSearch-Dashboards.git refs/heads/* | cut -f2 | cut -d/ -f3",
            shell=True,
        )

    @patch("os.makedirs")
    @patch.object(GitRepository, "__checkout__")
    def test_checkout(self, *mocks):
        # Checking out yields a non-snapshot component with the repo's name.
        checked_out = ComponentOpenSearchDashboardsMin.checkout("path")
        self.assertEqual(checked_out.name, "OpenSearch-Dashboards")
        self.assertFalse(checked_out.snapshot)

    @patch.object(ConfigFile, "from_file")
    def test_version(self, from_file_mock):
        from_file_mock.return_value = ConfigFile('{"version":"2.1"}')
        component = ComponentOpenSearchDashboardsMin(MagicMock(working_directory="path"))
        self.assertEqual(component.version, "2.1")

    @patch.object(ConfigFile, "from_file")
    def test_properties(self, from_file_mock):
        from_file_mock.return_value = ConfigFile('{"version":"2.1"}')
        component = ComponentOpenSearchDashboardsMin(MagicMock(working_directory="path"))
        self.assertEqual(component.properties.get_value("version"), "2.1")

    @patch.object(ConfigFile, "from_file")
    def test_to_dict(self, from_file_mock):
        from_file_mock.return_value = ConfigFile('{"version":"2.1"}')
        fake_repo = MagicMock(ref="ref", url="repo")
        component = ComponentOpenSearchDashboardsMin(fake_repo)
        self.assertEqual(
            component.to_dict(),
            {"name": "OpenSearch-Dashboards", "ref": "ref", "repository": "repo"},
        )
the-settler/API/Screen/moveable_player.py | EnderDas/the-settler | 0 | 12764059 | #moveable_player
class MoveablePlayer:
    """A player avatar with a grid position, a facing direction and a colour."""

    def __init__(self, x=2, y=2, dir="FORWARD", color="170"):
        # NOTE: ``dir`` shadows the builtin of the same name, but the
        # parameter name is part of the public interface and is kept.
        self.x, self.y = x, y
        self.direction = dir
        self.color = color
data-science/CollectiveIntelligence/com/recommendations/Recommendations.py | matija94/show-me-the-code | 1 | 12764060 | <reponame>matija94/show-me-the-code<gh_stars>1-10
'''
Created on Mar 21, 2017
@author: matija
FUNCTIONS FOR COMPUTING SIMILARITY BETWEEN TWO DATASETS BASED ON ITS KEYS.
HERE WE USE PERSON AS THE KEY, AND THE VALUES ARE RATINGS FOR THE MOVIES.
SIMILARITIES IN THIS MODULE ARE COMPUTED WITH TWO FUNCTIONS:
1. EUCLIDEAN DISTANCE
2. PEARSON
FROM MY EXPERIENCE RUNNING THESE FUNCTIONS AGAINST THE SAME DATA SET:
1. EUCLIDEAN DISTANCE IS VERY HARSH ON DIFFERING RATINGS AND DECREASES SIMILARITY BY A BIG MARGIN.
2. PEARSON IS NOT AS HARSH AS EUCLIDEAN, AND ITS SIMILARITY RESULTS LOOK MORE NATURAL TO ME.
Also, from my experience, if the preferences are charted in only one region of the graph then Pearson behaves strangely:
it only looks good if the data is spread across the whole graph (or at least one preference falls in every 'region').
'''
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
# Extended sample data set (adds 'Superman' and 'Batman' ratings).
# NOTE(review): several keys below are the anonymised placeholder '<NAME>',
# so later duplicate entries silently overwrite earlier ones -- restore the
# real critic names to recover the full data set.
criticsCustom = {
    '<NAME>': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5, 'Superman': 2.5, 'Batman': 2,
               'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
               'The Night Listener': 3.0},
    '<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, 'Superman': 2.5, 'Batman': 3,
               'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
               'You, Me and Dupree': 3.5},
    '<NAME>': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, 'Superman': 2.5, 'Batman': 4,
               'Superman Returns': 3.5, 'The Night Listener': 4.0},
    # BUGFIX: the 'You, Me and Dupree' key below had a stray leading space
    # (' You, Me and Dupree'), making it a different film from the key used
    # everywhere else in this module.
    '<NAME>': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, 'Superman': 2.5, 'Batman': 5,
               'The Night Listener': 4.5, 'Superman Returns': 4.0,
               'You, Me and Dupree': 2.5},
    '<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, 'Superman': 2.5, 'Batman': 5,
               'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
               'You, Me and Dupree': 2.0},
    '<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, 'Superman': 2.5, 'Batman': 4,
               'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
    'Toby': {'Snakes on a Plane': 3.0, 'You, Me and Dupree': 3.5, 'Superman Returns': 5.0,
             'Lady in the Water': 3.0, 'Superman': 2.5, 'Batman': 3},
    'Matija': {'Batman': 3}
}
# A dictionary of movie critics and their ratings of a small
# set of movies (the book's canonical sample data set).
# NOTE(review): several keys are the anonymised placeholder '<NAME>', so
# later duplicate entries silently overwrite earlier ones -- restore the
# real critic names to recover the full data set.
critics={'<NAME>': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
 'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
 'The Night Listener': 3.0},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
 'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 3.5},
'<NAME>': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
 'Superman Returns': 3.5, 'The Night Listener': 4.0},
'<NAME>': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
 'The Night Listener': 4.5, 'Superman Returns': 4.0,
 'You, Me and Dupree': 2.5},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
 'You, Me and Dupree': 2.0},
'<NAME>': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
 'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
def sim_euclidean_distance(prefs, person1, person2):
    """Euclidean-distance based similarity between two people.

    If each film were an axis and people were points in that preference
    space, this returns ``1 / (1 + sum of squared rating differences)`` over
    the films both have rated: 1.0 means identical ratings on every shared
    film, values near 0 mean very different tastes, and 0 means no films in
    common at all.
    """
    shared = [item for item in prefs[person1] if item in prefs[person2]]
    # No overlap means we cannot say anything about similarity.
    if not shared:
        return 0
    squared_diffs = sum(
        pow(prefs[person1][item] - prefs[person2][item], 2) for item in shared
    )
    return 1 / (1 + squared_diffs)
def sim_pearson(prefs, person1, person2):
    """Pearson correlation coefficient between two people's ratings.

    Computed over the films both have rated; returns a value in [-1, 1],
    or 0 when the two share no films or when either person's shared
    ratings have zero variance (degenerate denominator).
    """
    shared = [item for item in prefs[person1] if item in prefs[person2]]
    n = len(shared)
    if n == 0:
        return 0
    # Sums, sums of squares, and the sum of pairwise products.
    sum1 = sum(prefs[person1][it] for it in shared)
    sum2 = sum(prefs[person2][it] for it in shared)
    sum1_sq = sum(prefs[person1][it] ** 2 for it in shared)
    sum2_sq = sum(prefs[person2][it] ** 2 for it in shared)
    product_sum = sum(prefs[person1][it] * prefs[person2][it] for it in shared)
    # Standard computational formula for Pearson's r.
    numerator = product_sum - (sum1 * sum2 / n)
    denominator = sqrt((sum1_sq - sum1 ** 2 / n) * (sum2_sq - sum2 ** 2 / n))
    if denominator == 0:
        return 0
    return numerator / denominator
def top_matches(prefs, person, n=5, similarity=sim_pearson):
    """Return the ``n`` people most similar to *person*.

    Every other key in *prefs* is scored against *person* with the given
    similarity function; the result is a list of ``(score, name)`` tuples,
    best match first.
    """
    ranked = sorted(
        (
            (similarity(prefs, person, other), other)
            for other in prefs
            if other != person
        ),
        reverse=True,
    )
    return ranked[:n]
# Recommend films that *person* has not rated yet, using a weighted average
# of every other user's ratings (weights are user-user similarities).
# Returns a list of (estimated rating, film) tuples, best first.
def getRecommendations(prefs, person ,precomputedUserSims=None, similarity=sim_pearson):
    totals = {}
    simSums = {}
    if precomputedUserSims is None:
        for other in prefs:
            #don't compare me to myself
            if other==person: continue
            sim=similarity(prefs, person, other)
            # ignore scores of zero or lower
            if sim<=0: continue
            for item in prefs[other]:
                #only score movies that i haven't seen
                if item not in prefs[person] or prefs[person][item]==0:
                    #Similarity * Score
                    totals.setdefault(item,0)
                    totals[item]+=prefs[other][item]*sim
                    #Sum of similarities
                    simSums.setdefault(item,0)
                    simSums[item]+=sim
    else :
        # NOTE(review): unlike the branch above, this path neither skips
        # non-positive similarities nor items the person rated 0 -- confirm
        # whether that asymmetry is intentional.
        for sim,user in precomputedUserSims:
            if person==user: continue
            for item in prefs[user]:
                if item not in prefs[person]:
                    totals.setdefault(item, 0)
                    totals[item] += prefs[user][item]*sim
                    simSums.setdefault(item,0)
                    simSums[item]+=sim
    #Create normalized list: dividing by the summed similarity removes the
    #advantage films reviewed by more users would otherwise get.
    rankings = [(total/simSums[item], item) for item,total in totals.items()]
    rankings.sort(reverse=True)
    return rankings
def userSimilarities(prefs, person, n):
    """Convenience wrapper: the ``n`` users most similar to *person*."""
    return top_matches(prefs=prefs, person=person, n=n)
def tanimotoScore(a, b):
    """Tanimoto similarity of two sequences: |intersection| / |union|.

    Elements of ``a`` also present in ``b`` are counted (duplicates in
    ``a`` included, mirroring the original list-based behaviour).
    """
    c = [v for v in a if v in b]
    # BUGFIX: force float division -- under Python 2 the original expression
    # divided two ints and silently truncated almost every score to 0.
    return float(len(c)) / (len(a) + len(b) - len(c))
def transformPrefs(prefs):
    """Invert a ``person -> {item: rating}`` mapping into
    ``item -> {person: rating}`` (useful for item-centric similarity)."""
    flipped = {}
    for person, ratings in prefs.items():
        for item, rating in ratings.items():
            flipped.setdefault(item, {})[person] = rating
    return flipped
def calculateSimilarItems(prefs, n=10):
    """Build a dictionary mapping each item to a list of its ``n`` most
    similar items (by Euclidean similarity over the inverted matrix).
    """
    result = {}
    # Invert the preference matrix to be item-centric.
    itemPrefs = transformPrefs(prefs)
    c = 0
    for item in itemPrefs:
        # Status updates for large datasets.
        c += 1
        if c % 100 == 0:
            # BUGFIX: parenthesised form works under both Python 2 and 3;
            # the original used a Python-2-only print statement.
            print("%d / %d" % (c, len(itemPrefs)))
        # Find the items most similar to this one.
        scores = top_matches(itemPrefs, item, n, similarity=sim_euclidean_distance)
        result[item] = scores
    return result
def getRecommendedItems(prefs, itemMatch, user):
    """Recommend items for *user* from a precomputed item-similarity table.

    For every item the user rated, similar items the user has *not* rated
    accumulate ``similarity * rating``; each total is then normalised by the
    summed similarity. Returns ``(estimated rating, item)`` tuples, best
    first.
    """
    user_ratings = prefs[user]
    weighted_scores = {}
    similarity_totals = {}
    for rated_item, rating in user_ratings.items():
        for similarity, candidate in itemMatch[rated_item]:
            # Skip anything the user has already rated.
            if candidate in user_ratings:
                continue
            weighted_scores[candidate] = (
                weighted_scores.get(candidate, 0) + similarity * rating
            )
            similarity_totals[candidate] = (
                similarity_totals.get(candidate, 0) + similarity
            )
    rankings = [
        (score / similarity_totals[candidate], candidate)
        for candidate, score in weighted_scores.items()
    ]
    return sorted(rankings, reverse=True)
def loadMoviesLens(path='/home/matija/Desktop/ml-latest-small/'):
    """Load the MovieLens "small" data set into a prefs mapping.

    Reads ``movies.csv`` and ``ratings.csv`` under *path* and returns
    ``{user_id: {movie_title: rating}}``.

    BUGFIX: uses the ``csv`` module instead of naive ``line.split(',')``,
    so quoted titles that contain commas (e.g. ``"Heat, The (1995)"``) are
    parsed correctly instead of being truncated at the embedded comma.
    """
    import csv

    # Map movie id -> title.
    movies = {}
    with open(path + "/movies.csv") as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row
        for row in reader:
            movies[row[0]] = row[1]
    # Build user -> {title: rating}.
    prefs = {}
    with open(path + '/ratings.csv') as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row
        for row in reader:
            user, movieid, rating = row[0], row[1], row[2]
            prefs.setdefault(user, {})
            prefs[user][movies[movieid]] = float(rating)
    return prefs
return prefs
# Demo: recommendations for Toby, first computed directly and then from a
# precomputed list of user similarities.
# BUGFIX: parenthesised print calls work under both Python 2 and 3; the
# originals were Python-2-only print statements.
print(getRecommendations(critics, 'Toby'))
precomputedUserSims = userSimilarities(critics, 'Toby', 5)
print(getRecommendations(critics, 'Toby', precomputedUserSims))
| 3.578125 | 4 |
pycon/schedule/management/commands/create_schedule.py | pyconjp/pyconjp-website | 6 | 12764061 | <reponame>pyconjp/pyconjp-website
import csv
import time
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand, CommandError
from symposion.conference.models import Section, current_conference
from symposion.proposals.models import ProposalBase
from symposion.schedule.models import Day, Room, Schedule, SlotKind, Slot, SlotRoom
# Column names expected in the input CSV schedule export.
ROOM_KEY = 'Track'
DAY_KEY = 'Day'
START_KEY = 'Start Time'
DURATION_KEY = 'Duration'
PRESENTATION_ID_KEY = 'ID'
class Command(BaseCommand):
    """Create a conference schedule (Section, Days, Rooms, Slots and
    SlotRooms) from a CSV export and attach presentations to the slots.

    Usage: ``create_schedule <SectionName> <path/to/csv>``.
    """

    def _get_start_end_times(self, data):
        "Return start and end time objects"
        # Times are parsed onto a dummy date (year 100) purely so that
        # timedelta arithmetic can be used before stripping back to a time.
        start_time = time.strptime(data[START_KEY], '%I:%M %p')
        start = datetime(100, 1, 1, start_time.tm_hour, start_time.tm_min, 00)
        end = start + timedelta(minutes=int(data[DURATION_KEY]))
        return start.time(), end.time()

    def _build_rooms(self, schedule, data):
        "Get or Create Rooms based on schedule type and set of Tracks"
        # TODO: Ensure ordering...this isn't bulletproof for roman numerals, for example
        rooms = sorted(set([x[ROOM_KEY] for x in data]))
        for i, room in enumerate(rooms):
            name = '{0} {1}'.format(ROOM_KEY, room)
            room, _ = Room.objects.get_or_create(schedule=schedule, name=name, order=i)

    def _build_days(self, schedule, data):
        "Get or Create Days based on schedule type and set of Days"
        days = set([x[DAY_KEY] for x in data])
        for day in days:
            date = datetime.strptime(day, "%m/%d/%y")
            day, _ = Day.objects.get_or_create(schedule=schedule, date=date)

    def _build_lunches(self, schedule):
        "Get or Create Lunches for the Days and Rooms"
        slot_kind, _ = SlotKind.objects.get_or_create(label="Lunch", schedule=schedule)
        days = Day.objects.filter(schedule=schedule).order_by('date')[:2]
        rooms = Room.objects.filter(schedule=schedule).order_by('order')
        for day in days:
            for i, room in enumerate(rooms, start=1):
                # Stagger starts so odd- and even-numbered rooms break at
                # slightly different times.
                if i % 2:
                    start = datetime(100, 1, 1, 12, 55, 00)
                else:
                    start = datetime(100, 1, 1, 12, 40, 00)
                end = start + timedelta(minutes=60)
                slot = Slot.objects.create(kind=slot_kind, day=day, start=start.time(), end=end.time())
                SlotRoom.objects.get_or_create(slot=slot, room=room)

    def _build_breaks(self, schedule):
        "Get or Create Breaks for the Days and Rooms"
        slot_kind, _ = SlotKind.objects.get_or_create(label="Break", schedule=schedule)
        days = Day.objects.filter(schedule=schedule).order_by('date')[:2]
        rooms = Room.objects.filter(schedule=schedule).order_by('order')
        for day in days:
            for i, room in enumerate(rooms, start=1):
                # Same staggering scheme as lunches, 30 minutes long.
                if i % 2:
                    start = datetime(100, 1, 1, 16, 00, 00)
                else:
                    start = datetime(100, 1, 1, 15, 45, 00)
                end = start + timedelta(minutes=30)
                slot = Slot.objects.create(kind=slot_kind, day=day, start=start.time(), end=end.time())
                SlotRoom.objects.get_or_create(slot=slot, room=room)

    def handle(self, *args, **options):
        """Validate arguments, build the schedule scaffolding, then assign
        each CSV row's presentation to its slot and room."""
        if len(args) != 2:
            raise CommandError("The first argument must be a Schedule Type (ex. Talk)" \
                " and the second argument a path/to/csv")
        else:
            section_name, path = args
        conf = current_conference()
        proposals = ProposalBase.objects.select_related("result").select_subclasses()
        # TODO: Pin start and end dates for the section?
        section, _ = Section.objects.get_or_create(name=section_name, conference=conf)
        schedule, _ = Schedule.objects.get_or_create(section=section)
        slot_kind, _ = SlotKind.objects.get_or_create(label=section_name.rstrip('s').lower(), schedule=schedule)
        with open(path, 'rb') as f:
            data = [x for x in csv.DictReader(f)]
        # build rooms
        self._build_rooms(schedule, data)
        # build_days
        self._build_days(schedule, data)
        self._build_lunches(schedule)
        self._build_breaks(schedule)
        # build Slot -> SlotRoom -> Presentation associations
        for row in data:
            name = '{0} {1}'.format(ROOM_KEY, row[ROOM_KEY])
            room = Room.objects.get(schedule=schedule, name=name)
            date = datetime.strptime(row[DAY_KEY], "%m/%d/%y")
            day = Day.objects.get(schedule=schedule, date=date)
            start, end = self._get_start_end_times(row)
            slot = Slot.objects.create(kind=slot_kind, day=day, start=start, end=end)
            proposal = proposals.get(pk=row[PRESENTATION_ID_KEY])
            slot.assign(proposal.presentation)
            SlotRoom.objects.create(slot=slot, room=room)
| 2.5 | 2 |
Desktop_App/Addax/env/Lib/site-packages/serial/utilities/__init__.py | filesmuggler/antelope | 0 | 12764062 | from __future__ import nested_scopes, generators, division, absolute_import, with_statement, \
print_function, unicode_literals
from . import compatibility
compatibility.backport() # noqa
import builtins
import os # noqa
import sys # noqa
from io import UnsupportedOperation # noqa
from collections import OrderedDict # noqa
from unicodedata import normalize # noqa
import re # noqa
import inspect # noqa
from keyword import iskeyword # noqa
# region Compatibility Conditionals
# The following detects the presence of the typing library
try:
from typing import Union, Optional, Iterable, Tuple, Any, Callable, AnyStr # noqa
except ImportError:
Union = Optional = Iterable = Tuple = Any = Callable = AnyStr = None
# Before `collections.abc` existed, the definitions we use from this module were in `collections`
try:
import collections.abc as collections_abc
import collections
except ImportError:
import collections
collections_abc = collections
# Earlier versions of the `collections` library do not include the `Generator` class, so when this class is missing--
# we employ a workaround.
if hasattr(collections_abc, 'Generator'):
Generator = collections_abc.Generator
else:
Generator = type(n for n in (1, 2, 3))
# endregion
try:
from inspect import signature
getargspec = None
except ImportError:
signature = None
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
_Module = type(re)
def qualified_name(type_):
    # type: (Union[type, _Module]) -> str
    """
    Return the dotted, module-qualified name of a type, function, or module.

    Builtin/main/init module prefixes are suppressed; ``<locals>``-style
    segments are stripped from ``__qualname__``.

    >>> print(qualified_name(qualified_name))
    qualified_name
    >>> from serial import model
    >>> print(qualified_name(model.marshal))
    serial.model.marshal
    """
    if hasattr(type_, '__qualname__'):
        # Drop synthetic segments such as '<locals>'.
        type_name = '.'.join(name_part for name_part in type_.__qualname__.split('.') if name_part[0] != '<')
    else:
        type_name = type_.__name__
    if isinstance(type_, _Module):
        # Modules with "anonymous" names have no useful qualified name.
        if type_name in (
            'builtins', '__builtin__', '__main__', '__init__'
        ):
            type_name = None
        else:
            if type_.__module__ not in (
                'builtins', '__builtin__', '__main__', '__init__'
            ):
                type_name = type_.__module__ + '.' + type_name
    return type_name
def calling_function_qualified_name(depth=1):
    # type: (int) -> Optional[str]
    """
    Return the qualified name of the function *depth* frames up the call
    stack (1 = the immediate caller), or ``None`` when the stack is too
    shallow.  When the caller is a bound method, the owning class's
    qualified name is prepended; when no class is found, the caller's
    module name is prepended instead.

    >>> def my_function(): return calling_function_qualified_name()
    >>> print(my_function())
    my_function
    """
    if not isinstance(depth, int):
        depth_representation = repr(depth)
        raise TypeError(
            'The parameter `depth` for `serial.utilities.calling_function_qualified_name` must be an `int`, not' +
            (
                (':\n%s' if '\n' in depth_representation else ' %s.') %
                depth_representation
            )
        )
    stack = inspect.stack()
    if len(stack) < (depth + 1):
        return None
    else:
        name_list = []
        stack = inspect.stack()
        frame_info = stack[depth]  # type: inspect.FrameInfo
        # Tuple-style access is the Python 2 fallback for FrameInfo fields.
        try:
            frame_function = frame_info.function
        except AttributeError:
            frame_function = frame_info[3]
        if frame_function != '<module>':
            try:
                frame = frame_info.frame
            except AttributeError:
                frame = frame_info[0]
            name_list.append(frame_function)
            arguments, _, _, frame_locals = inspect.getargvalues(frame)
            if arguments:
                # Heuristic: if the first argument's value has a non-builtin
                # type, treat the caller as a method of that type.
                argument = arguments[0]
                argument_value = frame_locals[argument]
                argument_value_type = type(argument_value)
                if (
                    hasattr(argument_value_type, '__name__') and
                    hasattr(argument_value_type, '__module__') and
                    (
                        (argument_value_type.__name__ not in dir(builtins)) or
                        (getattr(builtins, argument_value_type.__name__) is not argument_value_type)
                    )
                ):
                    name_list.append(qualified_name(argument_value_type))
        if len(name_list) < 2:
            # No owning class found: resolve the module from the file path,
            # walking up the directory tree until a loaded module matches.
            try:
                file_name = frame_info.filename
            except AttributeError:
                file_name = frame_info[1]
            module_name = inspect.getmodulename(file_name)
            if module_name not in sys.modules:
                path_parts = list(os.path.split(file_name))
                path_parts.pop()
                while path_parts:
                    parent = path_parts.pop()
                    module_name = parent + '.' + module_name
                    if module_name in sys.modules:
                        break
            if module_name is None:
                raise ValueError('The path "%s" is not a python module' % file_name)
            else:
                if module_name in sys.modules:
                    qualified_module_name = qualified_name(sys.modules[module_name])
                    name_list.append(qualified_module_name)
        return '.'.join(reversed(name_list))
def property_name(string):
    # type: (str) -> str
    """
    Convert a "camelCased" attribute/property name (or one that collides
    with a python keyword or builtin) into a pep8-compliant ``snake_case``
    property name.

    >>> print(property_name('theBirdsAndTheBees'))
    the_birds_and_the_bees
    >>> print(property_name('FYIThisIsAnAcronym'))
    fyi_this_is_an_acronym
    >>> print(property_name('in'))
    in_
    >>> print(property_name('id'))
    id_
    """
    # The same substitutions as before, applied one named step at a time
    # (innermost substitution first) instead of as a single nested call.
    name = normalize('NFKD', string)
    name = re.sub(r'([^\x20-\x7F]|\s)+', '_', name)            # non-ascii / whitespace runs
    name = re.sub(r'([a-z])([A-Z])', r'\1_\2', name)           # camelCase boundary
    name = re.sub(r'([A-Z])([A-Z])([a-z])', r'\1_\2\3', name)  # ACRONYMTail boundary
    name = re.sub(r'([0-9])([a-zA-Z])', r'\1_\2', name)        # digit -> letter boundary
    name = re.sub(r'([a-zA-Z])([0-9])', r'\1_\2', name)        # letter -> digit boundary
    name = re.sub(r'[^\w]+', '', name)                         # drop remaining punctuation
    name = re.sub(r'__+', '_', name).lower()                   # collapse repeated underscores
    if iskeyword(name) or (name in dir(builtins)):
        name += '_'
    return name
def class_name(string):
    """
    Convert an arbitrary string into a ``PascalCase`` class name; an
    underscore is appended when the result would collide with a python
    keyword or builtin.

    >>> print(class_name('the birds and the bees'))
    TheBirdsAndTheBees
    >>> print(class_name('**the - birds - and - the - bees**'))
    TheBirdsAndTheBees
    >>> print(class_name('FYI is an acronym'))
    FYIIsAnAcronym
    >>> print(class_name('True'))
    True_
    >>> print(class_name('ABC Acronym'))
    ABCAcronym
    """
    pascal_cased = camel(string, capitalize=True)
    return pascal_cased
def camel(string, capitalize=False):
    # type: (str, bool) -> str
    """
    Convert a string to ``camelCase`` (or ``PascalCase`` when *capitalize*
    is true).  Non-alphanumeric characters act as word separators and are
    dropped; an underscore is appended when the result collides with a
    python keyword or builtin.

    >>> print(camel('the birds and the bees'))
    theBirdsAndTheBees
    >>> print(camel('FYI is an acronym'))
    fyiIsAnAcronym
    >>> print(camel('in'))
    in_
    """
    ascii_alnum = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
    text = normalize('NFKD', string)
    if not capitalize:
        text = text.lower()
    pieces = []
    upper_next = capitalize
    for ch in text:
        if ch not in ascii_alnum:
            # Any separator forces the next kept character to upper case.
            upper_next = True
            continue
        # The very first character stays lower-cased unless capitalizing.
        if upper_next and (capitalize or pieces):
            ch = ch.upper()
        pieces.append(ch)
        upper_next = False
    result = ''.join(pieces)
    if iskeyword(result) or (result in dir(builtins)):
        result += '_'
    return result
def get_source(o):
    # type: (object) -> str
    """
    Return the source code for *o*: a cached ``_source`` attribute when one
    is present and is a string, otherwise whatever ``inspect.getsource``
    yields.
    """
    cached = getattr(o, '_source', None)
    if isinstance(cached, str):
        return cached
    return inspect.getsource(o)
def camel_split(string):
    # type: (str) -> tuple
    """
    Split a camelCase (or otherwise mixed) string into a tuple of its
    component words; runs of digits, acronyms, and runs of separator
    characters each form their own word.

    >>> print('(%s)' % ', '.join("'%s'" % s for s in camel_split('theBirdsAndTheBees')))
    ('the', 'Birds', 'And', 'The', 'Bees')
    >>> print('(%s)' % ', '.join("'%s'" % s for s in camel_split('THEBirdsAndTheBees')))
    ('THE', 'Birds', 'And', 'The', 'Bees')
    """
    # Character categories driving the small state machine below.
    DIGIT, LOWER, UPPER, OTHER = 0, 1, 2, 3
    words = []
    previous_kind = None
    in_acronym = False
    for character in string:
        if character in '0123456789':
            if previous_kind == DIGIT:
                words[-1].append(character)
            else:
                words.append([character])
            previous_kind = DIGIT
            in_acronym = False
        elif character in 'abcdefghijklmnopqrstuvwxyz':
            if previous_kind == LOWER:
                words[-1].append(character)
            elif previous_kind == UPPER:
                if in_acronym:
                    # The preceding capital actually starts this word (the
                    # 'B' in 'THEBirds'): pull it out of the acronym.
                    words.append([words[-1].pop(), character])
                else:
                    words[-1].append(character)
            else:
                words.append([character])
            previous_kind = LOWER
            in_acronym = False
        elif character in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
            if previous_kind == UPPER:
                # Two capitals in a row form (part of) an acronym.
                words[-1].append(character)
                in_acronym = True
            else:
                words.append([character])
                in_acronym = False
            previous_kind = UPPER
        else:
            if previous_kind == OTHER:
                words[-1].append(character)
            else:
                words.append([character])
            previous_kind = OTHER
    return tuple(''.join(word) for word in words)
def properties_values(o):
    # type: (object) -> Generator
    """
    Yield ``(name, value)`` pairs for the public, non-callable attributes
    of *o* (attributes whose name does not start with an underscore).
    """
    for attribute in dir(o):
        if attribute.startswith('_'):
            continue
        value = getattr(o, attribute)
        if callable(value):
            continue
        yield attribute, value
# Placeholder so that ``Undefined.__init__`` can detect whether the module
# singleton below has been created yet.
UNDEFINED = None


class Undefined(object):
    """Sentinel type used to distinguish "no value supplied" from ``None``;
    only one instance (the module-level ``UNDEFINED``) may ever exist."""

    def __init__(self):
        # Forbid a second instantiation once the singleton exists.
        if UNDEFINED is not None:
            raise RuntimeError(
                '%s may only be defined once.' % repr(self)
            )

    def __repr__(self):
        # Render as a module-qualified name unless defined in a
        # "main-like" module (or this one).
        return (
            'UNDEFINED'
            if self.__module__ in ('__main__', 'builtins', '__builtin__', __name__) else
            '%s.UNDEFINED' % self.__module__
        )

    def __bool__(self):
        # The sentinel is always falsy.
        return False

    def __hash__(self):
        return 0

    def __eq__(self, other):
        # type: (Any) -> bool
        # The sentinel is equal only to itself (identity comparison).
        return other is self


UNDEFINED = Undefined()
def parameters_defaults(function):
    # type: (Callable) -> OrderedDict
    """
    Returns an ordered dictionary mapping a function's argument names to default values, or `UNDEFINED` in the case of
    positional arguments.

    >>> class X(object):
    ...
    ...    def __init__(self, a, b, c, d=1, e=2, f=3):
    ...        pass
    ...
    >>> print(list(parameters_defaults(X.__init__).items()))
    [('self', UNDEFINED), ('a', UNDEFINED), ('b', UNDEFINED), ('c', UNDEFINED), ('d', 1), ('e', 2), ('f', 3)]
    """
    pd = OrderedDict()
    if signature is None:
        # Python 2 fallback: pair defaults with arguments from the right,
        # since `spec.defaults` only covers the trailing arguments.
        spec = getfullargspec(function)
        i = - 1
        for a in spec.args:
            pd[a] = UNDEFINED
        for a in reversed(spec.args):
            try:
                pd[a] = spec.defaults[i]
            except IndexError:
                break
            i -= 1
    else:
        # Python 3: `inspect.signature` reports defaults directly.
        for pn, p in signature(function).parameters.items():
            if p.default is inspect.Parameter.empty:
                pd[pn] = UNDEFINED
            else:
                pd[pn] = p.default
    return pd
def read(data):
    # type: (Any) -> Any
    """
    Exhaust a file-like object and return its contents; raise ``TypeError``
    when *data* has neither a callable ``read`` nor ``readall`` method.
    The stream is rewound first when it supports seeking.
    """
    has_readall = hasattr(data, 'readall') and callable(data.readall)
    has_read = hasattr(data, 'read') and callable(data.read)
    if not (has_readall or has_read):
        raise TypeError(
            '%s is not a file-like object' % repr(data)
        )
    if hasattr(data, 'seek') and callable(data.seek):
        try:
            data.seek(0)
        except UnsupportedOperation:
            pass
    if has_readall:
        # Prefer readall(), falling back to read() for streams that
        # expose but do not support it.
        try:
            return data.readall()
        except UnsupportedOperation:
            return data.read()
    return data.read()
# Run this module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| 1.976563 | 2 |
src/orders/signals/handlers.py | tlgtaa/education-backend | 1 | 12764063 | <reponame>tlgtaa/education-backend<gh_stars>1-10
from django.db.models.signals import post_save
from django.dispatch import receiver
from orders.models import Order
from tinkoff.models import PaymentNotification as TinkoffPaymentNotification
@receiver(post_save, sender=TinkoffPaymentNotification)
def mark_order_as_payd_on_tinkoff_transactions(instance, created, **kwargs):
    """Mark the related order as paid when a *newly created* Tinkoff payment
    notification with status ``CONFIRMED`` is saved; updates and other
    statuses are ignored."""
    if not created:
        return
    if instance.status != 'CONFIRMED':
        return
    order = Order.objects.get(pk=instance.order_id)
    order.set_paid()
| 2.109375 | 2 |
google_workflow/encv_to_db/settings.py | hcksystem/exposure-notifications-metrics-public | 14 | 12764064 | <reponame>hcksystem/exposure-notifications-metrics-public
import os
from pathlib import Path
from typing import Optional
from pydantic import BaseSettings
class Settings(BaseSettings):
    """Application settings, loaded from the environment / ``.env.prod``."""
    # If supplied, this json blob is read right away as the credentials.
    google_application_credentials_json: Optional[str] = None
    # If supplied, the json is read from this file; when neither field is
    # set, Secrets Manager is queried for the value.
    google_application_credentials: Optional[str] = None
    log_level: str = "INFO"

    class Config:
        # pydantic settings source file.
        env_file = Path('.') / '.env.prod'


# Module-level singleton used by the rest of the workflow.
settings = Settings()
| 2.390625 | 2 |
pycholib/Trial.py | lugtigheid/PychophysicsLibrary | 0 | 12764065 | # -*- coding: utf-8 -*-
class Trial(object):
    """A single psychophysics trial: which stimulus was presented, where it
    belongs (staircase/condition), and -- once collected -- the observer's
    response and reaction time.

    Originally created Fri 2 Aug 2013 (ajl); last edited Sun 4 Aug 2013.
    """

    def __init__(self, trialid=0, staircaseid=0,
                 condition=0, stimval=0, interval=1):
        """Store the trial bookkeeping data; response fields start empty."""
        self._TrialID = trialid
        self._StaircaseID = staircaseid
        self._Condition = condition
        self._Stimval = stimval
        self._Interval = interval
        self._Response = None
        self._ReactionTime = None

    def __str__(self):
        # Trial id, staircase id and stimulus value, tab separated.
        return '#%2.f \t %2.f \t %2.f' % (self._TrialID, self._StaircaseID, self._Stimval)

    @property
    def Response(self):
        """The recorded observer response (``None`` until one is set)."""
        return self._Response

    @Response.setter
    def Response(self, value):
        self._Response = value

    def GetConditionName(self):
        """Return the name of the current condition.

        Not yet implemented; always returns ``None``.
        """
        return
| 3.421875 | 3 |
utilities/stopping/stopping_key.py | bootml/agent | 0 | 12764066 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'cnheider'
from pynput import keyboard
# import keyboard
import utilities as U
# Key combinations (Shift+Alt+S, either letter case) that trigger early
# stopping when held down together.
COMBINATIONS = [
    {keyboard.Key.shift, keyboard.Key.alt, keyboard.KeyCode(char='s')},
    {keyboard.Key.shift, keyboard.Key.alt, keyboard.KeyCode(char='S')},
]
# Callbacks invoked once a full combination is detected.
CALLBACKS = []
# The currently active modifiers (keys presently held down).
current = set()
def add_early_stopping_key_combination(callback, key='ctrl+shift+s'):
    """Register *callback* to run when an early-stopping key combination is
    pressed and return a (not yet started) pynput keyboard listener.

    NOTE(review): the ``key`` parameter is currently unused -- the hotkey
    registration that consumed it is commented out below and the hard-coded
    ``COMBINATIONS`` are matched instead; confirm whether it should be wired
    back in or deprecated.
    """
    # keyboard.add_hotkey(key, callback)
    CALLBACKS.append(callback)
    U.sprint(f'\n\nPress any of:\n{COMBINATIONS}\n for early stopping\n', color='red', bold=True,
           highlight=True)
    print('')
    return keyboard.Listener(on_press=on_press, on_release=on_release)
def on_press(key):
    """Track *key* if it belongs to a stop combination and fire every
    registered callback once a full combination is held down."""
    if any(key in combo for combo in COMBINATIONS):
        current.add(key)
    if any(all(member in current for member in combo) for combo in COMBINATIONS):
        for callback in CALLBACKS:
            callback()
def on_release(key):
    """Forget *key* when it is released, if it is part of a combination."""
    if any(key in combo for combo in COMBINATIONS):
        current.remove(key)
| 2.96875 | 3 |
modules/get_cc.py | honchen22/Image-Hashing | 0 | 12764067 | import numpy as np
import json
class CorrelationCoefficient:
def __init__(self):
self.hash_dict = {}
self.cc_dict = {}
def init_hash_dict(self):
with open('modules/benchmark_cc/standard_benchmark_hash.json', 'r') as f:
self.hash_dict = json.loads(f.read())
def init_cc_dict(self):
with open('modules/benchmark_cc/cc_format.json', 'r') as f:
self.cc_dict = json.loads(f.read())
def run(self):
self.init_hash_dict()
self.init_cc_dict()
for image_name in self.cc_dict:
image = self.cc_dict[image_name]
for attack_type in image:
self.set_cc_dict(image_name, attack_type)
with open('modules/benchmark_cc/standard_benchmark_cc_dict.json', 'w') as f:
content = json.dumps(self.cc_dict, indent=2)
content = content.replace("'", '"').replace('\n ', '')
f.write(content)
def set_cc_dict(self, image_name='Airplane', attack_type='JPEG'):
image = self.hash_dict[image_name]
attack = image[attack_type]
param_range = attack['param_range']
param_cnt = attack['param_cnt']
print(image_name, attack_type, param_cnt, param_range)
cc_list = [0] * param_cnt
H0 = image['raw']['H']
for i in range(param_cnt):
H_key = '<KEY>)
Hi = attack[H_key]
cc = np.corrcoef(H0, Hi)[0][1]
cc_list[i] = cc
self.cc_dict[image_name][attack_type]['cc_list'] = cc_list
# if __name__ == '__main__':
# cc = CorrelationCoefficient()
# cc.run() | 2.765625 | 3 |
recover.py | bennofs/nixpkgs-locate | 0 | 12764068 | #!/usr/bin/env python3
import sys
import json
# Size of one write chunk used by the buggy writer being recovered.
CHUNK_SIZE = 4 * 32 * 1024


def wrong_written_size(x):
    """Total byte count a buggy chunked writer produces for a payload of
    *x* bytes: the sum of the remaining sizes x, x - CHUNK_SIZE, ... down
    to (and including) the last non-negative remainder."""
    total = 0
    remaining = x
    while remaining >= 0:
        total += remaining
        remaining -= CHUNK_SIZE
    return total
if __name__ == '__main__':
    # Read the candidate file and report whether its truncation is
    # consistent with the known chunked-write bug.
    with open(sys.argv[1], 'rb') as f:
        data = f.read()
    print(sys.argv[1])
    try:
        json.loads(data)
    except json.JSONDecodeError as e:
        exc = e
        # A file whose length matches the "wrongly written" size for the
        # JSON failure offset (within a small margin) is recoverable:
        # exit 0 with diagnostics, otherwise exit 1.
        for margin in range(10):
            if len(data) == wrong_written_size(e.pos + margin):
                print(margin, exc, len(data), e.pos, data[e.pos:][:10], data[:10])
                sys.exit(0)
        sys.exit(1)
| 3.109375 | 3 |
refinery/units/formats/pdf.py | bronxc/refinery | 0 | 12764069 | <filename>refinery/units/formats/pdf.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import annotations
from typing import Optional, Set, TYPE_CHECKING, cast
from itertools import islice
if TYPE_CHECKING:
from PyPDF2.generic import EncodedStreamObject
from refinery.units.formats import PathExtractorUnit, UnpackResult
from refinery.lib.tools import NoLogging
from refinery.lib.structures import MemoryFile
class xtpdf(PathExtractorUnit):
"""
Extract objects from PDF documents.
"""
    @PathExtractorUnit.Requires('PyPDF2', optional=False)
    def _pypdf2():
        # Lazily import PyPDF2 (declared as a hard requirement); the
        # Requires decorator handles caching and missing-dependency errors.
        import PyPDF2
        import PyPDF2.generic
        return PyPDF2
def _walk(self, blob, memo: Optional[Set[int]] = None, *path):
while isinstance(blob, self._pypdf2.generic.IndirectObject):
blob = blob.getObject()
if memo is None:
memo = {id(blob)}
elif id(blob) in memo:
return
else:
memo.add(id(blob))
try:
name = blob['/F']
blob = blob['/EF']['/F']
except Exception:
pass
else:
path = *path[:-1], F'/{name}'
try:
if TYPE_CHECKING:
blob: EncodedStreamObject = cast(EncodedStreamObject, blob)
extract = blob.getData
except AttributeError:
pass
else:
yield UnpackResult(''.join(path), extract, kind='object')
return
if isinstance(blob, self._pypdf2.generic.ByteStringObject):
yield UnpackResult(''.join(path), blob, kind='bytes')
return
if isinstance(blob, self._pypdf2.generic.TextStringObject):
yield UnpackResult(''.join(path), blob.encode(self.codec), kind='string')
return
if isinstance(blob, (
self._pypdf2.generic.BooleanObject,
self._pypdf2.generic.ByteStringObject,
self._pypdf2.generic.FloatObject,
self._pypdf2.generic.NameObject,
self._pypdf2.generic.NullObject,
self._pypdf2.generic.NumberObject,
self._pypdf2.generic.RectangleObject,
)):
# unhandled PDF objects
return
if isinstance(blob, self._pypdf2.generic.TreeObject):
blob = list(blob)
pdf = self._pypdf2.generic.PdfObject
if isinstance(blob, list):
if (
len(blob) % 2 == 0
and all(isinstance(key, str) for key in islice(iter(blob), 0, None, 2))
and all(isinstance(key, pdf) for key in islice(iter(blob), 1, None, 2))
):
blob = dict(zip(*([iter(blob)] * 2)))
else:
for key, value in enumerate(blob):
yield from self._walk(value, memo, *path, F'/{key}')
return
if isinstance(blob, dict):
for key, value in blob.items():
if not isinstance(key, str):
continue
if not key.startswith('/'):
key = F'/{key}'
yield from self._walk(value, memo, *path, key)
def unpack(self, data):
with MemoryFile(data) as stream:
with NoLogging():
pdf = self._pypdf2.PdfFileReader(stream)
catalog = pdf.trailer['/Root']
yield from self._walk(catalog)
@classmethod
def handles(self, data: bytearray) -> Optional[bool]:
return data.startswith(B'%PDF-')
| 2.125 | 2 |
malcolm/modules/aravisGigE/blocks/__init__.py | hir12111/pymalcolm | 11 | 12764070 | from malcolm.yamlutil import check_yaml_names, make_block_creator
# Block creators built from the YAML files that live next to this module.
aravisGigE_driver_block = make_block_creator(__file__, "aravisGigE_driver_block.yaml")
aravisGigE_runnable_block = make_block_creator(
    __file__, "aravisGigE_runnable_block.yaml"
)
aravisGigE_manager_block = make_block_creator(__file__, "aravisGigE_manager_block.yaml")

# Presumably check_yaml_names selects/validates the creator names defined
# above for export -- confirm against malcolm.yamlutil.
__all__ = check_yaml_names(globals())
| 1.898438 | 2 |
project/blueprints/bp1/controllers.py | takwas/flask-layout-demo | 0 | 12764071 | from werkzeug.exceptions import NotFound
from . import bp_obj, views
from project.core.models import User
@bp_obj.route('/')
def index():
    """Landing page for this blueprint."""
    return 'Welcome home.'
@bp_obj.route('/users/<username>')
def get_profile_info(username):
    """Return the serialised profile of *username*.

    :raises NotFound: (HTTP 404) when no user with that username exists.
    """
    user = User.query.filter(User.username == username).first()
    if user is None:
        raise NotFound('User not found.')
    return views.json_serialise_user(user)
| 2.171875 | 2 |
src/Utilities/__init__.py | christosgalano/Chatroom_Server | 0 | 12764072 | from .shared import create_shared_standards
create_shared_standards()
| 1.117188 | 1 |
main (Joao Costa's conflicted copy 2011-07-29).py | borgaster/SpaceWarsEvolved | 0 | 12764073 | import time
from animation import *
from asteroidField import *
from background import *
from loader import *
from physics import *
from player import *
from powerup import *
import pygame
from pygame.locals import *
from rotatingMenu_img import *
from spacemenu import *
from starField import *
# Default key bindings for the two players.
# Indices 0-3 are left/right/up/down movement, index 5 is change-weapon
# (see the KEYUP handling in game()); index 4 is presumably fire -- confirm
# against the Player class.
keyPresset1 = [K_LEFT,K_RIGHT,K_UP,K_DOWN, K_SPACE, K_m]
keyPresset2 = [K_a, K_d, K_w, K_s, K_x, K_r]

pygame.init()
def game(numkills,nave1,nave2):
    """Run one match and return the two Player objects when it ends.

    :param numkills: forwarded to Player (presumably the kill/score target
        for the match -- confirm against the Player class).
    :param nave1: ship selection for player 1.
    :param nave2: ship selection for player 2; 0 means single-player mode
        (only player 1's input is processed and drawn).
    :return: (player1, player2)
    """
    SCREENSIZE = [1024,768]
    screen = pygame.display.set_mode(SCREENSIZE,pygame.FULLSCREEN)
    ## uncomment for debug
    #screen = pygame.display.set_mode(SCREENSIZE)
    pygame.mouse.set_visible(0)
    clock = pygame.time.Clock()

    # init background
    background = Background(screen,'galaxy.jpg')

    # init star-field effect and asteroid field
    starfield = StarField(screen)
    asteroidField = AsteroidField(screen)

    # init music (randomly pick one of two tracks)
    rand = random.randrange(0,2)
    if rand == 0:
        load_music('After Burner.mp3')
    else:
        load_music('Spybreak.mp3')
    #load_music('Gundam.mp3')

    # init players (player 2 is created even in single-player mode, but is
    # only drawn/updated when nave2 != 0)
    player1 = Player((200,SCREENSIZE[1]/2),keyPresset1,1,nave1,numkills)
    playerSprite1 = pygame.sprite.RenderPlain((player1))
    player1.spin(90,3)
    player2 = Player((SCREENSIZE[0]-200,SCREENSIZE[1]/2),keyPresset2,2,nave2,numkills)
    playerSprite2 = pygame.sprite.RenderPlain((player2))
    player2.spin(90,1)

    # powerup stuff variables
    powerups_on_screen = False
    done = False
    retval = 0
    powerup_available = 0

    # vars only for the little-guy animation in the screen corner
    i = random.randrange(1,4)
    pickup_timer = 0

    while not done:
        clock.tick(40)

        # if there are no asteroids left, respawn a slightly larger field
        current_asteroids = len(asteroidField.asteroidSprites)
        if current_asteroids <= 0:
            current_asteroids = asteroidField.refresh(asteroidField.num_asteroids +1)

        if pickup_timer != 0:
            elapsed = round(time.clock())

        ## draw player info panels
        font = pygame.font.SysFont("consola", 20)
        ScorePanel1 ="Player 1 - Lives: "+str(player1.statistics[0])+" "+"Score: "+str(player1.statistics[3])
        scorePlayer1 = font.render(ScorePanel1, True, (255,255,255))

        if nave2 != 0:
            ScorePanel2 ="Player 2 - Lives: "+str(player2.statistics[0])+" Score: "+str(player2.statistics[3])
            scorePlayer2 = font.render(ScorePanel2, True, (255,255,255))

        # draw info about available powerups
        font = pygame.font.SysFont("consola", 40)
        PowerupPanel = ""
        if powerups_on_screen == False:
            poweruppanel = font.render(PowerupPanel, True, (0,255,0))

        #############################
        ## MOVE PLAYERS
        # single-player: only process player 1's keys
        if nave2 == 0:
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        done = True
                    elif event.key == keyPresset1[0]:
                        player1.dx = -10
                        player1.spin(90,1)
                    elif event.key == keyPresset1[1]:
                        player1.dx = 10
                        player1.spin(90,3)
                    elif event.key == keyPresset1[2]:
                        player1.dy = -10
                        player1.spin(90,0)
                    elif event.key == keyPresset1[3]:
                        player1.dy = 10
                        player1.spin(90,2)

                elif event.type == KEYUP:
                    # on key release, keep drifting slowly in that direction
                    if event.key == keyPresset1[0]:
                        player1.dx = -3
                    elif event.key == keyPresset1[1]:
                        player1.dx = 3
                    elif event.key == keyPresset1[2]:
                        player1.dy = -3
                    elif event.key == keyPresset1[3]:
                        player1.dy = 3
                    elif event.key == keyPresset1[5]:
                        player1.changeWeapon()

        # two players in the game: capture both key sets
        else:
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        done = True
                    elif event.key == keyPresset1[0]:
                        player1.dx = -10
                        player1.spin(90,1)
                    elif event.key == keyPresset1[1]:
                        player1.dx = 10
                        player1.spin(90,3)
                    elif event.key == keyPresset1[2]:
                        player1.dy = -10
                        player1.spin(90,0)
                    elif event.key == keyPresset1[3]:
                        player1.dy = 10
                        player1.spin(90,2)
                    elif event.key == keyPresset2[0]:
                        player2.dx = -10
                        player2.spin(90,1)
                    elif event.key == keyPresset2[1]:
                        player2.dx = 10
                        player2.spin(90,3)
                    elif event.key == keyPresset2[2]:
                        player2.dy = -10
                        player2.spin(90,0)
                    elif event.key == keyPresset2[3]:
                        player2.dy = 10
                        player2.spin(90,2)

                elif event.type == KEYUP:
                    if event.key == keyPresset1[0]:
                        player1.dx = -3
                    elif event.key == keyPresset1[1]:
                        player1.dx = 3
                    elif event.key == keyPresset1[2]:
                        player1.dy = -3
                    elif event.key == keyPresset1[3]:
                        player1.dy = 3
                    elif event.key == keyPresset1[5]:
                        player1.changeWeapon()
                    elif event.key == keyPresset2[0]:
                        player2.dx = -3
                    elif event.key == keyPresset2[1]:
                        player2.dx = 3
                    elif event.key == keyPresset2[2]:
                        player2.dy = -3
                    elif event.key == keyPresset2[3]:
                        player2.dy = 3
                    elif event.key == keyPresset2[5]:
                        player2.changeWeapon()

        background.update()
        starfield.update()

        # decide whether a new powerup activates this frame, and its type;
        # in single player only weapon powerups (type 2) exist
        activate_powerups = random.randrange(0,200)
        if nave2 != 0:
            powerup_type = random.randrange(1,4)
        else:
            powerup_type = 2
        if activate_powerups == 150:
            if powerups_on_screen == False:
                powerup_available = powerup_type
                if (powerup_type == 1):
                    PowerupPanel = "Health Powerup Available!"
                    poweruppanel = font.render(PowerupPanel, True, (0,255,0))
                elif powerup_type == 2:
                    PowerupPanel = "Weapon Powerup Available!"
                    poweruppanel = font.render(PowerupPanel, True, (255,0,0))
                else:
                    PowerupPanel = "Mines Available!!"
                    poweruppanel = font.render(PowerupPanel, True, (255,0,0))

                powerup = Powerup(powerup_available,SCREENSIZE)
                powerupSprite = pygame.sprite.RenderPlain((powerup))
                powerups_on_screen = True
        ## POWERUP IS NOW ON SCREEN

        ########################
        # intersection calculations

        # laser collisions between players (and asteroids)
        kill = lasers(player1,player2,playerSprite1,playerSprite2,asteroidField)
        # if a player was killed, end the match
        if kill == 1:
            done = True

        kill = asteroids(player1,player2,playerSprite1,playerSprite2,asteroidField)
        # if a player was killed, end the match
        if kill == 1:
            done = True

        # powerup pickup: try player 1 first, then player 2
        if powerups_on_screen == True:
            retval = pickup_powerup(powerup,powerupSprite,player1,playerSprite1,powerup_available)
            if retval == 1:
                retval = 0
                powerups_on_screen = False
                if powerup.tipo == 2 and powerup.damagefactor == 4:
                    pickup_timer = round(time.clock())
                    elapsed = pickup_timer
            else:
                retval = pickup_powerup(powerup,powerupSprite,player2,playerSprite2,powerup_available)
                if retval == 1:
                    retval = 0
                    powerups_on_screen = False
                    if powerup.tipo == 2 and powerup.damagefactor == 4:
                        pickup_timer = round(time.clock())
                        elapsed = pickup_timer

        #############################
        # Drawing

        # draw player 1
        screen.blit(scorePlayer1, (10, 740))
        playerSprite1.update(screen)
        playerSprite1.draw(screen)
        player1.draw_health(screen)
        player1.draw_stats(screen)

        # draw player 2
        if nave2 != 0:
            screen.blit(scorePlayer2, (10, 750))
            playerSprite2.update(screen)
            playerSprite2.draw(screen)
            player2.draw_health(screen)
            player2.draw_stats(screen)

        # powerups
        screen.blit(poweruppanel, (350, 10))
        if powerups_on_screen == True:
            powerupSprite.draw(screen)

        # draw powerup pickup animations
        for sprite in weapon_pickups:
            sprite.render(screen,False)
        for sprite in health_pickups:
            sprite.render(screen,False)

        # draw asteroids
        asteroidField.update()

        # draw explosions
        for sprite in explosoes:
            sprite.render(screen,False)

        # draw humor pic for ~1.5s after a top-tier weapon pickup
        if pickup_timer != 0:
            if (elapsed - pickup_timer) < 1.5:
                toasty_pic, toasty_rect = load_image("toasty"+str(i)+".PNG", -1)
                screen.blit(toasty_pic,(885,650))
            else:
                pickup_timer = 0
                # Changed the random pick because its randomness was low --
                # shuffling this list makes every picture show up more often :)
                listagem=[1,2,3,4]
                random.shuffle(listagem)
                random.shuffle(listagem)
                i = listagem[0]

        pygame.display.flip()
    ## END OF WHILE
    #####################################
    stop_music()
    pygame.display.set_mode([800,600])
    return player1,player2
def main():
    """Show the rotating main menu and dispatch the selected option.

    Menu item numbers map to: 0 help, 1 set-keys, 2 single player,
    3 multiplayer, 4 about, 5 exit (option5() then return).  The option*()
    functions and fpsLimit come from the star-imported modules at the top of
    this file -- confirm which module defines them.
    """
    pygame.init()
    SCREENSIZE = [800,600]
    screen = pygame.display.set_mode(SCREENSIZE)
    pygame.display.set_caption("Space War Evolved")
    pygame.mouse.set_visible(0)
    # init menu music
    load_music('menu.mp3')
    clock = pygame.time.Clock()

    # static letter images for the legend, plus their highlighted variants
    SP, rect = load_image("SP.png", -1)
    MP, rect2 = load_image("MP.png", -1)
    S, rect3 = load_image("S.png", -1)
    H, rect4 = load_image("H.png", -1)
    A, rect5 = load_image("A.png", -1)
    E, rect6 = load_image("E.png", -1)
    SP_red, rect = load_image("SP_red_35_433.png", -1)
    MP_red, rect = load_image("MP_red_93_433.png", -1)
    S_red, rect = load_image("S_red_151_478.png", -1)
    H_red, rect = load_image("H_red_93_478.png", -1)
    A_red, rect = load_image("A_red_151_433.png", -1)
    E_red, rect = load_image("E_red_35_478.png", -1)
    extra, rect = load_image("extra.png", -1)

    # animated labels; the active one is (re)created each frame below
    # (note: `help` and `exit` shadow Python builtins here)
    multi = []
    multi_images = load_sliced_sprites(221,34,'multi_player_anim_221x34.png')
    single = []
    single_images = load_sliced_sprites(243,34,'single_anim_243x34.png')
    help = []
    help_images = load_sliced_sprites(74,35,'help_anim_74x35.png')
    about = []
    about_images = load_sliced_sprites(112,29,'about_anim_112x29.png')
    exit = []
    exit_images = load_sliced_sprites(74,28,'exit_anim_74x28.png')
    setkeys = []
    setkeys_images = load_sliced_sprites(179,29,'setkeys_anim_179x29.png')
    jiproj = []
    jiproj_images = load_sliced_sprites(128,160,'ji_proj_128x160.png')
    jiproj.append(AnimatedSprite(jiproj_images,129,31))
    autores = []
    autores_images = load_sliced_sprites(111,160,'autores.png')
    autores.append(AnimatedSprite(autores_images,129,217))

    moverCursor = load_sound('moverCursor.wav')
    moverCursor.set_volume(0.2)

    clock = pygame.time.Clock()

    menu = RotatingMenu(x=520, y=295, radius=160, arc=pi, defaultAngle=pi/2.0)
    background = Background(screen,'Stargate_menu.png')
    menu.addItem(MenuItem(H))
    menu.addItem(MenuItem(S))
    menu.addItem(MenuItem(SP))
    menu.addItem(MenuItem(MP))
    menu.addItem(MenuItem(A))
    menu.addItem(MenuItem(E))
    menu.selectItem(2)

    #Loop
    while True:
        #Handle events
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                return False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    moverCursor.play()
                    menu.selectItem(menu.selectedItemNumber + 1)
                if event.key == pygame.K_RIGHT:
                    moverCursor.play()
                    menu.selectItem(menu.selectedItemNumber - 1)
                if event.key == pygame.K_RETURN:
                    if menu.selectedItemNumber == 0:
                        option2()
                    elif menu.selectedItemNumber == 1:
                        option4()
                    elif menu.selectedItemNumber == 2:
                        option0()
                    elif menu.selectedItemNumber == 3:
                        option1()
                    elif menu.selectedItemNumber == 4:
                        option3()
                    elif menu.selectedItemNumber == 5:
                        option5()
                        return False

        #Update stuff
        background.update()
        menu.update()
        for sprite in jiproj:
            sprite.render(screen,True)
        for sprite in autores:
            sprite.render(screen,True)
        screen.blit(extra, (124,24))

        # highlight the selected legend letter and animate only its label;
        # all the other animation lists are cleared each frame
        if menu.selectedItemNumber == 0:
            single = []
            multi = []
            exit = []
            about = []
            setkeys = []
            screen.blit(H_red, (93,478))
            help.append(AnimatedSprite(help_images,490,280))
        elif menu.selectedItemNumber == 1:
            single = []
            help = []
            exit = []
            about = []
            multi = []
            screen.blit(S_red, (151,478))
            setkeys.append(AnimatedSprite(setkeys_images,435,280))
        elif menu.selectedItemNumber == 2:
            help = []
            multi = []
            exit = []
            about = []
            setkeys = []
            screen.blit(SP_red, (35,433))
            single.append(AnimatedSprite(single_images,403,280))
        elif menu.selectedItemNumber == 3:
            single = []
            help = []
            exit = []
            about = []
            setkeys = []
            screen.blit(MP_red, (93,433))
            multi.append(AnimatedSprite(multi_images,410,280))
        elif menu.selectedItemNumber == 4:
            single = []
            multi = []
            exit = []
            help = []
            setkeys = []
            screen.blit(A_red, (151,433))
            about.append(AnimatedSprite(about_images,470,280))
        elif menu.selectedItemNumber == 5:
            single = []
            multi = []
            help = []
            about = []
            setkeys = []
            screen.blit(E_red, (35,478))
            exit.append(AnimatedSprite(exit_images,490,280))

        for sprite in multi:
            sprite.render(screen,True)
        for sprite in single:
            sprite.render(screen,True)
        for sprite in about:
            sprite.render(screen,True)
        for sprite in exit:
            sprite.render(screen,True)
        for sprite in help:
            sprite.render(screen,True)
        for sprite in setkeys:
            sprite.render(screen,True)

        #Draw stuff
        #display.fill((0,0,0))
        menu.draw(screen)

        pygame.display.flip() #Show the updated scene
        clock.tick(fpsLimit) #Wait a little
# Script entry point.
if __name__ == "__main__":
    main()
| 2.53125 | 3 |
nmap to excel converter/Older version/nmap_to_excel_conv_v1.7.py | pr2h/Penetration-Testing-Tools | 1 | 12764074 | # Welcome function
def hujambo():
    """Print the tool's welcome banner (Python 2 print statement)."""
    # Release Date : 21 May 2018
    print '''
    ##################################################
    # Tool    : nmap_to_excel_conv                   #
    # Version : 1.7                                  #
    # Profile : https://github.com/pr2h/             #
    # Coded with Python 2.7                          #
    #          ######   #####     #     #            #
    #          #     # #     # #  #     #            #
    #          ######  ##       ######               #
    #          #       #     #     #                 #
    #          #        #####      #                 #
    ##################################################
    '''
# Required imports
import sys
import os.path
# Importing openpyxl for excel operations
try:
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.styles import Alignment, PatternFill, Font
from openpyxl.styles.borders import Border, Side
# ERROR if openpyxl is not installed
except:
print "You don't have openpyxl, please perform 'pip install openpyxl'"
print "or download and install it from 'https://pypi.python.org/pypi/openpyxl'"
sys.exit()
def nmap_to_xl_exec(filename):
# Initializing / Assigning variables
count=2
column_width_b = 0
column_width_c = 0
space_before_service = 10
cell_border = Border(top=Side(style='medium'), bottom=Side(style='medium'), left=Side(style='medium'), right=Side(style='medium'))
fileexists = 0
while True:
if os.path.isfile(filename.replace('.txt','')+'.xlsx') == True:
fileexists = 1
wb = load_workbook(filename.replace('.txt','')+'.xlsx')
ws = wb.active
first_column = ws['A']
count = len(first_column)+1
else:
fileexists = 0
# Creating / Opening excel
wb=Workbook()
ws=wb.active
ws.title="Open_Ports"
break
#except:
#print "ERROR: Excel file is open / inaccessible.\nPlease close it and try again. Exiting.."
#sys.exit()
if fileexists == 0:
# Initializing / Assigning Heading cells
try:
heading_cells = ['A1','B1','C1']
ws[heading_cells[0]]='S.No.'
ws[heading_cells[1]]='IP'
ws[heading_cells[2]]='Open Ports'
for heading_cell in heading_cells:
ws[heading_cell].alignment = Alignment(horizontal='center', vertical='center')
ws[heading_cell].fill = PatternFill(start_color='00C0C0C0', end_color='00C0C0C0', fill_type='solid')
ws[heading_cell].border = cell_border
ws[heading_cell].font = ws[heading_cell].font.copy(bold=True)
except:
print "ERROR: Excel file is open / inaccessible.\nPlease close it and try again. Exiting.."
sys.exit()
# Reading line by line
try:
for line in content:
# Checking for new IP
if line.startswith("Nmap scan report for "):
cell="A"+str(count)
ws[cell].value=count-1
ws[cell].alignment = Alignment(horizontal='center', vertical='top')
ws[cell].border = cell_border
IP=line.replace("Nmap scan report for ",'')
cell="B"+str(count)
count+=1
IP = IP.strip('\n')
ws[cell].value=IP
ws[cell].alignment = Alignment(horizontal='center', vertical='top')
ws[cell].border = cell_border
if len(IP) > column_width_b:
column_width_b = len(IP)
# Checking for open ports
elif "open" in line and "open|filtered" not in line:
complete_line = ''
line = line.strip('\n')
line = line.replace('open','')
line = line.replace('/tcp','/TCP')
line = line.replace('/udp','/UDP')
line = line.replace('unknown','-')
line = line.split()
line[0] = line[0]+((space_before_service-len(line[0]))*' ')
complete_line = (line[0]+'\t'+line[1]).expandtabs(space_before_service)
cell="C"+str(count-1)
if ws[cell].value==None:
ws[cell].value='="'+complete_line+'"'
ws.row_dimensions[count-1].height=14.4
else:
temp=ws[cell].value+'&CHAR(10)&"'+complete_line+'"'
ws[cell].value=temp
ws[cell].alignment = Alignment(wrapText=True)
ws.row_dimensions[count-1].height+=14.4
ws[cell].border = cell_border
if len(complete_line) > column_width_c:
column_width_c = len(complete_line)
# Assigning column dimensions
ws.column_dimensions['A'].width = 6
ws.column_dimensions['B'].width = column_width_b
ws.column_dimensions['C'].width = column_width_c
# Saving output file
filename=filename.replace('.txt','')+'.xlsx'
wb.save(filename)
print 'File saved as : ',filename
except:
print "ERROR: Excel file is open / inaccessible.\nPlease close it and try again. Exiting.."
sys.exit()
if __name__=='__main__':
    hujambo()  # Welcome message
    # Prompt until a readable nmap output (.txt) file is supplied; the
    # lines are kept in the module-level `content`, which
    # nmap_to_xl_exec() reads.
    while True:
        filename=raw_input("Enter filename: ")
        if filename.endswith('.txt'):
            pass
        else:
            filename=filename+'.txt'
        try:
            with open(filename,"r") as f:
                content=f.readlines()
            f.close()  # redundant: the `with` block already closed the file
            break
        except:
            print "No such file exists. Please enter filename (.txt) again"
    nmap_to_xl_exec(filename)
| 2.90625 | 3 |
bld/libs/builder/src/steps/copyfilestep.py | webbers/dongle.net | 2 | 12764075 | <reponame>webbers/dongle.net<filename>bld/libs/builder/src/steps/copyfilestep.py
import os
import distutils.file_util
from steps.abstractstep import *
class CopyFileStep(AbstractStep):
    """Build step that copies a single file from srcFile to destFile."""

    def __init__( self, srcFile, destFile, overwrite = 1, makeDir = 1 ):
        """Create the step.

        :param srcFile: path of the file to copy.
        :param destFile: destination path.
        :param overwrite: kept for API compatibility.
        :param makeDir: kept for API compatibility.
        """
        AbstractStep.__init__( self, "Copy File" )
        self.srcFile = srcFile
        self.destFile = destFile
        # NOTE(review): overwrite/makeDir are stored but not honoured by
        # do() below -- distutils.file_util.copy_file is called without
        # them.  Confirm whether that is intentional.
        self.overwrite = overwrite
        self.makeDir = makeDir

    def do( self ):
        """Report and perform the copy; returns copy_file()'s result."""
        self.reporter.message( "COPY FILE: %s => %s" % ( self.srcFile, self.destFile ) )
        # Fix: the original return line had extraction junk
        # ("| 2.46875 | 2 |") fused onto it, making the class a syntax error.
        result = distutils.file_util.copy_file(self.srcFile, self.destFile)
        return result
xdo/__init__.py | rickyosser/python-libxdo | 62 | 12764076 | # -*- coding: utf-8 -*-
import ctypes
import os
from collections import namedtuple
from ctypes import POINTER
from six.moves import range
from .xdo import libX11 as _libX11
from .xdo import libxdo as _libxdo
from .xdo import ( # noqa
CURRENTWINDOW, SEARCH_CLASS, SEARCH_CLASSNAME, SEARCH_DESKTOP, SEARCH_NAME,
SEARCH_ONLYVISIBLE, SEARCH_PID, SEARCH_SCREEN, SEARCH_TITLE, Atom, Screen,
XdoException, XErrorHandler, charcodemap_t, window_t, xdo_search_t)
mouse_location = namedtuple('mouse_location', 'x,y,screen_num')
mouse_location2 = namedtuple('mouse_location2', 'x,y,screen_num,window')
window_location = namedtuple('window_location', 'x,y,screen')
window_size = namedtuple('window_size', 'width,height')
input_mask = namedtuple('input_mask', 'shift,lock,control,mod1,mod2,mod3,mod4,mod5') # noqa
# Mouse button constants
MOUSE_LEFT = 1
MOUSE_MIDDLE = 2
MOUSE_RIGHT = 3
MOUSE_WHEEL_UP = 4
MOUSE_WHEEL_DOWN = 5
# Keyboard modifiers
MOD_Shift = 1 << 0
MOD_Lock = 1 << 1
MOD_Control = 1 << 2
MOD_Mod1 = 1 << 3
MOD_Mod2 = 1 << 4
MOD_Mod3 = 1 << 5
MOD_Mod4 = 1 << 6
MOD_Mod5 = 1 << 7
def _gen_input_mask(mask):
"""Generate input mask from bytemask"""
return input_mask(
shift=bool(mask & MOD_Shift),
lock=bool(mask & MOD_Lock),
control=bool(mask & MOD_Control),
mod1=bool(mask & MOD_Mod1),
mod2=bool(mask & MOD_Mod2),
mod3=bool(mask & MOD_Mod3),
mod4=bool(mask & MOD_Mod4),
mod5=bool(mask & MOD_Mod5))
class XError(Exception):
    """Raised by the X error handler installed in Xdo.__init__."""
    pass
class Xdo(object):
    def __init__(self, display=None):
        """Create a new xdo context.

        :param display: X display string (e.g. ":0"); defaults to the
            DISPLAY environment variable (empty string if unset).
        """
        if display is None:
            display = os.environ.get('DISPLAY', '')
        display = display.encode('utf-8')
        self._xdo = _libxdo.xdo_new(display)

        def _handle_x_error(evt):
            # todo: handle errors in a nicer way, eg. try getting error message
            raise XError('Event: {}'.format(evt))

        # Keep the ctypes callback referenced on self so it is not
        # garbage-collected while Xlib still holds a pointer to it.
        self._error_handler = XErrorHandler(_handle_x_error)
        _libX11.XSetErrorHandler(self._error_handler)
    @classmethod
    def version(cls):
        """Return the libxdo version string as bytes (split on b'.' below)."""
        return _libxdo.xdo_version()
@classmethod
def version_info(cls):
return tuple(int(x) for x in cls.version().split(b'.'))
    def move_mouse(self, x, y, screen=0):
        """
        Move the mouse to a specific location.

        :param x: the target X coordinate on the screen in pixels.
        :param y: the target Y coordinate on the screen in pixels.
        :param screen: the screen (number) you want to move on.
        """

        # todo: apparently the "screen" argument is not behaving properly
        #       and sometimes even making the interpreter crash..
        #       Figure out why (changed API / using wrong header?)

        # >>> xdo.move_mouse(3000,200,1)

        # X Error of failed request:  BadWindow (invalid Window parameter)
        #   Major opcode of failed request:  41 (X_WarpPointer)
        #   Resource id in failed request:  0x2a4fca0
        #   Serial number of failed request:  25
        #   Current serial number in output stream:  26

        # Just to be safe..
        # screen = 0

        # Coerce to C ints for the ctypes call.
        x = ctypes.c_int(x)
        y = ctypes.c_int(y)
        screen = ctypes.c_int(screen)
        _libxdo.xdo_move_mouse(self._xdo, x, y, screen)
    def move_mouse_relative_to_window(self, window, x, y):
        """
        Move the mouse to a specific location relative to the top-left corner
        of a window.

        :param window: the window id to use as the coordinate origin.
        :param x: the target X coordinate on the screen in pixels.
        :param y: the target Y coordinate on the screen in pixels.
        """
        _libxdo.xdo_move_mouse_relative_to_window(
            self._xdo, ctypes.c_ulong(window), x, y)
    def move_mouse_relative(self, x, y):
        """
        Move the mouse relative to its current position.

        :param x: the distance in pixels to move on the X axis.
        :param y: the distance in pixels to move on the Y axis.
        """
        _libxdo.xdo_move_mouse_relative(self._xdo, x, y)
    def mouse_down(self, window, button):
        """
        Send a mouse press (aka mouse down) for a given button at
        the current mouse location.

        :param window:
            The window you want to send the event to or CURRENTWINDOW
        :param button:
            The mouse button. Generally, 1 is left, 2 is middle, 3 is
            right, 4 is wheel up, 5 is wheel down.
        """
        # window id / button are coerced to the C types libxdo expects.
        _libxdo.xdo_mouse_down(
            self._xdo, ctypes.c_ulong(window), ctypes.c_int(button))
    def mouse_up(self, window, button):
        """
        Send a mouse release (aka mouse up) for a given button at
        the current mouse location.

        :param window:
            The window you want to send the event to or CURRENTWINDOW
        :param button:
            The mouse button. Generally, 1 is left, 2 is middle, 3 is
            right, 4 is wheel up, 5 is wheel down.
        """
        # window id / button are coerced to the C types libxdo expects.
        _libxdo.xdo_mouse_up(
            self._xdo, ctypes.c_ulong(window), ctypes.c_int(button))
    def get_mouse_location(self):
        """
        Get the current mouse location (coordinates and screen number).

        :return: a namedtuple with ``x``, ``y`` and ``screen_num`` fields
        """
        # Out-parameters filled in by libxdo via byref().
        x = ctypes.c_int(0)
        y = ctypes.c_int(0)
        screen_num = ctypes.c_int(0)
        _libxdo.xdo_get_mouse_location(
            self._xdo, ctypes.byref(x), ctypes.byref(y),
            ctypes.byref(screen_num))
        return mouse_location(x.value, y.value, screen_num.value)
    def get_window_at_mouse(self):
        """
        Get the window the mouse is currently over.

        :return: the window id as an int.
        """
        window_ret = ctypes.c_ulong(0)
        _libxdo.xdo_get_window_at_mouse(self._xdo, ctypes.byref(window_ret))
        return window_ret.value
    def get_mouse_location2(self):
        """
        Get all mouse location-related data.

        :return: a namedtuple with ``x``, ``y``, ``screen_num``
            and ``window`` fields
        """
        # Out-parameters filled in by libxdo via byref().
        x = ctypes.c_int(0)
        y = ctypes.c_int(0)
        screen_num_ret = ctypes.c_ulong(0)
        window_ret = ctypes.c_ulong(0)
        _libxdo.xdo_get_mouse_location2(
            self._xdo, ctypes.byref(x), ctypes.byref(y),
            ctypes.byref(screen_num_ret), ctypes.byref(window_ret))
        return mouse_location2(x.value, y.value, screen_num_ret.value,
                               window_ret.value)
    def wait_for_mouse_move_from(self, origin_x, origin_y):
        """
        Wait for the mouse to move from a location. This function will block
        until the condition has been satisfied.

        :param origin_x: the X position you expect the mouse to move from
        :param origin_y: the Y position you expect the mouse to move from
        """
        _libxdo.xdo_wait_for_mouse_move_from(self._xdo, origin_x, origin_y)
def wait_for_mouse_move_to(self, dest_x, dest_y):
"""
Wait for the mouse to move to a location. This function will block
until the condition has been satisified.
:param dest_x: the X position you expect the mouse to move to
:param dest_y: the Y position you expect the mouse to move to
"""
_libxdo.xdo_wait_for_mouse_move_from(self._xdo, dest_x, dest_y)
    def click_window(self, window, button):
        """
        Send a click for a specific mouse button at the current mouse
        location.

        :param window:
            The window you want to send the event to or CURRENTWINDOW
        :param button:
            The mouse button. Generally, 1 is left, 2 is middle, 3 is
            right, 4 is wheel up, 5 is wheel down.
        """
        _libxdo.xdo_click_window(self._xdo, window, button)
    def click_window_multiple(self, window, button, repeat=2, delay=100000):
        """
        Send one or more clicks for a specific mouse button at the
        current mouse location.

        :param window:
            The window you want to send the event to or CURRENTWINDOW
        :param button:
            The mouse button. Generally, 1 is left, 2 is middle, 3 is
            right, 4 is wheel up, 5 is wheel down.
        :param repeat: number of repetitions (default: 2, i.e. double-click)
        :param delay: delay between clicks, in microseconds (default: 100k)
        """
        _libxdo.xdo_click_window_multiple(
            self._xdo, window, button, repeat, delay)
    def enter_text_window(self, window, string, delay=12000):
        """
        Type a string to the specified window.

        If you want to send a specific key or key sequence, such as
        "alt+l", you want instead ``send_keysequence_window(...)``.

        :param window:
            The window you want to send keystrokes to or CURRENTWINDOW
        :param string:
            The string to type, like "Hello world!"
        :param delay:
            The delay between keystrokes in microseconds.
            12000 is a decent choice if you don't have other plans.
        :return: whatever xdo_enter_text_window returns (presumably a
            libxdo status code -- confirm against the C API).
        """
        return _libxdo.xdo_enter_text_window(self._xdo, window, string, delay)
    def send_keysequence_window(self, window, keysequence, delay=12000):
        """
        Send a keysequence to the specified window.

        This allows you to send keysequences by symbol name. Any combination
        of X11 KeySym names separated by '+' are valid. Single KeySym names
        are valid, too.

        Examples:
          "l"
          "semicolon"
          "alt+Return"
          "Alt_L+Tab"

        If you want to type a string, such as "Hello world." you want to
        instead use xdo_enter_text_window.

        :param window: The window you want to send the keysequence to or
            CURRENTWINDOW
        :param keysequence: The string keysequence to send.
        :param delay: The delay between keystrokes in microseconds.
        """
        _libxdo.xdo_send_keysequence_window(
            self._xdo, window, keysequence, delay)
    def send_keysequence_window_up(self, window, keysequence, delay=12000):
        """Send key release (up) events for the given key sequence.

        :param window: target window id or CURRENTWINDOW.
        :param keysequence: '+'-separated X11 KeySym names.
        :param delay: delay between keystrokes in microseconds.
        """
        _libxdo.xdo_send_keysequence_window_up(
            self._xdo, window, keysequence, ctypes.c_ulong(delay))
    def send_keysequence_window_down(self, window, keysequence, delay=12000):
        """Send key press (down) events for the given key sequence.

        :param window: target window id or CURRENTWINDOW.
        :param keysequence: '+'-separated X11 KeySym names.
        :param delay: delay between keystrokes in microseconds.
        """
        _libxdo.xdo_send_keysequence_window_down(
            self._xdo, window, keysequence, ctypes.c_ulong(delay))
    def send_keysequence_window_list_do(
            self, window, keys, pressed=1, modifier=None, delay=120000):
        """
        Send a series of keystrokes.

        :param window: The window to send events to or CURRENTWINDOW
        :param keys: The array of charcodemap_t entities to send.
        :param pressed: 1 for key press, 0 for key release.
        :param modifier:
            Pointer to integer to record the modifiers
            activated by the keys being pressed. If NULL, we don't save
            the modifiers.
        :param delay:
            The delay between keystrokes in microseconds.
        """
        # todo: how to properly use charcodes_t in a nice way?

        _libxdo.xdo_send_keysequence_window_list_do(
            self._xdo, window, keys, len(keys), pressed, modifier, delay)
def get_active_keys_to_keycode_list(self):
"""Get a list of active keys. Uses XQueryKeymap"""
try:
_libxdo.xdo_get_active_keys_to_keycode_list
except AttributeError:
# Apparently, this was implemented in a later version..
raise NotImplementedError()
keys = POINTER(charcodemap_t)
nkeys = ctypes.c_int(0)
_libxdo.xdo_get_active_keys_to_keycode_list(
self._xdo, ctypes.byref(keys), ctypes.byref(nkeys))
# todo: make sure this returns a list of charcodemap_t!
return keys.value
    def wait_for_window_map_state(self, window, state):
        """
        Wait for a window to have a specific map state.

        State possibilities:
          IsUnmapped - window is not displayed.
          IsViewable - window is mapped and shown (though may be
              clipped by windows on top of it)
          IsUnviewable - window is mapped but a parent window is unmapped.

        :param window: the window you want to wait for.
        :param state: the state to wait for.
        """
        _libxdo.xdo_wait_for_window_map_state(self._xdo, window, state)
def wait_for_window_size(self, window, width, height, flags, to_or_from):
_libxdo.xdo_wait_for_window_size(self._xdo)
    def wait_for_window_size_to(self, window, width, height, flags=0):
        """Block until *window* reaches the given size (to_or_from=0)."""
        return self.wait_for_window_size(window, width, height, flags, 0)
    def wait_for_window_size_from(self, window, width, height, flags=0):
        """Block until *window* leaves the given size (to_or_from=1)."""
        return self.wait_for_window_size(window, width, height, flags, 1)
    def move_window(self, window, x, y):
        """
        Move a window to a specific location.

        The top left corner of the window will be moved to the x,y
        coordinate.

        :param window: the window to move
        :param x: the X coordinate to move to.
        :param y: the Y coordinate to move to.
        """
        _libxdo.xdo_move_window(self._xdo, window, x, y)
    def translate_window_with_sizehint(self, window, width, height):
        """
        Apply a window's sizing hints (if any) to a given width and height.

        This function wraps XGetWMNormalHints() and applies any
        resize increment and base size to your given width and height values.

        :param window: the window to use
        :param width: the unit width you want to translate
        :param height: the unit height you want to translate
        :return: (width, height) in pixels
        """
        # Out-parameters filled in by libxdo via byref().
        width_ret = ctypes.c_uint(0)
        height_ret = ctypes.c_uint(0)
        _libxdo.xdo_translate_window_with_sizehint(
            self._xdo, window, width, height,
            ctypes.byref(width_ret),
            ctypes.byref(height_ret))
        return width_ret.value, height_ret.value
    def set_window_size(self, window, w, h, flags=0):
        """
        Change the window size.

        :param window: the window to resize
        :param w: the new desired width
        :param h: the new desired height
        :param flags: if 0, use pixels for units. If SIZE_USEHINTS, then
            the units will be relative to the window size hints.
        """
        _libxdo.xdo_set_window_size(self._xdo, window, w, h, flags)
    def set_window_property(self, window, name, value):
        """
        Change a window property.

        Example properties you can change are WM_NAME, WM_ICON_NAME, etc.

        :param window: The window to change a property of.
        :param name: the string name of the property.
        :param value: the string value of the property.
        """
        _libxdo.xdo_set_window_property(self._xdo, window, name, value)
    def set_window_class(self, window, name, class_):
        """
        Change the window's classname and/or class.

        :param window: the window to change.
        :param name: The new class name. If ``None``, no change.
        :param class_: The new class. If ``None``, no change.
        """
        _libxdo.xdo_set_window_class(self._xdo, window, name, class_)
    def set_window_urgency(self, window, urgency):
        """Set the urgency hint for a window.

        :param window: the window to change.
        :param urgency: urgency flag, forwarded to libxdo unchanged.
        """
        _libxdo.xdo_set_window_urgency(self._xdo, window, urgency)
    def set_window_override_redirect(self, window, override_redirect):
        """
        Set the override_redirect value for a window. This generally means
        whether or not a window manager will manage this window.

        If you set it to 1, the window manager will usually not draw
        borders on the window, etc. If you set it to 0, the window manager
        will see it like a normal application window.

        :param window: the window to change.
        :param override_redirect: 1 or 0, forwarded to libxdo unchanged.
        """
        _libxdo.xdo_set_window_override_redirect(
            self._xdo, window, override_redirect)
    def focus_window(self, window):
        """
        Focus a window.

        :see: xdo_activate_window
        :param window: the window to focus.
        """
        _libxdo.xdo_focus_window(self._xdo, window)
    def raise_window(self, window):
        """
        Raise a window to the top of the window stack. This is also sometimes
        termed as bringing the window forward.

        :param window: The window to raise.
        """
        _libxdo.xdo_raise_window(self._xdo, window)
    def get_focused_window(self):
        """
        Get the window currently having focus.

        :return: the id of the currently-focused window.
        """
        window_ret = window_t(0)
        _libxdo.xdo_get_focused_window(self._xdo, ctypes.byref(window_ret))
        return window_ret.value
    def wait_for_window_focus(self, window, want_focus):
        """
        Block until a window gains or loses focus.

        :param window: The window to wait on
        :param want_focus: If 1, wait for focus. If 0, wait for loss of focus.
        """
        _libxdo.xdo_wait_for_window_focus(self._xdo, window, want_focus)
    def get_pid_window(self, window):
        """
        Get the PID owning a window. Not all applications support this.
        It looks at the ``_NET_WM_PID`` property of the window.

        :param window: the window to query.
        :return: the process id or 0 if no pid found.
        """
        # todo: if the pid is 0, it means "not found" -> exception?
        # NOTE: 0 is an in-band "not found" marker; callers must check it.
        return _libxdo.xdo_get_pid_window(self._xdo, window)
    def get_focused_window_sane(self):
        """
        Like get_focused_window, but return the first ancestor-or-self
        window having a property of WM_CLASS. This allows you to get
        the "real" or top-level-ish window having focus rather than something
        you may not expect to be the window having focus.

        :return: the id of the currently-focused (sane) window.
        """
        window_ret = window_t(0)
        _libxdo.xdo_get_focused_window_sane(
            self._xdo, ctypes.byref(window_ret))
        return window_ret.value
    def activate_window(self, window):
        """
        Activate a window. This is generally a better choice than
        focus_window for a variety of reasons, but it requires window
        manager support:

        - If the window is on another desktop, that desktop is switched to.
        - It moves the window forward rather than simply focusing it

        Requires your window manager to support this.
        Uses _NET_ACTIVE_WINDOW from the EWMH spec.

        :param window: the window to activate
        """
        _libxdo.xdo_activate_window(self._xdo, window)
    def wait_for_window_active(self, window, active=1):
        """
        Block until a window is active or not active.

        Requires your window manager to support this.
        Uses _NET_ACTIVE_WINDOW from the EWMH spec.

        :param window: the window to wait on
        :param active: If 1, wait for active. If 0, wait for inactive.
        """
        _libxdo.xdo_wait_for_window_active(self._xdo, window, active)
    def map_window(self, window):
        """
        Map a window. This mostly means to make the window visible if it is
        not currently mapped.

        :param window: the window to map.
        """
        _libxdo.xdo_map_window(self._xdo, window)
    def unmap_window(self, window):
        """
        Unmap a window (hide it from the display).

        :param window: the window to unmap
        """
        _libxdo.xdo_unmap_window(self._xdo, window)
    def minimize_window(self, window):
        """Minimize (iconify) a window.

        :param window: the window to minimize.
        """
        _libxdo.xdo_minimize_window(self._xdo, window)
    def reparent_window(self, window_source, window_target):
        """
        Reparent a window.

        :param window_source: the window to reparent
        :param window_target: the new parent window
        """
        _libxdo.xdo_reparent_window(self._xdo, window_source, window_target)
    def get_window_location(self, window):
        """
        Get a window's location.

        :param window: the window to query.
        :return: a ``window_location`` namedtuple of (x, y, screen).
        """
        screen_ret = Screen()
        x_ret = ctypes.c_int(0)
        y_ret = ctypes.c_int(0)
        _libxdo.xdo_get_window_location(
            self._xdo, window, ctypes.byref(x_ret), ctypes.byref(y_ret),
            ctypes.byref(screen_ret))
        return window_location(x_ret.value, y_ret.value, screen_ret)
    def get_window_size(self, window):
        """
        Get a window's size.

        :param window: the window to query.
        :return: a ``window_size`` namedtuple of (width, height).
        """
        w_ret = ctypes.c_uint(0)
        h_ret = ctypes.c_uint(0)
        _libxdo.xdo_get_window_size(self._xdo, window, ctypes.byref(w_ret),
                                    ctypes.byref(h_ret))
        return window_size(w_ret.value, h_ret.value)
    def get_active_window(self):
        """
        Get the currently-active window.
        Requires your window manager to support this.
        Uses ``_NET_ACTIVE_WINDOW`` from the EWMH spec.

        :return: the id of the active window.
        """
        window_ret = window_t(0)
        _libxdo.xdo_get_active_window(self._xdo, ctypes.byref(window_ret))
        return window_ret.value
    def select_window_with_click(self):
        """
        Get a window ID by clicking on it.
        This function blocks until a selection is made.

        :return: the id of the clicked window.
        """
        window_ret = window_t(0)
        _libxdo.xdo_select_window_with_click(
            self._xdo, ctypes.byref(window_ret))
        return window_ret.value
    def set_number_of_desktops(self, ndesktops):
        """
        Set the number of desktops.
        Uses ``_NET_NUMBER_OF_DESKTOPS`` of the EWMH spec.

        :param ndesktops: the new number of desktops to set.
        """
        _libxdo.xdo_set_number_of_desktops(self._xdo, ndesktops)
    def get_number_of_desktops(self):
        """
        Get the current number of desktops.
        Uses ``_NET_NUMBER_OF_DESKTOPS`` of the EWMH spec.

        :return: the current number of desktops.
        """
        ndesktops = ctypes.c_long(0)
        _libxdo.xdo_get_number_of_desktops(self._xdo, ctypes.byref(ndesktops))
        return ndesktops.value
    def set_current_desktop(self, desktop):
        """
        Switch to another desktop.
        Uses ``_NET_CURRENT_DESKTOP`` of the EWMH spec.

        :param desktop: The desktop number to switch to.
        """
        _libxdo.xdo_set_current_desktop(self._xdo, desktop)
    def get_current_desktop(self):
        """
        Get the current desktop.
        Uses ``_NET_CURRENT_DESKTOP`` of the EWMH spec.

        :return: the current desktop number.
        """
        desktop = ctypes.c_long(0)
        _libxdo.xdo_get_current_desktop(self._xdo, ctypes.byref(desktop))
        return desktop.value
    def set_desktop_for_window(self, window, desktop):
        """
        Move a window to another desktop.
        Uses _NET_WM_DESKTOP of the EWMH spec.

        :param window: the window to move
        :param desktop: the desktop destination for the window
        """
        _libxdo.xdo_set_desktop_for_window(self._xdo, window, desktop)
    def get_desktop_for_window(self, window):
        """
        Get the desktop a window is on.
        Uses _NET_WM_DESKTOP of the EWMH spec.

        If your window manager does not support ``_NET_WM_DESKTOP``, the
        returned value stays at its initial 0.

        :param window: the window to query
        :return: the desktop number the window is on.
        """
        desktop = ctypes.c_long(0)
        _libxdo.xdo_get_desktop_for_window(
            self._xdo, window, ctypes.byref(desktop))
        return desktop.value
def search_windows(
self, winname=None, winclass=None, winclassname=None,
pid=None, only_visible=False, screen=None, require=False,
searchmask=0, desktop=None, limit=0, max_depth=-1):
"""
Search for windows.
:param winname:
Regexp to be matched against window name
:param winclass:
Regexp to be matched against window class
:param winclassname:
Regexp to be matched against window class name
:param pid:
Only return windows from this PID
:param only_visible:
If True, only return visible windows
:param screen:
Search only windows on this screen
:param require:
If True, will match ALL conditions. Otherwise, windows matching
ANY condition will be returned.
:param searchmask:
Search mask, for advanced usage. Leave this alone if you
don't kwnow what you are doing.
:param limit:
Maximum number of windows to list. Zero means no limit.
:param max_depth:
Maximum depth to return. Defaults to -1, meaning "no limit".
:return:
A list of window ids matching query.
"""
windowlist_ret = ctypes.pointer(window_t(0))
nwindows_ret = ctypes.c_uint(0)
search = xdo_search_t(searchmask=searchmask)
if winname is not None:
search.winname = winname
search.searchmask |= SEARCH_NAME
if winclass is not None:
search.winclass = winclass
search.searchmask |= SEARCH_CLASS
if winclassname is not None:
search.winclassname = winclassname
search.searchmask |= SEARCH_CLASSNAME
if pid is not None:
search.pid = pid
search.searchmask |= SEARCH_PID
if only_visible:
search.only_visible = True
search.searchmask |= SEARCH_ONLYVISIBLE
if screen is not None:
search.screen = screen
search.searchmask |= SEARCH_SCREEN
if screen is not None:
search.screen = desktop
search.searchmask |= SEARCH_DESKTOP
search.limit = limit
search.max_depth = max_depth
_libxdo.xdo_search_windows(
self._xdo, search,
ctypes.byref(windowlist_ret),
ctypes.byref(nwindows_ret))
return [windowlist_ret[i] for i in range(nwindows_ret.value)]
def get_window_property_by_atom(self, window, atom):
# todo: figure out what exactly this method does, and implement it
raise NotImplemented(
"get_window_property_by_atom() is not implemented (yet)")
def get_window_property(self, window, name):
value = ctypes.c_char_p() # unsigned char **value
nitems = ctypes.c_long()
type_ = Atom()
size = ctypes.c_int(0)
_libxdo.xdo_get_window_property(
self._xdo, window, name, ctypes.byref(value), ctypes.byref(nitems),
ctypes.byref(type_), ctypes.byref(size))
# todo: we need to convert atoms into their actual type..
values = []
for i in range(nitems):
i_val = value[i]
# i_type = type_[i]
values.append(i_val)
# todo: perform type conversion for "Atom"s of this type?
# todo: how does the "Atom" thing work?
return values
    def get_input_state(self):
        """
        Get the current input (modifier) state.

        :return:
            a namedtuple with the following (boolean) fields:
            shift, lock, control, mod1, mod2, mod3, mod4, mod5
        """
        mask = _libxdo.xdo_get_input_state(self._xdo)
        return _gen_input_mask(mask)
def get_symbol_map(self):
"""
If you need the symbol map, use this method.
The symbol map is an array of string pairs mapping common tokens
to X Keysym strings, such as "alt" to "Alt_L"
:return: array of strings.
"""
# todo: make sure we return a list of strings!
sm = _libxdo.xdo_get_symbol_map()
# Return value is like:
# ['alt', 'Alt_L', ..., None, None, None, ...]
# We want to return only values up to the first None.
# todo: any better solution than this?
i = 0
ret = []
while True:
c = sm[i]
if c is None:
return ret
ret.append(c)
i += 1
    def get_active_modifiers(self):
        """
        Get a list of active keys. Uses XQueryKeymap.

        :return: list of charcodemap_t instances
        """
        keys = ctypes.pointer(charcodemap_t())
        nkeys = ctypes.c_int(0)
        _libxdo.xdo_get_active_modifiers(
            self._xdo, ctypes.byref(keys), ctypes.byref(nkeys))
        # The C call fills ``keys`` with ``nkeys`` entries; copy them out.
        return [keys[i] for i in range(nkeys.value)]
    def clear_active_modifiers(self, window, mods=None):
        """
        Send any events necessary to clear the active modifiers.

        For example, if you are holding 'alt' when get_active_modifiers is
        called, then this method will send a key-up for 'alt'.

        :raises NotImplementedError: not implemented yet.
        """
        raise NotImplementedError()
    def set_active_modifiers(self, window, mods=None):
        """
        Send any events necessary to make these modifiers active.

        This is useful if you just cleared the active modifiers and then wish
        to restore them after.

        :raises NotImplementedError: not implemented yet.
        """
        raise NotImplementedError()
    def get_desktop_viewport(self):
        """
        Get the position of the current viewport.

        This is only relevant if your window manager supports
        ``_NET_DESKTOP_VIEWPORT``.

        :raises NotImplementedError: not implemented yet.
        """
        raise NotImplementedError()
    def set_desktop_viewport(self, x, y):
        """
        Set the position of the current viewport.

        This is only relevant if your window manager supports
        ``_NET_DESKTOP_VIEWPORT``.

        :raises NotImplementedError: not implemented yet.
        """
        raise NotImplementedError()
    def kill_window(self):
        """
        Kill a window and the client owning it.

        :raises NotImplementedError: not implemented yet.
        """
        raise NotImplementedError()
XDO_FIND_PARENTS = 0
XDO_FIND_CHILDREN = 1
    def find_window_client(self):
        """
        Find a client window (child) in a given window. Useful if you get the
        window manager's decorator window rather than the client window.

        :raises NotImplementedError: not implemented yet.
        """
        raise NotImplementedError()
    def get_window_name(self, win_id):
        """
        Get a window's name, if any.

        :param win_id: the window id to query.
        :return: the window name, or None if the window has no name.
        """
        window = window_t(win_id)
        name_ptr = ctypes.c_char_p()
        name_len = ctypes.c_int(0)
        name_type = ctypes.c_int(0)
        _libxdo.xdo_get_window_name(
            self._xdo, window, ctypes.byref(name_ptr),
            ctypes.byref(name_len), ctypes.byref(name_type))
        # Copy the value out BEFORE releasing the X-allocated buffer.
        name = name_ptr.value
        _libX11.XFree(name_ptr)  # Free the string allocated by Xlib
        return name
    def enable_feature(self):
        """
        Enable an xdo feature.

        This function is mainly used by libxdo itself, however,
        you may find it useful in your own applications.

        :see: XDO_FEATURES
        :raises NotImplementedError: not implemented yet.
        """
        raise NotImplementedError()
    def has_feature(self):
        """
        Check if a feature is enabled.

        This function is mainly used by libxdo itself, however,
        you may find it useful in your own applications.

        :see: XDO_FEATURES
        :raises NotImplementedError: not implemented yet.
        """
        raise NotImplementedError()
    def get_viewport_dimensions(self):
        """
        Query the viewport (your display) dimensions.

        If Xinerama is active and supported, that api internally is used.
        If Xinerama is disabled, we will report the root window's dimensions
        for the given screen.

        :raises NotImplementedError: not implemented yet.
        """
        raise NotImplementedError()
def __del__(self):
_libxdo.xdo_free(self._xdo)
| 2.359375 | 2 |
reliance/DD/hybrid.py | Lizz647/SJTU-Software | 2 | 12764077 | <filename>reliance/DD/hybrid.py
import os
def spurious_hybrid(num, seqlist):
    """
    Score spurious hybridization of a target sequence against competitors.

    Writes a FASTA-like cache file consumed by the external ``./dd``
    binary, runs it, and returns the numeric score it prints.

    :param num: number of sequences to include from seqlist.
    :param seqlist: list of sequences; seqlist[0] is the target.
    :return: the floating point score reported by ./dd.
    """
    filename = "cachefile"
    with open(filename, 'w') as fp:
        fp.write(str(num) + '\n')
        fp.write(">targetseq" + '\n')
        fp.write(seqlist[0] + '\n')
        # Remaining sequences are the competing "other" strands.
        for seq in seqlist[1:num]:
            fp.write(">otherseq" + '\n')
            fp.write(seq + '\n')
    # NOTE(review): relies on a ./dd executable in the current directory
    # whose stdout is parseable as a single float -- confirm before reuse.
    output = os.popen("./dd " + filename).read()
    return float(output)
if __name__ == "__main__":
    # Smoke test: one target plus two competitor strands (the first
    # competitor is presumably the target's reverse complement -- confirm).
    seqlist = []
    seqlist.append("ATGGCTTTAA")
    seqlist.append("TTAAAGCCAT")
    seqlist.append("TTAAGCCA")
    score = spurious_hybrid(3,seqlist)
    print(score)
saveToDatabase.py | MuSystemsAnalysis/craigslist_area_search | 0 | 12764078 | <gh_stars>0
import sqlite3
import sys
def saveCityNamesToDatabase(cityList):
    """
    Persist a list of city names into the ``cities`` table of test.db.

    The table is dropped and re-created on every call, one row is inserted
    per name, and the stored rows are echoed to stdout for inspection.

    :param cityList: iterable of city name strings.
    """
    database_file = 'test.db'

    # connect
    conn = sqlite3.connect(database_file)
    try:
        cursor = conn.cursor()

        # Recreate the table from scratch on each run.
        cursor.execute('DROP TABLE IF EXISTS cities')
        cursor.execute('''CREATE TABLE cities
               (ID INTEGER PRIMARY KEY AUTOINCREMENT,
               NAME TEXT NOT NULL);''')
        print("Table created...\n")

        # Bug fix: the old code interpolated names into the SQL string via
        # str.format, which broke on quotes and allowed SQL injection.
        # Parameterized placeholders let SQLite handle quoting safely.
        cursor.executemany(
            'INSERT INTO cities (ID, NAME) VALUES (NULL, ?);',
            ((city,) for city in cityList))

        # Echo the stored rows.
        for row in conn.execute("SELECT * FROM cities"):
            print("ID = ", row[0])
            print("NAME = ", row[1])

        # save the database
        conn.commit()
    finally:
        # close even if an SQL statement raised
        conn.close()
| 4 | 4 |
netbox/utilities/filters.py | michaelxniu/netbox | 0 | 12764079 | import django_filters
from django import forms
from django.conf import settings
from django.db import models
from extras.models import Tag
def multivalue_field_factory(field_class):
    """
    Given a form field class, return a subclass capable of accepting multiple values. This allows us to OR on multiple
    filter values while maintaining the field's built-in validation. Example: GET /api/dcim/devices/?name=foo&name=bar
    """
    class NewField(field_class):
        widget = forms.SelectMultiple

        def to_python(self, value):
            if not value:
                return []
            return [
                # Only append non-empty values (this avoids e.g. trying to cast '' as an integer)
                super(field_class, self).to_python(v) for v in value if v
            ]

    # Rename the generated subclass so debugging output shows e.g.
    # "MultiValueCharField" instead of "NewField".
    return type('MultiValue{}'.format(field_class.__name__), (NewField,), dict())
#
# Filters
#
class MultiValueCharFilter(django_filters.MultipleChoiceFilter):
    """Char filter that ORs across repeated querystring values."""
    field_class = multivalue_field_factory(forms.CharField)
class MultiValueDateFilter(django_filters.MultipleChoiceFilter):
    """Date filter that ORs across repeated querystring values."""
    field_class = multivalue_field_factory(forms.DateField)
class MultiValueDateTimeFilter(django_filters.MultipleChoiceFilter):
    """Datetime filter that ORs across repeated querystring values."""
    field_class = multivalue_field_factory(forms.DateTimeField)
class MultiValueNumberFilter(django_filters.MultipleChoiceFilter):
    """Integer filter that ORs across repeated querystring values."""
    field_class = multivalue_field_factory(forms.IntegerField)
class MultiValueTimeFilter(django_filters.MultipleChoiceFilter):
    """Time filter that ORs across repeated querystring values."""
    field_class = multivalue_field_factory(forms.TimeField)
class TreeNodeMultipleChoiceFilter(django_filters.ModelMultipleChoiceFilter):
    """
    Filters for a set of Models, including all descendant models within a Tree. Example: [<Region: R1>,<Region: R2>]
    """
    def filter(self, qs, value):
        # Expand each selected node into itself plus all of its descendants.
        value = [node.get_descendants(include_self=True) for node in value]
        return super().filter(qs, value)
class NumericInFilter(django_filters.BaseInFilter, django_filters.NumberFilter):
    """
    Filters for a set of numeric values given as a comma-separated list.
    Example: id__in=100,200,300
    """
    pass
class NullableCharFieldFilter(django_filters.CharFilter):
    """
    Allow matching on null field values by passing a special string used to signify NULL.
    """
    def filter(self, qs, value):
        if value != settings.FILTERS_NULL_CHOICE_VALUE:
            return super().filter(qs, value)
        # Sentinel value received: match rows where the field IS NULL.
        qs = self.get_method(qs)(**{'{}__isnull'.format(self.field_name): True})
        return qs.distinct() if self.distinct else qs
class TagFilter(django_filters.ModelMultipleChoiceFilter):
    """
    Match on one or more assigned tags. If multiple tags are specified (e.g. ?tag=foo&tag=bar), the queryset is filtered
    to objects matching all tags.
    """
    def __init__(self, *args, **kwargs):
        kwargs.setdefault('field_name', 'tags__slug')
        kwargs.setdefault('to_field_name', 'slug')
        # conjoined=True -> AND semantics across the selected tags.
        kwargs.setdefault('conjoined', True)

        kwargs.setdefault('queryset', Tag.objects.all())

        super().__init__(*args, **kwargs)
#
# FilterSets
#
class NameSlugSearchFilterSet(django_filters.FilterSet):
    """
    A base class for adding the search method to models which only expose the `name` and `slug` fields
    """
    q = django_filters.CharFilter(
        method='search',
        label='Search',
    )

    def search(self, queryset, name, value):
        # Empty or whitespace-only queries are a no-op.
        if not value.strip():
            return queryset
        return queryset.filter(
            models.Q(name__icontains=value) |
            models.Q(slug__icontains=value)
        )
#
# Update default filters
#
FILTER_DEFAULTS = django_filters.filterset.FILTER_FOR_DBFIELD_DEFAULTS
FILTER_DEFAULTS.update({
models.AutoField: {
'filter_class': MultiValueNumberFilter
},
models.CharField: {
'filter_class': MultiValueCharFilter
},
models.DateField: {
'filter_class': MultiValueDateFilter
},
models.DateTimeField: {
'filter_class': MultiValueDateTimeFilter
},
models.DecimalField: {
'filter_class': MultiValueNumberFilter
},
models.EmailField: {
'filter_class': MultiValueCharFilter
},
models.FloatField: {
'filter_class': MultiValueNumberFilter
},
models.IntegerField: {
'filter_class': MultiValueNumberFilter
},
models.PositiveIntegerField: {
'filter_class': MultiValueNumberFilter
},
models.PositiveSmallIntegerField: {
'filter_class': MultiValueNumberFilter
},
models.SlugField: {
'filter_class': MultiValueCharFilter
},
models.SmallIntegerField: {
'filter_class': MultiValueNumberFilter
},
models.TimeField: {
'filter_class': MultiValueTimeFilter
},
models.URLField: {
'filter_class': MultiValueCharFilter
},
})
| 2.671875 | 3 |
TestRegExTabs.py | allisonoler/Tabular_File_Benchmark | 0 | 12764080 | <filename>TestRegExTabs.py<gh_stars>0
import itertools
import mmap
import sys
import re
file_path = sys.argv[1]
out_file_path = sys.argv[2]
memory_map = sys.argv[3] == "True"
def add_column(i, end_character):
    # Emit one regex group for column ``i``: a capturing group when the
    # column index is in the module-level ``index_range`` selection, a
    # non-capturing group otherwise. ``end_character`` terminates the
    # column (a tab, or a newline for the final column).
    group_prefix = "(" if i in index_range else "(?:"
    return group_prefix + r"[^\t]+" + end_character + ")"
with open(file_path, 'r') as my_file:
    # Read only the header row to learn the column count.
    header_items = next(my_file).rstrip("\n").split("\t")
# Keep every 100th column (0, 100, 200, ...).
index_range = range(0, len(header_items), 100)
# Build one regex matching a whole line: capturing groups for kept
# columns, non-capturing groups for the rest (see add_column).
reg_ex = r"^"
for i in range(len(header_items)-1):
    reg_ex += add_column(i, r"\t")
    # Columns past the last kept index never need to be matched.
    if i > max(index_range):
        break
if max(index_range) == (len(header_items) - 1):
    # The final kept column is newline-terminated, not tab-terminated.
    reg_ex += add_column(len(header_items)-1, r"\n")
reg_ex_comp = re.compile(reg_ex.encode(), re.MULTILINE | re.DOTALL)
with open(file_path, 'rb') as my_file:
    if memory_map:
        # Map the file rather than loading it, to compare memory behaviour.
        my_file_text = mmap.mmap(my_file.fileno(), 0, prot=mmap.PROT_READ)
    else:
        my_file_text = my_file.read()
    with open(out_file_path, 'wb') as out_file:
        for match in reg_ex_comp.finditer(my_file_text):
            # Join the captured columns and terminate the record with \n.
            out_file.write(b"".join(match.groups()).rstrip(b"\t").rstrip(b"\n") + b"\n")
my_file.close()  # NOTE(review): redundant -- the `with` block already closed it.
| 2.609375 | 3 |
simpleoperationandvirus.py | Jarvood11/Secureoperation11 | 0 | 12764081 | # -*- coding: utf-8 -*-
"""
Created on Sun May 23 16:38:11 2021
@author: Jaroslav
"""
# -*- coding: utf-8 -*-
def f1(x):
    # Successor function: return the argument incremented by one.
    return 1 + x
x2=f1(1)
print("vvedite vo skolko uvelichet functziy:")
n=int(input())
def doublern(f):
    # Return a closure that scales its argument by the captured value f.
    def g(n):
        return n * f
    return g
#print(x2)
g=doublern(x2)
print(g(n))
print("vvedite vo skolko uvelichet functziy:")
n=int(input())
def f1(x):
    # NOTE(review): adds the module-level global ``n`` (read at call time),
    # shadowing the earlier f1 definition -- confirm this is intended.
    return x+n
x2=f1(n)
def doublern(f):
    # Build a multiplier closure around the captured factor f.
    # (Shadows the earlier doublern definition, same behavior.)
    return lambda n: n * f
#print(x2)
g=doublern(x2)
print(g(n))
p='hello'
print(p)
#print(g(p))
text_file = open('C:/F#/exp2/file1.txt','r',encoding='utf8')
print(text_file)
line_list = text_file.readlines();
for line in line_list:
print(line)
text_file.close()
import shutil
shutil.copy('C:/F#/exp2/file1.txt', 'C:/F#/exp2/file3.txt')
import os
os.getcwd()
print(os.listdir('C:/F#/exp2/'))
#print(shutil.rmtree.avoids_symlink_attacks)
import shutil
for i in range(10):
shutil.copy2('C:/F#/exp2/file1.txt', 'C:/F#/exp2/file1{}.txt'.format(i)) | 3.25 | 3 |
07_aula04/q2.py | fdelgadov/LabADAGrupoB | 0 | 12764082 | ## Q2: What is the time complexity of
## O(n), because it is a single loop running from i = n down to i = 1
# Algoritmo
# for (i = n; i > 0; i--) { # n
# statement; # 1
# }
n = 5
# Count down from n to 1: n iterations of O(1) work -> O(n) total.
for i in reversed(range(1, n + 1)):
    print(i)
Previous_Designs/Hybrid_Prototype/Software/RPi_PythonScripts/hybrid_logger_2.py | UCHIC/CIWS-Loggers | 6 | 12764083 | #!/bin/env/python
# File: hybrid_logger.py
#
# Raspberry Pi data transfer script
#
# Date: 6/27/18
# <NAME> and <NAME>
# Platform: Raspberry Pi 3 model B
#
# Python version: 2.7.9
import os # for Linux terminal commands\
import RPi.GPIO as GPIO # for General Purpose I/O library
from smbus import SMBus # for basic i2c (smbus) functions
import time # for time functions
#--------------------------------------------------------------------------------------------------
#
# Section: Function to get Diskspace
#
#--------------------------------------------------------------------------------------------------
def getDiskSpace():
    # Report root-filesystem statistics by parsing `df /`.
    # Returns columns 1-4 of the second output line (the data row):
    # presumably [1K-blocks, Used, Available, Use%] -- confirm against
    # the local df output format.
    df_output = os.popen("df /")
    df_output.readline()              # skip the header row
    stats_line = df_output.readline() # the data row for /
    return stats_line.split()[1:5]
#--------------------------------------------------------------------------------------------------
#
# Section: Get the Time for the Datalogger
#
#--------------------------------------------------------------------------------------------------
currTime = time.time() # Get the current time on the Raspberry Pi
print(currTime)
now = time.strftime("\"%Y-%m-%d %H:%M:%S\"", time.localtime(currTime)) # Get a properly formatted date string
print(now)
samplePeriod = 4 # define number of seconds per record
bootDelay = 26 # define boot delay in seconds, 26 seconds was verified with testing.
crunchTime = 600 # Amount of time user has to grab data off of the Raspberry Pi (in seconds)
lastRecord = 0 # The last record number of the last run, initially set to zero.
if os.path.exists("/home/pi/LastRecord"): # Determine the last record number to compute current record number in this run
fh = open("/home/pi/LastRecord", "r")
lastRecord = fh.read()
fh.close()
#--------------------------------------------------------------------------------------------------
#
# Section: Raspberry Pi GPIO Configuration
#
#--------------------------------------------------------------------------------------------------
# Generate "EEPROM Busy" signal on the Raspberry Pi's GPIO
GPIO.setmode(GPIO.BCM) # use BCM pin numbering
busyPin = 19 # GPIO 19 as signal pin
GPIO.setup(busyPin,GPIO.OUT) # set pin as an output
GPIO.output(busyPin,GPIO.HIGH) # set it high, meaning the EEPROM is busy
# Generate "Pi On" signal on the Raspberry Pi's GPIO
GPIO.setmode(GPIO.BCM) # Use BCM pin numbering
onPin = 25 # GPIO 25 as signal pin
GPIO.setup(onPin,GPIO.OUT) # Set pin as an output
GPIO.output(onPin,GPIO.HIGH) # Set it high, signalling to the controller that the system is on
# Listen for "Stay ON" signal on the Raspberry Pi's GPIO
GPIO.setmode(GPIO.BCM) # use BCM pin numbering
stayOnPin = 13 # GPIO 13 as signal pin
GPIO.setup(stayOnPin,GPIO.IN) # set pin as an input
#--------------------------------------------------------------------------------------------------
#
# Section: Initialize File
#
#--------------------------------------------------------------------------------------------------
filename = "/home/pi/test.csv" # Define a path/filename to be used.
dataLoggerName = "0000" # Define a Data Logger name
siteName = "0000" # Define a Site Name
siteDescription = "UWRL Test Facility" # Define a Site Description
if os.path.exists(filename): # If the file exists,
fh = open(filename, "a") # Open the file to append
else: # Otherwise,
fh = open(filename, "w") # Open the new file and write the header
fh.write("CIWS Data Logger Output File\n")
fh.write("DataLoggerName: ")
fh.write(dataLoggerName)
fh.write("\nSiteName: ")
fh.write(siteName)
fh.write("\nSiteDescription: ")
fh.write(siteDescription)
fh.write("\nTime,Record,Pulses,Disk_Space_Available (kB)\n")
#--------------------------------------------------------------------------------------------------
#
# Section: Data Transfer from EEPROM to SD Card Data File
#
#--------------------------------------------------------------------------------------------------
# prepare to read EEPROM chip starting at address zero
# what address belongs to EEPROM? 1010000 in binary ==> 0x50 ==> 80 in base ten, before left shifting the Read/Write bit into the address
# to read, we set the R/W bit to 1 (R/~W).
# 1010 0001
# send that address (control byte)
# then send two bytes for addressing into the EEPROM
# declare object
bus = SMBus(1) # NOTE: the i2c bus number is 1, not zero like in some tutorials
bus.write_byte_data(0x50, 0, 0) # start reading EEPROM at address zero
numRecordsHigh = bus.read_byte(0x50) # read high byte of the number of records on the EEPROM
numRecordsLow = bus.read_byte(0x50) # read low byte of the number of records on the EEPROM
dummy = bus.read_byte(0x50) # clear dummy byte
dummy = bus.read_byte(0x50) # clear dummy byte
numRecords = (numRecordsHigh << 8) + numRecordsLow # splice together the low and high bytes to get the number of records
firstRecordTime = currTime - (numRecords * samplePeriod) - bootDelay # calculate starting time in seconds
# EEPROM loop
for i in range(numRecords): # loop reads an entire three byte record (sample) + one dummy byte for EEPROM organization purposes
recordNumHigh = bus.read_byte(0x50)
recordNumLow = bus.read_byte(0x50)
pulseCount = bus.read_byte(0x50)
dummy = bus.read_byte(0x50)
recordNum = (recordNumHigh << 8) + recordNumLow # splice together the high and low bytes of recordNum
recordNum = recordNum + int(lastRecord) # Compute current record number based on last record number
currTime = firstRecordTime + (samplePeriod * i) # calculate time difference for each individual record
timeStamp = time.strftime("\"%Y-%m-%d %H:%M:%S\"", time.localtime(currTime)) # write timestamp for the current record
DISK_stats = getDiskSpace() # Get free space on Disk
diskSpace = DISK_stats[2]
# write data out to file
fh.write(`timeStamp`)
fh.write(',')
fh.write(`recordNum`)
fh.write(',')
fh.write(`pulseCount`)
fh.write(',')
fh.write(diskSpace)
fh.write("\n")
GPIO.output(busyPin,GPIO.LOW) # As soon as EEPROM to Raspberry Pi data transfer is complete, set the EEPROM Busy signal LOW, to signal the ATtiny microcontroller.
fh.close()
fh = open("/home/pi/LastRecord", "w")
fh.write(`recordNum`)
fh.close()
#--------------------------------------------------------------------------------------------------
#
# Section: Power ON/OFF Control
#
#--------------------------------------------------------------------------------------------------
stayOn = GPIO.input(stayOnPin) # read the status of the Stay On Pin
if stayOn == True:
time.sleep(crunchTime) # have the script wait for crunchTime seconds before an automatic system poweroff
os.system("sudo poweroff") # automatic system poweroff
| 2.5 | 2 |
ddn/data_logger.py | chamillj/SDN-Analytics | 0 | 12764084 | #@@---------------------------@@
# Author: <NAME>
# Date: 5/18/17
# Description:
#@@---------------------------@@
from mininet.log import setLogLevel, info, lg
import sys
import logging
import subprocess
class Logger(object):
    """Tee-style writer: mirrors every write to a terminal stream and a log file.

    Intended to replace sys.stdout so console output is also captured on disk.
    """

    def __init__(self, terminal, filename):
        self.terminal = terminal
        self.log = filename
        # Truncate the log file and stamp a header.
        with open(self.log, 'w') as f:
            f.write("****CONSOLE OUTPUT****" + '\n\n\n')

    def write(self, message):
        self.terminal.write(message)
        # Bug fix: do not append '\n' to every chunk. print() delivers the
        # text and the trailing newline as separate write() calls, so the
        # old code inserted extra blank lines after every printed line.
        with open(self.log, 'a') as f:
            f.write(message)

    def flush(self):
        # Needed for python 3 compatibility when standing in for sys.stdout.
        # Each write opens/closes the file, so nothing is buffered here.
        pass
def log_to_file(file_name):
    """Route Mininet's logger output (at 'info' level) into *file_name*.

    The file is truncated and stamped with a header first; FileHandler
    appends after it by default, so the header is preserved.
    """
    with open(file_name, 'w') as f:
        f.write("****LOG OUTPUT****" + '\n\n\n')

    fh = logging.FileHandler(file_name)
    setLogLevel('info')
    lg.addHandler(fh)
def start_logging():
    """Snapshot the run configuration and redirect logging/stdout to files."""
    # Keep a copy of the experiment parameters next to the collected data.
    subprocess.call(['cp' , './configs.json', './data/PARAMS/'])

    # Route the Mininet logger to a file.
    log_to_file('data/PARAMS/output.log')

    # Tee stdout to a console log while still printing to the terminal.
    sys.stdout = Logger(sys.stdout, "data/PARAMS/console.log")
Scripts/python/scripts mundo 1/jogo do dodo/Desafio021.py | BrenoNAlmeida/Scripts-Escola | 0 | 12764085 | from pygame import *
# Play a music file with pygame's mixer, then offer to replay.
print('a musica será executada em breve')
mixer.init()
mixer.music.load('Desafio021.mp3')
mixer.music.play()
print('tocando...')
# Block until playback finishes, polling ~10x per second.
while mixer.music.get_busy():
    time.Clock().tick(10)
print('a musica acabou !!!')
escolha4 = ''
# Keep prompting until a valid 'sim'/'nao' answer is given.
while escolha4 != 'sim' and escolha4 != 'nao':
    escolha4 = str(input('você deseja executar novamente [sim/nao]?')).lower()
    if escolha4 == 'sim':
        # Importing the game module re-runs it (import used as "execute").
        import jogo_do_tio_Dodo
    if escolha4 == 'nao':
        print('obrigado por ultilizar nossos serviços')
        break
package/tests/application/models/test_author.py | Jhsmit/awesome-panel | 1 | 12764086 | # pylint: disable=redefined-outer-name,protected-access
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
def test_can_construct_author(author):
    """Smoke-test the Author model: field types, str/repr, and HTML repr."""
    assert isinstance(author.name, str)
    assert isinstance(author.url, str)
    assert isinstance(author.github_url, str)
    assert isinstance(author.github_avatar_url, str)
    assert str(author) == author.name
    assert repr(author) == author.name
    # NOTE(review): the 'application=' attribute below looks like an
    # accidental rename of the <img> 'src=' attribute, and width="21x"
    # looks like a typo for "21px" -- confirm against Author._repr_html_.
    assert author._repr_html_(width="21x", height="22px") == (
        '<a href="https://github.com/holoviz/" title="Author: panel" target="_blank">'
        '<img application="https://avatars2.githubusercontent.com/u/51678735" alt="panel" '
        'style="border-radius: 50%;width: 21x;height: 22px;vertical-align: text-bottom;">'
        "</img></a>"
    )
| 2.4375 | 2 |
tests/zquantum/core/serialization_test.py | bartubisgin/z-quantum-core | 0 | 12764087 | <gh_stars>0
"""Test cases for serialization module."""
import json
import os
import numpy as np
import pytest
from scipy.optimize import OptimizeResult
from zquantum.core.bitstring_distribution import BitstringDistribution
from zquantum.core.history.recorder import HistoryEntry, HistoryEntryWithArtifacts
from zquantum.core.interfaces.optimizer import optimization_result
from zquantum.core.serialization import (
OrquestraDecoder,
OrquestraEncoder,
load_optimization_results,
save_optimization_results,
)
from zquantum.core.utils import ValueEstimate, convert_array_to_dict
# The result constructed below does not make sense.
# It does not matter though, as we are only testing serialization and it contains variety
# of data to be serialized.
# Shared fixture: an OptimizeResult-like object exercising scalars, numpy
# arrays (including a complex one), ValueEstimate and artifact-carrying
# history entries.
EXAMPLE_OPTIMIZATION_RESULT = optimization_result(
    opt_value=0.5,
    opt_params=np.array([0, 0.5, 2.5]),
    nit=3,
    fev=10,
    history=[
        HistoryEntry(
            call_number=0,
            params=np.array([0.1, 0.2, 0.3j]),
            value=ValueEstimate(0.5, precision=6),
        ),
        HistoryEntry(call_number=1, params=np.array([1, 2, 3]), value=-10.0),
        HistoryEntryWithArtifacts(
            call_number=2,
            params=np.array([-1, -0.5, -0.6]),
            value=-20.0,
            artifacts={
                "bitstring": "0111",
                "bitstring_distribution": BitstringDistribution(
                    {"111": 0.25, "010": 0.75}
                ),
            },
        ),
    ],
)

# The plain-JSON form the encoder is expected to emit for the fixture above.
EXPECTED_DESERIALIZED_RESULT = {
    "schema": "zapata-v1-optimization_result",
    "opt_value": 0.5,
    "opt_params": convert_array_to_dict(np.array([0, 0.5, 2.5])),
    "nit": 3,
    "fev": 10,
    "history": [
        {
            "call_number": 0,
            "params": convert_array_to_dict(np.array([0.1, 0.2, 0.3j])),
            "value": ValueEstimate(0.5, precision=6).to_dict(),
        },
        {
            "call_number": 1,
            "params": convert_array_to_dict(np.array([1, 2, 3])),
            "value": -10.0,
        },
        {
            "call_number": 2,
            "params": convert_array_to_dict(np.array([-1, -0.5, -0.6])),
            "value": -20.0,
            "artifacts": {
                "bitstring": "0111",
                "bitstring_distribution": {"111": 0.25, "010": 0.75},
            },
        },
    ],
}
def history_entries_equal(entry_1, entry_2):
    """Compare two history entries, assuming their params are numpy arrays.

    Entries are equal when call number, value and params match and — if
    present — their ``artifacts`` mappings hold equal values under the same
    keys.  Distribution-like artifacts (anything exposing
    ``distribution_dict``) are compared via that dict, so this helper has
    no hard dependency on the BitstringDistribution class itself.

    Bug fixed: the original zipped the artifact dicts directly, which
    iterates *keys* only — artifact values stored under identical keys were
    never compared (and the loop variables shadowed the entry parameters).
    """
    if entry_1.call_number != entry_2.call_number:
        return False
    if entry_1.value != entry_2.value:
        return False
    if not np.array_equal(entry_1.params, entry_2.params):
        return False
    if hasattr(entry_1, "artifacts") != hasattr(entry_2, "artifacts"):
        return False

    artifacts_1 = getattr(entry_1, "artifacts", {})
    artifacts_2 = getattr(entry_2, "artifacts", {})
    # Same set of artifact keys, then compare values key by key.
    if set(artifacts_1) != set(artifacts_2):
        return False
    for key in artifacts_1:
        value_1, value_2 = artifacts_1[key], artifacts_2[key]
        dist_1 = getattr(value_1, "distribution_dict", None)
        dist_2 = getattr(value_2, "distribution_dict", None)
        if dist_1 is not None and dist_2 is not None:
            if dist_1 != dist_2:
                return False
        elif value_1 != value_2:
            return False
    return True
def optimization_results_equal(result_1, result_2):
    """Return True when two optimization results hold equivalent data."""
    # Scalar fields must match exactly.
    for key in ("opt_value", "nit", "fev"):
        if result_1[key] != result_2[key]:
            return False
    # Optimal parameters are numpy arrays, so compare element-wise.
    if not np.array_equal(result_1.opt_params, result_2.opt_params):
        return False
    # Histories must have the same length and pairwise-equal entries.
    if len(result_1.history) != len(result_2.history):
        return False
    return all(
        history_entries_equal(entry_1, entry_2)
        for entry_1, entry_2 in zip(result_1.history, result_2.history)
    )
def test_orquestra_encoder_can_handle_numpy_arrays():
    """Encoder must serialize real/complex arrays and nested lists of arrays."""
    dict_to_serialize = {
        "array_1": np.array([1, 2, 3]),
        "array_2": np.array([0.5 + 1j, 0.5 - 0.25j]),
        "list_of_arrays": [np.array([0.5, 0.4, 0.3]), np.array([1, 2, 3])],
    }

    # Round-trip through plain json: arrays should come back as the dict
    # form produced by convert_array_to_dict.
    deserialized_dict = json.loads(json.dumps(dict_to_serialize, cls=OrquestraEncoder))

    expected_deserialized_dict = {
        "array_1": convert_array_to_dict(dict_to_serialize["array_1"]),
        "array_2": convert_array_to_dict(dict_to_serialize["array_2"]),
        "list_of_arrays": [
            convert_array_to_dict(arr) for arr in dict_to_serialize["list_of_arrays"]
        ],
    }

    assert deserialized_dict == expected_deserialized_dict
def test_orquestra_encoder_can_handle_optimization_result():
    """Encoding the example result must yield the expected plain-JSON dict."""
    serialized = json.dumps(EXAMPLE_OPTIMIZATION_RESULT, cls=OrquestraEncoder)
    assert json.loads(serialized) == EXPECTED_DESERIALIZED_RESULT
def test_save_optimization_results_successfully_saves_optimization_result():
    """save_optimization_results must write JSON equal to the expected dict.

    Robustness fix: the scratch file is now removed in a ``finally`` block,
    so a failing assertion no longer leaves it behind to pollute reruns.
    """
    result_to_serialize = EXAMPLE_OPTIMIZATION_RESULT
    expected_deserialized_result = EXPECTED_DESERIALIZED_RESULT
    optimization_result_filename = "test-optimization-result-io.json"

    # When
    save_optimization_results(result_to_serialize, optimization_result_filename)

    # Then
    try:
        with open(optimization_result_filename, "r") as f:
            loaded_data = json.load(f)
        assert loaded_data == expected_deserialized_result
    finally:
        os.remove(optimization_result_filename)
def test_orquestra_decoder_can_load_numpy_arrays():
    """Decoder must rebuild real and complex numpy arrays from dict form."""
    dict_of_arrays = {
        "array_1": {"real": [1, 2, 3, 4]},
        "array_2": {"real": [0.5, 0.25, 0], "imag": [0, 0.25, 0.5]},
        "array_3": {"real": [[1.5, 2.5], [0.5, 0.5]], "imag": [[0.5, -0.5], [1.0, 2.0]]}
    }
    expected_deserialized_object = {
        "array_1": np.array([1, 2, 3, 4]),
        "array_2": np.array([0.5, 0.25 + 0.25j, 0.5j]),
        "array_3": np.array([[1.5 + 0.5j, 2.5 - 0.5j], [0.5 + 1.0j, 0.5 + 2.0j]])
    }

    deserialized_object = json.loads(json.dumps(dict_of_arrays), cls=OrquestraDecoder)

    # Element-wise comparison: each entry should decode to its numpy array.
    for key in dict_of_arrays:
        assert np.array_equal(deserialized_object[key], expected_deserialized_object[key])
@pytest.mark.parametrize(
    "value_estimate",
    [
        ValueEstimate(-2.5, 0.125),
        ValueEstimate(0.5)
    ]
)
def test_orquestra_decoder_can_load_value_estimate(value_estimate):
    """ValueEstimate (with and without precision) must survive a round trip."""
    serialized_value_estimate = json.dumps(value_estimate.to_dict())
    assert value_estimate == json.loads(serialized_value_estimate, cls=OrquestraDecoder)
@pytest.mark.parametrize(
    "history_entry",
    [
        HistoryEntry(call_number=9, value=-20.0, params=np.array([0.2, 0.3])),
        HistoryEntry(call_number=1, value=-15.5, params=np.array([0.5, 0.4, 0.1])),
        HistoryEntryWithArtifacts(
            call_number=10,
            value=0.25,
            params=np.array([-0.1, 0.2, -1.2]),
            artifacts={
                "bitstring": "111010",
                "bitstring_distribution": {"111": 0.25, "010": 0.75}
            }
        )
    ]
)
def test_orquestra_decoder_successfully_loads_history_entry_objects(history_entry):
    """History entries (with or without artifacts) must survive a round trip."""
    serialized_history_entry = json.dumps(history_entry, cls=OrquestraEncoder)
    deserialized_history_entry = json.loads(serialized_history_entry, cls=OrquestraDecoder)

    # Field-by-field checks plus the shared structural-equality helper.
    assert history_entry.call_number == deserialized_history_entry.call_number
    assert history_entry.value == deserialized_history_entry.value
    assert np.array_equal(history_entry.params, deserialized_history_entry.params)
    assert history_entries_equal(history_entry, deserialized_history_entry)
def test_orquestra_decoder_successfully_loads_optimization_result():
    """Encoding then decoding the example result must reproduce it."""
    round_tripped = json.loads(
        json.dumps(EXAMPLE_OPTIMIZATION_RESULT, cls=OrquestraEncoder),
        cls=OrquestraDecoder,
    )
    assert isinstance(round_tripped, OptimizeResult)
    assert optimization_results_equal(EXAMPLE_OPTIMIZATION_RESULT, round_tripped)
def test_load_optimization_results_successfully_loads_optimization_result_from_file():
    """load_optimization_results must read back what save_optimization_results wrote.

    Robustness fix: the scratch file is now removed in a ``finally`` block,
    so a failing assertion no longer leaves it behind to pollute reruns.
    """
    result_to_serialize = EXAMPLE_OPTIMIZATION_RESULT
    optimization_result_filename = "test-optimization-result-io.json"

    save_optimization_results(result_to_serialize, optimization_result_filename)
    try:
        loaded_data = load_optimization_results(optimization_result_filename)
        assert optimization_results_equal(result_to_serialize, loaded_data)
    finally:
        os.remove(optimization_result_filename)
| 2.015625 | 2 |
openstack_dashboard/dashboards/project/data_processing/job_binaries/forms.py | maofutian/horizon | 0 | 12764088 | <reponame>maofutian/horizon<gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import uuid
from django.forms import util
from django.forms import widgets
from django import template
from django.template import defaultfilters
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class LabeledInput(widgets.Input):
    """Text input widget prefixed with a fixed ``internal-db://`` label span."""

    def render(self, name, values, attrs=None):
        """Render the labelled input and mark the markup as safe HTML."""
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
        label_span = "<span id='id_%s_label'>internal-db://</span>" % name
        input_tag = '<input%s />' % util.flatatt(final_attrs)
        return mark_safe(label_span + input_tag)
class JobBinaryCreateForm(forms.SelfHandlingForm):
    """Horizon form for creating a Sahara job binary.

    Supports two storage backends: the Sahara internal database (either an
    uploaded file or a script typed into the form) and Swift (URL plus
    credentials stored as "extra" metadata).
    """
    # Sentinel values used in the "internal binary" choice list to mean
    # "create a new script" / "upload a new file" instead of an existing id.
    NEW_SCRIPT = "%%%NEWSCRIPT%%%"
    UPLOAD_BIN = "%%%UPLOADFILE%%%"

    job_binary_name = forms.CharField(label=_("Name"))

    job_binary_type = forms.ChoiceField(label=_("Storage type"))

    job_binary_url = forms.CharField(label=_("URL"),
                                     required=False,
                                     widget=LabeledInput())

    job_binary_internal = forms.ChoiceField(label=_("Internal binary"),
                                            required=False)

    job_binary_file = forms.FileField(label=_("Upload File"),
                                      required=False)

    job_binary_script_name = forms.CharField(label=_("Script name"),
                                             required=False)

    job_binary_script = forms.CharField(label=_("Script text"),
                                        required=False,
                                        widget=forms.Textarea())

    job_binary_username = forms.CharField(label=_("Username"),
                                          required=False)

    job_binary_password = forms.CharField(label=_("Password"),
                                          required=False,
                                          widget=forms.PasswordInput(
                                              attrs={'autocomplete': 'off'}))

    job_binary_description = forms.CharField(label=_("Description"),
                                             required=False,
                                             widget=forms.Textarea())

    def __init__(self, request, *args, **kwargs):
        """Populate the dynamic choice fields for storage type and binaries."""
        super(JobBinaryCreateForm, self).__init__(request, *args, **kwargs)

        self.help_text_template = ("project/data_processing.job_binaries/"
                                   "_create_job_binary_help.html")

        self.fields["job_binary_type"].choices =\
            [("internal-db", "Internal database"),
             ("swift", "Swift")]

        self.fields["job_binary_internal"].choices =\
            self.populate_job_binary_internal_choices(request)

    def populate_job_binary_internal_choices(self, request):
        """Return choice tuples for existing internal binaries.

        The two sentinel entries ("upload"/"create") are prepended so they
        appear first in the dropdown; a fetch failure degrades to just the
        sentinels.
        """
        try:
            job_binaries = saharaclient.job_binary_internal_list(request)
        except Exception:
            exceptions.handle(request,
                              _("Failed to get list of internal binaries."))
            job_binaries = []
        choices = [(job_binary.id, job_binary.name)
                   for job_binary in job_binaries]
        choices.insert(0, (self.NEW_SCRIPT, '*Create a script'))
        choices.insert(0, (self.UPLOAD_BIN, '*Upload a new file'))

        return choices

    def handle(self, request, context):
        """Create the job binary from the cleaned form data.

        Returns True on success, False after reporting the error to the user.
        """
        try:
            extra = {}
            bin_url = "%s://%s" % (context["job_binary_type"],
                                   context["job_binary_url"])
            if(context["job_binary_type"] == "internal-db"):
                bin_url = self.handle_internal(request, context)
            elif(context["job_binary_type"] == "swift"):
                extra = self.handle_swift(request, context)

            saharaclient.job_binary_create(
                request,
                context["job_binary_name"],
                bin_url,
                context["job_binary_description"],
                extra)
            messages.success(request, "Successfully created job binary")
            return True
        except Exception:
            exceptions.handle(request,
                              _("Unable to create job binary"))
            return False

    def get_help_text(self, extra_context=None):
        """Render the contextual help template (falls back to plain help_text)."""
        text = ""
        extra_context = extra_context or {}
        if self.help_text_template:
            tmpl = template.loader.get_template(self.help_text_template)
            context = template.RequestContext(self.request, extra_context)
            text += tmpl.render(context)
        else:
            text += defaultfilters.linebreaks(force_text(self.help_text))
        return defaultfilters.safe(text)

    class Meta:
        name = _("Create Job Binary")
        help_text_template = ("project/data_processing.job_binaries/"
                              "_create_job_binary_help.html")

    def handle_internal(self, request, context):
        """Create an internal-db binary (upload or inline script).

        Returns the "internal-db://<id>" URL, or None when creation failed
        (the error has already been shown to the user).
        """
        result = ""

        bin_id = context["job_binary_internal"]

        if(bin_id == self.UPLOAD_BIN):
            try:
                result = saharaclient.job_binary_internal_create(
                    request,
                    self.get_unique_binary_name(
                        request, request.FILES["job_binary_file"].name),
                    request.FILES["job_binary_file"].read())
            except Exception:
                exceptions.handle(request,
                                  _("Unable to upload job binary"))
                return None
        elif(bin_id == self.NEW_SCRIPT):
            try:
                result = saharaclient.job_binary_internal_create(
                    request,
                    self.get_unique_binary_name(
                        request, context["job_binary_script_name"]),
                    context["job_binary_script"])
            except Exception:
                exceptions.handle(request,
                                  _("Unable to create job binary"))
                return None

        bin_id = result.id
        return "internal-db://%s" % bin_id

    def handle_swift(self, request, context):
        """Build the Swift "extra" credentials dict from the form data."""
        username = context["job_binary_username"]
        password = context["job_binary_password"]

        extra = {
            "user": username,
            "password": password
        }
        return extra

    def get_unique_binary_name(self, request, base_name):
        """Return base_name, suffixed with a uuid1 when the name already exists."""
        try:
            internals = saharaclient.job_binary_internal_list(request)
        except Exception:
            internals = []
            exceptions.handle(request,
                              _("Failed to fetch internal binary list"))
        names = [internal.name for internal in internals]
        if base_name in names:
            return "%s_%s" % (base_name, uuid.uuid1())
        return base_name
| 1.796875 | 2 |
examples/tempo.py | storagebot/pyechonest | 2 | 12764089 | <reponame>storagebot/pyechonest
import sys
from pyechonest import song
def get_tempo(artist, title):
    """Return the Echo Nest tempo (BPM) for a song, or None when not found."""
    # Request a single match and include the audio_summary bucket so the
    # tempo field is present on the returned song object.
    results = song.search(artist=artist, title=title, results=1, buckets=['audio_summary'])
    if len(results) > 0:
        return results[0].audio_summary['tempo']
    else:
        return None
if __name__ == '__main__':
if len(sys.argv) <> 3:
print "Usage: python tempo.py 'artist name' 'song title'"
else:
tempo = get_tempo(sys.argv[1], sys.argv[2])
if tempo:
print 'Tempo for', sys.argv[1], sys.argv[2], 'is', tempo
else:
print "Can't find Tempo for artist:", sys.argv[1], 'song:', sys.argv[2]
| 2.890625 | 3 |
archivy/click_web/resources/cmd_exec.py | thethales/archivy | 0 | 12764090 | <filename>archivy/click_web/resources/cmd_exec.py
import os
import shutil
import subprocess
import sys
import tempfile
import traceback
from pathlib import Path
from typing import List
from flask import Response, request
from werkzeug.utils import secure_filename
from archivy import click_web
from .input_fields import FieldId
logger = None
def exec(command_path):
    """
    Execute the command and stream the output from it as response.

    NOTE: the function name shadows the ``exec`` builtin; it is referenced
    elsewhere by name, so it is kept as-is here.

    :param command_path: slash-separated (sub)command path from the URL;
        it is prefixed with "cli/" before being split into commands.
    :return: a streaming text/plain Response, or a 400 Response when the
        path contains a blacklisted command ("shell", "run", "routes").
    """
    command_path = "cli/" + command_path
    global logger
    logger = click_web.logger

    # Commands that must never be reachable through the web endpoint.
    omitted = ["shell", "run", "routes"]
    root_command, *commands = command_path.split('/')

    # Build the full command line: arguments for the root come first, then
    # each subcommand followed by its own arguments.
    cmd = ["archivy"]
    req_to_args = RequestToCommandArgs()
    # root command_index should not add a command
    cmd.extend(req_to_args.command_args(0))
    for i, command in enumerate(commands):
        if command in omitted:
            return Response(status=400)
        cmd.append(command)
        cmd.extend(req_to_args.command_args(i + 1))

    def _generate_output():
        # Header first, then the live script output; errors are streamed to
        # the client before re-raising.
        yield _create_cmd_header(commands)
        try:
            yield from _run_script_and_generate_stream(req_to_args, cmd)
        except Exception as e:
            # exited prematurely, show the error to user
            yield f"\nERROR: Got exception when reading output from script: {type(e)}\n"
            yield traceback.format_exc()
            raise

    return Response(_generate_output(),
                    mimetype='text/plain')
def _run_script_and_generate_stream(req_to_args: 'RequestToCommandArgs', cmd: List[str]):
    """
    Execute the command via Popen and yield its output line by line.

    stderr is merged into stdout so the client sees a single stream.  After
    the process exits, each field's ``after_script_executed`` hook runs
    (e.g. to zip result folders).
    """
    logger.info('Executing: %s', cmd)
    if not os.environ.get('PYTHONIOENCODING'):
        # Fix unicode on windows
        os.environ['PYTHONIOENCODING'] = 'UTF-8'

    process = subprocess.Popen(cmd,
                               shell=False,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    logger.info('script running Pid: %d', process.pid)

    encoding = sys.getdefaultencoding()

    # Stream stdout as it is produced; iter(..., b'') stops at EOF.
    with process.stdout:
        for line in iter(process.stdout.readline, b''):
            yield line.decode(encoding)

    process.wait()  # wait for the subprocess to exit
    logger.info('script finished Pid: %d', process.pid)

    # Post-processing hooks (e.g. archiving output folders for download).
    for fi in req_to_args.field_infos:
        fi.after_script_executed()
def _create_cmd_header(commands: List[str]):
"""
Generate a command header.
Note:
here we always allow to generate HTML as long as we have it between CLICK-WEB comments.
This way the JS frontend can insert it in the correct place in the DOM.
"""
def generate():
yield '<!-- CLICK_WEB START HEADER -->'
yield '<div class="command-line">Executing: {}</div>'.format('/'.join(commands))
yield '<!-- CLICK_WEB END HEADER -->'
# important yield this block as one string so it pushed to client in one go.
# so the whole block can be treated as html.
html_str = '\n'.join(generate())
return html_str
def _create_result_footer(req_to_args: 'RequestToCommandArgs'):
    """
    Generate a footer.

    Note:
        here we always allow to generate HTML as long as we have it between
        CLICK-WEB comments.  This way the JS frontend can insert it in the
        correct place in the DOM.
    """
    downloadable = [
        field_info
        for field_info in req_to_args.field_infos
        if field_info.generate_download_link and field_info.link_name
    ]

    lines = ['<!-- CLICK_WEB START FOOTER -->']
    if downloadable:
        lines.append('<b>Result files:</b><br>')
        for field_info in downloadable:
            lines.extend([
                '<ul> ',
                f'<li>{_get_download_link(field_info)}<br>',
                '</ul>',
            ])
    else:
        lines.append('<b>DONE</b>')
    lines.append('<!-- CLICK_WEB END FOOTER -->')

    # Yield the footer as one string so the client receives it in one go
    # and can treat the whole block as HTML.
    yield '\n'.join(lines)
def _get_download_link(field_info):
    """Hack as url_for need request context"""
    relative = Path(field_info.file_path).relative_to(click_web.OUTPUT_FOLDER)
    return f'<a href="/static/results/{relative.as_posix()}">{field_info.link_name}</a>'
class RequestToCommandArgs:
    """Translate the posted form/file fields into command line arguments.

    Each field key encodes which (sub)command it belongs to and whether it
    is an option or a positional argument (see FieldId/FieldInfo).
    """
    def __init__(self):
        field_infos = [FieldInfo.factory(key) for key in
                       list(request.form.keys()) + list(request.files.keys())]
        # important to sort them so they will be in expected order on command line
        self.field_infos = list(sorted(field_infos))

    def command_args(self, command_index) -> List[str]:
        """
        Convert the post request into a list of command line arguments
        :param command_index: (int) the index for the command to get arguments for.
        :return: list of command line arguments for command at that cmd_index
        """
        args = []

        # only include relevant fields for this command index
        commands_field_infos = [fi for fi in self.field_infos
                                if fi.param.command_index == command_index]
        commands_field_infos = sorted(commands_field_infos)

        for fi in commands_field_infos:
            # must be called mostly for saving and preparing file output.
            fi.before_script_execute()

            if fi.cmd_opt.startswith('--'):
                # it's an option
                args.extend(self._process_option(fi))
            else:
                # argument(s)
                if isinstance(fi, FieldFileInfo):
                    # it's a file, append the written temp file path
                    # TODO: does file upload support multiple keys? In that case support it.
                    args.append(fi.file_path)
                else:
                    arg_values = request.form.getlist(fi.key)
                    has_values = bool(''.join(arg_values))
                    if has_values:
                        if fi.param.nargs == -1:
                            # Variadic argument, in html form each argument
                            # is a separate line in a textarea.
                            # treat each line we get from text area as a separate argument.
                            for value in arg_values:
                                values = value.splitlines()
                                logger.info(f'variadic arguments, split into: "{values}"')
                                args.extend(values)
                        else:
                            logger.info(f'arg_value: "{arg_values}"')
                            args.extend(arg_values)

        return args

    def _process_option(self, field_info):
        """Yield the command line tokens for one option field (may be none)."""
        vals = request.form.getlist(field_info.key)
        if field_info.is_file:
            if field_info.link_name:
                # it's a file, append the file path
                yield field_info.cmd_opt
                yield field_info.file_path
        elif field_info.param.param_type == 'flag':
            # To work with flag that is default True
            # a hidden field with same name is also sent by form.
            # This is to detect if checkbox was not checked as then
            # we will get the field anyway with the "off flag" as value.
            if len(vals) == 1:
                off_flag = vals[0]
                flag_on_cmd_line = off_flag
            else:
                # we got both off and on flags, checkbox is checked.
                on_flag = vals[1]
                flag_on_cmd_line = on_flag

            yield flag_on_cmd_line
        elif ''.join(vals):
            # opt with value, if option was given multiple times get the values for each.
            # flag options should always be set if we get them
            # for normal options they must have a non empty value
            yield field_info.cmd_opt
            for val in vals:
                if val:
                    yield val
        else:
            # option with empty values, should not be added to command line.
            pass
class FieldInfo:
    """
    Extract information from the encoded form input field name
    the parts:
        [command_index].[opt_or_arg_index].[click_type].[html_input_type].[opt_or_arg_name]
    e.g.
        "0.0.option.text.text.--an-option"
        "0.1.argument.file[rb].text.an-argument"
    """
    @staticmethod
    def factory(key):
        """Create the FieldInfo subclass matching the field's click type.

        File/path fields that were actually uploaded get the "input"
        variants; otherwise the "output" variants create empty temp targets.
        """
        field_id = FieldId.from_string(key)
        is_file = field_id.click_type.startswith('file')
        is_path = field_id.click_type.startswith('path')
        is_uploaded = key in request.files
        if is_file:
            if is_uploaded:
                field_info = FieldFileInfo(field_id)
            else:
                field_info = FieldOutFileInfo(field_id)
        elif is_path:
            if is_uploaded:
                field_info = FieldPathInfo(field_id)
            else:
                field_info = FieldPathOutInfo(field_id)
        else:
            field_info = FieldInfo(field_id)
        return field_info

    def __init__(self, param: FieldId):
        self.param = param
        self.key = param.key

        'Type of option (file, text)'
        self.is_file = self.param.click_type.startswith('file')

        'The actual command line option (--debug)'
        self.cmd_opt = param.name

        # Set by file-producing subclasses when a result should be offered
        # for download in the footer.
        self.generate_download_link = False

    def before_script_execute(self):
        # Hook: subclasses persist uploads / create output targets here.
        pass

    def after_script_executed(self):
        # Hook: subclasses post-process results (e.g. zip folders) here.
        pass

    def __str__(self):
        return str(self.param)

    def __lt__(self, other):
        "Make class sortable"
        # Order by (command, parameter) position so arguments land on the
        # command line in the order click expects.
        return (self.param.command_index, self.param.param_index) < \
               (other.param.command_index, other.param.param_index)

    def __eq__(self, other):
        # NOTE: defining __eq__ without __hash__ makes instances unhashable;
        # fine here since they are only sorted and compared, never hashed.
        return self.key == other.key
class FieldFileInfo(FieldInfo):
    """
    Use for processing input fields of file type.
    Saves the posted data to a temp file.
    """
    # Temp dir is held on the class so it is unique per request.
    _temp_dir = None

    def __init__(self, fimeta):
        super().__init__(fimeta)
        # Extract the file mode embedded in the click type, e.g. "file[rw]" -> "rw".
        self.mode = self.param.click_type.split('[')[1][:-1]
        # Writable files are results the user should be able to download.
        self.generate_download_link = 'w' in self.mode
        self.link_name = f'{self.cmd_opt}.out'

        logger.info(f'File mode for {self.key} is {self.mode}')

    def before_script_execute(self):
        """Persist the uploaded content before the script runs."""
        self.save()

    @classmethod
    def temp_dir(cls):
        """Lazily create and return the per-request temp directory."""
        if not cls._temp_dir:
            cls._temp_dir = tempfile.mkdtemp(dir=click_web.OUTPUT_FOLDER)
            logger.info(f'Temp dir: {cls._temp_dir}')
        return cls._temp_dir

    def save(self):
        """Write the uploaded file to a unique temp path (kept in self.file_path).

        Raises:
            ValueError: if the request contained an empty file part.
        """
        logger.info('Saving...')
        logger.info('field value is a file! %s', self.key)
        file = request.files[self.key]
        # if user does not select file, browser also
        # submit a empty part without filename
        if file.filename == '':
            raise ValueError('No selected file')
        elif file and file.filename:
            filename = secure_filename(file.filename)
            name, suffix = os.path.splitext(filename)
            fd, filename = tempfile.mkstemp(dir=self.temp_dir(), prefix=name, suffix=suffix)
            # Fix: mkstemp returns an open OS-level descriptor; close it so
            # the handle is not leaked (file.save reopens the path itself).
            os.close(fd)
            self.file_path = filename
            logger.info(f'Saving {self.key} to (unknown)')
            file.save(filename)

    def __str__(self):
        res = [super().__str__()]
        res.append(f'file_path: {self.file_path}')
        return ', '.join(res)
class FieldOutFileInfo(FieldFileInfo):
    """
    Used when file option is just for output and form posted it as hidden or text field.
    Just create a empty temp file to give it's path to command.
    """
    def __init__(self, fimeta):
        super().__init__(fimeta)
        if self.param.form_type == 'text':
            self.link_name = request.form[self.key]
            # use the name provided in the form as suffix so the download
            # at least keeps the same file extension
            self.file_suffix = request.form[self.key]
        else:
            # hidden no preferred file name can be provided by user
            self.file_suffix = '.out'

    def save(self):
        """Create an empty temp file whose path is handed to the command.

        Bug fixed: ``tempfile.mkstemp`` returns a ``(fd, path)`` tuple; the
        original assigned that whole tuple to ``file_path``, producing an
        invalid command line argument.  Unpack it and close the descriptor.
        """
        name = secure_filename(self.key)
        fd, filename = tempfile.mkstemp(dir=self.temp_dir(), prefix=name, suffix=self.file_suffix)
        os.close(fd)
        logger.info(f'Creating empty file for {self.key} as (unknown)')
        self.file_path = filename
class FieldPathInfo(FieldFileInfo):
    """
    Use for processing input fields of path type.
    Extracts the posted data to a temp folder.
    When script finished zip that folder and provide download link to zip file.
    """
    def save(self):
        """Save the uploaded zip, then unpack it; file_path becomes the folder."""
        super().save()
        zip_extract_dir = tempfile.mkdtemp(dir=self.temp_dir())
        logger.info(f'Extracting: {self.file_path} to {zip_extract_dir}')
        shutil.unpack_archive(self.file_path, zip_extract_dir, 'zip')
        self.file_path = zip_extract_dir

    def after_script_executed(self):
        """Zip the (possibly modified) folder and expose it for download."""
        super().after_script_executed()
        fd, filename = tempfile.mkstemp(dir=self.temp_dir(), prefix=self.key)
        # Fix: close the descriptor returned by mkstemp — only the path is
        # needed as the archive base name; the open fd was leaked before.
        os.close(fd)
        folder_path = self.file_path
        self.file_path = filename
        logger.info(f'Zipping {self.key} to (unknown)')
        self.file_path = shutil.make_archive(self.file_path, 'zip', folder_path)
        logger.info(f'Zip file created {self.file_path}')
        self.generate_download_link = True
class FieldPathOutInfo(FieldOutFileInfo):
    """
    Use for processing output fields of path type.
    Create a folder and use as path to script.
    When script finished zip that folder and provide download link to zip file.
    """
    def save(self):
        """Create an empty output folder; its path is handed to the command."""
        super().save()
        self.file_path = tempfile.mkdtemp(dir=self.temp_dir())

    def after_script_executed(self):
        """Zip the output folder and expose the archive for download."""
        super().after_script_executed()
        fd, filename = tempfile.mkstemp(dir=self.temp_dir(), prefix=self.key)
        # Fix: close the descriptor returned by mkstemp — only the path is
        # needed as the archive base name; the open fd was leaked before.
        os.close(fd)
        folder_path = self.file_path
        self.file_path = filename
        logger.info(f'Zipping {self.key} to (unknown)')
        self.file_path = shutil.make_archive(self.file_path, 'zip', folder_path)
        logger.info(f'Zip file created {self.file_path}')
        self.generate_download_link = True
| 2.421875 | 2 |
features/FEATURE_BLE/targets/TARGET_NORDIC/TARGET_MCU_NRF51822/sdk/script/pick_nrf51_files.py | pradeep-gr/mbed-os5-onsemi | 22 | 12764091 | #!/usr/bin/env python
# Copyright (c) 2015-2016 ARM Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, shutil, json, pprint, sys
from collections import OrderedDict
help_text = """
Usage: python {} [options] <full-noridc-sdk-path> <nrf51-sdk-yotta-module-path>
options: --purge : to delete all existing files and start again
--dry-run : to list the files to be copied but not actually copy them
""".format(os.path.basename(__file__))
# exclude path to avoid confusion over files of the same name
exclude_path = ["examples", "SVD", "s110", "s120", "s210", "s310", "nrf_soc_nosd", "serialization/connectivity",
'components/libraries/hci/config', 'components/libraries/bootloader_dfu/ble_transport']
def find(name, path):
    """Locate exactly one file called *name* under *path* (Python 2 script).

    Directories whose path contains any entry of the module-level
    ``exclude_path`` list are skipped.  Returns the single matching path,
    or None (with a console warning) when zero or multiple matches exist.
    """
    paths = []
    for root, dirs, files in os.walk(path):
        # Skip any directory whose path matches an excluded fragment.
        if True not in [x in root for x in exclude_path]:
            if name in files:
                paths.append(os.path.join(root, name))
    if len(paths) == 0:
        print "-"*30
        print "Warning! No {} found!!!!".format(name)
        print "-"*30
        return None
    elif len(paths) > 1:
        print "-"*30
        print "Warning! More than one {} found!!!!".format(name)
        print paths
        print "-"*30
        return None
    else:
        return paths[0]
def find_dir(dir_name, path):
    """Collect every file under *path* whose directory path contains *dir_name*."""
    matches = []
    for root, _dirs, files in os.walk(path):
        # Substring match on the directory path, mirroring the original filter.
        if dir_name not in root:
            continue
        matches.extend(os.path.join(root, fn) for fn in files)
    return matches
if __name__ == "__main__":
    # define source and destination of copy
    # Expected CLI: [options] <full-nordic-sdk-path> <nrf51-sdk-yotta-module-path>
    arg_valid = True
    if len(sys.argv) not in [3, 4]:
        arg_valid = False
    else:
        # Positional args are taken from the end so options may precede them.
        src_folder = sys.argv[-2]
        yt_module_dir = sys.argv[-1]
        for d in [src_folder, yt_module_dir]:
            if not os.path.isdir(d):
                arg_valid = False
                print src_folder, "is not a folder"

    purge = ("--purge" in sys.argv)
    dry_run = ("--dry-run" in sys.argv)

    if not arg_valid:
        print help_text
        sys.exit(1)

    dst_folder = os.path.join(yt_module_dir, "source/nordic_sdk")

    # build a file_list from required_files.txt
    # Lines starting with "D " name whole directories; other non-comment,
    # non-empty lines name single files located via find().
    file_list = []
    with open("required_files.txt", "r") as fd:
        for line in fd:
            line = line.strip()
            if line.startswith("D "):
                directory = line.split(" ")[-1]
                file_list += find_dir(directory, src_folder)
            elif not line.startswith("#") and line != '':
                fn = os.path.basename(line).strip()
                fn = find(fn, src_folder)
                file_list.append(fn)

    # remove everything from the destination folder
    if purge and not dry_run and os.path.exists(dst_folder):
        shutil.rmtree(dst_folder)

    # copy files
    # Existing destination files are left untouched; headers contribute
    # their directories to the extraIncludes list for module.json.
    extra_includes = []
    for src in file_list:
        if src:
            rel_dst = os.path.relpath(src, src_folder)
            dst = os.path.join(dst_folder, rel_dst)
            print src, "->", dst
            directory = os.path.dirname(dst)
            if not os.path.exists(directory):
                print "Creating directory:", directory
                if not dry_run:
                    os.makedirs(directory)
            if not os.path.isfile(dst):
                print "Copying file", dst
                if not dry_run:
                    shutil.copyfile(src, dst)
            # build a list of extra includes to be added to module.json
            if dst.endswith(".h"):
                inc_rel_path = os.path.relpath(dst, yt_module_dir)
                inc_dir_path = os.path.dirname(inc_rel_path)
                if inc_dir_path not in extra_includes:
                    extra_includes.append(inc_dir_path)

    # write extraIncludes in the module.json file
    mod_json = os.path.join(yt_module_dir, "module.json")
    print "-"*30
    print "Writing extra_includes to {}".format(mod_json)
    print "-"*30
    for n in sorted(extra_includes):
        print n
    if not dry_run:
        # Rewrite module.json in place, preserving key order via OrderedDict.
        with open(mod_json, 'r+') as fd:
            jobj = json.loads(fd.read(), object_pairs_hook=OrderedDict)
            jobj['extraIncludes'] = sorted(extra_includes)
            jdump = json.dumps(jobj, indent=2, separators=(',', ': '))
            fd.seek(0)
            fd.write(jdump)
            fd.write("\n")
            fd.truncate()
| 2.0625 | 2 |
mdstcl_kernel/mdstcl_kernel/kernel.py | MDSplus/jupyter | 1 | 12764092 | from ipykernel.kernelbase import Kernel
from MDSplus import Data
class MdstclKernel(Kernel):
    """Jupyter kernel that executes MDSplus TCL (mdstcl) commands.

    Each input cell is split into lines and each line is run through the
    MDSplus ``tcl()`` TDI function; output goes to stdout on success and
    to stderr (prefixed by the failing line) on error.
    """
    implementation = 'Mdstcl'
    implementation_version = '1.0'
    language = 'no-op'
    language_version = '0.1'
    language_info = {
        'name': 'mdstcl commands',
        'mimetype': 'text/plain',
        'file_extension': '.tcl',
    }
    banner = "MDSplus Mdstcl kernel - Tree Command Language interpreter"

    def do_execute(self, code, silent, store_history=True, user_expressions=None,
                   allow_stdin=False):
        """Run each line of the cell as a TCL command and stream the output."""
        if not silent:
            try:
                lines=code.split('\n')
                for line in lines:
                    # tcl() stores its status in _status and its text output
                    # in _out; the expression evaluates to _out.
                    ans = Data.execute('_status=tcl($1,_out),_out',line)
                    status = int(Data.execute('_status'))
                    # Odd status values indicate success in MDSplus.
                    if status & 1:
                        stream_content = {'name': 'stdout', 'text': str(ans)}
                    else:
                        stream_content = {'name':'stderr','text': '\n'.join([line,str(ans)])}
                    self.send_response(self.iopub_socket,'stream',stream_content)
            except Exception as e:
                # Surface unexpected Python-side failures on stderr.
                stream_content = {'name': 'stderr', 'text': str(e)}
                self.send_response(self.iopub_socket, 'stream', stream_content)
        return {'status': 'ok',
                # The base class increments the execution count
                'execution_count': self.execution_count,
                'payload': [],
                'user_expressions': {},
               }
| 2.171875 | 2 |
agenda.py | StartFuture/workstation-backend | 1 | 12764093 | <reponame>StartFuture/workstation-backend<filename>agenda.py
import app.dao as Bank
# agenda workstation
from datetime import datetime
from datetime import timedelta
# validando e salvando agendamentos
# NOTE(review): `request` is used below but never imported in this file —
# presumably Flask's request object; confirm this code runs inside a
# request context (the datetime/timedelta imports above are also unused).
# Validate and save a workstation booking from the posted form data.
start_date = request.form['data_inicio']  # aaaa/mm/dd
start_hour = request.form["hora_inicio"]  # hh:mm:ss
final_hour = request.form["hora_fim"]  # hh:mm:ss
final_date = request.form["data_final"]  # aaaa/mm/dd
id_user = request.form["id_user"]
id_box = request.form["id_box"]

if Bank.verify_scheduling(start_date, start_hour, final_hour, final_date, id_box):
    Bank.save_scheduling(start_date, start_hour, final_hour,
                         final_date, id_user, id_box)
else:
    print('Agendamento inválido')

# Send all booked dates to the front end: expand each booking's start/end
# range into a list of "d/m/y" strings.
list_locations = Bank.show_all_scheduling()
if list_locations:
    for dict_locations in list_locations:
        start_date = dict_locations['datainicio']
        final_date = dict_locations['datafim']
        start_day = start_date.day
        start_month = start_date.month
        start_year = start_date.year
        final_day = final_date.day
        final_month = final_date.month
        final_year = final_date.year
        # NOTE(review): this assumes start and end fall in the same month —
        # range(start_day, final_day+1) breaks across month boundaries; confirm.
        list_days = [days for days in range(start_day, final_day+1)]
        list_dates = [f'{days}/{start_month}/{start_year}' for days in list_days]
        print(list_dates)
| 2.53125 | 3 |
compare_experiments/compare_generic_genes.py | ajlee21/generic-expression-patterns | 8 | 12764094 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:generic_expression] *
# language: python
# name: conda-env-generic_expression-py
# ---
# # Compare generic genes
#
# The goal of this notebook is to compare the generic genes found using the same template experiment run two times and 2 different recount2 template experiments.
# +
# %load_ext autoreload
# %autoreload 2
import os
from scipy import stats
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from ponyo import utils
# +
# Read in config variables
base_dir = os.path.abspath(os.path.join(os.getcwd(), "../"))
config_filename = os.path.abspath(
    os.path.join(base_dir, "configs", "config_human_general.tsv")
)
params = utils.read_config(config_filename)
# NOTE(review): `local_dir` is read from the config but never used below --
# confirm whether it can be dropped.
local_dir = params["local_dir"]
# The two recount2 template experiments being compared.
project_id1 = "SRP012656"
project_id2 = "SRP061689"
# +
# Get data directory containing gene summary data
data_dir = os.path.join(base_dir, "human_general_analysis")
# Get gene ranking files
# One summary per SOPHIE run: project 1 (two independent runs) and project 2.
gene_ranking_filename1 = os.path.join(
    data_dir, f"generic_gene_summary_{project_id1}.tsv"
)
gene_ranking_filename1_run2 = os.path.join(
    data_dir, f"generic_gene_summary_{project_id1}_run2.tsv"
)
gene_ranking_filename2 = os.path.join(
    data_dir, f"generic_gene_summary_{project_id2}.tsv"
)
# Get template data
# Processed expression matrices for the two template experiments.
template_filename1 = os.path.join(
    data_dir, "data", f"processed_recount2_template_{project_id1}.tsv"
)
template_filename2 = os.path.join(
    data_dir, "data", f"processed_recount2_template_{project_id2}.tsv"
)
# -
# ## Correlation between rankings between same experiment
#
# Here we compare gene ranking after running SOPHIE 2 times using the same template experiment but different seeds.
# Load gene ranking
gene_ranking_summary1 = pd.read_csv(
    gene_ranking_filename1, sep="\t", index_col=0, header=0
)
gene_ranking_summary1_run2 = pd.read_csv(
    gene_ranking_filename1_run2, sep="\t", index_col=0, header=0
)
# Get simulated ranking
# NOTE(review): .rename(name) is redundant with .to_frame(name); one of
# the two could be dropped.
gene_ranking1 = (
    gene_ranking_summary1["Rank (simulated)"].rename("Rank 1").to_frame("Rank 1")
)
gene_ranking1_run2 = (
    gene_ranking_summary1_run2["Rank (simulated)"].rename("Rank 2").to_frame("Rank 2")
)
# +
# Scale ranking to percentile (0,100)
# fit_transform is called per run so each ranking is rescaled to its own
# min/max -- i.e. each becomes an exact 0-100 percentile.
scaler = MinMaxScaler(feature_range=(0, 100))
gene_ranking1["Percentile 1"] = scaler.fit_transform(
    np.array(gene_ranking1["Rank 1"]).reshape(-1, 1)
)
gene_ranking1_run2["Percentile 2"] = scaler.fit_transform(
    np.array(gene_ranking1_run2["Rank 2"]).reshape(-1, 1)
)
# .head() is a notebook display call; it is a no-op when run as a script.
gene_ranking1_run2.head()
# -
# Combine ranking
gene_ranking_same_combined = pd.concat(
    [gene_ranking1["Percentile 1"], gene_ranking1_run2["Percentile 2"]], axis=1
)
print(gene_ranking_same_combined.shape)
gene_ranking_same_combined.head()
# Check for NAs
gene_ranking_same_combined[pd.isnull(gene_ranking_same_combined).any(axis=1)]
# +
# Plot correlation between ranking
# Spearman (rank) correlation between the two runs' percentiles.
r, p = stats.spearmanr(
    gene_ranking_same_combined["Percentile 1"],
    gene_ranking_same_combined["Percentile 2"],
)
print(r, p)
fig = sns.jointplot(
    data=gene_ranking_same_combined,
    x="Percentile 1",
    y="Percentile 2",
    kind="hex",
    marginal_kws={"color": "white", "edgecolor": "white"},
)
fig.set_axis_labels(
    f"Percentile in {project_id1}",
    f"Percentile in {project_id1} different runs",
    fontsize=14,
    fontname="Verdana",
)
cbar_ax = fig.fig.add_axes([0.9, 0.25, 0.05, 0.4])  # x, y, width, height
cb = plt.colorbar(cax=cbar_ax)
cb.set_label("Number of genes")
output_figure_filename = "concordance_between_same_recount2_templates.svg"
fig.savefig(
    output_figure_filename,
    format="svg",
    bbox_inches="tight",
    transparent=True,
    pad_inches=0,
    dpi=300,
)
# -
# **Takeaway:**
# * Running SOPHIE twice using the same template experiment will generate 2 different sets of simulated experiments.
# * Since the template experiment is the same, these 2 sets of simulated experiments will have the same experimental design structure/biological context
# * As expected, the concordance is very high especially for high ranked and low ranked genes. The genes in the middle rank are more sensitive to changes so you don't get as clear of a signal compared to the extreme ranked genes.
# ## Correlation between rankings between 2 different experiments
#
# Here we compare gene ranking generated by SOPHIE using 2 different template experiments.
# Load gene ranking
gene_ranking_summary2 = pd.read_csv(
    gene_ranking_filename2, sep="\t", index_col=0, header=0
)
# Get simulated ranking
gene_ranking1 = (
    gene_ranking_summary1["Rank (simulated)"].rename("Rank 1").to_frame("Rank 1")
)
gene_ranking2 = (
    gene_ranking_summary2["Rank (simulated)"].rename("Rank 2").to_frame("Rank 2")
)
# +
# Scale ranking to percentile (0,100)
# Same per-run rescaling as in the previous section.
scaler = MinMaxScaler(feature_range=(0, 100))
gene_ranking1["Percentile 1"] = scaler.fit_transform(
    np.array(gene_ranking1["Rank 1"]).reshape(-1, 1)
)
gene_ranking2["Percentile 2"] = scaler.fit_transform(
    np.array(gene_ranking2["Rank 2"]).reshape(-1, 1)
)
gene_ranking2.head()
# -
# Combine ranking
gene_ranking_diff_combined = pd.concat(
    [gene_ranking1["Percentile 1"], gene_ranking2["Percentile 2"]], axis=1
)
print(gene_ranking_diff_combined.shape)
gene_ranking_diff_combined.head()
# Check for NAs
gene_ranking_diff_combined[pd.isnull(gene_ranking_diff_combined).any(axis=1)]
# +
# Plot correlation between ranking
r, p = stats.spearmanr(
    gene_ranking_diff_combined["Percentile 1"],
    gene_ranking_diff_combined["Percentile 2"],
)
print(r, p)
fig = sns.jointplot(
    data=gene_ranking_diff_combined,
    x="Percentile 1",
    y="Percentile 2",
    kind="hex",
    marginal_kws={"color": "white", "edgecolor": "white"},
)
fig.set_axis_labels(
    f"Percentile in {project_id1}",
    f"Percentile in {project_id2}",
    fontsize=14,
    fontname="Verdana",
)
cbar_ax = fig.fig.add_axes([0.9, 0.25, 0.05, 0.4])  # x, y, width, height
cb = plt.colorbar(cax=cbar_ax)
cb.set_label("Number of genes")
output_figure_filename = "concordance_between_diff_recount2_templates.svg"
fig.savefig(
    output_figure_filename,
    format="svg",
    bbox_inches="tight",
    transparent=True,
    pad_inches=0,
    dpi=300,
)
# -
# **Takeaway:**
#
# * Looks like there is good concordance between highly ranked genes (i.e. generic genes)
# * By comparison if we run SOPHIE using two different template experiments, there are genes in the off-diagonal regions that might indicate that there are generic within the given context of the specific experiment.
# * In general, the genes in the middle rank are more sensitive to changes so you don't get as clear of a signal compared to the highest rank genes.
# ## Examine gene expression data
# Read expression data
template_1 = pd.read_csv(template_filename1, sep="\t", index_col=0, header=0)
template_2 = pd.read_csv(template_filename2, sep="\t", index_col=0, header=0)
# +
# Get concordance genes: ranked in the top 20% (percentile > 80) by *both*
# template experiments.
concordant_genes = list(
    gene_ranking_diff_combined[
        (gene_ranking_diff_combined["Percentile 1"] > 80)
        & (gene_ranking_diff_combined["Percentile 2"] > 80)
    ].index
)
# Get discordant genes: everything else.  Materialized as a *list* in the
# original index order -- the cells below use this collection to index the
# pandas Series of template means, and pandas rejects a set as an indexer
# (``series[a_set]`` raises TypeError in current releases).  A list also
# keeps the order deterministic, unlike the previous set.
_concordant_lookup = set(concordant_genes)
discordant_genes = [
    gene
    for gene in gene_ranking_diff_combined.index
    if gene not in _concordant_lookup
]
# +
# Distribution of concordant genes in template experiment 1
template1_mean = template_1.mean()
# Name the repeated selection once instead of re-evaluating
# ``template1_mean[concordant_genes]`` five times per cell.
concordant_expr1 = template1_mean[concordant_genes]
print(
    "Percent concordant genes with 0 expression in template 1:",
    len(concordant_expr1.loc[concordant_expr1 == 0]) / len(concordant_expr1),
)
print(
    "Percent nonzero concordant genes in template 1:",
    len(concordant_expr1.loc[(concordant_expr1 > 0) & (concordant_expr1 < 1000)])
    / len(concordant_expr1),
)
# Reuse the precomputed mean (the previous version recomputed
# template_1.mean() just for the plot).
# NOTE(review): sns.distplot is deprecated in recent seaborn releases;
# histplot/displot is the replacement if seaborn is ever upgraded.
f1 = sns.distplot(concordant_expr1, kde=False)
f1.set_title(f"Expression of concordant genes in {project_id1}")
f1.set_xlabel("log(gene expression)")
f1.set_ylabel("log(count)")
f1.set(xscale="log", yscale="log")
# +
# Distribution of concordant genes in template experiment 2
template2_mean = template_2.mean()
concordant_expr2 = template2_mean[concordant_genes]
print(
    "Percent concordant genes with 0 expression in template 2:",
    len(concordant_expr2.loc[concordant_expr2 == 0]) / len(concordant_expr2),
)
print(
    "Percent nonzero concordant genes in template 2:",
    len(concordant_expr2.loc[(concordant_expr2 > 0) & (concordant_expr2 < 1000)])
    / len(concordant_expr2),
)
# There are more 0 expressed genes in this template experiment
f2 = sns.distplot(concordant_expr2, kde=False)
f2.set_title(f"Expression of concordant genes in {project_id2}")
f2.set_xlabel("log(gene expression)")
f2.set_ylabel("log(count)")
f2.set(xscale="log", yscale="log")
# +
# Distribution of discordant genes in template experiment 1
template1_mean = template_1.mean()
discordant_expr1 = template1_mean[discordant_genes]
print(
    "Percent discordant genes with 0 expression in template 1:",
    len(discordant_expr1.loc[discordant_expr1 == 0]) / len(discordant_expr1),
)
print(
    "Percent nonzero discordant genes in template 1:",
    len(discordant_expr1.loc[(discordant_expr1 > 0) & (discordant_expr1 < 1000)])
    / len(discordant_expr1),
)
# Fraction of discordant genes with any nonzero expression (no label in
# the original output; kept for compatibility).
print(
    len(discordant_expr1.loc[discordant_expr1 > 0]) / len(discordant_expr1)
)
f3 = sns.distplot(discordant_expr1, kde=False)
f3.set_title(f"Expression of discordant genes in {project_id1}")
f3.set_xlabel("log(gene expression)")
f3.set_ylabel("log(count)")
f3.set(xscale="log", yscale="log")
# +
# Distribution of discordant genes in template experiment 2
template2_mean = template_2.mean()
discordant_expr2 = template2_mean[discordant_genes]
print(
    "Percent discordant genes with 0 expression in template 2:",
    len(discordant_expr2.loc[discordant_expr2 == 0]) / len(discordant_expr2),
)
print(
    "Percent nonzero discordant genes in template 2:",
    len(discordant_expr2.loc[(discordant_expr2 > 0) & (discordant_expr2 < 1000)])
    / len(discordant_expr2),
)
f4 = sns.distplot(discordant_expr2, kde=False)
f4.set_title(f"Expression of discordant genes in {project_id2}")
f4.set_xlabel("log(gene expression)")
f4.set_ylabel("log(count)")
f4.set(xscale="log", yscale="log")
# -
# **Takeaway:**
#
# Doesn't appear to be much of a difference between the distribution of average gene expression values for these two experiments.
#
# Theoretically, I would expect the scenario where a gene is lowly expressed in the context of template experiment 1 and therefore not found to be generic. But this same gene could be found to be generic in the context of template experiment 2 if it is more expressed. Its possible that differences in gene expression distribution can change which genes are found to be generic given that the simulation is producing experiments with a similar context.
#
# In this case, despite having similar gene expression distributions there are still many differences in gene ranking. This suggests to me that level of gene expression activity doesn't matter as much as the overall patterns perhaps.
#
# Overall we observe a slight shift showing that concordant genes are more lowly expressed compared to discordant genes, but most genes are still predominantly lowly gene expression. If most genes have expression levels very close to 0, then small fluctuations in the expression of some genes could lead to large changes in rank without changing the overall expression distribution.
| 2.203125 | 2 |
nengo_ssp/__init__.py | nsdumont/nengo_ssp | 0 | 12764095 | <gh_stars>0
"""Top-level package for Spatial Semantic Pointers."""
__author__ = """<NAME>-<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
import nengo_ssp.dists
import nengo_ssp.hrr_algebra
import nengo_ssp.plotting
import nengo_ssp.networks
import nengo_ssp.utils
from nengo_ssp.spatial_semantic_pointer import SpatialSemanticPointer
from nengo_ssp.vector_generation import (
PlaneWaveBasis,
WeightedPlaneWaveBasis,
HexagonalBasis,
RecursiveBasisFun,
GridCellEncoders,
UnitaryVectors
)
| 1.203125 | 1 |
tests/implementations/sqlalchemy_.py | novamera/fastapi-crudrouter | 0 | 12764096 | from fastapi import FastAPI
from sqlalchemy import Column, Float, Integer, String
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy_utils import create_database, database_exists, drop_database
from fastapi_crudrouter import SQLAlchemyCRUDRouter
from tests import (
Carrot,
CarrotCreate,
CarrotUpdate,
CustomPotato,
PAGINATION_SIZE,
Potato,
PotatoType,
CUSTOM_TAGS,
)
SQLALCHEMY_DATABASE_URL = "sqlite:///./test.db"
def _setup_base_app():
    """Create a fresh SQLite test database plus FastAPI/SQLAlchemy scaffolding.

    Drops and recreates the on-disk database so every test app starts from a
    clean slate, then returns an ``(app, engine, Base, session)`` tuple.
    ``session`` is a generator-style dependency that yields a DB session,
    commits it when the request finishes without error, and always closes it.
    """
    # Start from a clean slate: recreate the on-disk SQLite database.
    if database_exists(SQLALCHEMY_DATABASE_URL):
        drop_database(SQLALCHEMY_DATABASE_URL)
    create_database(SQLALCHEMY_DATABASE_URL)

    engine = create_engine(
        SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
    )
    session_factory = sessionmaker(autocommit=False, autoflush=False, bind=engine)
    Base = declarative_base()
    app = FastAPI()

    def session():
        db = session_factory()
        try:
            yield db
            db.commit()
        finally:
            db.close()

    return app, engine, Base, session
def sqlalchemy_implementation():
    """Build the baseline test app: Potato (paginated) and Carrot routers.

    Potato exercises pagination; Carrot exercises separate create/update
    schemas and custom tags.
    """
    app, engine, Base, session = _setup_base_app()
    class PotatoModel(Base):
        __tablename__ = "potatoes"
        id = Column(Integer, primary_key=True, index=True)
        thickness = Column(Float)
        mass = Column(Float)
        color = Column(String)
        type = Column(String)
    class CarrotModel(Base):
        __tablename__ = "carrots"
        id = Column(Integer, primary_key=True, index=True)
        length = Column(Float)
        color = Column(String)
    # Materialize the tables before any request hits the routers.
    Base.metadata.create_all(bind=engine)
    app.include_router(
        SQLAlchemyCRUDRouter(
            schema=Potato,
            db_model=PotatoModel,
            db=session,
            prefix="potato",
            paginate=PAGINATION_SIZE,
        )
    )
    app.include_router(
        SQLAlchemyCRUDRouter(
            schema=Carrot,
            db_model=CarrotModel,
            db=session,
            create_schema=CarrotCreate,
            update_schema=CarrotUpdate,
            prefix="carrot",
            tags=CUSTOM_TAGS,
        )
    )
    return app
# noinspection DuplicatedCode
def sqlalchemy_implementation_custom_ids():
    """App whose model uses a non-default primary-key column name.

    Verifies that the CRUD router resolves the PK from the model rather
    than assuming a column literally named ``id``.
    """
    app, engine, Base, session = _setup_base_app()
    class PotatoModel(Base):
        __tablename__ = "potatoes"
        # Custom primary-key name instead of the conventional ``id``.
        potato_id = Column(Integer, primary_key=True, index=True)
        thickness = Column(Float)
        mass = Column(Float)
        color = Column(String)
        type = Column(String)
    Base.metadata.create_all(bind=engine)
    app.include_router(
        SQLAlchemyCRUDRouter(schema=CustomPotato, db_model=PotatoModel, db=session)
    )
    return app
def sqlalchemy_implementation_string_pk():
    """App whose model uses a *string* primary key (``name``).

    Verifies that the CRUD router handles non-integer primary keys.
    """
    app, engine, Base, session = _setup_base_app()
    class PotatoTypeModel(Base):
        __tablename__ = "potato_type"
        name = Column(String, primary_key=True, index=True)
        origin = Column(String)
    Base.metadata.create_all(bind=engine)
    app.include_router(
        SQLAlchemyCRUDRouter(
            schema=PotatoType,
            # With a string PK the client supplies the key, so the create
            # schema is the full schema (PK included).
            create_schema=PotatoType,
            db_model=PotatoTypeModel,
            db=session,
            prefix="potato_type",
        )
    )
    return app
def sqlalchemy_implementation_integrity_errors():
    """App configured so requests can trigger database IntegrityErrors.

    ``PotatoModel.color`` carries a UNIQUE constraint, letting tests assert
    how the router surfaces constraint violations on create/update.
    """
    app, engine, Base, session = _setup_base_app()
    class PotatoModel(Base):
        __tablename__ = "potatoes"
        id = Column(Integer, primary_key=True, index=True)
        thickness = Column(Float)
        mass = Column(Float)
        # UNIQUE constraint: inserting two potatoes with the same color
        # raises IntegrityError.
        color = Column(String, unique=True)
        type = Column(String)
    class CarrotModel(Base):
        __tablename__ = "carrots"
        id = Column(Integer, primary_key=True, index=True)
        length = Column(Float)
        color = Column(String)
    Base.metadata.create_all(bind=engine)
    app.include_router(
        SQLAlchemyCRUDRouter(
            schema=Potato,
            db_model=PotatoModel,
            db=session,
            create_schema=Potato,
            prefix="potatoes",
        )
    )
    app.include_router(
        SQLAlchemyCRUDRouter(
            schema=Carrot,
            db_model=CarrotModel,
            db=session,
            update_schema=CarrotUpdate,
            prefix="carrots",
        )
    )
    return app
| 2.640625 | 3 |
fabfile.py | enterstudio/serapis | 7 | 12764097 | """
Steps:
-get the summer.ai.pem key
-login to the env.host machine and add your public key to the machine's authorized keys
http://www.perrygeo.com/running-python-with-compiled-code-on-aws-lambda.html
"""
from fabric.api import local, sudo, run, warn_only, env, lcd, cd
import yaml
# Load deployment settings once at import time.
# ``safe_load`` replaces the previous Loader-less ``yaml.load`` call, which
# is deprecated and can construct arbitrary Python objects from the YAML
# stream; a plain config file only needs the safe scalar/list/dict subset.
with open("serapis/config/default.yaml") as f:
    config = yaml.safe_load(f)
# the user to use for the remote commands
env.user = 'ec2-user'
# the servers where the commands are executed
env.hosts = [config['ec2_ip']]
# Artifact names used by the packaging/deploy tasks below.
gitfile = 'serapis.git.zip'
lambdafile = 'serapis.lambda.zip'
lambda_bucket = 'ai.summer.1mwords.test'
lambdafunction = config['lambda_function_name']
# NLTK corpora files that must ship inside the lambda zip.
corpora = [
    'nltk_data/taggers/averaged_perceptron_tagger/averaged_perceptron_tagger.pickle',
    'nltk_data/tokenizers/punkt/english.pickle'
]
def pack():
    """Build the lambda dependency zip on the remote EC2 builder.

    Provisions build tools and swap, creates a virtualenv, installs the
    scientific stack plus requirements.txt, strips shared objects, zips the
    site-packages, copies the archive back locally, then layers the project
    code on top via update().
    """
    # Make sure machine and dev tools are up to date
    sudo('sudo yum -y update')
    sudo('sudo yum -y upgrade')
    sudo('yum install -y atlas-devel atlas-sse3-devel blas-devel gcc gcc-c++ lapack-devel python27-devel --enablerepo=epel')
    sudo('pip install -U pip')
    # warn_only: the rm may fail on a fresh box where the zip does not exist.
    with warn_only():
        run('rm ~/wordnik.zip')
    # Add 1.5 GB of swap so scipy/numpy compilation does not OOM.
    sudo('dd if=/dev/zero of=/swapfile bs=1024 count=1500000')
    sudo('mkswap /swapfile')
    sudo('chmod 0600 /swapfile')
    sudo('swapon /swapfile')
    run('/usr/bin/virtualenv --python /usr/bin/python build --always-copy --no-site-packages')
    # NOTE(review): each fabric run() starts a fresh shell, so this
    # `source activate` likely does NOT affect the pip calls below --
    # verify where the packages actually land.
    run('source build/bin/activate')
    # Order is important here, so let's make sure we've got these right
    run('pip install -U pip')
    run('pip install --use-wheel numpy')
    run('pip install --use-wheel scipy')
    run('pip install --use-wheel sklearn')
    run('pip install --use-wheel pandas')
    # Install the remaining requirements, skipping the scientific stack
    # already installed above in a controlled order.
    with open('requirements.txt') as f:
        for req in f.read().splitlines():
            if req.split("=")[0].lower() not in ('numpy', 'scipy', 'scikit-learn', 'sklearn', 'pandas'):
                run('pip install --use-wheel {}'.format(req))
    for lib in ('lib', 'lib64'):
        # Strip SO files
        run('find "$VIRTUAL_ENV/{}/python2.7/site-packages/" -name "*.so" | xargs strip'.format(lib))
        with cd('$VIRTUAL_ENV/{}/python2.7/site-packages/'.format(lib)):
            run('zip -r -9 -q ~/wordnik.zip *')
    # Get the file back onto our local machine
    local('scp %s@%s:~/wordnik.zip %s' % (env.user, env.hosts[0], lambdafile))
    update()
def install_corpora():
    """Download the NLTK corpora listed in the config into ./nltk_data."""
    corpora_args = " ".join(config['nltk_corpora'])
    local("python -m nltk.downloader -d nltk_data {}".format(corpora_args))
def update():
    """Refresh the lambda zip with the current git HEAD plus credentials/corpora.

    Archives HEAD, unpacks it to a scratch dir, layers it into the existing
    lambda zip (so the dependency layer built by pack() is preserved), then
    adds credentials and the NLTK corpora files.
    """
    # Run tests
    # local("py.test serapis/tests/")
    # Updates code in zip file with current Master without going to EC2 first.
    local('git archive --format=zip HEAD -o %s' % gitfile, capture=False)
    local('unzip -d git_tmp -o -u %s' % gitfile)
    with lcd('git_tmp'):
        local('zip -9r ../%s .' % lambdafile)
    local('zip -9 %s serapis/config/credentials.yaml' % lambdafile)
    for corpus in corpora:
        local('zip -9r {} {}'.format(lambdafile, corpus))
    # Clean up the scratch checkout.
    local('rm -r git_tmp')
def qu():
    """Quick update: re-add only the handler, package source, and temp models
    to the existing lambda zip without rebuilding anything else."""
    local('zip -9 %s lambda_handler.py' % lambdafile)
    local('zip -9r %s serapis/' % lambdafile)
    local('zip -9r %s temp_models/' % lambdafile)
def deploy():
    """Upload the lambda zip to S3 and point the Lambda function at it."""
    # If this says that the function is not found, create it first:
    # aws lambda create-function --region us-east-1 --function-name WordTask --zip-file fileb://wordnik.lambda.zip --handler lambda_handler.handler --runtime python2.7 --timeout 10 --memory-size 512 --role arn:aws:iam::054978852993:role/lambda_basic_execution
    local('aws s3 cp {} s3://{}/{} --profile wordnik'.format(lambdafile, lambda_bucket, lambdafile))
    local('aws lambda update-function-code --region us-east-1 --function-name {} --s3-bucket {} --s3-key {} --profile wordnik'.format(lambdafunction, lambda_bucket, lambdafile))
| 1.992188 | 2 |
LDAR_Sim/src/initialization/input_mapper_v1.py | SensorUp/LDAR_Sim | 2 | 12764098 | <filename>LDAR_Sim/src/initialization/input_mapper_v1.py
# ------------------------------------------------------------------------------
# Program: The LDAR Simulator (LDAR-Sim)
# File: LDAR-Sim input mapper sample
# Purpose: Example input mapper
#
# Copyright (C) 2018-2021 Intelligent Methane Monitoring and Management System (IM3S) Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the MIT License as published
# by the Free Software Foundation, version 3.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
# You should have received a copy of the MIT License
# along with this program. If not, see <https://opensource.org/licenses/MIT>.
#
# ------------------------------------------------------------------------------
def input_mapper_v1(parameters):
    """Map a version 1.0 parameter dictionary onto the current schema.

    **** THIS IS AN EXAMPLE FILE THAT IS PROVIDED AS A TEMPLATE ****

    A real mapper runs four families of hooks, in this fixed order:

    1. *Construction hooks* build parameters the current model expects out
       of depreciated ones.  E.g. if the current model takes start/end
       dates but old files specified a number of timesteps, derive the
       dates from the timesteps here so old files keep working.  This is
       also where global parameters can be mined out of a reference
       program (v1.0 had no global parameter file; values such as
       ``n_simulations``, ``timesteps``, ``start_year``, ``weather_file``,
       ``print_from_simulations`` and ``write_data`` lived inline).
    2. *Destruction hooks* remove keys that no longer exist in the default
       parameter set; without this step an old file fails validation.
       These must run *after* construction so the depreciated values can
       still be read while mapping.
    3. *Recalculation hooks* convert units or remap values whose key is
       unchanged, e.g. ``parameters['leak_rate'] * 0.003332`` for a unit
       change, or correcting a historical typo such as
       ``'categoricle'`` -> ``'categorical'``.
    4. *Key-change hooks* rename old keys to their new names.

    :param parameters: dictionary of version 1.0 parameters to map to a
        compliant version.
    :return: tuple of (model-compliant parameter dictionary, mined global
        parameter dictionary).  When no global parameters are mined, the
        second element is an empty dictionary.
    """
    # ------------------------------------------------------------------
    # 1. Construction hooks -- template has nothing to construct, but a
    #    real mapper would populate mined_global_parameters here.
    mined_global_parameters = {}

    # ------------------------------------------------------------------
    # 2. Destruction hooks -- template has nothing to delete.

    # ------------------------------------------------------------------
    # 3. Recalculation hooks -- template has nothing to recalculate.

    # ------------------------------------------------------------------
    # 4. Key-change hooks -- template has nothing to rename.

    return parameters, mined_global_parameters
| 1.679688 | 2 |
backend/unpp_api/apps/project/migrations/0038_auto_20171107_0801.py | unicef/un-partner-portal | 6 | 12764099 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-11-07 08:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames Application.did_accept_date -> Application.decision_date.
    # Migrations are append-only history: do not edit this file once it has
    # been applied to any shared database.
    dependencies = [
        ('project', '0037_auto_20171031_0715'),
    ]
    operations = [
        migrations.RenameField(
            model_name='application',
            old_name='did_accept_date',
            new_name='decision_date',
        ),
    ]
| 1.570313 | 2 |
dataBase/imdb.py | AmitNiz/homeflix | 1 | 12764100 | <filename>dataBase/imdb.py
import requests,json,os
# Base endpoint of the RapidAPI IMDb-alternative (OMDb-style) service.
url = "https://movie-database-imdb-alternative.p.rapidapi.com/"
headers = {
    'x-rapidapi-host': "movie-database-imdb-alternative.p.rapidapi.com",
    # Fails fast with KeyError at import time when the RapidAPI key is not
    # present in the environment.
    'x-rapidapi-key': os.environ['IMDB_KEY']
    }
def search_by_title(title):
    """Search the IMDb-alternative API by movie title.

    :param title: movie title to search for.
    :return: the first search-result dict, or ``None`` when the response
        contains no ``'Search'`` key (i.e. no match was found).
    """
    query_string = {'r': 'json', 's': title}
    res = requests.request('GET', url, headers=headers, params=query_string)
    # Decode the JSON body once; the previous version called
    # json.loads(res.text) twice for the same response.
    payload = res.json()
    return payload['Search'][0] if 'Search' in payload else None
def extract_imdb_ID(res):
    """Return the IMDb identifier from a search-result entry."""
    imdb_id = res["imdbID"]
    return imdb_id
def query_imdb(title):
    """Look up full movie details for a title.

    Resolves the title to an IMDb id via :func:`search_by_title`, then
    fetches the detailed record for that id.

    :param title: movie title to look up.
    :return: dict of movie details, or ``None`` when no match was found.
    """
    search_res = search_by_title(title)
    # Guard clause: bail out early when the search found nothing.
    if not search_res:
        print("[!] Couldn't find {}".format(title))
        return None
    imdb_ID = extract_imdb_ID(search_res)
    query_string = {'i': imdb_ID, 'r': 'json'}
    response = requests.request('GET', url, headers=headers, params=query_string)
    # response.json() decodes the body directly instead of re-parsing
    # response.text through json.loads.
    return response.json()
| 3.28125 | 3 |
serverful/check_collections_for_updates.py | nggdpp/ndc-pipeline | 0 | 12764101 | <reponame>nggdpp/ndc-pipeline<gh_stars>0
'''
This script works through all of the raw collection records dumped out of ScienceBase, figures out if anything has
changed and takes appropriate action. It keeps track of what it's doing by writing a property back into the raw
collection records it has processed. It's essentially like processing a message out of a queue, but I come back to
the collection record for files and webLinks from other processes. This script will move previous versions of
updated collections to another MongoDB collection (to decide how to deal with later) and creates new homogenized and
simplified collection records for exposure via the API.
'''
import pynggdpp.sciencebase
import pynggdpp.serverful
# NOTE(review): pynggdpp.item_process is imported but never used below --
# confirm whether a later step depends on the import side effects.
import pynggdpp.item_process
sb_collections = pynggdpp.sciencebase.Collections()
serverful_infrastructure = pynggdpp.serverful.Infrastructure()
# Three MongoDB collections: raw dump (input queue), homogenized records
# (API output), and an archive of superseded versions.
ndc_collections_db = serverful_infrastructure.connect_mongodb(collection="ndc_collections")
ndc_collections_raw_db = serverful_infrastructure.connect_mongodb(collection="ndc_collections_raw")
ndc_collection_versions_db = serverful_infrastructure.connect_mongodb(collection="ndc_collection_versions")
# Pull the next raw record that has not been processed yet (queue-style).
collection_to_process = ndc_collections_raw_db.find_one({
    "processed_collection": {"$exists": False}
})
while collection_to_process is not None:
    insert_new_collection_record = False
    existing_collection = ndc_collections_db.find_one(
        {
            "ndc_collection_id": collection_to_process["id"]
        }
    )
    if existing_collection is not None:
        # Compare the raw record's lastUpdated date against the stored one;
        # a mismatch means the collection changed upstream.
        collection_last_updated = next((d["dateString"] for d in
                                        collection_to_process["dates"] if d["type"] == "lastUpdated"), None)
        if collection_last_updated != existing_collection["ndc_collection_last_updated"]:
            # Archive the old version, drop it from the live collection,
            # and flag a rebuild.
            ndc_collection_versions_db.insert_one(existing_collection)
            ndc_collections_db.delete_one({"_id": existing_collection["_id"]})
            insert_new_collection_record = True
            print(f'Updated Collection: {existing_collection["ndc_collection_title"]}')
    else:
        insert_new_collection_record = True
    if insert_new_collection_record:
        collection_meta = sb_collections.ndc_collection_meta(
            collection_record=collection_to_process
        )
        ndc_collections_db.insert_one(collection_meta)
        print(f'Inserted Collection: {collection_meta["ndc_collection_title"]}')
    # Mark the raw record as processed regardless of outcome so the loop
    # always makes progress (acts like acking a queue message).
    ndc_collections_raw_db.update_one(
        {"_id": collection_to_process["_id"]},
        {
            "$set": {"processed_collection": True}
        }
    )
    collection_to_process = ndc_collections_raw_db.find_one({
        "processed_collection": {"$exists": False}
    })
| 2.375 | 2 |
orchestrator/schemas/subscription.py | Georgi2704/orchestrator-core | 1 | 12764102 | <reponame>Georgi2704/orchestrator-core<gh_stars>1-10
# Copyright 2019-2020 SURF.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import List, Optional
from uuid import UUID
from pydantic import Extra
from orchestrator.schemas.base import OrchestratorBaseModel
from orchestrator.schemas.product import ProductBaseSchema
from orchestrator.schemas.product_block import ProductBlockSchema
from orchestrator.schemas.resource_type import ResourceTypeSchema
from orchestrator.schemas.subscription_descriptions import SubscriptionDescriptionSchema
from orchestrator.types import SubscriptionLifecycle, strEnum
class PortMode(strEnum):
    """Valid port modes."""
    TAGGED = "tagged"
    UNTAGGED = "untagged"
    # NOTE(review): value uses snake_case unlike the other members;
    # serialized payloads depend on it, so it must stay "link_member".
    LINKMEMBER = "link_member"
class SubscriptionRelationSchema(OrchestratorBaseModel):
    """Edge in the subscription-instance graph.

    Records which child instance hangs off which parent, under which
    domain-model attribute, and at which position (``order_id``) within
    that attribute.
    """
    domain_model_attr: str
    child_id: UUID
    parent_id: UUID
    order_id: int
    class Config:
        # Allow construction directly from SQLAlchemy ORM rows.
        orm_mode = True
class SubscriptionInstanceValueBaseSchema(OrchestratorBaseModel):
    """A single resource-type value attached to a subscription instance.

    Values are stored as strings regardless of their logical type; the
    embedded ``resource_type`` describes what the value represents.
    """
    resource_type_id: UUID
    subscription_instance_id: UUID
    subscription_instance_value_id: UUID
    value: str
    resource_type: ResourceTypeSchema
    class Config:
        orm_mode = True
class SubscriptionInstanceBase(OrchestratorBaseModel):
    """One product-block instance within a subscription, with its values
    and its parent/child relations in the instance graph."""
    label: Optional[str]
    subscription_id: UUID
    product_block_id: UUID
    subscription_instance_id: UUID
    values: List[SubscriptionInstanceValueBaseSchema]
    parent_relations: List[SubscriptionRelationSchema]
    children_relations: List[SubscriptionRelationSchema]
    product_block: ProductBlockSchema
    class Config:
        orm_mode = True
class SubscriptionBaseSchema(OrchestratorBaseModel):
    """Minimal subscription payload shared by create/read schemas."""
    subscription_id: Optional[UUID]
    start_date: Optional[datetime]
    description: str
    status: SubscriptionLifecycle
    product_id: Optional[UUID]
    customer_id: UUID
    # False while a workflow is still modifying the subscription.
    insync: bool
    note: Optional[str]
class SubscriptionSchema(SubscriptionBaseSchema):
    """Full subscription read model, ORM-backed, including the product and
    any customer-specific descriptions."""
    name: Optional[str]
    subscription_id: UUID
    end_date: Optional[datetime]
    product: Optional[ProductBaseSchema]
    customer_descriptions: Optional[List[SubscriptionDescriptionSchema]]
    tag: Optional[str]
    class Config:
        orm_mode = True
class SubscriptionIdSchema(OrchestratorBaseModel):
    """Payload carrying only a subscription id."""
    subscription_id: UUID
class SubscriptionDomainModelSchema(SubscriptionSchema):
    """Subscription including its serialized domain model."""
    customer_descriptions: List[SubscriptionDescriptionSchema]
    class Config:
        # Domain-model attributes vary per product type, so any extra
        # fields present on the model are passed through unvalidated.
        extra = Extra.allow
| 1.84375 | 2 |
layers/senet.py | zsync/target-classification | 2 | 12764103 | import tensorflow as tf
from layers.utils import CustomLayer
class SELayer(CustomLayer):
    """Squeeze-and-Excitation layer: channel-wise feature recalibration.

    Global-average-pools the (B, H, W, C) input to per-channel statistics,
    passes them through a two-layer bottleneck ending in a sigmoid, and
    multiplies the input by the resulting per-channel scale.
    """
    def __init__(self, reduction=16):
        """
        Args:
            reduction: channel reduction ratio of the excitation bottleneck.
                Defaults to 16, the value previously hard-coded, so existing
                callers are unaffected.
        """
        super().__init__()
        self.reduction = reduction
    def build(self, input_shape):
        # Channels-last layout assumed: (batch, height, width, channels).
        B, H, W, C = input_shape
        self.squeeze = tf.keras.layers.GlobalAveragePooling2D()
        # NOTE(review): the reference SE block (Hu et al.) applies a ReLU
        # after the reduction Dense; none is used here -- confirm the
        # omission is intentional before "fixing" it (it changes weights).
        self.excitation = tf.keras.Sequential([
            tf.keras.layers.Dense(C // self.reduction),
            tf.keras.layers.Dense(C, activation='sigmoid')
        ])
        self.multi = tf.keras.layers.Multiply()
    def call(self, inputs):
        # Per-channel gate in [0, 1], broadcast-multiplied onto the input.
        scale = self.squeeze(inputs)
        scale = self.excitation(scale)
        return self.multi([inputs, scale])
| 2.8125 | 3 |
src/cleora.py | Synerise/kdd-cup-2021 | 13 | 12764104 | <filename>src/cleora.py
import subprocess
def train_cleora(dim: int, iter_: int, columns: str, input_filename: str, working_dir: str):
    """Run the Cleora embedding binary on *input_filename*.

    dim -- embedding dimensionality; iter_ -- number of iterations;
    columns -- Cleora column specification; working_dir -- output directory.
    Raises CalledProcessError if the binary exits non-zero.
    See more details: https://github.com/Synerise/cleora/
    """
    args = [
        './cleora-v1.1.0-x86_64-unknown-linux-gnu',
        '--columns', columns,
        '--dimension', str(dim),
        '-n', str(iter_),
        '--input', input_filename,
        '--output-dir', working_dir,
    ]
    subprocess.run(args, check=True)
meli_challenge/core/__init__.py | rafaelleinio/meli-challenge | 1 | 12764105 | <reponame>rafaelleinio/meli-challenge
"""core module."""
from meli_challenge.core.characters_graph import CharactersGraph
from meli_challenge.core.spark_client import SparkClient
# Public API of the core package.
__all__ = ["CharactersGraph", "SparkClient"]
| 0.980469 | 1 |
tests/settings.py | boxed/django-fastdev | 26 | 12764106 | <filename>tests/settings.py
import os
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# These overly specific paths are for jinja2
TEMPLATE_DIRS = [
    os.path.join(BASE_DIR, 'tests'),
    os.path.join(BASE_DIR, 'tests/templates'),
]

TEMPLATE_DEBUG = True

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': TEMPLATE_DIRS,
        'APP_DIRS': True,
        'OPTIONS': {
            'debug': TEMPLATE_DEBUG,
        },
    },
]

# Test-only secret key; never reuse outside the test suite.
SECRET_KEY = "foobar"

INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django_fastdev',
    'tests',
]

# Local SQLite database used by the test project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
| 1.828125 | 2 |
bar_viz.py | YuzheSHI/Plot-Templates | 2 | 12764107 | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import warnings
from matplotlib import colors
matplotlib.rc("font",family='AR PL SungtiL GB')
warnings.filterwarnings('ignore')
def vis_national(national, native, null, x):
    """Grouped bar chart: per-library share (%) of catalogue-data sources.

    national/native/null -- percentage series, one value per library in *x*.
    Saves the figure as 'data'.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.grid()
    plt.title('2018-2020年度各区县馆编目数据占比')
    years = [
        '国图编目数据库',
        '本地编目数据库',
        '无编目数据库'
    ]
    total_width, n = 0.8, len(years)
    width = total_width / n
    xx = np.arange(len(x))
    xx = xx - (total_width - width) / 2
    # One offset bar group per data source.
    for offset, (series, label) in enumerate(zip((national, native, null), years)):
        plt.bar(xx + offset * width, series, width=width, label=label)
    plt.xticks(range(len(x)), x, rotation=45)
    plt.ylabel('百分比(%)')
    plt.xlabel('区县图书馆')
    plt.legend(loc='best')
    # 50% reference line.
    plt.axhline(50, color='r')
    plt.savefig('data')
def book_purchase(p18, p19, p20, x):
    """Grouped bar chart of Chinese-book acquisitions per district library,
    one bar group per year (2018-2020), annotating each year's peak library.

    p18/p19/p20 -- acquisition counts (volumes) per library, aligned with *x*.
    x -- district library names used as tick labels.
    Saves the figure as 'purchase'.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.grid()
    plt.title('2018-2020年度各区县馆中文图书入藏量')
    years = [
        '2018',
        '2019',
        '2020'
    ]
    series = [p18, p19, p20]
    total_width, n = 0.8, len(years)
    width = total_width / n
    xx = np.arange(len(x))
    xx = xx - (total_width - width) / 2
    # One bar group per year (previously three copy-pasted plt.bar calls).
    for i, (label, data) in enumerate(zip(years, series)):
        plt.bar(xx + i * width, data, width=width, label=label)
    plt.xticks(range(len(x)), x, rotation=45)
    plt.ylabel('中文图书入藏量(册)')
    plt.xlabel('区县图书馆')
    plt.legend(loc='best')
    # 20k-volume reference line.
    plt.axhline(20000, color='r')
    # Annotate each year's maximum (previously three copies of this block).
    for data in series:
        peak = int(np.argmax(np.array(data)))
        plt.annotate(
            x[peak] + str(data[peak]),
            xy=(peak, data[peak]),
            bbox=dict(fc=(1, 0.9, 0.9))
        )
    plt.savefig('purchase')
def delay_time(delay, x):
    """Line plot of average cataloguing lag (months) per district library,
    labelling every data point. Saves the figure as 'delay'."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.grid()
    plt.title('2018-2020年度各区县馆无编目数据图书平均滞后周期')
    plt.xticks(range(len(x)), x, rotation=45)
    plt.ylabel('无编目数据图书平均滞后周期(月)')
    plt.xlabel('区县图书馆')
    positions = range(len(x))
    plt.plot(positions, delay, marker='o', markersize=2)
    # 3-month reference line.
    plt.axhline(3, color='r')
    # Annotate each point with its value.
    for point in zip(positions, delay):
        plt.annotate(
            str(point[1]),
            xy=point,
            bbox=dict(fc=(1, 0.9, 0.9))
        )
    plt.savefig('delay')
if __name__ == '__main__':
    # Share (%) of catalogue records sourced from the national library
    # database, one value per district library (aligned with x below).
    national = [
        5,
        70,
        85,
        20,
        80,
        50,
        85,
        40,
        40,
        40,
        30,
        100,
    ]
    # Share (%) of locally produced catalogue records.
    native = [
        94,
        10,
        14,
        79,
        0,
        50,
        14,
        50,
        40,
        40,
        40,
        0,
    ]
    # Share (%) of books with no catalogue data at all.
    null = [
        1,
        20,
        1,
        1,
        20,
        1,
        1,
        10,
        20,
        20,
        30,
        0,
    ]
    # Chinese-book acquisitions (volumes) in 2018.
    p18 = [
        22299,
        76721,
        50234,
        50369,
        682,
        275,
        16381,
        56261,
        16934,
        0,
        41631,
        33474,
    ]
    # Chinese-book acquisitions (volumes) in 2019.
    p19 = [
        24528,
        28174,
        21897,
        15263,
        1500,
        2588,
        19133,
        23960,
        26385,
        0,
        58390,
        23005,
    ]
    # Chinese-book acquisitions (volumes) in 2020.
    p20 = [
        0,
        14875,
        12365,
        27157,
        0,
        3186,
        43311,
        33822,
        38390,
        44433,
        20068,
        18744,
    ]
    # Average cataloguing lag in months per library.
    delay = [
        2,
        3,
        4,
        2,
        3,
        6,
        4,
        3,
        24,
        18,
        9,
        0.5,
    ]
    # District library names (x-axis labels for all plots).
    x = [
        '和平馆',
        '河西馆',
        '河东馆',
        '红桥馆',
        '河北馆',
        '津南馆',
        '西青馆',
        '北辰馆',
        '东丽馆',
        '宁河馆',
        '宝坻馆',
        '蓟州馆',
    ]
    vis_national(national, native, null, x)
    book_purchase(p18, p19, p20, x)
    delay_time(delay, x)
ArticlesDataDownloader/ACM/ACMArticlesHandler.py | LechMadeyski/PhD19MarekSosnicki | 2 | 12764108 | <filename>ArticlesDataDownloader/ACM/ACMArticlesHandler.py
import os
import logging
import time
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from ArticlesDataDownloader.ArticleData import ArticleData
from ArticlesDataDownloader.bib_to_article_data import bib_to_article_datas_with_ids
from ArticlesDataDownloader.download_utilities import download_pdf, download_file_from_click_of_button
from ArticlesDataDownloader.text_utilities import format_text_and_split_into_sentences
class ACMArticlesHandler:
    """Scraper for the ACM Digital Library: reads article metadata (abstract,
    BibTeX citation export) and downloads full-text PDFs through a Selenium
    WebDriver supplied by the caller."""

    def __init__(self, driver):
        # Externally managed Selenium WebDriver; lifetime owned by the caller.
        self.driver = driver
        self.__logger = logging.getLogger("ACMArticlesHandler")

    def get_article(self, url):
        """Open *url* and return an ArticleData with the abstract text plus the
        bibliographic fields parsed from the citation export download.

        Partial failures are reported via ArticleData.read_status instead of
        raising.
        """
        self.driver.get(url)
        text = str()
        try:
            WebDriverWait(self.driver, 10).until(
                lambda x: x.find_element_by_xpath("//div[contains(@class, 'abstractSection abstractInFull')]"))
            abstract_paragraphs = WebDriverWait(self.driver, 10).until(
                lambda x: x.find_elements_by_xpath("//div[contains(@class, 'abstractSection abstractInFull')]/p"))
            text = [dict(title='Abstract', paragraphs=[
                dict(sentences=format_text_and_split_into_sentences(
                    par.get_attribute('innerHTML'))) for par in abstract_paragraphs])]
        except Exception:  # was a bare 'except:': don't swallow KeyboardInterrupt/SystemExit
            self.__logger.warning("Could not read abstract for " + url)

        result = ArticleData(publisher_link=url, text=text, read_status='Full text not avaliable')

        cite_button = WebDriverWait(self.driver, 10).until(
            lambda x: x.find_element_by_xpath("//a[@data-title='Export Citation']"))

        # Scroll the citation button to the middle of the viewport so the click
        # is not intercepted by fixed page chrome.
        desired_y = (cite_button.size['height'] / 2) + cite_button.location['y']
        window_h = self.driver.execute_script('return window.innerHeight')
        window_y = self.driver.execute_script('return window.pageYOffset')
        current_y = (window_h / 2) + window_y
        scroll_y_by = desired_y - current_y
        self.driver.execute_script("window.scrollBy(0, arguments[0]);", scroll_y_by)
        cite_button.click()

        WebDriverWait(self.driver, 10).until(
            lambda x: x.find_element_by_xpath("//div[@class='csl-right-inline']"))
        WebDriverWait(self.driver, 10).until(
            lambda x: x.find_element_by_xpath("//a[@title='Download citation']/i[@class='icon-Icon_Download']"))
        download_button = WebDriverWait(self.driver, 10).until(
            lambda x: x.find_element_by_xpath("//a[@title='Download citation']"))
        downloaded_bibs = download_file_from_click_of_button(self.driver, download_button)
        if downloaded_bibs:
            article_datas = bib_to_article_datas_with_ids(downloaded_bibs)
            if len(article_datas) == 1:
                result.merge(article_datas[0][1])
            else:
                result.read_status = 'Failed reading bibliographic information'
        else:
            result.read_status = 'Failed downloading bibliographic information'
        return result

    def download_pdf(self, url):
        """Download the article PDF behind *url*; return the saved file path,
        or an empty string on any failure."""
        try:
            self.__logger.debug("ACM::download_pdf start " + url)
            self.driver.get(url)
            WebDriverWait(self.driver, 10).until(lambda x: x.find_elements_by_xpath("//a[@title='PDF']"))
            self.__logger.debug("Wait end")  # demoted from info: internal progress detail
            python_button = self.driver.find_elements_by_xpath("//a[@title='PDF']")[0]
            link = str(python_button.get_property('href'))
            PDF_FILENAME = 'ACM_temporary.pdf'
            # Module-level helper download_pdf(), not this method.
            return download_pdf(self.driver, link, PDF_FILENAME)
        except Exception:  # was a bare 'except:'; failure is signalled by the empty return
            return str()

    def is_applicable(self, url):
        """True if *url* belongs to the ACM Digital Library."""
        return "acm.org" in url

    def name(self):
        """Human-readable handler name."""
        return "ACM"
| 2.828125 | 3 |
pizdabol_week.py | NikitaMikhailov/bot_heroku | 0 | 12764109 | #!/usr/bin/env bash
#!/bin/bash
#!/bin/sh
#!/bin/sh -
from vk_api.utils import get_random_id
from vk_api.bot_longpoll import VkBotLongPoll
from vk_api import VkUpload
import requests
import vk_api
import time
import bot_functions
import bot_variable
# Paths differ between local development and the deployed bot.
if bot_variable.flag_repository:
    start_path = ""
else:
    start_path = '/root/bot_herobot_chat/'

# Digit -> emoji mapping used when rendering word counts.
if bot_variable.flag_smile:
    smile = bot_variable.smile_1
else:
    smile = bot_variable.smile_2

# Read the VK API token; 'with' guarantees the handle is closed even if the
# read fails (the original open()/close() pair leaked on error).
with open('{}token.txt'.format(start_path), 'r') as f:
    token = f.read()

session = requests.Session()
vk_session = vk_api.VkApi(token=token)
longpoll = VkBotLongPoll(vk_session, '178949259')
vk = vk_session.get_api()
upload = VkUpload(vk_session)
def main():
    """Compute each chat member's word count for the past week, post the weekly
    top list and the "pizdabol of the week" to the chat, and persist the new
    cumulative totals to word_in_week.txt."""
    # Parse the chat log; records are '*_*'-separated fields. Field 2 holds the
    # user id (wrapped in spaces), field 3 == ' 1 ' marks a counted message,
    # field 4 is the message text. Continuation lines (no separator) are
    # attributed to the last seen author.
    file_stat = open('{}logs_chat.txt'.format(start_path), 'r', encoding="utf-8")
    sl = {}  # user id -> cumulative word count over the whole log
    for line in file_stat:
        line_1 = line
        line = line.split('*_*')
        if len(line) > 1 and line[3] == ' 1 ':
            number_words_in_message = 0
            for i in line[4].split(' '):
                if i != " " and i != "\n" and i != "":
                    number_words_in_message += 1
            if line[2][1:-1] not in sl:
                last_people = line[2][1:-1]
                sl[line[2][1:-1]] = number_words_in_message
            else:
                last_people = line[2][1:-1]
                sl[line[2][1:-1]] += number_words_in_message
        elif len(line) == 1 and line != "\n":
            # NOTE(review): 'line' is a list here, so line != "\n" is always
            # True; the intended check was probably line_1 != "\n" -- verify.
            number_words_in_message = 0
            for i in line_1.split(' '):
                if i != " " and i != "\n" and i != "":
                    number_words_in_message += 1
            sl[last_people] += number_words_in_message
    file_stat.close()
    # Previous cumulative counts, saved at the end of last week's run.
    file_word_in_week = open('{}word_in_week.txt'.format(start_path), 'r', encoding='utf-8')
    sl_1 = {}
    for line in file_word_in_week:
        sl_1[line.split('*_*')[0]] = int(line.split('*_*')[1][:-1:])
    file_word_in_week.close()
    # This week's delta = current total - last week's total.
    week_result = {}
    for i in sl:
        week_result[i] = int(sl[i]) - int(sl_1[i])
    # Persist the new cumulative totals for next week's run.
    file_word_in_week = open('{}word_in_week.txt'.format(start_path), 'w', encoding='utf-8')
    for i in sl:
        file_word_in_week.write(i+'*_*'+str(sl[i])+'\n')
    file_word_in_week.close()
    # Build the ranking: sort counts descending, then resolve each user's name
    # via the VK API and render the count with emoji digits.
    stats = []
    kolp = []
    for i in week_result:
        kolp.append(week_result[i])
    kolp.sort()
    kolp.reverse()
    jstr = []  # users already placed in the ranking (guards duplicate counts)
    for i in kolp:
        for j in week_result:
            if j == '' or j == '\n':
                continue
            if str(week_result[j]) == str(i) and j not in jstr:
                jstr.append(j)
                number_2 = ''
                for k in str(week_result[j]):
                    number_2 += smile[k]
                fio_1 = requests.get("https://api.vk.com/method/users.get?user_ids=" + str(j)
                                     + "&fields=bdate&access_token=" + token + "&v=5.92").json()
                first_name_1 = fio_1["response"][0]["first_name"]
                last_name_1 = fio_1["response"][0]["last_name"]
                stats.append(first_name_1 + ' ' + last_name_1 + ': ' + str(
                    number_2) + ' слов(а).\n')
    for i in range(0, len(stats)):
        stats[i] = str(i + 1) + ") " + stats[i]
    # Winner = top entry; extract "First Last" between the rank prefix and ':'.
    pizdabol = stats[0][stats[0].index(' ')+1:stats[0].index(':'):]
    stats = ''.join(stats)
    stats = "🔝 ТОП слов в беседе за прошедшую неделю:\n\n" + stats
    # Map the winner's display name back to a VK user id.
    for i in bot_variable.spisok_chata:
        if bot_variable.spisok_chata[i] == pizdabol:
            pizdabol_id = i
    # Notify the maintainer privately.
    vk.messages.send(
        user_id=195310233,
        random_id=get_random_id(),
        message='Пиздабол недели обновлен в фоновом режиме, это [id' + str(pizdabol_id) + '|' + pizdabol + "]"
    )
    # Post the winner announcement with an attached image, then the full top list.
    attachments = []
    image_url = 'https://sun9-47.userapi.com/XOoZN_1DA7BZKe_QiWyPiKvCriZUFNKltkOe1A/nYo9ZMZUegw.jpg'
    image = session.get(image_url, stream=True)
    photo = upload.photo_messages(photos=image.raw)[0]
    attachments.append('photo{}_{}'.format(photo['owner_id'], photo['id']))
    vk.messages.send(
        chat_id=1,
        random_id=get_random_id(),
        attachment=','.join(attachments),
        message='Пиздабол недели [id' + str(pizdabol_id) + '|' + pizdabol + "]"+"!"
    )
    time.sleep(5)
    vk.messages.send(
        chat_id=1,
        random_id=get_random_id(),
        message=stats
    )


if __name__ == "__main__":
    main()
| 2.203125 | 2 |
python/cvi_toolkit/transform/onnx_optimizer.py | sophgo/tpu_compiler | 3 | 12764110 | <gh_stars>1-10
from typing import cast
import onnx
import copy
import torch
import numpy as np
import onnxruntime as rt
import onnx.helper
import onnx.numpy_helper
import onnx.shape_inference
from collections import OrderedDict
from numbers import Number
from onnx import TensorProto, mapping
def convert_onnx_attribute_proto(attr_proto):
    """Convert an ONNX AttributeProto to a plain Python value.

    Scalar fields (f/i/s/t) are checked first via HasField; the repeated
    fields (floats/ints/strings) are returned as lists.
    """
    if attr_proto.HasField('f'):
        return attr_proto.f
    elif attr_proto.HasField('i'):
        return attr_proto.i
    elif attr_proto.HasField('s'):
        return attr_proto.s
    elif attr_proto.HasField('t'):
        return attr_proto.t  # this is a proto!
    elif attr_proto.floats:
        return list(attr_proto.floats)
    elif attr_proto.ints:
        return list(attr_proto.ints)
    elif attr_proto.strings:
        str_list = list(attr_proto.strings)
        return str_list
    elif attr_proto.name:
        # NOTE(review): this turns the attribute *name* string into a list of
        # characters -- looks suspicious; confirm it is intentional.
        name_list = list(attr_proto.name)
        return name_list
    else:
        raise ValueError("Unsupported ONNX attribute: {}".format(attr_proto))
def onnx_dtype(dtype):
    """Map an ONNX element type to a numpy dtype.

    dtype -- either a TensorProto.DataType integer or its string name
    (e.g. "FLOAT"). Raises RuntimeError for any other input type.
    """
    # Use a distinct local name; the original shadowed the function itself.
    if isinstance(dtype, Number):
        elem_type = dtype
    elif isinstance(dtype, str):
        elem_type = TensorProto.DataType.Value(dtype)
    else:
        raise RuntimeError("dtype should be number or str.")
    return mapping.TENSOR_TYPE_TO_NP_TYPE[elem_type]
# Per-attribute-name converters applied by translate_onnx(); any attribute not
# listed here passes through unchanged.
onnx_attr_translator = {
    "axis": lambda x: int(x),
    "axes": lambda x: [int(a) for a in x],
    "dtype": lambda x: onnx_dtype(x),
    "keepdims": lambda x: bool(x),
    "to": lambda x: onnx_dtype(x),
}
def translate_onnx(key, val):
    """Apply the converter registered for *key* (if any) to *val*."""
    convert = onnx_attr_translator.get(key)
    return val if convert is None else convert(val)
def get_attr(attrs, name):
    """Return attribute *name* from an ONNX attribute list, converted to a
    plain Python value; raises KeyError if absent."""
    converted = {
        a.name: translate_onnx(a.name, convert_onnx_attribute_proto(a))
        for a in attrs
    }
    return converted[name]
def fixed_point(fun):
    """Call *fun* repeatedly until it returns a falsy value (at least once)."""
    while fun():
        pass
def dump_model(model, name="opt.onnx"):
    """Serialize *model* and write the raw protobuf bytes to file *name*."""
    with open(name, "wb") as out:
        out.write(model.SerializeToString())
class PesudoNode(object):
    """Lightweight stand-in for an ONNX node, used to describe both match
    patterns and their replacements in Form_Deform."""

    def __init__(self, op_type, input=None, output=None, attr_key_map=None,
                 const_value=None, default=None, constraint=None):
        self.op_type = op_type
        # Mutable defaults are materialized per instance to avoid shared state.
        self.input = [] if input is None else input
        self.output = [] if output is None else output
        # (src_attr, dst_attr) pairs: read attr from the matched node and store
        # it under a (possibly renamed) key.
        self.attr_key_map = [] if attr_key_map is None else attr_key_map
        # Payload for replacement "Constant" nodes.
        self.const_value = const_value
        # Fixed attributes merged into the replacement node.
        self.default = {} if default is None else default
        # Extra structural requirement (e.g. 'broadcast') checked at match time.
        self.constraint = constraint
class RedundanciesOps(object):
    """Record of one matched pattern occurrence: its boundary tensors, the
    attributes collected during matching, and the node/tensor names that become
    redundant once the pattern is replaced."""

    def __init__(self, pattern_input, pattern_output, attr, redundancies_ops):
        self.pattern_input = pattern_input
        self.pattern_output = pattern_output
        self.redundancies_ops = redundancies_ops
        self.attr = attr
class FoldUnfoldInfo(object):
    """Pair of PesudoNode lists: the source pattern to match (src_node) and the
    target sequence that replaces it (trg_node)."""

    def __init__(self, src_node, trg_node):
        self.src_node = src_node
        self.trg_node = trg_node
class Form_Deform(object):
    """Graph rewriter: matches PesudoNode patterns against the ONNX graph and
    replaces each occurrence with an equivalent (usually fused) op sequence.
    Entry point is run(); everything else is machinery for it."""
    # support form/deform multi input single output op
    def __init__(self, model):
        # (pattern, replacement) pairs; populated by run().
        self.op_list = []
        self.nodes = model.graph.node
        self.weight = model.graph.initializer
        # store node shape
        self.shape_info = [info for info in model.graph.value_info]
        self.shape_info.extend(model.graph.output)
        self.shape_info = {info.name: [i.dim_value for i in info.type.tensor_type.shape.dim if i.dim_value > 0]
                           for info in self.shape_info}
        # Names of all constant-like tensors (initializers + Constant outputs).
        self.tensor = [x.name for x in self.weight]
        self.tensor.extend([node.output[0] for node in self.nodes if node.op_type == "Constant"])

    def get_node(self, name):
        """Return (index, node) of the node producing *name*; None for constant
        tensors (and implicitly None if nothing produces *name*)."""
        if self.is_tensor(name):
            return None
        for idx, n in enumerate(self.nodes):
            if name in n.output:
                return idx, n

    def is_tensor(self, name):
        """True if *name* is an initializer or a Constant node output."""
        return name in self.tensor

    def get_tensor_vale(self, name):
        """Return the numpy value of constant tensor *name* (method name typo
        kept for compatibility with existing callers)."""
        for n in self.nodes:
            if name == n.output[0] and n.op_type == 'Constant':
                return onnx.numpy_helper.to_array(n.attribute[0].t)
        for w in self.weight:
            if name == w.name:
                return onnx.numpy_helper.to_array(w).astype(np.float32)

    def get_input_shape(self, name):
        """Shape of activation *name* (from inferred value_info) or of weight
        *name* (from its dims)."""
        for n in self.nodes:
            if name == n.output[0]:
                return self.shape_info[name]
        for w in self.weight:
            if name == w.name:
                return list(w.dims)

    def remove_cast(self):
        """Drop Cast nodes and rewire each following consumer to the Cast's
        input. NOTE(review): only the most recent Cast (cast_ops[-1]) is
        considered when rewiring -- verify for graphs with back-to-back Casts."""
        cast_ops = []
        flush_input = False
        for idx, node in enumerate(self.nodes):
            if node.op_type == "Cast":
                cast_ops.append(node)
                flush_input = True
                continue
            if node.op_type == "Constant":
                continue
            if flush_input:
                flush_input = False
                for i in range(len(node.input)):
                    if cast_ops[-1].output[0] == node.input[i]:
                        self.nodes[idx].input[i] = cast_ops[-1].input[0]
        for op in cast_ops:
            self.nodes.remove(op)

    def constraint(self, node, mode):
        """Check an extra structural requirement on a matched node. Only the
        'broadcast' mode (binary ops) is implemented."""
        if mode == 'broadcast' and len(node.input) == 2:
            inp_0, inp_1 = node.input
            inp_0_shape = self.get_input_shape(inp_0)
            inp_1_shape = self.get_input_shape(inp_1)
            # don't worry, in this function shape always be list
            if len(inp_0_shape) == 1 or len(inp_1_shape) == 1:
                # normal case
                if inp_0_shape[-1] == inp_1_shape[-1] \
                        or inp_0_shape[-1] == 1 or inp_1_shape[-1] == 1:
                    return True
            elif ((inp_0_shape[-2] == 1 or inp_1_shape[-2] == 1) \
                    and inp_0_shape[:-2] == inp_1_shape[:-2]):
                # for group fc
                return True
        else:
            raise ValueError("constrain mode: {} not support now.".format(mode))
        return False

    def normal_check(self, pninp, ninp):
        """Match one pattern node's input spec *pninp* against the real node
        inputs *ninp*. Returns (matched, outside_inputs, tensor_attrs,
        removable_tensor_names)."""
        # check one node
        tensor_attr = {}
        const_tensor_remove = []
        outside_input = []
        if len(pninp) != len(ninp):
            return False, outside_input, tensor_attr, const_tensor_remove
        for p, n in zip(pninp, ninp):
            # Fold p defined as:
            # Activation: 1. (pre_pnode.output, idx), if pre_pnode has muti-output
            # 2. pre_pnode.output, if pre_pnode has one-output
            # 3. 'input', this will get input from ninp at same position
            #
            # Tensor: 1. ('input', 'tensor'), if you concern input type. eg. constant, weight
            # 2. otherwise 'input' is enough
            # 3. attr_key, get attr from tensor eg. 'axes', 'dims'. or just set any str
            # if u don't care what the tensor will doing.
            if type(p) == tuple:
                i, j = p
                if type(i) == str and type(j) == str \
                        and i.lower() == 'input' and j.lower() == 'tensor':
                    if self.is_tensor(n):
                        outside_input.append(n)
                    else:
                        return False, outside_input, tensor_attr, const_tensor_remove
                elif type(i) == list and type(j) == int:
                    if i[j] != n:
                        return False, outside_input, tensor_attr, const_tensor_remove
                else:
                    raise ValueError("Wrong defination. {}".format(p))
            elif type(p) == str:
                if p.lower() == 'input':
                    outside_input.append(n)
                elif self.is_tensor(n):
                    # get attr form tensor value
                    tensor_attr[p] = float(self.get_tensor_vale(n))
                    const_tensor_remove.append(n)
                else:
                    raise ValueError("Invalid. This should set to be input or activation.")
                    # return False, outside_input, tensor_attr, const_tensor_remove
            elif type(p) == list and len(p) == 1:
                if p[0] != n:
                    return False, outside_input, tensor_attr, const_tensor_remove
            else:
                raise ValueError("Wrong input defination. {}".format(p))
        return True, outside_input, tensor_attr, const_tensor_remove

    def match_node(self, pnode, node):
        """Match pattern node *pnode* against graph node *node*; for
        commutative ops the inputs are also tried in reverse order."""
        # match node, determin the inputs out of pattern, get attr form tensor, record the nodes which should be removed
        # check and process node's input
        matched, outside_inp, attr, rm_tensor = self.normal_check(pnode.input, node.input)
        if not matched and (node.op_type == 'Mul' or node.op_type == 'Concat' or node.op_type == 'Add'):
            # swap input, maybe concat with >3 input doesn't work
            matched, outside_inp, attr, rm_tensor = self.normal_check(pnode.input[::-1], node.input)
        # process current node
        if matched:
            # process constraint
            if pnode.constraint is not None:
                matched = self.constraint(node, pnode.constraint)
            rm_tensor.append(node.output[0])
            # get extra attr from node's attr
            for km in pnode.attr_key_map:
                if type(km) == tuple:
                    raw_node_attr = km[0]
                    new_node_attr = km[-1]
                else:
                    raw_node_attr, new_node_attr = km, km
                if new_node_attr in attr:
                    raise ValueError("Duplicate attr name please check.")
                attr.update({new_node_attr: get_attr(node.attribute, raw_node_attr)})
        return matched, outside_inp, attr, rm_tensor

    def match_pattern(self, pattern):
        """Scan the graph once, matching *pattern* (a PesudoNode list) in
        order; return one RedundanciesOps per complete occurrence."""
        redundancies_ops_list = []
        redundancies_ops = []
        pattern_input = []
        pattern_attr = {}
        pattern_idx = 0
        for node in self.nodes:
            match_success = False
            # for different torch version onnx graph may different.
            # eg. insert Cast, sometimes use Tensor sometimes use Weight and so on...
            if node.op_type == "Constant":
                continue
            if node.op_type == pattern[pattern_idx].op_type:
                match_success, pinp, attr, rm_op = self.match_node(pattern[pattern_idx], node)
                if match_success:
                    # flush output info, so next pnode's input will be updated
                    pattern[pattern_idx].output.clear()
                    pattern[pattern_idx].output.extend(node.output)
                    pattern_idx += 1
                    pattern_attr.update(attr)
                    redundancies_ops.extend(rm_op)
                    # store pattern input info
                    for i in pinp:
                        if i not in pattern_input:
                            pattern_input.append(i)
                    if pattern_idx == len(pattern):
                        redundancies_ops_list.append(
                            RedundanciesOps(copy.copy(pattern_input), copy.copy(list(node.output)),
                                            copy.copy(pattern_attr), copy.copy(redundancies_ops)))
                        pattern_idx = 0
                        redundancies_ops.clear()
                        pattern_input.clear()
                        pattern_attr.clear()
            else:
                pattern_idx = 0
                redundancies_ops.clear()
                pattern_input.clear()
                pattern_attr.clear()
        return redundancies_ops_list

    def replace_pattern(self):
        """One rewrite pass: for every registered (pattern, replacement) pair,
        splice the replacement ops in at the match site and delete the matched
        nodes. Returns True if anything was rewritten (for fixed_point)."""
        self.remove_cast()
        replaced = False
        for op_info in self.op_list:
            ops = op_info.trg_node
            pattern = op_info.src_node
            redundancies_ops_list = self.match_pattern(pattern)
            if len(redundancies_ops_list) > 0:
                replaced = True
                for nodes in redundancies_ops_list: # [pattern0, patter1, ...]
                    node_idx, _ = self.get_node(nodes.redundancies_ops[0])
                    rm_node = []
                    for oname in nodes.redundancies_ops:
                        idx_node = self.get_node(oname)
                        if idx_node is not None:
                            rm_node.append(idx_node[1])
                    out = nodes.pattern_output
                    for i, op in enumerate(ops):
                        attr = {}
                        _input = []
                        _output = []
                        prefix = "replace_{}_{}".format(node_idx, op.op_type)
                        # get attr
                        for k in op.attr_key_map:
                            if type(k) == tuple:
                                if len(k) > 2:
                                    raise ValueError("key must be one in replace pattern")
                                attr.update([(k[-1], nodes.attr[k[0]])])
                            elif type(k) == str:
                                attr.update([(k, nodes.attr[k])])
                            else:
                                raise ValueError("Wrong attr defination.")
                        attr.update(op.default)
                        # get input
                        for inp, idx in op.input:
                            if inp == "input":
                                _input.append(nodes.pattern_input[idx])
                            else:
                                _input.append(inp[idx])
                        # set output
                        if i == len(ops) - 1:  # output
                            _output = out
                        else:
                            op.output.clear()
                            op.output.extend([prefix])
                            _output = [prefix]
                        # form onnx node
                        if op.op_type == "Constant":
                            value = np.array(op.const_value)
                            new_node = onnx.helper.make_node("Constant", name=prefix, inputs=[], outputs=_output,
                                                             value=onnx.helper.make_tensor("value", onnx.TensorProto.FLOAT,
                                                                                           value.shape, value))
                        else:
                            new_node = onnx.helper.make_node(op.op_type, name=prefix, inputs=_input,
                                                             outputs=_output, **attr)
                        self.nodes.insert(node_idx, new_node)
                        node_idx += 1
                    for n in rm_node:
                        self.nodes.remove(n)
        return replaced

    def run(self, op_list):
        """Apply the (pattern, replacement) pairs in *op_list* until no more
        rewrites fire; return the mutated node list."""
        self.op_list = op_list
        fixed_point(self.replace_pattern)
        return self.nodes
class OnnxOpt(object):
    """Constant-folding optimizer for ONNX models: constant subgraphs are
    evaluated once with onnxruntime and replaced by Constant nodes, then
    unreachable nodes are pruned. Entry point is run()."""

    def __init__(self, model, batch_size):
        # batch_size replaces a dynamic (0 / -1) leading input dimension.
        self.batch_size = batch_size
        self.model = copy.deepcopy(model)
        onnx.checker.check_model(self.model)
        self.const_tensors = []

    def get_inputs(self):
        """Graph inputs that are genuine runtime inputs (not initializers)."""
        initializer_names = [x.name for x in self.model.graph.initializer]
        return [ipt for ipt in self.model.graph.input if ipt.name not in initializer_names]

    def get_input_names(self):
        input_names = [ipt.name for ipt in self.get_inputs()]
        return input_names

    def generate_specific_rand_input(self, input_shapes):
        """Build a random feed dict for the given {name: shape} map.

        A dynamic batch dim (0 / -1) becomes self.batch_size (or 1); any other
        dynamic dim raises RuntimeError.
        """
        inputs = {}
        for key, shape in input_shapes.items():
            if len(shape) > 0 and (shape[0] == 0 or shape[0] == -1):
                if self.batch_size > 0:
                    shape[0] = self.batch_size
                else:
                    shape[0] = 1
            if not np.all(np.array(shape) > 0):
                raise RuntimeError("The shape of input '{}' has dynamic size '{}', "
                                   "please determine the input size when export "
                                   "onnx".format(key, shape))
            elem_type = self.get_elem_type(key)
            elem_type = self.get_np_type_from_elem_type(elem_type)
            if elem_type == np.bool:  # for mask
                inputs.update({key: np.random.randint(0, 2, shape, dtype=elem_type)})
            # elif elem_type == np.int64:
            # inputs.update({key: np.random.randint(0, 10, size=shape, dtype=elem_type)})
            elif len(shape) == 0:  # for idx
                inputs.update({key: np.array(0, dtype=elem_type)})
            else:
                inputs.update({key: np.random.rand(*shape).astype(elem_type)})
        return inputs

    def get_value_info_all(self, name):
        """Find the ValueInfoProto for *name* in value_info, inputs or outputs."""
        for v in self.model.graph.value_info:
            if v.name == name:
                return v
        for v in self.model.graph.input:
            if v.name == name:
                return v
        for v in self.model.graph.output:
            if v.name == name:
                return v
        return None

    @staticmethod
    def insert_elem(nodes, idx, element):
        """Insert *element* at *idx* in a protobuf repeated field by shifting
        the tail right (repeated fields have no insert())."""
        nodes.extend([nodes[-1]])
        for i in reversed(range(idx + 1, len(nodes) - 1)):
            nodes[i].CopyFrom(nodes[i - 1])
        nodes[idx].CopyFrom(element)

    @staticmethod
    def get_shape_from_value_info_proto(vinfo):
        return [dim.dim_value for dim in vinfo.type.tensor_type.shape.dim]

    @staticmethod
    def get_np_type_from_elem_type(elem_type):
        """Map a TensorProto elem_type index to a numpy type (index-aligned
        with the ONNX DataType enum)."""
        types = (None, np.float32, np.uint8, np.int8, np.uint16, np.int16, np.int32,
                 np.int64, str, np.bool, np.float16, np.double, np.uint32, np.uint64,
                 np.complex64, np.complex128, np.float16)
        assert len(types) == 17
        _type = types[elem_type]
        assert _type is not None
        return _type

    def get_shape(self, name):
        vinfo = self.get_value_info_all(name)
        if vinfo is None:
            raise RuntimeError("Can't get shape of '{}'".format(name))
        return self.get_shape_from_value_info_proto(vinfo)

    def get_elem_type(self, name):
        vinfo = self.get_value_info_all(name)
        if vinfo is None:
            raise RuntimeError("Can't get dtype of '{}'".format(name))
        return vinfo.type.tensor_type.elem_type

    def is_dynamic(self, node):
        """True if *node* produces data-dependent output and therefore must
        not be folded."""
        if node.op_type in ["NonMaxSuppression", "NonZero", "Unique"] \
                and node.input[0] not in self.const_tensors:
            return True
        if node.op_type in ["Reshape", "Expand", "Upsample", "ConstantOfShape"] \
                and len(node.input) > 1 and node.input[1] not in self.const_tensors:
            return True
        if node.op_type in ["Resize"] \
                and ((len(node.input) > 2 and node.input[2] not in self.const_tensors) \
                     or (len(node.input) > 3 and node.input[3] not in self.const_tensors)):
            return True
        return False

    def has_subgraph_in_node(self, node):
        """True if *node* carries a GRAPH/GRAPHS attribute (If, Loop, Scan...)."""
        for attr in node.attribute:
            if attr.type in [onnx.AttributeProto.GRAPH, onnx.AttributeProto.GRAPHS]:
                return True
        return False

    def is_quantizeLinear(self, node):
        return node.op_type in ["DequantizeLinear", "QuantizeLinear"]

    def is_non_determinstic_node(self, node):
        return node.op_type in ["RandomNormal", "RandomNormalLike", "RandomUniformLike"]

    def get_constant_nodes(self):
        """Collect nodes whose outputs are fully determined by constants
        (Shape nodes included); dynamic/quantize/subgraph/random nodes are
        excluded and taint their consumers."""
        const_nodes = []
        dynamic_tensors = []
        self.const_tensors = [x.name for x in self.model.graph.initializer]
        self.const_tensors.extend([node.output[0] for node in self.model.graph.node if node.op_type == "Constant"])
        for node in self.model.graph.node:
            if any(x in dynamic_tensors for x in node.input):
                dynamic_tensors.extend(node.output)
            elif node.op_type == "Shape":
                const_nodes.append(node)
                self.const_tensors.extend(node.output)
            elif self.is_dynamic(node):
                dynamic_tensors.extend(node.output)
            elif self.is_quantizeLinear(node):
                pass
            elif self.has_subgraph_in_node(node):
                pass
            elif len(node.input) > 0 and all([x in self.const_tensors for x in node.input]) \
                    and not self.is_non_determinstic_node(node):
                const_nodes.append(node)
                self.const_tensors.extend(node.output)
        return copy.deepcopy(const_nodes)

    def forward(self, model):
        """Run *model* once on random inputs with onnxruntime (all graph
        optimizations disabled); return {output_name: value}."""
        input_shapes = {}
        sess_options = rt.SessionOptions()
        sess_options.graph_optimization_level = rt.GraphOptimizationLevel(0)
        sess_options.log_severity_level = 3
        sess = rt.InferenceSession(model.SerializeToString(), sess_options=sess_options,
                                   providers=["CPUExecutionProvider"])
        input_names = self.get_input_names()
        inputs = {}
        for name in input_names:
            shape = self.get_shape(name)
            input_shapes.update({name: shape})
        inputs.update(self.generate_specific_rand_input(input_shapes))
        outputs = [x.name for x in sess.get_outputs()]
        run_options = rt.RunOptions()
        run_options.log_severity_level = 3
        return OrderedDict(zip(outputs, sess.run(outputs, inputs, run_options=run_options)))

    def forward_for_node_outputs(self, const_nodes):
        """Evaluate the model with every const-node output promoted to a graph
        output, so their concrete values can be captured."""
        model = copy.deepcopy(self.model)
        for node in const_nodes:
            for output in node.output:
                model.graph.output.extend([onnx.ValueInfoProto(name=output)])
        return self.forward(model)

    def eliminate_const_nodes(self, const_node, res):
        """Replace every node in *const_node* with Constant node(s) holding the
        values captured in *res*. NOTE(review): deletes from the repeated field
        while enumerating it -- relies on the inserted Constants filling the
        gap; verify for nodes with multiple outputs."""
        do_eliminate = False
        for i, node in enumerate(self.model.graph.node):
            if node in const_node:
                for output in node.output:
                    new_node = copy.deepcopy(node)
                    new_node.name = "node_" + output
                    new_node.op_type = "Constant"
                    new_attr = onnx.helper.make_attribute(
                        "value",
                        onnx.numpy_helper.from_array(res[output], name=output)
                    )
                    del new_node.input[:]
                    del new_node.attribute[:]
                    del new_node.output[:]
                    new_node.output.extend([output])
                    new_node.attribute.extend([new_attr])
                    self.insert_elem(self.model.graph.node, i + 1, new_node)
                del self.model.graph.node[i]
                do_eliminate = True
        return do_eliminate

    def remove_unused_nodes(self):
        """Drop nodes whose outputs feed neither another node nor a graph output."""
        node_inputs = []
        unused_node = []
        for n in self.model.graph.node:
            node_inputs.extend(n.input)
        node_inputs.extend([out.name for out in self.model.graph.output])
        node_inputs = set(node_inputs)
        for n in self.model.graph.node:
            if len(set(n.output).intersection(node_inputs)) == 0:
                unused_node.append(n)
        for n in unused_node:
            self.model.graph.node.remove(n)

    def infer_shapes(self):
        """Best-effort shape inference; failures are ignored deliberately."""
        try:
            self.model = onnx.shape_inference.infer_shapes(self.model)
        except Exception:  # was a bare 'except:': don't swallow KeyboardInterrupt/SystemExit
            pass

    def constant_folding(self, infer_shapes=True):
        """One folding pass: find, evaluate and eliminate constant nodes.
        Returns True if anything changed (for fixed_point)."""
        const_nodes = self.get_constant_nodes()
        res = self.forward_for_node_outputs(const_nodes)
        const_node = [node for node in const_nodes if node.output[0] in res]
        do_eliminate = self.eliminate_const_nodes(const_node, res)
        onnx.checker.check_model(self.model)
        if infer_shapes:
            self.infer_shapes()
        return do_eliminate

    def run(self, dump):
        """Fold constants to a fixed point, prune dead nodes, return the model."""
        fixed_point(self.constant_folding)
        self.remove_unused_nodes()
        #if dump:
        # dump_model(self.model, "constant_opt.onnx")
        return self.model
def onnx_opt(model, batch_size, dump=False):
    """Optimize an ONNX model exported from PyTorch.

    First folds constants and prunes unused nodes, then registers a set of
    fold/unfold rewrite patterns (one FoldUnfoldInfo per PyTorch op that the
    exporter decomposes into primitive ONNX nodes) and applies them via
    Form_Deform.  Pattern inputs use "input" placeholders, `op.output`
    chaining, and "dont_care" wildcards; semantics of PesudoNode arguments
    are defined by the project's pattern matcher -- TODO confirm details
    against Form_Deform.
    """
    constant_opt = OnnxOpt(model, batch_size)
    model = constant_opt.run(dump)
    fdef = Form_Deform(model)
    # torch.not_equal: Equal followed by Not folds into Equal{not: True}
    eq_not_op0 = PesudoNode("Equal", ["input", "input"])
    eq_not_op1 = PesudoNode("Not", [eq_not_op0.output,])
    eq_not_op = PesudoNode("Equal", [("input", 0), ("input", 1)], default={"not": True})
    eq_not = FoldUnfoldInfo([eq_not_op0, eq_not_op1], [eq_not_op])
    # torch.std -- unbiased variant (extra Mul/Div rescale the variance)
    std_ub_op0 = PesudoNode("ReduceMean", ["input",], attr_key_map=[("axes", "dim"),])
    std_ub_op1 = PesudoNode("Sub", ["input", std_ub_op0.output])
    std_ub_op2 = PesudoNode("Mul", [std_ub_op1.output, std_ub_op1.output])
    std_ub_op3 = PesudoNode("ReduceMean", [std_ub_op2.output,], attr_key_map=["keepdims",])
    std_ub_op5 = PesudoNode("Mul", [std_ub_op3.output, "dont_care"])
    std_ub_op7 = PesudoNode("Div", [std_ub_op5.output, "dont_care"])
    std_ub_op8 = PesudoNode("Sqrt", [std_ub_op7.output,])
    std_ub_op = PesudoNode("Std", [("input", 0),], attr_key_map=["dim", "keepdims",], default={"unbiased": True})
    std_ub = FoldUnfoldInfo([std_ub_op0, std_ub_op1, std_ub_op2, std_ub_op3,
                             std_ub_op5, std_ub_op7, std_ub_op8], [std_ub_op])
    # torch.std -- biased variant (plain mean of squared deviations)
    std_op0 = PesudoNode("ReduceMean", ["input",], attr_key_map=[("axes", "dim"),])
    std_op1 = PesudoNode("Sub", ["input", std_op0.output])
    std_op2 = PesudoNode("Mul", [std_op1.output, std_op1.output])
    std_op3 = PesudoNode("ReduceMean", [std_op2.output,], attr_key_map=["keepdims",])
    std_op4 = PesudoNode("Sqrt", [std_op3.output,])
    std_op = PesudoNode("Std", [("input", 0),], attr_key_map=["dim", "keepdims",], default={"unbiased": False})
    std = FoldUnfoldInfo([std_op0, std_op1, std_op2, std_op3, std_op4], [std_op])
    # torch.Where -- unfolded (not folded) into mask arithmetic:
    # where(m, x, y) == m*x + (1 - m)*y
    where_op = PesudoNode("Where", ["input", "input", "input"])
    where_op0 = PesudoNode("Mul", [("input", 0), ("input", 1)]) # mask * cond
    where_op1 = PesudoNode("Constant", const_value=[-1])
    where_op2 = PesudoNode("Constant", const_value=[1])
    where_op3 = PesudoNode("Mul", [("input", 0), (where_op1.output, 0)]) # mask * -1
    where_op4 = PesudoNode("Add", [(where_op3.output, 0), (where_op2.output, 0)]) # -mask + 1
    where_op5 = PesudoNode("Mul", [("input", 2), (where_op4.output, 0)]) # y * (-mask + 1)
    where_op6 = PesudoNode("Add", [(where_op0.output, 0), (where_op5.output, 0)]) # -mask + 1
    where = FoldUnfoldInfo([where_op], [where_op0, where_op1, where_op2, where_op3, where_op4, where_op5, where_op6])
    # torch.LayerNorm -- with learned scale/shift (elementwise_affine=True)
    layernorm_aff_op0 = PesudoNode("ReduceMean", ["input",], attr_key_map=[("axes",)])
    layernorm_aff_op1 = PesudoNode("Sub", ["input", layernorm_aff_op0.output])
    layernorm_aff_op3 = PesudoNode("Pow", [layernorm_aff_op1.output, "dont_care"])
    layernorm_aff_op4 = PesudoNode("ReduceMean", [layernorm_aff_op3.output,])
    layernorm_aff_op6 = PesudoNode("Add", [layernorm_aff_op4.output, "eps"])
    layernorm_aff_op7 = PesudoNode("Sqrt", [layernorm_aff_op6.output,])
    layernorm_aff_op8 = PesudoNode("Div", [layernorm_aff_op1.output, layernorm_aff_op7.output])
    layernorm_aff_op9 = PesudoNode("Mul", [layernorm_aff_op8.output, "input"])
    layernorm_aff_op10 = PesudoNode("Add", [layernorm_aff_op9.output, "input"])
    layernorm_aff_op = PesudoNode("LayerNorm", [("input", 0), ("input", 1), ("input", 2)],
                                  attr_key_map=["axes", "eps",], default={"elementwise_affine": True})
    layernorm_aff = FoldUnfoldInfo([layernorm_aff_op0, layernorm_aff_op1,
                                    layernorm_aff_op3, layernorm_aff_op4,
                                    layernorm_aff_op6, layernorm_aff_op7, layernorm_aff_op8,
                                    layernorm_aff_op9, layernorm_aff_op10], [layernorm_aff_op])
    # torch.LayerNorm -- without affine parameters
    layernorm_op0 = PesudoNode("ReduceMean", ["input",], attr_key_map=[("axes",)])
    layernorm_op1 = PesudoNode("Sub", ["input", layernorm_op0.output])
    layernorm_op3 = PesudoNode("Pow", [layernorm_op1.output, "dont_care"])
    layernorm_op4 = PesudoNode("ReduceMean", [layernorm_op3.output, ])
    layernorm_op6 = PesudoNode("Add", [layernorm_op4.output, "eps"])
    layernorm_op7 = PesudoNode("Sqrt", [layernorm_op6.output, ])
    layernorm_op8 = PesudoNode("Div", [layernorm_op1.output, layernorm_op7.output])
    layernorm_op = PesudoNode("LayerNorm", [("input", 0),],
                              attr_key_map=["axes", "eps", ], default={"elementwise_affine": False})
    layernorm = FoldUnfoldInfo([layernorm_op0, layernorm_op1, layernorm_op3, layernorm_op4,
                                layernorm_op6, layernorm_op7, layernorm_op8], [layernorm_op])
    # matmul + bias -> single 3-input MatMul (bias must be broadcastable)
    matmul_bias_op0 = PesudoNode("MatMul", ["input", ("input", "tensor")])
    matmul_bias_op1 = PesudoNode("Add", [matmul_bias_op0.output, ("input", "tensor")], constraint="broadcast")
    matmul_bias_op = PesudoNode("MatMul", [("input", 0), ("input", 1), ("input", 2)],)
    matmul_bias = FoldUnfoldInfo([matmul_bias_op0, matmul_bias_op1], [matmul_bias_op])
    # hard_swish: x * clip(x + 3, 0, 6) / 6 -> x * HardSigmoid(x)
    hard_swish_op0 = PesudoNode("Add", ["input", ("input", "tensor")])
    hard_swish_op1 = PesudoNode("Clip", [hard_swish_op0.output, ("input", "tensor"), ("input", "tensor")])
    hard_swish_op2 = PesudoNode("Mul", ["input", hard_swish_op1.output])
    hard_swish_op3 = PesudoNode("Div", [hard_swish_op2.output, ("input", "tensor")])
    hard_swish_op_0 = PesudoNode("HardSigmoid", [("input", 0),])
    hard_swish_op_1 = PesudoNode("Mul", [("input", 0), (hard_swish_op_0.output, 0)],)
    hard_swish = FoldUnfoldInfo([hard_swish_op0, hard_swish_op1, hard_swish_op2, hard_swish_op3],
                                [hard_swish_op_0, hard_swish_op_1])
    # hard_sigmoid: clip(x + 3, 0, 6) / 6 -> HardSigmoid
    hard_sigmoid_op0 = PesudoNode("Add", ["input", ("input", "tensor")])
    hard_sigmoid_op1 = PesudoNode("Clip", [hard_sigmoid_op0.output, ("input", "tensor"), ("input", "tensor")])
    hard_sigmoid_op2 = PesudoNode("Div", [hard_sigmoid_op1.output, ("input", "tensor")])
    hard_sigmoid_op = PesudoNode("HardSigmoid", [("input", 0),])
    hard_sigmoid = FoldUnfoldInfo([hard_sigmoid_op0, hard_sigmoid_op1, hard_sigmoid_op2],
                                  [hard_sigmoid_op,])
    # matmul_HSigmoid(relu6_inplace): MatMul followed by clip/div -> MatMul + HardSigmoid
    matmul_Hsigmoid_op0 = PesudoNode("MatMul", ["input", ("input", "tensor"), ("input", "tensor")])
    matmul_Hsigmoid_op1 = PesudoNode("Clip", [matmul_Hsigmoid_op0.output, ("input", "tensor"), ("input", "tensor")])
    matmul_Hsigmoid_op2 = PesudoNode("Div", [matmul_Hsigmoid_op1.output, ("input", "tensor")])
    matmul_Hsigmoid_op_0 = PesudoNode("MatMul", [("input", 0), ("input", 1)])
    matmul_Hsigmoid_op_1 = PesudoNode("HardSigmoid", [(matmul_Hsigmoid_op_0.output, 0),])
    matmul_Hsigmoid = FoldUnfoldInfo([matmul_Hsigmoid_op0, matmul_Hsigmoid_op1, matmul_Hsigmoid_op2,],
                                     [matmul_Hsigmoid_op_0, matmul_Hsigmoid_op_1])
    # Pattern order matters: earlier rewrites can enable later ones
    # (e.g. matmul_bias produces the 3-input MatMul that matmul_Hsigmoid
    # expects) -- TODO confirm against Form_Deform.run semantics.
    fdef.run([eq_not, std_ub, std, where, layernorm_aff, layernorm, matmul_bias,
              hard_swish, hard_sigmoid, matmul_Hsigmoid])
    if dump:
        dump_model(model, "final_opt.onnx")
    return model
| 2.03125 | 2 |
tutoriales/tutorial08_03.py | pgentil/PC | 0 | 12764111 | #%% VARIABLES
'Variables'
# var1 = 10
# var2 = "Hello World"
# var3 = None
# var4 = 3.5
# if 0:
# print ("hello world 0") #el 0 fnciona como Falsey
# if 1:
# print ("hello world 1") #el 1 funciona como Truthy
# x1 = 100
# x2 = 20
# x3 = -5
# y = x1 + x2 + x3
# z = x1 - x2 * x3
# w = (x1+x2+x3) - (x1-x2*x3)
# num = 23
# data = True
# var = 40.0
# res1 = num + data
# res2 = data/var
# res3 = num*var
# num = 23
# data = False
# var = 40.0
# res1 = num + data
# res2 = data/var
# res3 = num*var
# result = 70
# data = False
# value = '158'
# var1 = result*data
# var2 = data+value
# var3 = result/value
# result = 5
# value = '158'
# location = 'payunia'
# name = 'Mike '
# phrase = 'needs a coffee'
# full_phrase = name + phrase
# extended_phrase = full_phrase + ' urgente!'
# subnet = '192.168.0'
# host = '34'
# ip = subnet + '.' + host
# message = 'IP address: ' + ip
#%% ACTIVIDADES
'Actividad 1'
a = 50
b = 6
c = 8
# d = 2a + 1/(b - 5c)
d = 2 * a + 1 / (b - 5 * c)
# The sub-expression (a*b + c)/(2 - a) appears three times; compute it once.
_frac = (a * b + c) / (2 - a)
d1 = (_frac + (_frac + 2) / (c + b)) * (1 + _frac)
'Actividad 2'
# word0 = 'Life'
# word1 = 'ocean'
# word2 = 'up'
# word3 = 'down'
# word4 = word0 + ' is like the ' + word1 + ", it goes " + word2 + " and " + word3
word0 = 'Mars'
word1 = 'Earth'
word2 = 'round'
word3 = 'round'
word4 = (word0 + ' is like the ' + word1 + ", it goes " + word2 +
         " and " + word3)
sympy/matrices/expressions/tests/test_funcmatrix.py | ovolve/sympy | 319 | 12764112 | from sympy import (symbols, FunctionMatrix, MatrixExpr, Lambda, Matrix)
def test_funcmatrix():
    """FunctionMatrix built from Lambda((i, j), i - j): element access,
    shape reporting, explicit conversion, and closure under arithmetic."""
    i, j = symbols('i,j')
    X = FunctionMatrix(3, 3, Lambda((i, j), i - j))
    assert X.shape == (3, 3)
    assert X.rows == 3 and X.cols == 3
    assert X[1, 1] == 0 and X[1, 2] == -1
    assert Matrix(X) == Matrix(3, 3, lambda r, c: r - c)
    assert isinstance(X*X + X, MatrixExpr)
resources/Wireshark/WiresharkDissectorFoo/test/suite_dfilter/group_double.py | joshis1/C_Programming | 2 | 12764113 | # Copyright (c) 2013 by <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: GPL-2.0-or-later
import unittest
import fixtures
from suite_dfilter.dfiltertest import *
@fixtures.uses_fixtures
class case_double(unittest.TestCase):
    """Display-filter comparisons against the IEEE-double field icmp.resptime.

    The capture contains a single response time of 492.204, so every filter
    matches exactly one packet or none.
    """
    trace_file = "icmp.pcapng.gz"

    def test_eq_1(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime == 492.204", 1)

    def test_eq_2(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime == 492.205", 0)

    def test_gt_1(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime > 492", 1)

    def test_gt_2(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime > 492.203", 1)

    def test_gt_3(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime > 493", 0)

    def test_ge_1(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime >= 493", 0)

    def test_ge_2(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime >= 492", 1)

    def test_ge_3(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime >= 492.204", 1)

    def test_lt_1(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime < 493", 1)

    def test_lt_2(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime < 492", 0)

    def test_lt_3(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime < 492.204", 0)

    def test_le_1(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime <= 492.204", 1)

    def test_le_2(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime <= 493", 1)

    def test_le_3(self, checkDFilterCount):
        checkDFilterCount("icmp.resptime <= 492", 0)
| 2.078125 | 2 |
scrapers/blazar2.py | joshdabosh/neutrinos-analysis-2018 | 0 | 12764114 | from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup as bs
from selenium import webdriver
import json
# Launch Chrome and load the BZCAT blazar catalogue page.
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get("https://www.asdc.asi.it/bzcat/")

# The catalogue table is built client-side; run the page's own helpers to
# render every column, then grab the resulting DOM.
page = driver.execute_script("setPageSizeValue(0); setHead(Head, 1, 0); writeBottom(); setHead(Head, 1, 2); writeBottom(); setHead(Head, 1, 2); writeBottom(); setHead(Head, 1, 3); writeBottom(); setHead(Head, 1, 6); writeBottom(); return document.body.innerHTML")

soup = bs(page, "html.parser")
catalog = soup.find("div", {"id": "bottom"}).find("table", {"class": "table_catalog"})

lines = []
for row in catalog.find("tbody").findAll(
        "tr", {"id": lambda x: x == "second_line" or x == "first_line"}):
    cells = [cell.text.strip() for cell in row.findAll("td")]
    lines.append(cells[3:7])  # columns 3-6 of the row

final = [{"a": a, "ra": ra, "de": de, "z": z} for a, ra, de, z in lines]
with open("../data/blazar2.json", "w") as out_file:
    out_file.write(json.dumps(
        final,
        sort_keys=True,
        indent=4,
        separators=(",", ": ")
    ))
print("finished")
| 3.0625 | 3 |
code_examples/tensorflow/basic_nmt_example/nmt-tf.py | Splendon/examples | 0 | 12764115 | # Copyright 2019 Graphcore Ltd.
import tensorflow as tf
import os
import time
import argparse
import numpy as np
import random
from tensorflow.python.ipu.scopes import ipu_scope
from tensorflow.python.ipu import ipu_compiler
from seq2seq_edits import AttentionWrapperNoAssert, dynamic_decode, TrainingHelperNoCond, GreedyEmbeddingHelperNoCond
from data_gen.reader import Data, Vocabulary
from tensorflow.python.ipu import utils
import util
# Python 2 compatibility: make `input` behave like Python 3's input
# (py2's raw_input) when running under the old interpreter.
try:
    import __builtin__
    input = getattr(__builtin__, 'raw_input')
except (ImportError, AttributeError):
    pass
tf.logging.set_verbosity(tf.logging.ERROR)
time_major = True  # sequences are laid out [time, batch, ...]
DTYPE = tf.float16
forget_bias = 1.0  # LSTM forget-gate bias
max_gradient_norm = 1  # per-gradient clipping norm
learning_rate = 1
CHECKPOINT_FILE = './weights/'
def print_data(src, src_vocab, tgt, tgt_vocab):
    """Pretty-print decoded (source -> prediction) pairs.

    `src` and `tgt` are time-major id matrices; each column is one example.
    Sequences are cut at the vocabulary's end token; if the prediction never
    emits one, everything but its last token is shown.
    """
    for column, src_ids in enumerate(src.T):
        tgt_ids = tgt.T[column]
        src_end = list(src_ids).index(src_vocab.end_id())
        try:
            tgt_end = list(tgt_ids).index(tgt_vocab.end_id())
        except ValueError:
            tgt_end = len(tgt_ids) - 1
        print("{} -> {}".format(
            ''.join(src_vocab.int_to_string(src_ids[:src_end])),
            ''.join(tgt_vocab.int_to_string(tgt_ids[:tgt_end])),
        ))
class Nmt(object):
    """Seq2seq date-normalization model (human-readable date -> YYYY-MM-DD)
    built from tf.contrib.seq2seq components and compiled for the IPU.
    """

    def __init__(self, opts):
        self.opts = opts
        self.src_length = opts.sequence_length
        self.tgt_length = 11  # YYYY-MM-DD<eot>

    def _build_generator(self, data, vocab):
        """Yield feed dicts of randomly sampled batches, forever.

        In inference mode the first yield is a warm-up batch (used to force
        graph compilation); in interactive mode the source is read from stdin
        on every iteration.
        """
        instance_id = range(len(data.inputs))
        priming = True
        if priming and self.opts.infer:
            priming = False
            batch_ids = random.sample(instance_id, self.opts.batch_size)
            src = np.array(data.inputs[batch_ids], dtype=np.int32)
            yield {self.placeholders['source']: src.T, }
        while True:
            batch_ids = random.sample(instance_id, self.opts.batch_size)
            src = np.array(data.inputs[batch_ids], dtype=np.int32)
            if self.opts.infer:
                if self.opts.interact:
                    src = np.array([vocab[0].string_to_int(input("Enter a human date: ").strip())])
                yield {
                    self.placeholders['source']: src.T,
                }
            else:
                # Decoder input: target shifted right with <sos> prepended.
                tgt = np.roll(np.array(data.targets[batch_ids], dtype=np.int32), 1)
                tgt[:, 0] = self.start_id
                lbl = np.array(data.targets[batch_ids], dtype=np.int32)
                # Loss mask: 1 up to and including the end token, 0 after.
                mask = np.zeros(lbl.shape)
                for i, label in enumerate(lbl):
                    end_idx = list(label).index(self.end_id)
                    mask[i][:end_idx+1] = 1
                yield {
                    self.placeholders['source']: src.T,
                    self.placeholders['target']: tgt.T,
                    self.placeholders['label']: lbl.T,
                    self.placeholders['mask']: mask.T
                }

    def _build_inputs(self):
        """Load vocabularies and data, create time-major placeholders.

        Returns (batch generator, (input_vocab, output_vocab)).
        """
        input_vocab = Vocabulary('./data/human_vocab.json', padding=self.src_length)
        output_vocab = Vocabulary('./data/machine_vocab.json', padding=self.tgt_length)
        self.src_vocab_size = input_vocab.size()
        self.tgt_vocab_size = output_vocab.size()
        self.start_id = output_vocab.start_id()
        self.end_id = output_vocab.end_id()
        data_file = './data/validation.csv' if self.opts.infer else './data/training.csv'
        data = Data(data_file, input_vocab, output_vocab)
        data.load()
        data.transform()
        # All placeholders are [sequence, batch] (time-major).
        self.placeholders = {
            'source': tf.placeholder(tf.int32, shape=[self.src_length, self.opts.batch_size], name="source"),
            'target': tf.placeholder(tf.int32, shape=[self.tgt_length, self.opts.batch_size], name="target"),
            'label': tf.placeholder(tf.int32, shape=[self.tgt_length, self.opts.batch_size], name="label"),
            'mask': tf.placeholder_with_default(
                tf.constant(1, shape=[self.tgt_length, self.opts.batch_size], dtype=tf.float16),
                [self.tgt_length, self.opts.batch_size],
                name="mask")
        }
        vocab = (input_vocab, output_vocab)
        generator = self._build_generator(data, vocab)
        return generator, vocab

    def infer(self):
        """Restore the latest checkpoint and run greedy decoding."""
        def build_infer():
            embedding = Nmt._build_embedding(self.src_vocab_size, self.opts.embedding_size,
                                             name="source_embedding")
            input_, encoder_outputs, encoder_state = self._build_encoder(embedding)
            embedding = Nmt._build_embedding(self.tgt_vocab_size, self.opts.embedding_size, name="tgt_embedding")
            samples, logits = self._build_decoder(encoder_outputs, encoder_state, embedding, train=False)
            return samples, logits

        with ipu_scope('/device:IPU:0'):
            data, vocab = self._build_inputs()
            batch = ipu_compiler.compile(build_infer, [])

        # Create a restoring object
        saver = tf.train.Saver()

        ipu_options = util.get_config(report_n=0)
        utils.configure_ipu_system(ipu_options)
        session = tf.Session()
        checkpoint = CHECKPOINT_FILE + 'ckpt'
        saver.restore(session, checkpoint)

        # Run a dummy value to force the graph compilation
        session.run(batch, feed_dict=next(data))
        while True:
            feed_dict = next(data)
            predictions, _ = session.run(batch, feed_dict=feed_dict)
            print_data(feed_dict[self.placeholders['source']], vocab[0], predictions, vocab[1])
            if not self.opts.interact:
                break

    def train(self):
        """Train with SGD, logging throughput and checkpointing whenever the
        running average loss improves."""
        def build_train():
            embedding = Nmt._build_embedding(self.src_vocab_size, self.opts.embedding_size,
                                             name="source_embedding")
            input_, encoder_outputs, encoder_state = self._build_encoder(embedding)
            embedding = Nmt._build_embedding(self.tgt_vocab_size, self.opts.embedding_size, name="tgt_embedding")
            samples, logits = self._build_decoder(encoder_outputs, encoder_state, embedding, train=True)
            loss, update = self._build_optimiser(logits)
            return loss, samples, logits, update

        with ipu_scope('/device:IPU:0'):
            data, _ = self._build_inputs()
            batch = ipu_compiler.compile(build_train, [])

        # Create a restoring object
        saver = tf.train.Saver()
        if self.opts.save_graph:
            # Dump the graph to a logdir
            writer = tf.summary.FileWriter(os.path.join('./logs', 'NMT', time.strftime('%Y%m%d_%H%M%S_%Z')))
            writer.add_graph(tf.get_default_graph())

        ipu_options = util.get_config(report_n=0)
        utils.configure_ipu_system(ipu_options)
        session = tf.Session()
        checkpoint = CHECKPOINT_FILE + 'ckpt'
        if self.opts.ckpt:
            saver.restore(session, checkpoint)
        else:
            utils.move_variable_initialization_to_cpu()
            session.run(tf.global_variables_initializer())
        print("Init done.")

        session.run(batch, feed_dict=next(data))  # Warmup
        duration = 0
        avg_loss = 0
        best_loss = float('Inf')
        for e in range(1, 1 + self.opts.steps):
            start = time.time()
            l, _, _ = session.run(batch, feed_dict=next(data))
            duration += time.time() - start
            avg_loss += l
            # Log every 100 steps for the first 1000 steps, then every 1000.
            if (e <= 1000 and not e % 100) or not e % 1000:
                duration /= 100 if e <= 1000 else 1000
                avg_loss /= 100 if e <= 1000 else 1000
                print("Step: {:>5}. Average Loss {:.3}. Items/sec {:.4}. Tokens/sec {}".format(
                    e,
                    avg_loss,
                    self.opts.batch_size / duration,
                    self.opts.batch_size * (self.src_length + self.tgt_length) / duration))
                if avg_loss < best_loss:
                    best_loss = avg_loss
                    saver.save(session, checkpoint)
                duration = 0
                avg_loss = 0

    @staticmethod
    def _build_embedding(vocab_size, embedding_size, name="embedding"):
        """Create a fixed (non-trainable) random-uniform embedding table."""
        with tf.variable_scope("embedding", dtype=DTYPE, use_resource=True) as scope:
            # Random embedding
            embedding = tf.get_variable(
                name, [vocab_size, embedding_size], scope.dtype,
                initializer=tf.initializers.random_uniform(maxval=1.0, dtype=scope.dtype), trainable=False)
        return embedding

    @staticmethod
    def _build_cell(num_units, num_layers):
        """Return one LSTM cell, or a MultiRNNCell stack for num_layers > 1."""
        # Fixed: was `num_layers is 1` -- identity comparison with an int
        # literal only works via CPython's small-int caching; use equality.
        if num_layers == 1:
            return tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=forget_bias, state_is_tuple=False)
        cell_list = []
        for _ in range(num_layers):
            cell_list.append(tf.contrib.rnn.BasicLSTMCell(
                num_units,
                forget_bias=forget_bias, state_is_tuple=False))
        return tf.contrib.rnn.MultiRNNCell(cell_list)

    def _build_encoder(self, embedding):
        """Embed the source ids and run the (optionally bidirectional)
        encoder RNN.  Returns (source placeholder, outputs, final state)."""
        with tf.variable_scope("input", dtype=DTYPE, use_resource=True) as scope:
            source = self.placeholders['source']
            encoder_emb_inp = tf.nn.embedding_lookup(
                embedding, source)

        with tf.variable_scope("encoder", dtype=DTYPE, use_resource=True) as scope:  # use resource
            dtype = scope.dtype
            cell = Nmt._build_cell(self.opts.num_units, self.opts.num_layers)
            if self.opts.bi:
                # Forward/backward outputs and states are summed, not concatenated.
                outputs, states = tf.nn.bidirectional_dynamic_rnn(
                    cell,
                    Nmt._build_cell(self.opts.num_units, self.opts.num_layers),
                    encoder_emb_inp,
                    dtype=dtype,
                    time_major=time_major,
                    swap_memory=False)
                encoder_outputs = tf.add_n(outputs)
                encoder_state = states[0] + states[1]
            else:
                encoder_outputs, encoder_state = tf.nn.dynamic_rnn(
                    cell,
                    encoder_emb_inp,
                    dtype=dtype,
                    time_major=time_major,
                    swap_memory=False)
        return source, encoder_outputs, encoder_state

    def _build_attention(self, encoder_outputs, decoder_cell):
        """Wrap `decoder_cell` with Luong or Bahdanau attention over the
        encoder outputs."""
        with tf.variable_scope("attention", dtype=DTYPE, use_resource=True) as scope:
            # Attention is batch major
            inputs = tf.transpose(encoder_outputs, [1, 0, 2])
            if self.opts.attention == "luong":
                attention_mechanism = tf.contrib.seq2seq.LuongAttention(
                    self.opts.num_units,
                    inputs,
                    dtype=scope.dtype,
                )
            else:
                attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
                    self.opts.num_units,
                    inputs,
                    dtype=scope.dtype,
                )
            return AttentionWrapperNoAssert(
                decoder_cell, attention_mechanism)

    def _build_decoder(self, encoder_outputs, encoder_state, embedding, train=False):
        """Build the (attention-wrapped) decoder.

        train=True uses teacher forcing over the shifted targets; otherwise
        a greedy embedding helper decodes from <sos> until the end token.
        Returns (sample_ids, logits).
        """
        with tf.variable_scope("decoder", dtype=DTYPE, use_resource=True) as decoder_scope:
            dtype = decoder_scope.dtype
            tgt_length = self.src_length * 2
            decoder_num_units = self.opts.num_units
            atten_num_units = self.opts.num_units

            # RNN Cell
            cell = Nmt._build_cell(decoder_num_units, self.opts.num_layers)
            initial_state = encoder_state

            # Attention wrapper
            if self.opts.attention:
                cell = self._build_attention(encoder_outputs, cell)
                initial_state = tf.contrib.seq2seq.AttentionWrapperState(
                    cell_state=encoder_state,
                    attention=tf.zeros([self.opts.batch_size, atten_num_units], dtype),
                    time=tf.constant(0, tf.int32),
                    alignments=tf.zeros([self.opts.batch_size, self.src_length], dtype),
                    alignment_history=(),
                    attention_state=tf.zeros([self.opts.batch_size, self.src_length], dtype)
                )

            # Projection Layer
            projection_layer = tf.layers.Dense(units=self.tgt_vocab_size, use_bias=False, name="projection")
            if train:
                tgt_length = self.tgt_length
                target = self.placeholders['target']
                decoder_emb_inp = tf.nn.embedding_lookup(
                    embedding, target)
                helper = TrainingHelperNoCond(
                    decoder_emb_inp, np.full([self.opts.batch_size], tgt_length, dtype=np.int32), time_major=time_major)
            else:
                # Inference
                tgt_sos_id = self.start_id
                tgt_eos_id = self.end_id
                start_tokens = np.full([self.opts.batch_size], tgt_sos_id, dtype=np.int32)
                end_token = tgt_eos_id
                helper = GreedyEmbeddingHelperNoCond(
                    embedding, start_tokens, end_token)

            decoder = tf.contrib.seq2seq.BasicDecoder(
                cell,
                helper,
                initial_state=initial_state,
                output_layer=projection_layer if not train else None  # applied per timestep
            )

            # Dynamic decoding
            outputs, final_context_state, _ = dynamic_decode(  # Contains the XLA check
                decoder,
                maximum_iterations=tgt_length,  # Required for static TensorArrays
                output_time_major=time_major,
                swap_memory=False,
                scope=decoder_scope)
            if train:
                # Specify dynamic shapes to avoid Assert
                logits = outputs.rnn_output
                logits.set_shape([tgt_length, self.opts.batch_size, atten_num_units])
                # In training the projection is applied once over the whole sequence.
                logits = projection_layer(logits)
                return outputs.sample_id, logits
            else:
                return outputs.sample_id, outputs.rnn_output

    def _build_optimiser(self, logits):
        """Masked softmax cross-entropy averaged over the batch, with
        per-gradient norm clipping and a plain SGD update."""
        with tf.variable_scope("loss", use_resource=True):
            labels = self.placeholders['label']
            mask = self.placeholders['mask']
            # Logits is dynamic so an Assert is added to check shapes
            crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels, logits=logits)
            train_loss = (tf.reduce_sum(crossent*mask) / self.opts.batch_size)

        # Calculate and clip gradients
        params = tf.trainable_variables()
        gradients = tf.gradients(train_loss, params)
        clipped_gradients = [tf.clip_by_norm(grad, max_gradient_norm) for grad in gradients]
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        update_step = optimizer.apply_gradients(
            zip(clipped_gradients, params))
        return train_loss, update_step
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='NMT model in TensorFlow to run on the IPU')
    parser.add_argument('--infer', action="store_true",
                        help="Inference Only")
    parser.add_argument('--bi', action="store_true",
                        help="Use bidirectional layer in encoder (with outputs summed)")
    parser.add_argument('--attention', choices=['luong', 'bahdanau'], default='luong',
                        help="Add an attention model")
    parser.add_argument('--batch-size', type=int, default=1,
                        help="Set batch-size")
    parser.add_argument('--num-units', type=int, default=512,
                        help="Number of units in each LSTM cell")
    parser.add_argument('--num-layers', type=int, default=1,
                        help="Size of LSTM stack in the encoder and decoder")
    parser.add_argument('--embedding-size', type=int, default=32,
                        help="Size of source and target embedding")
    parser.add_argument('--sequence-length', type=int, default=20,
                        help="Size of input length (by padding or truncating)")
    parser.add_argument('--ckpt', action="store_true",
                        help="load weights from latest checkpoint")
    parser.add_argument('--seed', type=int, default=1984,
                        help="Random seed")
    parser.add_argument('--interact', action="store_true",
                        help="Perform inference on values entered from the command line")
    parser.add_argument('--save-graph', action="store_true",
                        help="Save the graph to './logs' to be viewed by TensorBoard")
    parser.add_argument('--steps', type=int, default=50000,
                        help="Number of steps to complete in training")
    args = parser.parse_args()
    random.seed(args.seed)
    # Interactive mode implies single-example inference.
    if args.interact:
        args.batch_size = 1
        args.infer = True
    print("NMT {}.\n Batch size: {}. Hidden units: {}. Layers: {}.".format(
        "Inference" if args.infer else "Training", args.batch_size, args.num_units, args.num_layers))
    n = Nmt(args)
    if args.infer:
        n.infer()
    else:
        n.train()
| 2.140625 | 2 |
testing/freeze/create_executable.py | solackerman/pytest | 0 | 12764116 | """
Generates an executable with pytest runner embedded using PyInstaller.
"""
if __name__ == '__main__':
    import pytest
    import subprocess

    # Collect pytest's hidden imports so PyInstaller bundles them.
    hidden = []
    for x in pytest.freeze_includes():
        hidden.extend(['--hidden-import', x])
    args = ['pyinstaller', '--noconfirm'] + hidden + ['runtests_script.py']
    # Pass the argv list directly (shell=False): joining into a single shell
    # string breaks on arguments containing spaces or shell metacharacters
    # and is an unnecessary command-injection hazard.
    subprocess.check_call(args)
| 1.992188 | 2 |
plugins/modules/oci_appmgmt_control_monitored_instance_actions.py | LaudateCorpus1/oci-ansible-collection | 0 | 12764117 | #!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc and validation.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_appmgmt_control_monitored_instance_actions
short_description: Perform actions on a MonitoredInstance resource in Oracle Cloud Infrastructure
description:
- Perform actions on a MonitoredInstance resource in Oracle Cloud Infrastructure
- For I(action=activate_monitoring_plugin), activates Resource Plugin for compute instance identified by the instance ocid.
Stores monitored instances Id and its state. Tries to enable Resource Monitoring plugin by making
remote calls to Oracle Cloud Agent and Management Agent Cloud Service.
- For I(action=publish_top_processes_metrics), starts cpu and memory top processes collection.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
monitored_instance_id:
description:
- OCID of monitored instance.
type: str
aliases: ["id"]
required: true
action:
description:
- The action to perform on the MonitoredInstance.
type: str
required: true
choices:
- "activate_monitoring_plugin"
- "publish_top_processes_metrics"
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Perform action activate_monitoring_plugin on monitored_instance
oci_appmgmt_control_monitored_instance_actions:
# required
monitored_instance_id: "ocid1.monitoredinstance.oc1..xxxxxxEXAMPLExxxxxx"
action: activate_monitoring_plugin
- name: Perform action publish_top_processes_metrics on monitored_instance
oci_appmgmt_control_monitored_instance_actions:
# required
monitored_instance_id: "ocid1.monitoredinstance.oc1..xxxxxxEXAMPLExxxxxx"
action: publish_top_processes_metrics
"""
RETURN = """
monitored_instance:
description:
- Details of the MonitoredInstance resource acted upon by the current operation
returned: on success
type: complex
contains:
instance_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of monitored instance.
returned: on success
type: str
sample: "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- Compartment Identifier L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
                - A user-friendly name of the monitored instance. It is bound to L(Compute
                  Instance,https://docs.cloud.oracle.com/Content/Compute/Concepts/computeoverview.htm).
DisplayName is fetched from L(Core Service API,https://docs.cloud.oracle.com/api/#/en/iaas/20160918/Instance/).
returned: on success
type: str
sample: display_name_example
management_agent_id:
description:
- Management Agent Identifier L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
Used to invoke manage operations on Management Agent Cloud Service.
returned: on success
type: str
sample: "ocid1.managementagent.oc1..xxxxxxEXAMPLExxxxxx"
time_created:
description:
- The time the MonitoredInstance was created. An RFC3339 formatted datetime string
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The time the MonitoredInstance was updated. An RFC3339 formatted datetime string
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
monitoring_state:
description:
- Monitoring status. Can be either enabled or disabled.
returned: on success
type: str
sample: ENABLED
lifecycle_state:
description:
- The current state of the monitored instance.
returned: on success
type: str
sample: CREATING
lifecycle_details:
description:
- A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed
state.
returned: on success
type: str
sample: lifecycle_details_example
sample: {
"instance_id": "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"management_agent_id": "ocid1.managementagent.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"monitoring_state": "ENABLED",
"lifecycle_state": "CREATING",
"lifecycle_details": "lifecycle_details_example"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.appmgmt_control import AppmgmtControlClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class MonitoredInstanceActionsHelperGen(OCIActionsHelperBase):
    """
    Generated helper that maps module actions onto the OCI SDK client.

    Supported actions:
        activate_monitoring_plugin
        publish_top_processes_metrics
    """

    @staticmethod
    def get_module_resource_id_param():
        # Module option that carries the resource OCID.
        return "monitored_instance_id"

    def get_module_resource_id(self):
        # OCID of the monitored instance this invocation targets.
        return self.module.params.get("monitored_instance_id")

    def get_get_fn(self):
        # Client function used to fetch the current resource state.
        return self.client.get_monitored_instance

    def get_resource(self):
        # Fetch the resource with the SDK's retry/backoff wrapper.
        return oci_common_utils.call_with_backoff(
            self.client.get_monitored_instance,
            monitored_instance_id=self.module.params.get("monitored_instance_id"),
        )

    def activate_monitoring_plugin(self):
        # Fire the action and block until its work request completes.
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.activate_monitoring_plugin,
            call_fn_args=(),
            call_fn_kwargs=dict(
                monitored_instance_id=self.module.params.get("monitored_instance_id"),
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation="{0}_{1}".format(
                self.module.params.get("action").upper(),
                oci_common_utils.ACTION_OPERATION_KEY,
            ),
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )

    def publish_top_processes_metrics(self):
        # Fire the action and block until its work request completes.
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.publish_top_processes_metrics,
            call_fn_args=(),
            call_fn_kwargs=dict(
                monitored_instance_id=self.module.params.get("monitored_instance_id"),
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation="{0}_{1}".format(
                self.module.params.get("action").upper(),
                oci_common_utils.ACTION_OPERATION_KEY,
            ),
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )
# Allow user-supplied customizations to override the generated helper.
MonitoredInstanceActionsHelperCustom = get_custom_class(
    "MonitoredInstanceActionsHelperCustom"
)


class ResourceHelper(
    MonitoredInstanceActionsHelperCustom, MonitoredInstanceActionsHelperGen
):
    # Custom class listed first so its overrides win in the MRO.
    pass
def main():
    """Entry point: build the Ansible argument spec, dispatch the requested
    action on the monitored instance, and exit with the action's result."""
    # Common OCI arguments (auth, wait options, ...); this module only
    # performs actions, so create support is disabled.
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=False, supports_wait=True
    )
    module_args.update(
        dict(
            monitored_instance_id=dict(aliases=["id"], type="str", required=True),
            action=dict(
                type="str",
                required=True,
                choices=["activate_monitoring_plugin", "publish_top_processes_metrics"],
            ),
        )
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    resource_helper = ResourceHelper(
        module=module,
        resource_type="monitored_instance",
        service_client_class=AppmgmtControlClient,
        namespace="appmgmt_control",
    )
    # perform_action dispatches to the helper method named after the action.
    result = resource_helper.perform_action(module.params.get("action"))
    module.exit_json(**result)
if __name__ == "__main__":
    main()
| 1.71875 | 2 |
simpleseo/models.py | gelo-zhukov/django-simple-seo | 1 | 12764118 | from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import gettext_lazy as _
from simpleseo.utils import get_generic_lang_code
class SeoMetadata(models.Model):
    """Per-URL (and per-language) SEO metadata, optionally attached to a
    model instance via a generic foreign key."""
    # Optional link to the object this metadata describes; both halves are
    # nullable so metadata can also exist for arbitrary paths.
    content_type = models.ForeignKey(
        ContentType, on_delete=models.CASCADE, null=True, blank=True)
    object_id = models.PositiveIntegerField(null=True, blank=True)
    content_object = GenericForeignKey('content_type', 'object_id')
    path = models.CharField(verbose_name=_('Path'), max_length=255, db_index=True,
                            help_text=_("This should be an absolute path, excluding "
                                        "the domain name. Example: '/foo/bar/'."))
    # NOTE(review): the default is *called* here, so it is evaluated once at
    # import/migration time rather than per-row — confirm this is intended.
    lang_code = models.CharField(verbose_name=_('Language'), max_length=255,
                                 choices=settings.LANGUAGES,
                                 default=get_generic_lang_code())
    title = models.CharField(verbose_name=_('Title'), max_length=255, blank=True,
                             help_text=_("Recommended length: up to 70 symbols"))
    description = models.CharField(verbose_name=_('Description'), max_length=255, blank=True,
                                   help_text=_("Recommended length: up to 160 symbols."))
    keywords = models.CharField(verbose_name=_('Keywords'), max_length=255, blank=True,
                                help_text=_("Recommended length: up to 10 keyword phrases."))
    text = models.TextField(verbose_name=_('Text'), blank=True)
    class Meta:
        verbose_name = _('SEO metadata')
        verbose_name_plural = _('SEO metadata')
        db_table = 'seo_metadata'
        # One metadata row per (path, language) pair.
        unique_together = (('path', 'lang_code'), )
        ordering = ('path', 'lang_code')
    def __str__(self):
        return "Language: %s | URL: %s" % (self.lang_code, self.path)
    def get_absolute_url(self):
        return self.path
def update_seo(sender, instance, **kwargs):
    """post_save receiver: keep SeoMetadata paths in sync with *instance*.

    ``GenericForeignKey`` fields cannot be used in ``filter()`` (Django raises
    a FieldError for ``content_object=...``), so the lookup is performed on
    the underlying ``content_type``/``object_id`` columns instead.
    """
    newpath = instance.get_absolute_url()
    content_type = ContentType.objects.get_for_model(instance)
    SeoMetadata.objects.filter(
        content_type=content_type, object_id=instance.pk
    ).update(path=newpath)
def register_seo_signals():
    """Connect update_seo to post_save for every model in settings.SEO_MODELS.

    Each entry must be an (app_label, model_name) pair whose model defines
    get_absolute_url(); otherwise the configuration is rejected up front.
    """
    for app, model in getattr(settings, 'SEO_MODELS', []):
        ctype = ContentType.objects.get(app_label=app, model=model)
        if not hasattr(ctype.model_class(), 'get_absolute_url'):
            raise ImproperlyConfigured(
                "Needed get_absolute_url method not defined on %s.%s model." % (app, model)
            )
        # weak=False keeps the receiver alive beyond this function's scope.
        models.signals.post_save.connect(update_seo, sender=ctype.model_class(), weak=False)
| 1.921875 | 2 |
cloudpassage/fim_policy.py | patricksanders/cloudpassage-halo-python-sdk | 8 | 12764119 | <filename>cloudpassage/fim_policy.py
"""FimPolicy and FimBaseline classes"""
import cloudpassage.sanity as sanity
from .halo_endpoint import HaloEndpoint
from .http_helper import HttpHelper
class FimPolicy(HaloEndpoint):
    """FimPolicy class:
    The list_all() method allows filtering of results with keyword arguments.
    An exhaustive list of keyword arguments can be found here:
    https://api-doc.cloudpassage.com/help#file-integrity-policies
    Args:
        session (:class:`cloudpassage.HaloSession`): This will define how you
            interact with the Halo API, including proxy settings and API keys
            used for authentication.
    Keyword args:
        endpoint_version (int): Endpoint version override.
    """
    # Singular/plural resource names used to build URLs and parse responses.
    object_name = "fim_policy"
    objects_name = "fim_policies"
    default_endpoint_version = 1
    def endpoint(self):
        """Return endpoint for API requests."""
        return "/v{}/{}".format(self.endpoint_version, self.objects_name)
    @classmethod
    def pagination_key(cls):
        """Defines the pagination key for parsing paged results"""
        return cls.objects_name
    @classmethod
    def object_key(cls):
        """Defines the key used to pull the policy from the json document"""
        return cls.object_name
class FimBaseline(HaloEndpoint):
    """Initializing the FimBaseline class:
    Args:
        session (:class:`cloudpassage.HaloSession`): This will define how you
            interact with the Halo API, including proxy settings and API keys
            used for authentication.
    """
    # Singular/plural resource names used to build URLs and parse responses.
    object_name = "baseline"
    objects_name = "baselines"
    default_endpoint_version = 1
    def endpoint(self, policy_id):
        """Return endpoint for API requests (baselines are nested under a policy)."""
        return "/v{}/fim_policies/{}/{}".format(self.endpoint_version,
                                                policy_id, self.objects_name)
    def list_all(self, fim_policy_id):
        """Returns a list of all baselines for the indicated FIM policy
        Args:
            fim_policy_id (str): ID of fim policy
        Returns:
            list: List of all baselines for the given policy
        """
        request = HttpHelper(self.session)
        endpoint = self.endpoint(fim_policy_id)
        # Safety cap on pagination so a huge result set cannot loop forever.
        max_pages = 30
        response = request.get_paginated(endpoint, self.objects_name,
                                         max_pages)
        return response
    def describe(self, fim_policy_id, baseline_id):
        """Returns the body of the baseline indicated by baseline_id.
        Args:
            fim_policy_id (str): ID of FIM policy
            baseline_id (str): ID of baseline
        Returns:
            dict: Dictionary describing FIM baseline
        """
        request = HttpHelper(self.session)
        endpoint = "{}/{}/details".format(self.endpoint(fim_policy_id),
                                          baseline_id)
        response = request.get(endpoint)
        result = response[self.object_name]
        return result
    def create(self, fim_policy_id, server_id, **kwargs):
        """Creates a FIM baseline
        Args:
            fim_policy_id (str): ID of FIM policy to baseline
            server_id (str): ID of server to use for generating baseline
        Keyword Args:
            expires (int): Number of days from today for expiration of baseline
            comment (str): Guess.
        Returns:
            str: ID of new baseline
        """
        sanity.validate_object_id([fim_policy_id, server_id])
        request = HttpHelper(self.session)
        endpoint = self.endpoint(fim_policy_id)
        # Optional fields default to None; the API treats None as unset.
        request_body = {"baseline": {"server_id": server_id,
                                     "expires": None,
                                     "comment": None}}
        if "expires" in kwargs:
            request_body["baseline"]["expires"] = kwargs["expires"]
        if "comment" in kwargs:
            request_body["baseline"]["comment"] = kwargs["comment"]
        response = request.post(endpoint, request_body)
        policy_id = response["baseline"]["id"]
        return policy_id
    def delete(self, fim_policy_id, fim_baseline_id):
        """Delete a FIM baseline by ID
        Args:
            fim_policy_id (str): ID of FIM policy
            fim_baseline_id (str): ID of baseline to be deleted
        Returns:
            None if successful, exceptions throw otherwise.
        """
        sanity.validate_object_id([fim_policy_id, fim_baseline_id])
        request = HttpHelper(self.session)
        endpoint = "{}/{}".format(self.endpoint(fim_policy_id),
                                  fim_baseline_id)
        request.delete(endpoint)
        return None
    def update(self, fim_policy_id, fim_baseline_id, server_id):
        """Update a FIM policy baseline.
        Args:
            fim_policy_id (str): ID of fim policy
            fim_baseline_id (str): ID of baseline to be updated
            server_id (str): ID of server to use when generating new baseline
        Returns:
            None if successful, exceptions throw otherwise.
        """
        sanity.validate_object_id([fim_policy_id, fim_baseline_id, server_id])
        request = HttpHelper(self.session)
        endpoint = "{}/{}".format(self.endpoint(fim_policy_id),
                                  fim_baseline_id)
        request_body = {"baseline": {"server_id": server_id}}
        request.put(endpoint, request_body)
        return None
| 2.515625 | 3 |
tree/python/leetcode106_Construct_Binary_Tree_from_Inorder_and_Postorder_Traversal.py | wenxinjie/leetcode | 0 | 12764120 | # Given inorder and postorder traversal of a tree, construct the binary tree.
# Note:
# You may assume that duplicates do not exist in the tree.
# For example, given
# inorder = [9,3,15,20,7]
# postorder = [9,15,7,20,3]
# Return the following binary tree:
# 3
# / \
# 9 20
# / \
# 15 7
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def buildTree(self, inorder, postorder):
        """
        :type inorder: List[int]
        :type postorder: List[int]
        :rtype: TreeNode

        The last postorder entry is the root of the current subtree; its
        position in `inorder` splits the remaining values into left and
        right subtrees.  A value->index map plus index arithmetic replaces
        the original per-call list slicing, cutting the overall cost from
        O(n^2) to O(n).  Values are assumed unique (per problem statement).
        """
        if not inorder or not postorder:
            return None
        index_of = {val: i for i, val in enumerate(inorder)}
        # Next postorder position to consume; walked right-to-left so the
        # root of each subtree is taken first.  A one-element list is used
        # so the nested function can mutate it (works on Python 2 and 3).
        post = [len(postorder) - 1]

        def build(lo, hi):
            # Build the subtree covering inorder[lo..hi] inclusive.
            if lo > hi:
                return None
            root_val = postorder[post[0]]
            post[0] -= 1
            root = TreeNode(root_val)
            mid = index_of[root_val]
            # Right subtree first, because postorder is consumed from the end.
            root.right = build(mid + 1, hi)
            root.left = build(lo, mid - 1)
            return root

        return build(0, len(inorder) - 1)
# Time:O(n)
# Space: O(n)
# Difficulty: medium
| 4.09375 | 4 |
imagefarm.py | eyihluyc/cubiciser | 0 | 12764121 | import cv2
import numpy as np
from random import randint
from functools import reduce
from os import walk
from scipy.spatial import ConvexHull
DIMENSIONS = (512, 512)
def fragment_overlay(background_img, masked_fragment):
    """Paste the non-black pixels of *masked_fragment* over *background_img*.

    A pixel is treated as "masked out" when its channel sum is exactly zero
    (pure black), so black regions of the fragment stay transparent.
    """
    mask = masked_fragment.astype(int).sum(-1) == np.zeros(DIMENSIONS)
    background_img = np.where(mask[..., None], background_img, masked_fragment)
    return background_img
def transparent_superimposition(background_img, masked_fragment):
    """Blend the non-black pixels of *masked_fragment* 50/50 with the background.

    Same zero-sum mask as fragment_overlay, but matched pixels are an equal
    weighted mix of both images instead of a hard overwrite.
    """
    mask = masked_fragment.astype(int).sum(-1) == np.zeros(DIMENSIONS)
    background_img = np.where(mask[..., None], background_img, cv2.addWeighted(background_img, 0.5, masked_fragment, 0.5, 0))
    return background_img
def polygon_area(vertices):
    """Area of a simple polygon via the shoelace formula.

    *vertices* is an (n, 2) array of (x, y) points in boundary order.
    """
    xs = vertices[:, 0]
    ys = vertices[:, 1]
    # Shoelace sum over consecutive vertex pairs, plus the wrap-around
    # term for the edge from the last vertex back to the first.
    shoelace = np.dot(xs[:-1], ys[1:]) - np.dot(ys[:-1], xs[1:])
    shoelace += xs[-1] * ys[0] - ys[-1] * xs[0]
    return 0.5 * np.abs(shoelace)
def lab_adjust(image, delta_light=0, clip_limit=1.0):
    """Adjust brightness/contrast in LAB space.

    CLAHE (with *clip_limit*) is applied to the lightness channel, then
    *delta_light* is added to it; A/B channels are left untouched.
    Returns a new BGR image.
    """
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=clip_limit)
    cl = clahe.apply(l)
    # cv2.add saturates (clamps to [0, 255]) rather than wrapping around.
    cl = cv2.add(cl, delta_light)
    limg = cv2.merge((cl, a, b))
    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    return final
def rotate(image, angle=0, scale=1.0):
    """Rotate *image* around its centre by *angle* degrees, scaling by *scale*."""
    center = tuple(ti//2 for ti in DIMENSIONS)
    M = cv2.getRotationMatrix2D(center, angle, scale)
    rotated = cv2.warpAffine(image, M, DIMENSIONS)
    return rotated
def darken(image, delta_light=-25):
    """Lower the LAB lightness channel by 25 units (default)."""
    return lab_adjust(image, delta_light=delta_light)
def lighten(image, delta_light=25):
    """Raise the LAB lightness channel by 25 units (default)."""
    return lab_adjust(image, delta_light=delta_light)
def increase_contrast(image, clip_limit=2.5):
    """Boost local contrast via CLAHE with a higher clip limit."""
    return lab_adjust(image, clip_limit=clip_limit)
def blur(image):
    """Apply a mild 3x3 Gaussian blur."""
    blurred = cv2.GaussianBlur(image, (3, 3), 0)
    return blurred
def do_nothing(image):
    """Identity transform; used as the 'no-op' choice in random pipelines."""
    return image
def random_image_adjustment():
    """Compose a random adjustment pipeline.

    From each row of the module-level `all_transformations` table (contrast,
    brightness, blur), one function is drawn with probabilities
    (0.6 no-op, 0.2, 0.2) and the picks are composed into one callable.
    """
    # change contrast, brightness, rotate?, blur? - functions with respective probabilities of being chosen
    chosen_transformations = []
    for list_of_functions in all_transformations:
        chosen_transformations.append(np.random.choice(list_of_functions, p=[0.6, 0.2, 0.2]))
    return compose_functions(*chosen_transformations)
def compose_functions(*funcs):
    """Compose *funcs* into a single callable.

    The rightmost function is applied first, i.e.
    compose_functions(f, g)(x) == f(g(x)).  With no arguments the
    result is the identity function.
    """
    def composed(value):
        for fn in reversed(funcs):
            value = fn(value)
        return value
    return composed
def load(path='images'):
    """Load every file directly under *path* as an image, resized to DIMENSIONS.

    Returns a uint8 ndarray of shape (n_images, H, W, 3).  Non-image files in
    the directory would make cv2.imread return None and break the resize.
    """
    _, _, filenames = next(walk(path))
    return np.array(list(map(lambda x: cv2.resize(cv2.imread(path + '/' + x), DIMENSIONS), filenames)), dtype='uint8')
def save(image, identifier=123):
    """Write *image* to results/<identifier>_<random>.jpg (random suffix avoids collisions)."""
    cv2.imwrite(f"results/{identifier}_{randint(0, 1000)}.jpg", image)
def random_point(shift=(255, 255), deviation=256):
    """Return a random integer 2-vector near *shift*.

    Each coordinate is drawn uniformly (inclusive) from
    [shift[i] - deviation, shift[i] + deviation].

    The default was changed from a list to a tuple: a mutable default
    argument is shared across calls and could be mutated by a caller.
    The tuple is a drop-in replacement since it is only indexed.
    """
    x = randint(shift[0] - deviation, shift[0] + deviation)
    y = randint(shift[1] - deviation, shift[1] + deviation)
    return np.array([x, y])
def random_mask():
    """Return a DIMENSIONS-sized uint8 mask with a random 9-point convex polygon filled white (255)."""
    mask = np.zeros(DIMENSIONS, dtype='uint8')
    cv2.fillPoly(mask, pts=[random_polygon(9)], color=255)
    return mask
def random_polygon(n):
    """Sample *n* random points in [0, 511)^2 and return their convex hull
    vertices in hull order (so the polygon is guaranteed convex)."""
    points = np.random.randint(0, 511, size=(n, 2))
    hull = ConvexHull(points)
    return points[hull.vertices]
# Transformation table consumed by random_image_adjustment: one row per
# adjustment kind (contrast, brightness, blur); the first entries are the
# high-probability no-ops.
all_transformations = [[do_nothing, do_nothing, increase_contrast],
                       [do_nothing, darken, lighten],
                       [do_nothing, do_nothing, blur]]
| 2.296875 | 2 |
ptrello/cli.py | Ibistylus/ptrello | 0 | 12764122 | # -*- coding: utf-8 -*-
"""Console script for ptrello."""
import sys
import click
import logging
from ptrello import api
from ptrello.core.config import logger
# from ptrello.core.config import settings
# import inspect
# Child logger under the package's "ptrello" namespace.
logger = logging.getLogger("ptrello."+__name__)
default_note = "quicknote.txt"
class Config(object):
    # NOTE(review): appears unused — the pass_config decorator that consumed
    # it is commented out below; kept for backward compatibility.
    l = []
    pass
# pass_config = click.make_pass_decorator(Config, ensure=True)
@click.group(chain=True)
# @pass_config
@click.pass_context
def main(ctx):
    """Root command group.  chain=True lets subcommands be invoked in
    sequence on one command line; ctx.obj['trello'] caches the resolved
    trello objects so later chained commands reuse the first lookup."""
    ctx.obj ={'trello':None}
    pass
@click.pass_context
def populate_context(ctx, args, show_all_lists, board_filter, card_filter, target_list=None):
    """Resolve *args* into trello board/list/card objects and cache them
    in ctx.obj['trello'] (and ctx.obj['target_ctx'] when *target_list* is
    given, e.g. for the `move` command)."""
    input_text = args
    _output = []
    _target = []
    ctx.obj['input_args'] = args
    try:
        # Only do the (expensive) lookup once per chained invocation.
        if not ctx.obj['trello']:
            ctx.obj['input_args'] = args
            e = api.guess_card_list_board(input_text, board_filter=board_filter, card_filter=card_filter,
                                          show_all_lists=show_all_lists)
            _output.extend(e)
            ctx.obj['trello'] = _output
        if target_list:
            ctx.obj['input_args_target_list'] = target_list
            _target = []
            # The target lookup never shows all lists — it must resolve
            # to a concrete destination.
            _target.extend( api.guess_card_list_board(target_list, board_filter=board_filter
                                                      , card_filter=card_filter, show_all_lists=False))
            ctx.obj['target_ctx'] = _target
    except ValueError as err:
        handle_error(err)
        logger.warning("Could not retrieve trello objects. {}".format(err))
@main.command('card')
@click.pass_context
@click.argument('args', nargs=3, required=False)
@click.option('--match_all_lists/--match_intersect_lists', default=False, help='Show all lists, or only those that match')
@click.option('--card_filter', default='open', help='Card filter (open, closed)')
@click.option('--board_filter', default='starred', help='Board filter (starred, open, close)')
def card(ctx, args, match_all_lists, board_filter, card_filter):
    """Resolve the three positional args (board/list/card hints) and print
    the matching items in sorted order."""
    try:
        if not ctx.obj['trello']:
            populate_context(args,board_filter=board_filter, card_filter=card_filter, show_all_lists=match_all_lists)
        print_context_sorted_list()
        # print(50*'*')
        # print_context_cards()
        # print(50*'*')~
        # print_context_lists()
    except ValueError as err:
        handle_error(err)
    pass
@main.command()
@click.pass_context
@click.argument('args', nargs=3, required=False)
@click.option('--match_all_lists/--match_intersect_lists', default=False, help='Show all lists, or only those that match')
@click.option('--card_filter', default='open', help='Card filter (open, closed)')
@click.option('--board_filter', default='starred', help='Board filter (starred, open, close)')
def show(ctx, args, match_all_lists, board_filter, card_filter):
    """Print a detailed view (metadata + comments) of the matching cards."""
    try:
        if not ctx.obj['trello']:
            populate_context(args,board_filter=board_filter, card_filter=card_filter, show_all_lists=match_all_lists)
    except:
        # Lookup failures are reported by populate_context; the detail
        # printer below will simply find nothing to show.
        pass
    try:
        print_context_card_detail()
    except Exception as e:
        logger.error("Handled error occured: {}".format(e.args[0]))
        click.secho(e.args[0], fg='red')
@main.command()
@click.pass_context
@click.argument('args', nargs=3, required=False)
@click.option('--match_all_lists/--match_intersect_lists', default=False, help='Show all lists, or only those that match')
@click.option('--card_filter', default='open', help='Card filter (open, closed)')
@click.option('--board_filter', default='starred', help='Board filter (starred, open, close)')
def add(ctx, args, match_all_lists, board_filter, card_filter):
    """Create a new card.  The last positional argument is the card name;
    the match must resolve to exactly one destination list, and no existing
    open card may already carry that name."""
    try:
        if not ctx.obj['trello']:
            populate_context(args, board_filter=board_filter, card_filter=card_filter,
                             show_all_lists=match_all_lists)
    except Exception:
        # populate_context reports its own errors; the guard below handles
        # the resulting empty context.
        pass
    cards = get_context_filtered_cards()
    # Renamed from `list` — the original shadowed the builtin.
    target_lists = get_context_filtered_lists()
    # Refuse to create when the name already matches existing cards (not
    # unique) or more than one candidate list matched (ambiguous target).
    if len(cards) > 0 or len(target_lists) > 1:
        error_string = "There were {} lists and {} cards matching.Please make the card name is unique " \
                       "and there is only one list to place the card on.".format(len(target_lists), len(cards))
        click.secho(error_string, fg='red')
        return
    description = click.prompt('Enter a description', default='')
    labels = click.prompt('Enter labels seperated by spaces', default='personal', show_default=True)
    due_date = click.prompt('Enter due date', show_default=True, default='')
    # `list` here is the api.add_card keyword name, not the builtin.
    api.add_card(list=ctx.obj['trello'][0]['filtered_lists'][0], name=ctx.obj['input_args'][-1],
                 description=description, labels=labels, due_date=due_date)
@main.command()
@click.pass_context
@click.argument('args', nargs=3, required=False)
@click.option('--text',default=None, required=False)
@click.option('--match_all_lists/--match_intersect_lists', default=False, help='Show all lists, or only those that match')
@click.option('--card_filter', default='open', help='Card filter (open, closed)')
@click.option('--board_filter', default='starred', help='Board filter (starred, open, close)')
def comment(ctx, args, text, match_all_lists, board_filter, card_filter):
    """Add a comment to the single card matched by *args*.  If --text is
    not given, the comment body is prompted for interactively."""
    try:
        if not ctx.obj['trello']:
            populate_context(args, board_filter=board_filter, card_filter=card_filter,
                             show_all_lists=match_all_lists)
    except Exception:
        # populate_context reports its own errors; fall through to the
        # uniqueness check below.
        pass
    cards = get_context_filtered_cards()
    if len(cards) > 1:
        # Bug fix: the original format string had no "{}" placeholder, so
        # the card count was silently dropped from the message.
        error_string = "More than one card found--({}). Could not add comments".format(len(cards))
        click.secho(error_string, fg='red')
        return
    if not text:
        text = click.prompt('Enter comment', show_default=True, default='')
    api.add_comment(cards[0], text)
@main.command()
@click.pass_context
@click.argument('args', nargs=3, required=False)
@click.option('--target_list', nargs=2, required=True)
@click.option('--match_all_lists/--match_intersect_lists', default=False, help='Show all lists, or only those that match')
@click.option('--card_filter', default='open', help='Card filter (open, closed)')
@click.option('--board_filter', default='starred', help='Board filter (starred, open, close)')
def move(ctx, args, target_list, match_all_lists, board_filter, card_filter):
    """Move the matched card(s) to --target_list (a board/list pair),
    confirming first when more than zero cards are selected."""
    try:
        # Reuse args from an earlier chained command (e.g. `card ... move`).
        if not args and ctx.obj['input_args']:
            args = ctx.obj['input_args']
            print(args)
        else:
            pass
        populate_context(args, board_filter=board_filter, card_filter=card_filter
                         , show_all_lists=match_all_lists, target_list=target_list)
        c = get_context_filtered_cards()
        # NOTE(review): when no cards match, the command exits silently.
        if len(c) > 0:
            yn = click.prompt("There are {} cards selected, are you sure you want to move them all?".format(len(c)))
            if str.lower(yn) == 'y':
                target_board = ctx.obj['target_ctx'][0]['board']
                api.move_card(card=c, target_board_id=target_board.id, target_list_id=ctx.obj['target_ctx'][0]['filtered_lists'][0].id)
            else:
                click.secho("card(s) not moved")
                return
    except Exception as e:
        handle_error(e, sys._getframe().f_code.co_name)
# Each getter returns the requested collection from the FIRST entry in
# ctx.obj['trello'] (the for/return pattern exits on the first iteration);
# they return None when the context is empty.
@click.pass_context
def get_context_sorted_list(ctx):
    for obj in ctx.obj['trello']:
        return obj['sorted_list']
@click.pass_context
def get_context_filtered_cards(ctx):
    for obj in ctx.obj['trello']:
        # print(obj['filtered_cards'])
        return obj['filtered_cards']
@click.pass_context
def get_context_filtered_lists(ctx):
    for obj in ctx.obj['trello']:
        return obj['filtered_lists']
def print_context_sorted_list():
    """Echo each item of the sorted-list view in yellow; swallow errors
    (e.g. an empty/unpopulated context) silently."""
    try:
        for item in get_context_sorted_list():
            click.secho(api.print_trello_object(item)[0], fg='yellow')
    except Exception as e:
        pass
def print_context_cards():
    """Echo each matched card's printable form in yellow."""
    for item in get_context_filtered_cards():
        click.secho(api.print_trello_object(item)[0], fg='yellow')
def print_context_lists():
    """Echo each matched list's printable form in yellow."""
    for item in get_context_filtered_lists():
        click.secho(api.print_trello_object(item)[0], fg='yellow')
def get_context_card_detail(get_comments=False):
    """Build a plain-dict summary of every matched card.

    When *get_comments* is true, each card's comments are fetched (an extra
    API call per card) and included newest-last via reversed().
    Returns a list of dicts with keys: short_id, name, board_name,
    list_name, card_created_date, due_date, description, labels, comments.
    """
    list_of_card_dicts = []
    for item in get_context_filtered_cards():
        comments = []
        card_dict = {}
        if get_comments:
            comments.extend(reversed(item.get_comments()))
        card_dict['short_id'] = item.short_id
        card_dict['name'] = item.name
        card_dict['board_name'] = item.board.name
        card_dict['list_name'] = api.get_list_name_for_card(item ,get_context_filtered_lists())
        card_dict['card_created_date'] = item.card_created_date
        card_dict['due_date'] = item.due_date
        card_dict['description'] = item.description
        card_dict['labels'] = item.labels
        card_dict['comments'] = comments
        list_of_card_dicts.append(card_dict)
    return list_of_card_dicts
def print_context_card_detail():
    """Pretty-print every matched card (with comments) to the terminal."""
    loc = get_context_card_detail(True)
    for item in loc:
        # Horizontal rule between cards.
        click.secho(100 * "-", fg='blue')
        click.secho("\n", fg='blue')
        click.secho("### Name({}): {}".format(item['short_id'], item['name']), fg='green', bold=True, nl="\n")
        click.secho("Path: {} > {}".format(item['board_name'],item['list_name']), fg='yellow', nl="\n\n")
        click.secho("Create Date: {} ".format(item['card_created_date']), fg='yellow', nl="\n")
        click.secho("Due Date: {} ".format(item['due_date']), fg='yellow', nl="\n")
        click.secho("Desc: {} ".format(item['description']), fg='yellow', nl="\n")
        click.secho("Labels: {} ".format(item['labels']), fg='yellow', nl="\n")
        click.secho("Comments: ", fg='yellow')
        for comm in item['comments']:
            click.secho("{} - {} ".format(comm['date'], comm['data']['text']), fg='yellow', nl="\n")
        click.secho("\n")
def handle_error(err, name=None):
    """Echo every message attached to *err* in red and log a warning.

    *name* is the calling function's name, used only in the log record.
    The original also concatenated err.args into an unused string, which
    was dead code and crashed with TypeError on non-string args; each
    arg is now echoed directly instead.
    """
    for message in err.args:
        click.secho(message, fg='red')
    logger.warning("{} type error encountered from function {}: {} ".format(type(err), name, err))
# Script entry point: start the CLI with an empty shared click context.
if __name__ == "__main__":
    main(obj={'trello':None})
| 2.421875 | 2 |
resnet_conv_svs.py | brain-research/conv-sv | 67 | 12764123 | <gh_stars>10-100
"""
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Plot the singular values of the official resnet pretrained imagenet
network (as of 5/1/2018).
Load the pretrained resnet network and print out its singular values.
"""
import tensorflow as tf
import numpy as np
import os
import re
import sys
import conv2d_singular_values as convsv
import matplotlib.pyplot as plt
import matplotlib2tikz
from matplotlib2tikz import save as tikz_save
import absl
from absl import flags
from absl import app
# Command-line flags controlling the checkpoint location, plot output, and
# which convolutions are included in the singular-value analysis.
FLAGS = flags.FLAGS
flags.DEFINE_string("checkpoint_dir",
                    "/Users/hsedghi/Documents/git/convo_singular_values/resnet_v2_imagenet_checkpoint/",
                    "Official imagenet resnet checkpoint directory on 5/1/2018")
flags.DEFINE_string("graph",
                    "model.ckpt-250200.meta",
                    "Official imagenet resnet graph on 5/1/2018")
flags.DEFINE_string("plot_output_prefix",
                    "/Users/hsedghi/Documents/git/convo_singular_values//tmp/conv_svd_plot",
                    "File where to output the plot.")
flags.DEFINE_boolean("filter_one_by_one",
                     True,
                     "This is true if convolutions with 1x1 filters are not "
                     "included in the output")
flags.DEFINE_integer("xlim",
                     700000,
                     "The right endpoint of the range of values on the x-axis"
                     "of the plot")
# Deepest conv layer index; used to map layer depth onto a plot colour.
MAX_LAYER_NUMBER = 56.0
def get_layer_number(conv_layer_name):
  """Return the integer N from a 'conv2d_N' op name, or 0 if absent.

  Uses a raw string for the regex (the original relied on '\\d' surviving
  as a literal backslash-d) and `is None` instead of `== None`.
  """
  match = re.search(r'conv2d_(\d+)', conv_layer_name)
  if match is None:
    return 0
  return int(match.group(1))
def conv_singular_values():
  """Compute and plot the singular values of every Conv2D kernel.

  Loads the checkpointed graph, evaluates each (non-1x1, if filtered)
  convolution kernel, computes its operator singular values for the
  layer's spatial size, and plots all spectra coloured by layer depth
  (shallow=red, deep=green).  Saves PNG and TikZ outputs.
  """
  with tf.Graph().as_default() as g:
    saver = tf.train.import_meta_graph(FLAGS.checkpoint_dir + FLAGS.graph,
                                       clear_devices=True)
    with tf.Session().as_default() as sess:
      saver.restore(sess,tf.train.latest_checkpoint(FLAGS.checkpoint_dir))
      svd_list = []  # list of [op_name, descending singular values]
      for op in g.get_operations():
        if op.type == 'Conv2D':
          print '---'
          print op.name
          if len(op.inputs) == 2:
            feature_map, kernel = op.inputs
            print feature_map.shape.as_list()
            batch_size, num_channels, height, width = feature_map.shape.as_list()
            print kernel.shape.as_list()
            kernel_size_height, kernel_size_width, input_channels, output_channels = kernel.shape.as_list()
            # Optionally skip 1x1 convolutions (pure channel mixing).
            if ((FLAGS.filter_one_by_one == False) or
                (kernel_size_height > 1) or
                (kernel_size_width > 1)):
              kernel_np = sess.run(kernel)
              tf_np = convsv.SVD_Conv_Tensor_NP(kernel_np, [height, width])
              # Flatten and sort descending for plotting.
              this_layers_svds = np.flip(np.sort(tf_np.flatten()),0)
              svd_list.append([op.name,this_layers_svds])
              sys.stdout.flush()
  n = float(len(svd_list))  # NOTE(review): n is computed but never used
  for i in range(len(svd_list)):
    name, svds = svd_list[i]
    # Colour by normalized depth: shallow layers red, deep layers green.
    normalized_layer_number = (0.1 + get_layer_number(name))/(0.2 + MAX_LAYER_NUMBER)
    this_color = (1 - normalized_layer_number, normalized_layer_number, 0.1)
    short_name = name.replace('resnet_model/','')
    short_name = short_name.replace('/Conv2D','')
    plt.plot(range(len(svds)), svds, label = short_name, color = this_color)
  axes = plt.gca()
  plt.legend(fontsize='xx-small', ncol=3)
  axes.set_xlim([0,FLAGS.xlim])
  plt.xlabel('Singular value rank',fontsize=20)
  plt.ylabel('Singular value',fontsize=20)
  png_output_name = FLAGS.plot_output_prefix + ".png"
  plot_directory = os.path.dirname(png_output_name)
  if not os.path.isdir(plot_directory):
    os.mkdir(plot_directory)
  f = open(png_output_name, 'w')
  plt.savefig(f, dpi=256)
  tikz_save(FLAGS.plot_output_prefix + '.tex')
def main(argv):
  """absl entry point; positional args are unused."""
  del argv
  conv_singular_values()
if __name__ == "__main__":
  app.run(main)
chain_builder.py | jrigden/name_machine | 0 | 12764124 | import gzip
import io
import json
import os
import random
import time
import requests
import requests_cache
# NOTE(review): this module performs a network fetch at import time; the
# requests_cache layer keeps repeat runs offline after the first fetch.
requests_cache.install_cache()
request = requests.get('https://rpg.rigden.us/seeds_of_infinity/resources/json/names.json')
NAMES = request.json()['data']
HOME_PATH = os.path.dirname(os.path.realpath(__file__))
# All generated chain files are written under <module dir>/json.
JSON_PATH = os.path.join(HOME_PATH, "json")
def json_formatter(data, title):
    """Wrap *data* in the standard metadata envelope and return it as a
    pretty-printed JSON string (non-ASCII characters kept as-is)."""
    payload = {
        'meta': {
            'author': "<NAME>",
            'generator': "https://github.com/jrigden/Seeds_of_Infinity",
            'license': "http://unlicense.org",
            # 'time_created' intentionally omitted (was disabled upstream).
            'title': title,
        },
        'data': data,
    }
    return json.dumps(payload, ensure_ascii=False, indent=4, separators=(',', ': '))
def save_json(data, filename, title):
    """Serialize *data* with json_formatter and write it under JSON_PATH,
    both as plain UTF-8 text and as a gzipped copy (<filename>.gz).

    File handles are now managed with context managers; the original left
    them open if gzip compression raised.
    """
    file_path = os.path.join(JSON_PATH, filename)
    file_gz_path = os.path.join(JSON_PATH, filename + ".gz")
    data_json = json_formatter(data, title)
    with io.open(file_path, 'w', encoding='utf-8') as f:
        f.write(data_json)
    # Re-read the written UTF-8 bytes and compress them line-by-line.
    with open(file_path, 'rb') as f_in, gzip.open(file_gz_path, 'w') as f_out:
        f_out.writelines(f_in)
class Chain(object):
    """Markov-style chain mapping each item to the list of observed successors.

    None acts as both the start marker (successors of None are the possible
    first items) and the terminator (a generated None ends a series).
    """

    def __init__(self):
        self.chain = {}

    def build_link(self, current_item, next_item):
        """Record that *next_item* was observed immediately after *current_item*."""
        self.chain.setdefault(current_item, []).append(next_item)

    def generate_item(self, current_item):
        """Pick a uniformly random recorded successor of *current_item*."""
        return random.choice(self.chain[current_item])

    def generate_series(self):
        """Random walk from the start marker until a terminator is drawn.

        Returns the list of items visited (terminator excluded).
        """
        series = []
        item = self.generate_item(None)
        while item is not None:
            series.append(item)
            item = self.generate_item(item)
        return series
def build_one_letter_chain(list_of_words):
    """Build a character-level Chain from *list_of_words*.

    For each word: None -> first char, each char -> its successor, and
    last char -> None (end-of-word marker).
    """
    chain = Chain()
    for word in list_of_words:
        chain.build_link(None, word[0])
        # Link each character to the one following it; zip naturally stops
        # before the final character (no try/except needed).
        for current_char, next_char in zip(word, word[1:]):
            chain.build_link(current_char, next_char)
        chain.build_link(word[-1], None)
    return chain
def generate_first_name_json():
    """Build one chain from all (deduplicated) first names and write it out."""
    first_names = []
    first_names.extend(NAMES['first_names']['feminine'])
    first_names.extend(NAMES['first_names']['masculine'])
    first_names = list(set(first_names))
    chain = build_one_letter_chain(first_names)
    save_json(chain.chain, "first_name_chain.json", "first_name_chain")
def generate_gendered_first_name_json(gender):
    """Build and write a chain for one gender key ('feminine' or 'masculine')."""
    first_names = NAMES['first_names'][gender]
    first_names = list(set(first_names))
    chain = build_one_letter_chain(first_names)
    title = gender + "_first_name_chain"
    save_json(chain.chain, title + ".json", title)
def generate_last_name_json():
    """Build and write a chain from the (deduplicated) last names."""
    last_names = NAMES['last_names']
    last_names = list(set(last_names))
    chain = build_one_letter_chain(last_names)
    save_json(chain.chain, "last_name_chain.json", "last_name_chain")
# Script body: generate all four chain files on import/run.
generate_first_name_json()
generate_gendered_first_name_json('feminine')
generate_gendered_first_name_json('masculine')
generate_last_name_json()
| 2.5 | 2 |
GeoDjango/geodjango/points/models.py | lmrissi/piloto_geodjango | 0 | 12764125 | <gh_stars>0
# This is an auto-generated Django model module created by ogrinspect.
from django.contrib.gis.db import models
from django.template.defaultfilters import date
class pocos(models.Model):
    """Well ("poço") registry point feature (originally generated by ogrinspect)."""
    proprietario = models.CharField("proprietario", max_length=254)
    orgao = models.CharField("orgao", max_length=254)
    data_perfuracao = models.DateField("data_perfuracao")
    profundidade = models.FloatField("profundidade")
    q_m3h = models.FloatField("q_m3h")
    equipamento = models.CharField("equipamento", max_length=254)
    geom = models.PointField("geom", srid=4326)
    def __str__(self):
        return self.proprietario
    @property
    def popup_content(self):
        """HTML snippet for the map popup.

        Fixes over the original: the depth and flow-rate values were shown
        under each other's labels, and the imported `date` template filter
        was never called (the old f-string rendered a `(value, 'd/m/Y')`
        tuple instead of a formatted date).
        """
        popup = f'<span>Proprietario:{self.proprietario} </span>'
        popup += f'<span>Órgão:{self.orgao} </span>'
        popup += f'<span>Profundidade (m):{self.profundidade} </span>'
        popup += f'<span>Q (m³/h):{self.q_m3h} </span>'
        popup += f'<span>Equipamento:{self.equipamento} </span>'
        popup += f"<span>Data de Perfuração:{date(self.data_perfuracao, 'd/m/Y')} </span>"
        return popup
modules/drive/messages.py | xochilt/cousebuilder | 0 | 12764126 | <filename>modules/drive/messages.py<gh_stars>0
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Help text and other strings used in the Drive module. """
__author__ = [
    '<EMAIL> (<NAME>)',
]
# --- Service-account configuration strings ---
SERVICE_ACCOUNT_JSON_DESCRIPTION = """
Create a service account in Google App Engine and paste the JSON here.
"""
SERVICE_ACCOUNT_JSON_PARSE_FAILURE = """
The JSON is invalid. Make sure you copy the whole thing including any curly
braces. Do not use the P12 format."""
SERVICE_ACCOUNT_JSON_MISSING_FIELDS = """
The JSON is valid but it doesn't look like a service account key. Try creating a
new "Service account key" in the Credentials section of the developer console.
You can only download this JSON when you first create the key. You can't use
the "Download JSON" button later as this will not include the key."""
# --- Sync settings help text ---
SYNC_FREQUENCY_DESCRIPTION = """
The document will be checked for changes in Google Drive this often.
"""
AVAILABILITY_DESCRIPTION = """
Synced items default to the availability of the course, but may also be
restricted to admins (Private) or open to the public (Public).
"""
# --- Sharing / API error messages ---
SHARE_PERMISSION_ERROR = """
You do not have permission to share this file.
"""
SHARE_UNKNOWN_ERROR = """
An unknown error occurred when sharing this file. Check your Drive or Google
API configuration or try again.
"""
SHARE_META_ERROR = """
File shared, but Drive API failed to fetch metadata. Please try again or check
your Drive configuration.
"""
TIMEOUT_ERROR = """
Google Drive timed out. Please try again.
"""
how_tos/how_to_invoke_a_contract.py | casper-network/pycspr | 11 | 12764127 | import argparse
import os
import pathlib
import typing
import pycspr
from pycspr import NodeClient
from pycspr import NodeConnection
from pycspr.crypto import KeyAlgorithm
from pycspr.types import CL_ByteArray
from pycspr.types import CL_U256
from pycspr.types import Deploy
from pycspr.types import DeployParameters
from pycspr.types import ModuleBytes
from pycspr.types import PrivateKey
from pycspr.types import PublicKey
from pycspr.types import StoredContractByHash
# Path to NCTL network assets.
_PATH_TO_NCTL_ASSETS = pathlib.Path(os.getenv("NCTL")) / "assets" / "net-1"
# CLI argument parser.
_ARGS = argparse.ArgumentParser("Demo illustrating how to install an ERC-20 smart contract.")
# CLI argument: path to contract operator secret key - defaults to NCTL faucet.
_ARGS.add_argument(
"--operator-secret-key-path",
default=_PATH_TO_NCTL_ASSETS / "faucet" / "secret_key.pem",
dest="path_to_operator_secret_key",
help="Path to operator's secret_key.pem file.",
type=str,
)
# CLI argument: type of contract operator secret key - defaults to ED25519.
_ARGS.add_argument(
"--operator-secret-key-type",
default=KeyAlgorithm.ED25519.name,
dest="type_of_operator_secret_key",
help="Type of operator's secret key.",
type=str,
)
# CLI argument: path to user to whom tokens will be transferred - defaults to NCTL user 1.
_ARGS.add_argument(
"--user-public-key-path",
default=_PATH_TO_NCTL_ASSETS / "users" / "user-1" / "public_key_hex",
dest="path_to_user_public_key",
help="Path to user's public_key_hex file.",
type=str,
)
# CLI argument: name of target chain - defaults to NCTL chain.
_ARGS.add_argument(
"--chain",
default="casper-net-1",
dest="chain_name",
help="Name of target chain.",
type=str,
)
# CLI argument: amount in motes to be offered as payment.
_ARGS.add_argument(
"--payment",
default=int(1e9),
dest="deploy_payment",
help="Amount in motes to be offered as payment.",
type=int,
)
# CLI argument: host address of target node - defaults to NCTL node 1.
_ARGS.add_argument(
"--node-host",
default="localhost",
dest="node_host",
help="Host address of target node.",
type=str,
)
# CLI argument: Node API JSON-RPC port - defaults to 11101 @ NCTL node 1.
_ARGS.add_argument(
"--node-port-rpc",
default=11101,
dest="node_port_rpc",
help="Node API JSON-RPC port. Typically 7777 on most nodes.",
type=int,
)
# CLI argument: amount of ERC-20 tokens to be transferred to user..
_ARGS.add_argument(
"--amount",
default=int(2e9),
dest="amount",
help="Amount of ERC-20 tokens to be transferred to user.",
type=int,
)
def _main(args: argparse.Namespace):
    """Main entry point: build, sign and dispatch an ERC-20 `transfer` deploy.

    :param args: Parsed command line arguments.
    """
    # Set node client.
    client: NodeClient = _get_client(args)

    # Set contract operator / user key pair.
    operator, user = _get_operator_and_user_keys(args)

    # Set contract hash (raises if the ERC-20 contract is not installed).
    contract_hash: bytes = _get_contract_hash(args, client, operator)

    # Set deploy.
    deploy: Deploy = _get_deploy(args, contract_hash, operator, user)

    # Approve deploy: signs it with the operator's key.
    deploy.approve(operator)

    # Dispatch deploy to a node.
    client.send_deploy(deploy)

    print("-" * 72)
    print(f"Deploy dispatched to node [{args.node_host}]: {deploy.hash.hex()}")
    print("-" * 72)
def _get_client(args: argparse.Namespace) -> NodeClient:
    """Return a pycspr node client bound to the host/port given on the CLI."""
    connection_info = NodeConnection(
        host=args.node_host,
        port_rpc=args.node_port_rpc,
    )
    return NodeClient(connection_info)
def _get_operator_and_user_keys(args: argparse.Namespace) -> typing.Tuple[PrivateKey, PublicKey]:
    """Return (operator private key, target user public key) parsed from disk."""
    return (
        pycspr.parse_private_key(
            args.path_to_operator_secret_key,
            args.type_of_operator_secret_key,
        ),
        pycspr.parse_public_key(args.path_to_user_public_key),
    )
def _get_contract_hash(
    args: argparse.Namespace,
    client: NodeClient,
    operator: PrivateKey
) -> bytes:
    """Return the on-chain ERC-20 contract identifier.

    Looks up the operator account's named key called "ERC20" and decodes
    its value; raises ValueError when the contract has not been installed.
    """
    account_info = client.get_account_info(operator.account_key)
    erc20_keys = (
        named_key["key"]
        for named_key in account_info["named_keys"]
        if named_key["name"] == "ERC20"
    )
    for key_value in erc20_keys:
        # Named-key values carry a 5 character prefix before the hex digest.
        return bytes.fromhex(key_value[5:])
    raise ValueError("ERC-20 uninstalled ... see how_tos/how_to_install_a_contract.py")
def _get_deploy(
    args: argparse.Namespace,
    contract_hash: bytes,
    operator: PrivateKey,
    user: PublicKey
) -> Deploy:
    """Return an unsigned deploy invoking the ERC-20 `transfer` entry point.

    :param args: Parsed command line arguments (chain name, payment, amount).
    :param contract_hash: On-chain identifier of the installed ERC-20 contract.
    :param operator: Account dispatching (and paying for) the deploy.
    :param user: Account receiving the tokens.
    """
    # Set standard deploy parameters.
    params: DeployParameters = pycspr.create_deploy_parameters(
        account=operator,
        chain_name=args.chain_name
        )

    # Set payment logic.
    payment: ModuleBytes = pycspr.create_standard_payment(args.deploy_payment)

    # Set session logic: call `transfer` on the stored contract with the
    # token amount and the recipient's account hash.
    session: StoredContractByHash = StoredContractByHash(
        entry_point="transfer",
        hash=contract_hash,
        args={
            "amount": CL_U256(args.amount),
            "recipient": CL_ByteArray(user.account_hash)
        }
        )

    return pycspr.create_deploy(params, payment, session)
# Entry point: parse CLI arguments and run the demo.
if __name__ == "__main__":
    _main(_ARGS.parse_args())
| 1.921875 | 2 |
systems/views.py | rtucker-mozilla/minventory | 0 | 12764128 | <reponame>rtucker-mozilla/minventory
import csv
import re
import simplejson as json
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import ValidationError
from django.urls import reverse
from django.db import IntegrityError
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, get_object_or_404, render, render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.test.client import RequestFactory
from django.views.generic.list import ListView
from reversion.models import Version
from reversion_compare.mixins import CompareMixin
from middleware.restrict_to_remote import allow_anyone
from systems import models
from systems.models import System, SystemStatus
from systems.forms import SystemForm
# Use this object to generate request objects for calling tastypie views
# (module-level so internal views can fabricate requests without a client).
factory = RequestFactory()
# Source: http://nedbatchelder.com/blog/200712/human_sorting.html
# Author: <NAME>
def tryint(s):
    """Return *s* converted to int, or *s* unchanged when not numeric.

    Used as a chunk converter for natural ("human") sorting.
    """
    try:
        return int(s)
    except (ValueError, TypeError):  # narrowed from a bare except
        return s
def alphanum_key(s):
    """Split *s* into alternating text and integer chunks.

    "z23a" -> ["z", 23, "a"]
    """
    chunks = []
    for piece in re.split('([0-9]+)', s):
        try:
            chunks.append(int(piece))
        except ValueError:
            chunks.append(piece)
    return chunks
def sort_nicely(l):
    """Sort the list *l* in place using natural ("human") ordering."""
    l.sort(key=lambda item: alphanum_key(item))
def parse_title_num(title):
    """Return the trailing '#<num>' suffix of *title* as an int, else 0."""
    tail = title.rsplit('#')[-1]
    try:
        return int(tail)
    except ValueError:
        return 0
def check_dupe_nic(request, system_id, adapter_number):  # pylint: disable=unused-argument
    """Report whether system *system_id* already has adapter *adapter_number*.

    Returns the boolean wrapped in an HttpResponse; any lookup failure is
    treated (best-effort) as "not found".
    """
    # BUG FIX: `found` was previously unset, so a failed lookup raised
    # NameError at the return statement instead of answering False.
    found = False
    try:
        system = models.System.objects.get(id=system_id)
        found = system.check_for_adapter(adapter_number)
    except Exception:  # pylint: disable=broad-except
        pass
    return HttpResponse(found)
def check_dupe_nic_name(request, system_id, adapter_name):  # pylint: disable=unused-argument
    """Report whether system *system_id* already has an adapter named *adapter_name*.

    Returns the boolean wrapped in an HttpResponse; any lookup failure is
    treated (best-effort) as "not found".
    """
    # BUG FIX: `found` was previously unset, so a failed lookup raised
    # NameError at the return statement instead of answering False.
    found = False
    try:
        system = models.System.objects.get(id=system_id)
        found = system.check_for_adapter_name(adapter_name)
    except Exception:  # pylint: disable=broad-except
        pass
    return HttpResponse(found)
@allow_anyone
def system_auto_complete_ajax(request):
    """Autocomplete endpoint: hostnames (and matching ids) for the 'query' GET param."""
    term = request.GET['query']
    matches = models.System.objects.filter(hostname__icontains=term)
    payload = {
        'query': term,
        'suggestions': [match.hostname for match in matches],
        'data': [match.id for match in matches],
    }
    return HttpResponse(json.dumps(payload))
@allow_anyone
def list_all_systems_ajax(request):
    """DataTables server-side endpoint: return paged/sorted/searched system rows
    as the legacy DataTables 1.x JSON envelope (sEcho / aaData)."""
    # iSortCol_0 = which column is sorted
    # sSortDir_0 = which direction
    cols = [
        'hostname',
        'serial',
        'asset_tag',
        'server_model',
        'system_rack',
        'oob_ip',
        'system_status'
    ]
    # Default sort: first column (hostname), ascending.
    sort_col = cols[0]
    if 'iSortCol_0' in request.GET:
        sort_col = cols[int(request.GET['iSortCol_0'])]
    sort_dir = 'asc'
    if 'sSortDir_0' in request.GET:
        sort_dir = request.GET['sSortDir_0']
    # sEcho is echoed back verbatim so the client can match responses.
    # NOTE(review): sEcho is unbound if the parameter is absent — presumably
    # DataTables always sends it; confirm before relying on GET access.
    if 'sEcho' in request.GET:
        sEcho = request.GET['sEcho']
    # The `> ''` comparisons below are non-empty-string checks.
    if 'sSearch' in request.GET and request.GET['sSearch'] > '':
        search_term = request.GET['sSearch']
    else:
        search_term = None
    if 'iDisplayLength' in request.GET and request.GET['iDisplayLength'] > '':
        iDisplayLength = request.GET['iDisplayLength']
    else:
        iDisplayLength = 100
    if 'iDisplayStart' in request.GET\
            and request.GET['iDisplayStart'] > '':
        iDisplayStart = request.GET['iDisplayStart']
    else:
        iDisplayStart = 0

    # Unfiltered listing: slice the full queryset for the requested page.
    if search_term is None:
        end_display = int(iDisplayStart) + int(iDisplayLength)
        system_count = models.System.objects.all().count()
        systems = models.System.objects.all()[int(iDisplayStart):int(end_display)]
        the_data = build_json(
            request,
            systems,
            sEcho,
            system_count,
            iDisplayLength,
            sort_col,
            sort_dir
        )

    # Filtered listing: match against several fields; a leading "/" makes
    # the hostname portion a regex search.
    if search_term is not None and len(search_term) > 0:  # pylint: disable=len-as-condition
        if search_term.startswith('/') and len(search_term) > 1:
            try:
                search_term = search_term[1:]
                search_q = Q(hostname__regex=search_term)
            except:  # pylint: disable=bare-except
                search_q = Q(hostname__icontains=search_term)
        else:
            search_q = Q(hostname__icontains=search_term)
        search_q |= Q(serial__icontains=search_term)
        search_q |= Q(notes__icontains=search_term)
        search_q |= Q(asset_tag=search_term)
        search_q |= Q(oob_ip__icontains=search_term)
        search_q |= Q(keyvalue__value__icontains=search_term)
        try:
            total_count = models.System.with_related\
                .filter(search_q).values('hostname').distinct().count()
        except:  # pylint: disable=bare-except
            total_count = 0
        end_display = int(iDisplayStart) + int(iDisplayLength)
        try:
            # Two-step filter avoids duplicate rows from the keyvalue join.
            systems = models.System.objects.filter(
                pk__in=models.System.with_related\
                    .filter(search_q).values_list('id', flat=True).distinct()
            )[int(iDisplayStart):int(end_display)]
            the_data = build_json(
                request,
                systems,
                sEcho,
                total_count,
                iDisplayLength,
                sort_col,
                sort_dir
            )
        except:  # pylint: disable=bare-except
            # Fall back to an empty result set on any query error.
            the_data = '{"sEcho": %s, "iTotalRecords":0, "iTotalDisplayRecords":0, "aaData":[]}' % (sEcho)  # pylint: disable=line-too-long
    return HttpResponse(the_data)
def build_json(request, systems, sEcho, total_records, display_count, sort_col, sort_dir):
    """Serialize *systems* into a DataTables 1.x JSON string (built by hand,
    not via json.dumps).

    :param systems: iterable of System rows for the current page.
    :param sEcho: DataTables request token, echoed back verbatim.
    :param total_records: total match count reported to the client.
    :param display_count: maximum number of rows to emit.
    :param sort_col: key of the column to sort rows by.
    :param sort_dir: 'asc' or 'desc'.
    """
    # Flatten each System into a plain dict of display strings.
    system_list = []
    for system in systems:
        if system.serial is not None:
            serial = system.serial.strip()
        else:
            serial = ''
        if system.server_model is not None:
            server_model = str(system.server_model)
        else:
            server_model = ''
        if system.system_rack is not None:
            system_rack = "%s - %s" % (str(system.system_rack), system.rack_order)
            system_rack_id = str(system.system_rack.id)
        else:
            system_rack = ''
            system_rack_id = ''
        if system.system_status is not None:
            system_status = str(system.system_status)
        else:
            system_status = ''
        if system.asset_tag is not None:
            asset_tag = system.asset_tag.strip()
        else:
            asset_tag = ''
        if system.oob_ip is not None:
            oob_ip = system.oob_ip.strip()
        else:
            oob_ip = ''
        # Read-only requests get id 0 so the UI renders no edit links.
        ro = getattr(request, 'read_only', False)
        if ro:
            system_id = 0
        else:
            system_id = system.id
        system_list.append(
            {
                'hostname': system.hostname.strip(),
                'oob_ip': oob_ip,
                'serial': serial,
                'asset_tag': asset_tag,
                'server_model': server_model,
                'system_rack': system_rack,
                'system_status': system_status,
                'id': system_id,
                'system_rack_id': system_rack_id
            }
        )
    # Default payload when there is nothing to emit.
    the_data = '{"sEcho": %s, "iTotalRecords":0, "iTotalDisplayRecords":0, "aaData":[]}' % (sEcho)
    #try:
    if system_list:
        # Natural-sort the page rows by the requested column.
        system_list.sort(key=lambda x: alphanum_key(x[sort_col]))
        if sort_dir == 'desc':
            system_list.reverse()
        the_data = '{"sEcho": %s, "iTotalRecords":%i, "iTotalDisplayRecords":%i, "aaData":[' % (
            sEcho,
            total_records,
            total_records
        )
        counter = 0
        for system in system_list:
            # Emit at most display_count rows.
            if int(counter) < int(display_count):
                the_data += '["%i,%s","%s","%s","%s","%s,%s", "%s", "%s", "%i"],' % (
                    system['id'],
                    system['hostname'],
                    system['serial'],
                    system['asset_tag'],
                    system['server_model'],
                    system['system_rack_id'],
                    system['system_rack'],
                    system['oob_ip'],
                    system['system_status'],
                    system['id']
                )
                counter += 1
            else:
                counter = display_count
        # Drop the trailing comma before closing the array.
        the_data = the_data[:-1]
        the_data += ']}'
    return the_data
#@ldap_group_required('build')
#@LdapGroupRequired('build_team', exclusive=False)
@allow_anyone
def home(request):  # pylint: disable=unused-argument
    """Render the landing (index) page."""
    context = {'read_only': False}
    return render_to_response('systems/index.html', context)
@allow_anyone
def system_quicksearch_ajax(request):
    """Quick-search across hostname/serial/notes/asset tag; renders an HTML
    fragment, or raw JSON when the POST carries 'is_test'."""
    search_term = request.POST['quicksearch']
    search_q = Q(hostname__icontains=search_term)
    search_q |= Q(serial__contains=search_term)
    search_q |= Q(notes__contains=search_term)
    search_q |= Q(asset_tag=search_term)
    systems = models.System.with_related.filter(search_q).order_by('hostname')
    if 'is_test' not in request.POST:
        return render_to_response('systems/quicksearch.html', {
            'systems': systems,
            'read_only': getattr(request, 'read_only', False),
        }, RequestContext(request))
    else:
        # Test hook: return the raw serialized queryset instead of HTML.
        from django.core import serializers
        systems_data = serializers.serialize("json", systems)
        return HttpResponse(systems_data)
def get_key_value_store(request, a_id):
    """Render the key/value table for system *a_id*."""
    target = models.System.objects.get(id=a_id)
    pairs = models.KeyValue.objects.filter(obj=target)
    context = {'key_value_store': pairs}
    return render_to_response('systems/key_value_store.html', context,
                              RequestContext(request))
def delete_key_value(request, a_id, system_id):
    """Delete KeyValue *a_id*; if it was a NIC key, queue its DHCP scope for
    regeneration, then re-render system *system_id*'s key/value table."""
    kv = models.KeyValue.objects.get(id=a_id)
    matches = re.search(r'^nic\.(\d+)', str(kv.key))
    if matches:
        try:
            existing_dhcp_scope = models.KeyValue.objects.filter(obj=kv.system)\
                .filter(key='nic.%s.dhcp_scope.0' % matches.group(1))[0].value
            models.ScheduledTask(task=existing_dhcp_scope, type='dhcp').save()
        except Exception:  # pylint: disable=broad-except
            # Best-effort: no DHCP scope recorded for this adapter.
            # (Narrowed from a bare except so ^C/SystemExit still propagate.)
            pass
    kv.delete()
    system = models.System.objects.get(id=system_id)
    key_value_store = models.KeyValue.objects.filter(obj=system)
    return render_to_response('systems/key_value_store.html', {
        'key_value_store': key_value_store,
    }, RequestContext(request))
@csrf_exempt
def save_key_value(request, a_id):
    """Update KeyValue *a_id* from POSTed key/value, scheduling DHCP / reverse
    DNS zone regeneration for any affected network adapter keys.

    Returns a JSON payload: {'success': bool, 'errorMessage': str}.
    """
    validated = True
    resp = {'success': True, 'errorMessage' : ''}
    post_key = request.POST.get('key').strip()
    post_value = request.POST.get('value').strip()
    try:
        tmp = models.KeyValue.objects.get(id=a_id)
        system = tmp.system
    except Exception as exc:  # pylint: disable=broad-except
        print(exc)
    # This is probably actually an issue but this code never gets called
    # NOTE(review): KeyValueACL is not imported anywhere in this module —
    # this line raises NameError if reached; confirm before enabling.
    acl = KeyValueACL(request)  # pylint: disable=bad-option-value,undefined-variable
    if post_key == 'shouldfailvalidation':
        resp['success'] = False
        resp['errorMessage'] = 'Validation Failed'
        validated = False
    # NOTE(review): `id` here is the *builtin* function, not the `a_id`
    # parameter — this lookup cannot succeed as written; verify intent.
    kv = models.KeyValue.objects.get(id=id)
    if kv is not None and validated:
        # Here we eant to check if the existing key is a network adapter.
        # If so we want to find out if it has a dhcp scope.
        # If so then we want to add it to ScheduledTasks so that the dhcp file gets regenerated
        matches = re.search(r'^nic\.(\d+)', str(kv.key).strip())
        # Check to see if we have a network adapter.
        # If so we need to flag the dhcp zone file to be regenerated.
        if matches and matches.group(1):
            # Check to see if it's an ipv4_address key;
            # run KeyValueACL.check_ip_not_exist_other_system.
            if re.search(r'^nic\.(\d+)\.ipv4_address', str(post_key).strip()):
                try:
                    acl.check_ip_not_exist_other_system(system, post_value)
                except Exception as exc:  # pylint: disable=broad-except
                    resp['success'] = False
                    resp['errorMessage'] = str(exc)
                    return HttpResponse(json.dumps(resp))
            # Queue DHCP regeneration for the adapter's existing scope.
            try:
                existing_dhcp_scope = models.KeyValue.objects.filter(obj=kv.system)\
                    .filter(key='nic.%s.dhcp_scope.0' % matches.group(1))[0].value
                if existing_dhcp_scope is not None:
                    models.ScheduledTask(task=existing_dhcp_scope, type='dhcp').save()
            except Exception:  # pylint: disable=broad-except
                pass
            # Queue reverse-DNS zone regeneration, if one is recorded.
            try:
                existing_reverse_dns_zone = models.KeyValue.objects\
                    .filter(obj=kv.system)\
                    .filter(key='nic.%s.reverse_dns_zone.0' % matches.group(1))[0].value
                if existing_reverse_dns_zone is not None:
                    models.ScheduledTask(
                        task=existing_reverse_dns_zone,
                        type='reverse_dns_zone'
                    ).save()
            except Exception:  # pylint: disable=broad-except
                pass
        # Persist the new key/value; on failure blank both so the
        # post-save adapter check below is skipped.
        try:
            kv.key = request.POST.get('key').strip()
            kv.value = request.POST.get('value').strip()
            kv.save()
        except:  # pylint: disable=bare-except
            kv.key = None
            kv.value = None
        # Here we eant to check if the new key is a network adapter.
        # If so we want to find out if it has a dhcp scope.
        # If so then we want to add it to ScheduledTasks so that the dhcp file gets regenerated
        if kv.key is not None:
            matches = re.search(r'nic\.(\d+)', kv.key)
            if matches and matches.group(1):
                new_dhcp_scope = None
                new_reverse_dns_zone = None
                try:
                    new_dhcp_scope = models.KeyValue.objects\
                        .filter(obj=kv.system)\
                        .filter(key='nic.%s.dhcp_scope.0' % matches.group(1))[0].value
                except:  # pylint: disable=bare-except
                    pass
                try:
                    new_reverse_dns_zone = models.KeyValue.objects\
                        .filter(obj=kv.system)\
                        .filter(key='nic.%s.reverse_dns_zone.0' % matches.group(1))[0].value
                except:  # pylint: disable=bare-except
                    pass
                if new_dhcp_scope is not None:
                    try:
                        models.ScheduledTask(task=new_dhcp_scope, type='dhcp').save()
                    except Exception as exc:  # pylint: disable=broad-except
                        print(exc)
                if new_reverse_dns_zone is not None:
                    try:
                        models.ScheduledTask(
                            task=new_reverse_dns_zone,
                            type='reverse_dns_zone'
                        ).save()
                    except:  # pylint: disable=bare-except
                        pass
    return HttpResponse(json.dumps(resp))
#return HttpResponseRedirect('/en-US/systems/get_key_value_store/' + system_id + '/')
@csrf_exempt
def create_key_value(request, a_id):
    """Create a KeyValue on system *a_id* from POSTed key/value (defaulting
    both to 'None'), queue DHCP regeneration for NIC keys, and re-render the
    key/value table."""
    system = models.System.objects.get(id=a_id)
    key = 'None'
    value = 'None'
    if 'key' in request.POST:
        key = request.POST['key'].strip()
    if 'value' in request.POST:
        value = request.POST['value'].strip()
    kv = models.KeyValue(obj=system, key=key, value=value)
    # NOTE(review): debug leftover — writes to stdout on every create.
    print("Key is %s: Value is %s." % (key, value))
    kv.save()
    # NIC keys: schedule the adapter's DHCP scope for regeneration.
    matches = re.search(r'^nic\.(\d+)', str(kv.key))
    if matches:
        try:
            existing_dhcp_scope = models.KeyValue.objects\
                .filter(obj=kv.system)\
                .filter(key='nic.%s.dhcp_scope.0' % matches.group(1))[0].value
            models.ScheduledTask(task=existing_dhcp_scope, type='dhcp').save()
        except:  # pylint: disable=bare-except
            pass
    key_value_store = models.KeyValue.objects.filter(obj=system)
    return render_to_response('systems/key_value_store.html', {
        'key_value_store': key_value_store,
    }, RequestContext(request))
def get_network_adapters(request, a_id):
    """Render the network-adapter table for system *a_id*."""
    context = {
        'adapters': models.NetworkAdapter.objects.filter(system_id=a_id),
        'switches': models.System.objects.filter(is_switch=1),
        'dhcp_scopes': models.DHCP.objects.all(),
    }
    return render_to_response('systems/network_adapters.html', context,
                              RequestContext(request))
def delete_network_adapter(request, a_id, system_id):
    """Delete adapter *a_id*, then re-render system *system_id*'s adapter table."""
    models.NetworkAdapter.objects.get(id=a_id).delete()
    context = {
        'adapters': models.NetworkAdapter.objects.filter(system_id=system_id),
        'dhcp_scopes': models.DHCP.objects.all(),
        'switches': models.System.objects.filter(is_switch=1),
    }
    return render_to_response('systems/network_adapters.html', context,
                              RequestContext(request))
def create_network_adapter(request, a_id):
    """Create an empty adapter on system *a_id* and re-render its adapter table."""
    models.NetworkAdapter(system_id=a_id).save()
    context = {
        'adapters': models.NetworkAdapter.objects.filter(system_id=a_id),
        'dhcp_scopes': models.DHCP.objects.all(),
        'switches': models.System.objects.filter(is_switch=1),
    }
    return render_to_response('systems/network_adapters.html', context,
                              RequestContext(request))
def save_network_adapter(request, a_id):
    """Persist POSTed adapter fields onto NetworkAdapter *a_id*, normalising
    the MAC address, then redirect back to the adapter list."""
    nic = models.NetworkAdapter.objects.get(id=a_id)
    if nic is not None:
        # Strip common separators, then regroup into aa:bb:cc:dd:ee:ff.
        mac = request.POST['mac_address']
        mac = mac.replace(':', '').replace(' ', '').replace('.', '')
        nic.mac_address = ':'.join(mac[i:i + 2] for i in range(0, 12, 2))
        nic.dhcp_scope_id = request.POST['dhcp_scope_id']
        nic.ip_address = request.POST['ip_address']
        nic.filename = request.POST['filename']
        nic.option_host_name = request.POST['option_host_name']
        nic.option_domain_name = request.POST['option_domain_name']
        nic.adapter_name = request.POST['adapter_name']
        if request.POST['switch_id']:
            nic.switch_id = request.POST['switch_id']
        else:
            nic.switch_id = None
        nic.switch_port = request.POST['switch_port']
        nic.save()
    # BUG FIX: previously concatenated the *builtin* `id` function
    # ('...adapters/' + id), which raised TypeError on every request.
    return HttpResponseRedirect('/systems/get_network_adapters/' + str(a_id))
def sync_external_data_ajax(request):
    """Copy one externally-sourced attribute value onto a System.

    Expects POST params: attr (attribute name), source (external data source),
    system_pk. Returns JSON {'conflict-seen', 'new-value'}, or a 400 JSON
    error payload when parameters are missing or nothing matches.
    """
    attr, source, system_pk = (
        request.POST.get('attr', None),
        request.POST.get('source', None),
        request.POST.get('system_pk', None)
    )
    if not (attr and source and system_pk):
        return HttpResponse(json.dumps({
            'error': "attr, source, and system_pk are required"
        }), status=400)
    system = get_object_or_404(models.System, pk=system_pk)
    if not hasattr(system, attr):
        return HttpResponse(json.dumps({
            'error': "System has no attribute {0}".format(attr)
        }), status=400)
    try:
        ed = system.externaldata_set.get(source=source, name=attr)
    except system.externaldata_set.model.DoesNotExist:
        return HttpResponse(
            json.dumps(
                {
                    'error': "System {0} has no external attribute '{1}' for source '{2}'".format(
                        system.hostname, attr, source
                    )
                }
            ), status=400)
    # Record whether the external value disagreed with the local one
    # before overwriting, so the UI can report it.
    conflict_seen = system.external_data_conflict(attr)
    cur_value = getattr(system, attr)
    # Preserve a leading "ssh " prefix on out-of-band addresses.
    if attr == 'oob_ip' and cur_value.strip().startswith('ssh'):
        new_value = 'ssh ' + ed.data
    else:
        new_value = ed.data
    setattr(system, attr, new_value)
    system.save(request=request)
    return HttpResponse(json.dumps({
        'conflict-seen': conflict_seen,
        'new-value': new_value
    }))
@allow_anyone
def system_show(request, a_id):
    """Render the detail page for system *a_id*."""
    system = get_object_or_404(models.System, pk=a_id)
    # Notes are stored as plain text; render newlines as HTML breaks.
    if system.notes:
        system.notes = system.notes.replace("\n", "<br />")
    show_nics_in_key_value = False
    is_release = False
    # HP boxes with a part number get a direct warranty-lookup link.
    if (system.serial and
            system.server_model and
            system.server_model.part_number and
            system.server_model.vendor == "HP"):
        system.warranty_link = "http://www11.itrc.hp.com/service/ewarranty/warrantyResults.do?productNumber=%s&serialNumber1=%s&country=US" % (system.server_model.part_number, system.serial)  # noqa pylint: disable=line-too-long
    if show_nics_in_key_value:
        key_values = system.keyvalue_set.all()
    else:
        # Hide the nic.* bookkeeping keys from the generic key/value table.
        key_values = system.keyvalue_set.exclude(key__istartswith='nic.')
    # NOTE(review): sregs/groups are always empty here — the loop below is
    # effectively dead; presumably a leftover from a DNS integration.
    sregs = []
    groups = []
    object_search_str = "(/^{0}$".format(system)
    for sreg in filter(lambda sreg: not sreg.decommissioned, sregs):
        object_search_str += " OR /^{0}$".format(sreg.fqdn)
        object_search_str += " OR /^{0}$".format(sreg.ip_str)
    object_search_str += " ) AND !type=:sreg AND !type=:sys"
    return render(request, 'systems/system_show.html', {
        'system': system,
        'object_search_str': object_search_str,
        'sregs': sregs,
        'groups': groups,
        'key_values': key_values,
        'is_release': is_release,
        'read_only': getattr(request, 'read_only', False),
    })
@allow_anyone
def system_show_by_asset_tag(request, a_id):
    """Render the system detail page, looked up by asset tag instead of pk."""
    box = get_object_or_404(models.System, asset_tag=a_id)
    is_hp = (box.serial and
             box.server_model and
             box.server_model.part_number and
             box.server_model.vendor == "HP")
    if is_hp:
        # HP boxes get a direct warranty-lookup link.
        box.warranty_link = "http://www11.itrc.hp.com/service/ewarranty/warrantyResults.do?productNumber=%s&serialNumber1=%s&country=US" % (box.server_model.part_number, box.serial)  # pylint: disable=line-too-long
    context = {
        'system': box,
        'is_release': True,
        'read_only': getattr(request, 'read_only', False),
    }
    return render_to_response('systems/system_show.html', context,
                              RequestContext(request))
def system_view(request, template, data, instance=None):
    """Shared create/edit handler.

    On a valid POST, saves the SystemForm and redirects to the detail page;
    otherwise renders *template* with the (possibly bound) form in *data*.
    """
    if request.method != 'POST':
        data['form'] = SystemForm(instance=instance)
        return render_to_response(template, data, RequestContext(request))
    form = SystemForm(request.POST, instance=instance)
    if form.is_valid():
        saved = form.save(commit=False)
        saved.save(request=request)
        return redirect(system_show, saved.pk)
    data['form'] = form
    return render_to_response(template, data, RequestContext(request))
@csrf_exempt
def system_new(request):
    """Create a new system via the shared system_view handler."""
    return system_view(request, 'systems/system_new.html', dict())
@csrf_exempt
def system_edit(request, a_id):
    """Edit an existing system; the page also shows its reversion history."""
    target = get_object_or_404(models.System, pk=a_id)
    history = Version.objects.get_for_object(target)
    context = {
        'system': target,
        'revision_history': history,
    }
    return system_view(request, 'systems/system_edit.html', context, target)
def system_delete(request, a_id):
    """Delete system *a_id*, refusing while KeyValue rows are still attached.

    Renders an explanatory page on refusal or IntegrityError; redirects to
    the index on success.
    """
    system = get_object_or_404(models.System, pk=a_id)
    try:
        # count() instead of len(...all()): avoids materialising every row.
        kv_length = system.keyvalue_set.count()
    except AttributeError:
        kv_length = 0
    if kv_length > 0:
        # Key/value rows must be removed first; point the user at them.
        link = '/core/keyvalue/keyvalue/{id}'.format(id=system.id)
        content = """Unable to Delete system. <br />
        Please <a href="{link}">Delete Key/Value Entries</a>
        """.format(link=link)
        return render_to_response(
            'systems/generic_output.html',
            {
                'system': system,
                'content': content,
            },
            RequestContext(request))
    try:
        system.delete()
    except IntegrityError as exc:
        content = "Unable to Delete system: {message}".format(message=exc)
        return render_to_response(
            'systems/generic_output.html',
            {
                'system': system,
                'content': content,
            },
            RequestContext(request))
    return redirect(home)
def system_csv(request):  # pylint: disable=unused-argument
    """Return every system as a CSV attachment (one row per system)."""
    systems = models.System.objects.all().order_by('hostname')
    # BUG FIX: HttpResponse(mimetype=...) was removed in Django >= 1.7;
    # the keyword is now content_type.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=systems.csv'
    writer = csv.writer(response)
    writer.writerow(
        [
            'Host Name',
            'Serial',
            'Asset Tag',
            'Model',
            'Allocation',
            'Rack',
            'Switch Ports',
            'OOB IP'
        ]
    )
    for s in systems:
        # BUG FIX: the old success path wrote only 7 fields against the
        # 8-column header (Allocation was missing), misaligning every row;
        # getattr also replaces the bare-except fallback for systems
        # without an allocation attribute.
        writer.writerow(
            [
                s.hostname,
                s.serial,
                s.asset_tag,
                s.server_model,
                getattr(s, 'allocation', ''),
                s.system_rack,
                s.switch_ports,
                s.oob_ip
            ]
        )
    return response
def get_expanded_key_value_store(request, system_id):
    """Return the expanded key/value store for a system via the tastypie
    keyvalue API, as an HTML-ish string ('<br />' after each comma).

    NOTE(review): KeyValueHandler is not imported in this module, so the
    try block always fails and the view returns 'This failed' — presumably
    dead code from an older API layer; confirm before relying on it.
    """
    try:
        system = models.System.objects.get(id=system_id)
        # Fabricate an internal request against the keyvalue API.
        request = factory.get(
            '/api/v2/keyvalue/3/',
            {
                'key_type': 'adapters_by_system',
                'system': system.hostname
            }
        )
        h = KeyValueHandler()
        request = factory.get('/api/keyvalue/?keystore=%s' % (system.hostname), follow=True)
        resp = json.dumps(h.read(request, key_value_id='3'))
        return_obj = resp.replace(",", ",<br />")
    except:  # pylint: disable=bare-except
        return_obj = 'This failed'
    return HttpResponse(return_obj)
def new_rack_system_ajax(request, rack_id):
    """Add a system to rack *rack_id* via AJAX.

    Returns JSON: {'success': bool, 'payload': rendered HTML} — a form
    partial normally, or a table-row partial after a successful save.
    """
    from .forms import RackSystemForm
    rack = get_object_or_404(models.SystemRack, pk=rack_id)
    data = {}
    resp_data = {}
    template = 'systems/rack_form_partial.html'
    if request.method == 'POST':
        rack_form = RackSystemForm(request.POST)
        if rack_form.is_valid():
            new_system = rack_form.save(commit=False)
            new_system.system_rack = rack
            new_system.save()
            data['system'] = new_system
            resp_data['success'] = True
            # On success the client swaps in a new table row, not the form.
            template = 'systems/rack_row_partial.html'
        else:
            resp_data['success'] = False
    else:
        rack_form = RackSystemForm()
    data['form'] = rack_form
    data['rack'] = rack
    resp_data['payload'] = render_to_string(template, data, RequestContext(request)).strip(' ')
    # BUG FIX: HttpResponse(mimetype=...) was removed in Django >= 1.7;
    # the keyword is now content_type.
    return HttpResponse(json.dumps(resp_data), content_type="application/json")
@allow_anyone
def racks_by_site(request, site_pk=0):  # pylint: disable=unused-argument
    """JSON list of racks ({'name', 'id'}), optionally restricted to one site."""
    if int(site_pk) > 0:
        site = models.Site.objects.get(id=site_pk)
        rack_qs = (models.SystemRack.objects
                   .select_related('site')
                   .filter(site=site)
                   .order_by('name'))
    else:
        rack_qs = models.SystemRack.objects.select_related('site').order_by('site', 'name')
    payload = [
        {'name': '%s %s' % (r.site.full_name if r.site else '', r.name), 'id': r.id}
        for r in rack_qs
    ]
    return HttpResponse(json.dumps(payload))
@allow_anyone
def racks(request):
    """Rack browser: filter racks by site/rack/status and render each rack
    with its systems. Renders an empty list until a filter is applied."""
    from systems.forms import RackFilterForm
    filter_form = RackFilterForm(request.GET)
    l_racks = models.SystemRack.objects.select_related('site')
    system_query = Q()
    if 'site' in request.GET:
        site_id = request.GET['site']
        has_query = True
        if site_id and int(site_id) > 0:
            # Narrow the rack dropdown to the selected site's racks.
            site = models.Site.objects.get(id=site_id)
            filter_form.fields['rack'].choices = [('', 'ALL')] + [
                (m.id, m.site.full_name + ' ' + m.name)
                for m in models.SystemRack.objects.filter(site=site).order_by('name')
            ]
    else:
        has_query = False
    if filter_form.is_valid():
        if filter_form.cleaned_data['rack']:
            l_racks = l_racks.filter(id=filter_form.cleaned_data['rack'])
            has_query = True
        if filter_form.cleaned_data['site'] and int(filter_form.cleaned_data['site']) > 0:
            l_racks = l_racks.filter(site=filter_form.cleaned_data['site'])
            has_query = True
        filter_status = filter_form.cleaned_data['status']
        if filter_status:
            system_query &= Q(system_status=filter_form.cleaned_data['status'])
            has_query = True
        if not filter_form.cleaned_data['show_decommissioned']:
            decommissioned = models.SystemStatus.objects.get(status='decommissioned')
            system_query = system_query & ~Q(system_status=decommissioned)
    ##Here we create an object to hold decommissioned systems for the following filter
    if not has_query:
        # No filter applied: show nothing rather than every rack.
        l_racks = []
    else:
        # Pair each rack with its (filtered, top-down ordered) systems.
        l_racks = [(k, list(k.system_set.select_related(
            'server_model',
            'system_status',
        ).filter(system_query).order_by('-rack_order'))) for k in l_racks]
    return render_to_response('systems/racks.html', {
        'racks': l_racks,
        'filter_form': filter_form,
        'read_only': getattr(request, 'read_only', False),
    }, RequestContext(request))
class OperatingSystemDeleteView(DeleteView):
    """Confirm-and-delete view for OperatingSystem records."""
    model = models.OperatingSystem
    template_name = "generic_delete.html"
    fields = '__all__'

    def get_success_url(self):
        # Return to the OS list after a successful delete.
        return reverse("operatingsystem-list")
class OperatingSystemCreateView(CreateView):
    """Create form for OperatingSystem records."""
    model = models.OperatingSystem
    template_name = "systems/generic_form.html"
    fields = '__all__'

    def get_success_url(self):
        # Return to the OS list after a successful create.
        return reverse("operatingsystem-list")
class OperatingSystemEditView(UpdateView):
    """Edit form for OperatingSystem records."""
    model = models.OperatingSystem
    template_name = "systems/generic_form.html"
    fields = '__all__'

    def get_success_url(self):
        # Return to the OS list after a successful update.
        return reverse("operatingsystem-list")
class OperatingSystemListView(ListView):
    """List view of all OperatingSystem records."""
    model = models.OperatingSystem
    template_name = "operating_system_list"
class SystemRevision(CompareMixin, UpdateView):
    """Show one historical reversion Version of a System, diffed against the
    system's current revision; POST restores the shown revision."""
    template_name = "systems/revision_confirm_restore.html"
    model = Version
    fields = '__all__'
    # current_revision is bookkeeping, not real system data — keep it out
    # of the diff.
    compare_exclude = ['current_revision']

    def get_queryset(self):
        # All versions across all objects; pk in the URL selects one.
        self.queryset = Version.objects.all()
        return self.queryset

    def post(self, request, pk=None):  # pylint: disable=arguments-differ
        """Restore revision *pk*: record it on the system, then revert."""
        version = Version.objects.get(pk=pk)
        system = System.objects.get(pk=version.object_id)
        system.current_revision = pk
        system.save()
        version.revision.revert()
        return HttpResponseRedirect("/systems/show/{}/".format(version.object.id))

    def get_context_data(self, **kwargs):
        """Add the selected revision, the current one, and their field diff."""
        context = super().get_context_data(**kwargs)
        _id = self.kwargs['pk']
        version = Version.objects.get(pk=_id)
        system = System.objects.get(pk=version.object_id)
        if system.current_revision > 0:
            current = Version.objects.get(pk=system.current_revision)
        else:
            # No explicit current revision recorded: use the latest one.
            current = Version.objects.get_for_object(version.object).last()
        context['revision'] = version
        context['current'] = current
        compare = self.compare(version.object, current, version)[0]
        context['compare'] = compare
        return context
def rack_delete(request, object_id):
    """POST deletes rack *object_id*; GET renders a confirmation page."""
    from .models import SystemRack
    rack = get_object_or_404(SystemRack, pk=object_id)
    if request.method != "POST":
        return render_to_response('systems/rack_confirm_delete.html',
                                  {'rack': rack}, RequestContext(request))
    rack.delete()
    return HttpResponseRedirect('/systems/racks/')
def rack_edit(request, object_id):
    """Edit SystemRack *object_id*; redirect to the rack list on success."""
    rack = get_object_or_404(models.SystemRack, pk=object_id)
    from .forms import SystemRackForm
    if request.method != 'POST':
        bound = SystemRackForm(instance=rack)
    else:
        bound = SystemRackForm(request.POST, instance=rack)
        if bound.is_valid():
            bound.save()
            return HttpResponseRedirect('/systems/racks/')
    return render_to_response('systems/generic_form.html',
                              {'form': bound}, RequestContext(request))
def rack_new(request):
    """Create a new SystemRack; redirect to the rack list on success."""
    from .forms import SystemRackForm
    initial = {}
    if request.method != 'POST':
        bound = SystemRackForm(initial=initial)
    else:
        bound = SystemRackForm(request.POST, initial=initial)
        if bound.is_valid():
            bound.save()
            return HttpResponseRedirect('/systems/racks/')
    return render_to_response('generic_form.html',
                              {'form': bound}, RequestContext(request))
def ajax_racks_by_site(request, site_pk):
    """Render the rack fragment for one site (AJAX).

    Passes a `filter_decom` callable into the template so it can exclude
    decommissioned systems from any queryset it builds.
    """
    site = get_object_or_404(models.Site, pk=site_pk)
    decom = SystemStatus.objects.get(status='decommissioned')

    def filter_decom(system_Q):
        # Template helper: drop decommissioned systems from a queryset.
        return system_Q.exclude(system_status=decom)

    return render(request, 'systems/rack_ajax_by_site.html', {
        'racks': site.systemrack_set.all(),
        'site': site,
        'systems': System.objects,
        'filter_decom': filter_decom
    })
def server_model_create(request):
    """Create a ServerModel from a posted form, or render a blank form."""
    from .forms import ServerModelForm
    if request.method != 'POST':
        bound = ServerModelForm()
    else:
        bound = ServerModelForm(request.POST)
        if bound.is_valid():
            bound.save()
            return HttpResponseRedirect('/systems/server_models/')
    return render_to_response('generic_form.html',
                              {'form': bound}, RequestContext(request))
def server_model_edit(request, object_id):
    """Edit ServerModel *object_id*; redirect to the model list on success."""
    server_model = get_object_or_404(models.ServerModel, pk=object_id)
    from systems.forms import ServerModelForm
    if request.method != 'POST':
        bound = ServerModelForm(instance=server_model)
    else:
        bound = ServerModelForm(request.POST, instance=server_model)
        if bound.is_valid():
            bound.save()
            return HttpResponseRedirect('/systems/server_models/')
    return render_to_response('generic_form.html',
                              {'form': bound}, RequestContext(request))
@csrf_exempt
def operating_system_create_ajax(request):
    """AJAX: create an OperatingSystem from POSTed name/version.

    Always answers a POST with the refreshed JSON list (even when the
    expected fields are missing); non-POST requests get a plain "OK".
    """
    if request.method == "POST":
        if 'name' in request.POST and 'version' in request.POST:
            name = request.POST['name']
            version = request.POST['version']
            models.OperatingSystem(name=name, version=version).save()
        return operating_system_list_ajax(request)
    else:
        return HttpResponse("OK")
@csrf_exempt
def server_model_create_ajax(request):
    """AJAX: create a ServerModel from POSTed model/vendor.

    Always answers a POST with the refreshed JSON list; non-POST requests
    get a plain "OK".
    """
    if request.method == "POST":
        if 'model' in request.POST and 'vendor' in request.POST:
            model = request.POST['model']
            vendor = request.POST['vendor']
            models.ServerModel(vendor=vendor, model=model).save()
        return server_model_list_ajax(request)
    else:
        return HttpResponse("OK")
def operating_system_list_ajax(request):
    """Return all operating systems as JSON: [{'id': ..., 'name': 'name - version'}]."""
    ret = []
    for m in models.OperatingSystem.objects.all():
        ret.append({'id': m.id, 'name': "%s - %s" % (m.name, m.version)})
    return HttpResponse(json.dumps(ret))
def server_model_list_ajax(request):
    """Return all server models as JSON: [{'id': ..., 'name': 'vendor - model'}]."""
    ret = []
    for m in models.ServerModel.objects.all():
        ret.append({'id': m.id, 'name': "%s - %s" % (m.vendor, m.model)})
    return HttpResponse(json.dumps(ret))
def server_model_show(request, object_id):
    """Display the detail page for a single ServerModel."""
    _object = get_object_or_404(models.ServerModel, pk=object_id)
    return render_to_response(
        'systems/servermodel_detail.html',
        {
            'object': _object,
        },
        RequestContext(request))
def server_model_list(request):
    """Display the list page for all ServerModels."""
    object_list = models.ServerModel.objects.all()
    return render_to_response(
        'systems/servermodel_list.html',
        {
            'object_list': object_list,
        },
        RequestContext(request))
def csv_import(request):
    """Bulk-create System records from an uploaded CSV file.

    The CSV's first row names the columns; only columns listed in
    ALLOWED_COLUMNS are used, each passed through a getter that coerces the
    raw cell into the value stored on the model.  Rows failing model
    validation are printed and skipped rather than aborting the import.

    NOTE(review): ``csv_reader.next()`` and ``dict.iteritems()`` are
    Python 2-only.
    """
    from .forms import CSVImportForm
    # Getters translate one raw CSV cell into a model-ready value.  The
    # lookup getters deliberately return None when the referenced record
    # does not exist.
    def generic_getter(field):
        return field
    def uppercase_getter(field):
        return field.upper()
    def system_status_getter(field):
        try:
            return models.SystemStatus.objects.get(status=field)
        except models.SystemStatus.DoesNotExist:
            return
    def server_model_getter(field):
        try:
            return models.ServerModel.objects.get(id=field)
        except models.ServerModel.DoesNotExist:
            return
    def rack_getter(field):
        try:
            return models.SystemRack.objects.get(name=field)
        except models.SystemRack.DoesNotExist:
            return None
    # Column name -> coercion function; anything else in the CSV is ignored.
    ALLOWED_COLUMNS = {
        'hostname': generic_getter,
        'asset_tag': generic_getter,
        'serial': uppercase_getter,
        'notes': generic_getter,
        'oob_ip': generic_getter,
        'system_status': system_status_getter,
        'system_rack': rack_getter,
        'rack_order': generic_getter,
        'server_model': server_model_getter,
        'purchase_price': generic_getter,
    }
    new_systems = 0
    if request.method == 'POST':
        form = CSVImportForm(request.POST, request.FILES)
        if form.is_valid():
            csv_reader = csv.reader(form.cleaned_data['csv'])
            # First row is the header; it decides which columns map where.
            headers = csv_reader.next()
            for line in csv_reader:
                cur_data = dict(zip(headers, line))
                system_data = dict(
                    (a, getter(cur_data.get(a, None)))
                    for a, getter in ALLOWED_COLUMNS.iteritems())
                s = models.System(**system_data)
                try:
                    # Validate before saving so bad rows are reported and
                    # skipped instead of aborting the whole import.
                    s.full_clean()
                except ValidationError as exc:
                    print(exc)
                else:
                    s.save()
                    new_systems += 1
            form = None
    else:
        form = CSVImportForm()
    return render_to_response(
        'systems/csv_import.html',
        {
            'form': form,
            'allowed_columns': ALLOWED_COLUMNS,
            'new_systems': new_systems,
        },
        RequestContext(request))
| 2.21875 | 2 |
matplotlibTUT/plt12_contours.py | subshine/tutorials | 10,786 | 12764129 | # View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 12 - contours
"""
Please note, this script is for python3+.
If you are using python2+, please modify it accordingly.
Tutorial reference:
http://www.scipy-lectures.org/intro/matplotlib/matplotlib.html
"""
import matplotlib.pyplot as plt
import numpy as np
def f(x, y):
    """Height field evaluated at (x, y); supports NumPy broadcasting."""
    polynomial = 1 - x / 2 + x ** 5 + y ** 3
    gaussian = np.exp(-x ** 2 - y ** 2)
    return polynomial * gaussian
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
# Build the 2-D evaluation grid shared by both contour calls.
X, Y = np.meshgrid(x, y)
# use plt.contourf to filling contours
# X, Y and value for (X,Y) point
plt.contourf(X, Y, f(X, Y), 8, alpha=.75, cmap=plt.cm.hot)
# use plt.contour to add contour lines
# FIX: the keyword is ``linewidths`` (plural); ``linewidth`` is not an
# accepted Axes.contour argument, so the 0.5 setting never took effect.
C = plt.contour(X, Y, f(X, Y), 8, colors='black', linewidths=.5)
# adding label
plt.clabel(C, inline=True, fontsize=10)
plt.xticks(())
plt.yticks(())
plt.show()
| 4.125 | 4 |
dipy/utils/arrfuncs.py | oesteban/dipy | 3 | 12764130 | """ Utilities to manipulate numpy arrays """
import sys
import numpy as np
from nibabel.volumeutils import endian_codes, native_code, swapped_code
def as_native_array(arr):
    """ Return `arr` as native byteordered array

    If arr is already native byte ordered, return unchanged. If it is opposite
    endian, then make a native byte ordered copy and return that

    Parameters
    ----------
    arr : ndarray

    Returns
    -------
    native_arr : ndarray
        If `arr` was native order, this is just `arr`. Otherwise it's a new
        array such that ``np.all(native_arr == arr)``, with native byte
        ordering.
    """
    # dtype.isnative is True both for explicitly-native '<'/'>' dtypes and
    # for order-free ('|') dtypes, so nibabel's endian-code lookup (which
    # the original used, and which has no entry for '|') is unnecessary.
    if arr.dtype.isnative:
        return arr
    # astype() to the native-order dtype converts the values in one step.
    # Equivalent to the old ``arr.byteswap().newbyteorder()`` idiom, but
    # ndarray.newbyteorder was removed in NumPy 2.0.
    return arr.astype(arr.dtype.newbyteorder('='))
| 3.3125 | 3 |
pipeline/pipeline.py | Salazar-99/Kubeflow-DigitalOcean | 0 | 12764131 | import kfp
import kfp.dsl as dsl
from kfp.components import create_component_from_func
import kfp.components as comp
IMAGE = 'salazar99/python-kubeflow:latest'
DATA_URL = 'https://gs-kubeflow-pipelines.nyc3.digitaloceanspaces.com/clean-spam-data.csv'
# Download data
# def download_data(source_path: str, output_csv: comp.OutputPath('CSV')):
# import pandas as pd
# data = pd.read_csv(source_path)
# print(output_csv)
# data.to_csv(output_csv, index=False)
# download_op = create_component_from_func(func=download_data,
# base_image=IMAGE)
web_downloader_op = kfp.components.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/web/Download/component.yaml')
# Preprocess and store data
def preprocess_data(source_path: comp.InputPath('CSV'),
                    x_train_output_path: str,
                    x_test_output_path: str,
                    y_train_output_path: str,
                    y_test_output_path: str):
    """Split the raw CSV into train/test sets, vectorize the text with
    TF-IDF n-grams, keep the top-k features, and persist the four arrays
    as .npy files at the given output paths."""
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.feature_selection import SelectKBest
    from sklearn.feature_selection import f_classif
    from sklearn.model_selection import train_test_split
    from typing import List
    import pandas as pd
    import numpy as np
    # Vectorization hyper-parameters.  These names were referenced below but
    # never defined, which raised NameError at run time; the values are the
    # standard n-gram defaults for this style of text model.
    NGRAM_RANGE = (1, 2)            # unigrams + bigrams
    TOKEN_MODE = 'word'             # split text into word tokens
    MIN_DOCUMENT_FREQUENCY = 2      # drop tokens seen in < 2 documents
    TOP_K = 20000                   # features kept by SelectKBest
    # Load and split data
    data = pd.read_csv(source_path + '.csv')
    x_train, x_test, y_train, y_test = train_test_split(data['text'], data['label'], test_size=0.2)
    # Convert to required format
    x_train = list(x_train)
    y_train = y_train.to_numpy()
    x_test = list(x_test)
    y_test = y_test.to_numpy()
    # Function for preprocessing data
    def ngram_vectorize(train_text: List[str], train_labels: np.ndarray, test_text: List[str]):
        # Arguments for vectorizor
        kwargs = {
                'ngram_range': NGRAM_RANGE,  # Use 1-grams + 2-grams.
                'dtype': 'int32',
                'strip_accents': 'unicode',
                'decode_error': 'replace',
                'analyzer': TOKEN_MODE,  # Split text into word tokens.
                'min_df': MIN_DOCUMENT_FREQUENCY,
        }
        vectorizer = TfidfVectorizer(**kwargs)
        # Vectorize training text
        x_train = vectorizer.fit_transform(train_text)
        # Vectorize test text
        x_test = vectorizer.transform(test_text)
        # Select top k features
        selector = SelectKBest(f_classif, k=TOP_K)
        selector.fit(x_train, train_labels)
        x_train = selector.transform(x_train).astype('float32')
        x_test = selector.transform(x_test).astype('float32')
        return x_train, x_test
    # Preprocess data
    x_train, x_test = ngram_vectorize(x_train, y_train, x_test)
    # Save data.  FIX: np.save takes (file, array); the original calls had
    # the arguments reversed and failed at run time.
    # NOTE(review): the vectorized x_train/x_test are scipy sparse matrices;
    # if np.save rejects them downstream, scipy.sparse.save_npz is the
    # appropriate serializer -- confirm against the consuming component.
    np.save(x_train_output_path, x_train)
    np.save(x_test_output_path, x_test)
    np.save(y_train_output_path, y_train)
    np.save(y_test_output_path, y_test)
preprocess_op = create_component_from_func(func=preprocess_data,
base_image=IMAGE)
# Train model
# Evaluate model
# Save model
# Build pipeline
@dsl.pipeline(
    name="SMS Spam Detection Model Pipeline",
    description="Train an MLP to detect spam messages from csv data"
)
def pipeline(url=DATA_URL):
    # Stage 1: download the raw CSV; stage 2: preprocess it into the four
    # .npy artifacts consumed by the (not yet implemented) training steps.
    download = web_downloader_op(url=url)
    preprocess = preprocess_op(download.outputs['data'],
                               'x_train.npy',
                               'x_test.npy',
                               'y_train.npy',
                               'y_test.npy').after(download)
if __name__ == '__main__':
    # Compile the pipeline definition to a YAML spec for upload to Kubeflow.
    kfp.compiler.Compiler().compile(
        pipeline_func=pipeline,
        package_path='pipeline.yaml'
) | 2.765625 | 3 |
tools/win/linker_verbose_tracking.py | google-ar/chromium | 777 | 12764132 | <filename>tools/win/linker_verbose_tracking.py
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script parses the /verbose output from the VC++ linker and uses it to
explain why a particular object file is being linked in. It parses records
like these:
Found "public: static void * __cdecl SkTLS::Get(void * (__cdecl*)(void)...
Referenced in chrome_crash_reporter_client_win.obj
Referenced in skia.lib(SkError.obj)
Loaded skia.lib(SkTLS.obj)
and then uses the information to answer questions such as "why is SkTLS.obj
being linked in. In this case it was requested by SkError.obj, and the process
is then repeated for SkError.obj. It traces the dependency tree back to a file
that was specified on the command line. Typically that file is part of a
source_set, and if that source_set is causing unnecessary code and data to be
pulled in then changing it to a static_library may reduce the binary size. See
crrev.com/2556603002 for an example of a ~900 KB savings from such a change.
In other cases the source_set to static_library fix does not work because some
of the symbols are required, while others are pulling in unwanted object files.
In these cases it can be necessary to see what symbol is causing one object file
to reference another. Removing or moving the problematic symbol can fix the
problem. See crrev.com/2559063002 for an example of such a change.
One complication is that there are sometimes multiple source files with the
same name, such as crc.c, which can make analysis more difficult or
ambiguous. If this becomes a blocking issue they it may be necessary to
temporarily rename the source file.
Object file name matching is case sensitive.
Typical output when run on chrome.dll verbose link output is:
>python tools\win\linker_verbose_tracking.py chrome_verbose_02.txt flac_crc
Database loaded - 11277 xrefs found
flac_crc.obj pulled in for symbol "_FLAC__crc8" by
stream_decoder.obj
bitwriter.obj
stream_decoder.obj pulled in for symbol "_FLAC__stream_decoder_new" by
stream_encoder.obj
bitwriter.obj pulled in for symbol "_FLAC__bitwriter_new" by
stream_encoder.obj
stream_encoder.obj pulled in for symbol "_FLAC__stream_encoder_new" by
Command-line obj file: audio_encoder.obj
"""
import pdb
import re
import sys
def ParseVerbose(input_file):
  """Parses linker /VERBOSE output into symbol cross-reference maps.

  Returns (cross_refs, cross_refed_symbols), both keyed by the loaded
  object-file name without its .obj extension: cross_refs maps it to the
  list of object files that referenced it, cross_refed_symbols maps it to
  the symbol that caused it to be loaded.
  """
  # This matches line like this:
  #   Referenced in skia.lib(SkError.obj)
  # with the groups()[0] referring to the object file name without the file
  # extension.  Raw string so the backslash escapes are unambiguous.
  obj_match = re.compile(r'.*\((.*)\.obj\)')
  # Prefix used for symbols that are referenced:
  found_prefix = '      Found'
  cross_refs = {}
  cross_refed_symbols = {}
  references = None
  # FIX: use a context manager so the file handle is closed deterministically
  # (the original iterated a bare open() and leaked the handle).
  with open(input_file) as verbose_file:
    for line in verbose_file:
      if line.startswith(found_prefix):
        references = []
        # Grab the symbol name
        symbol = line[len(found_prefix):].strip()
        if symbol[0] == '"':
          # Strip off leading and trailing quotes if present.
          symbol = symbol[1:-1]
        continue
      if isinstance(references, list):
        sub_line = line.strip()
        match = obj_match.match(sub_line)
        # See if the line is part of the list of places where this symbol was
        # referenced
        if sub_line.count('Referenced ') > 0:
          if match:
            # This indicates a match that is xxx.lib(yyy.obj), so a referencing
            # .obj file that was itself inside of a library. We discard the
            # library name.
            reference = match.groups()[0]
          else:
            # This indicates a match that is just a pure .obj file name
            # I think this means that the .obj file was specified on the linker
            # command line.
            reference = ('Command-line obj file: ' +
                         sub_line[len('Referenced in '): -len('.obj')])
          references.append(reference)
        elif sub_line.count('Loaded ') > 0:
          if match:
            loaded = match.groups()[0]
            cross_refs[loaded] = references
            cross_refed_symbols[loaded] = symbol
          references = None
      if line.startswith('Finished pass 1'):
        # Stop now because the remaining 90% of the verbose output is
        # not of interest. Could probably use /VERBOSE:REF to trim out
        # boring information.
        break
  return cross_refs, cross_refed_symbols
def TrackObj(cross_refs, cross_refed_symbols, obj_name):
  """Prints the chain of references that caused obj_name to be linked in.

  Starting from obj_name, repeatedly looks up which object files referenced
  each target and prints the responsible symbol, walking the dependency tree
  backwards.  The walk is bounded at 100 levels to guard against cycles.
  (Python 2 print statements -- this tool targets py2.)
  """
  if obj_name.lower().endswith('.obj'):
    obj_name = obj_name[:-len('.obj')]
  # Keep track of which references we've already followed.
  tracked = {}
  # Initial set of object files that we are tracking.
  targets = [obj_name]
  printed = False
  for i in range(100):
    new_targets = {}
    for target in targets:
      if not target in tracked:
        tracked[target] = True
        if target in cross_refs.keys():
          symbol = cross_refed_symbols[target]
          printed = True
          print '%s.obj pulled in for symbol "%s" by' % (target, symbol)
          for ref in cross_refs[target]:
            print '\t%s.obj' % ref
            new_targets[ref] = True
    if len(new_targets) == 0:
      break
    print
    targets = new_targets.keys()
  if not printed:
    print 'No references to %s.obj found.' % obj_name
def main():
  """Command-line entry point: <verbose_output_file> <objfile>."""
  if len(sys.argv) < 3:
    print r'Usage: %s <verbose_output_file> <objfile>' % sys.argv[0]
    print r'Sample: %s chrome_dll_verbose.txt SkTLS' % sys.argv[0]
    return 0
  cross_refs, cross_refed_symbols = ParseVerbose(sys.argv[1])
  print 'Database loaded - %d xrefs found' % len(cross_refs)
  TrackObj(cross_refs, cross_refed_symbols, sys.argv[2])
| 1.195313 | 1 |
setup.py | berryweinst/pytorch-attention | 149 | 12764133 | from distutils.core import setup
# Package metadata for the ``attention`` package.
# NOTE(review): distutils is deprecated since Python 3.10 -- consider
# migrating to setuptools.
setup(
    name='attention',
    version='0.1.0',
    author='tllake',
    author_email='<EMAIL>',
    packages=['attention'],
    description='An attention function for PyTorch.',
long_description=open('README.md').read()) | 1.085938 | 1 |
service/models.py | acslaszlo/docker-test | 0 | 12764134 | <reponame>acslaszlo/docker-test<gh_stars>0
from flywheel import Field, Model
class Data(Model):
    """Flywheel (DynamoDB) model: a string hash key plus three value fields."""
    id = Field(data_type=str, hash_key=True)  # partition (hash) key
    val1 = Field(data_type=str)
    val2 = Field(data_type=int)
    val3 = Field(data_type=str)
| 2.140625 | 2 |
tests/test_socketutil.py | gst/Pyro5 | 0 | 12764135 | import sys
import os
import platform
import threading
import socket
import pytest
from Pyro5 import config, socketutil
# determine ipv6 capability
has_ipv6 = socket.has_ipv6
if has_ipv6:
    # socket.has_ipv6 only reflects build-time support; probe that an IPv6
    # socket can actually connect and that IPv6 name resolution works.
    s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    try:
        s.connect(("::1", 53))
        s.close()
        socket.getaddrinfo("localhost", 53, socket.AF_INET6)
    except socket.error:
        has_ipv6 = False
class TestSocketutil:
    """Tests for Pyro5.socketutil: address resolution, unused-port discovery,
    socket creation (TCP/UDP, IPv4/IPv6, Unix domain), the send/receive
    helpers, broadcast, and MSG_WAITALL edge cases.  IPv6 and Unix-socket
    tests skip themselves when the platform lacks the capability."""
    @classmethod
    def setup_class(cls):
        config.POLLTIMEOUT = 0.1
    def testGetIP(self):
        config.PREFER_IP_VERSION = 4
        myip = socketutil.get_ip_address("")
        assert len(str(myip)) > 4
        myip = socketutil.get_ip_address("", workaround127=True)
        assert len(str(myip)) > 4
        assert not str(myip).startswith("127.")
        addr = socketutil.get_ip_address("127.0.0.1", workaround127=False)
        assert "127.0.0.1" == str(addr)
        assert addr.version == 4
        addr = socketutil.get_ip_address("127.0.0.1", workaround127=True)
        assert "127.0.0.1" != str(addr)
        assert addr.version == 4
    def testGetIP6(self):
        if not has_ipv6:
            pytest.skip("no ipv6 capability")
        addr = socketutil.get_ip_address("::1", version=6)
        assert addr.version == 6
        assert ":" in str(addr)
        addr = socketutil.get_ip_address("localhost", version=6)
        assert addr.version == 6
        assert ":" in str(addr)
    def testGetInterface(self):
        addr = socketutil.get_interface("localhost")
        assert addr.version == 4
        assert str(addr).startswith("127.")
        assert str(addr.ip).startswith("127.0")
        assert str(addr.network).startswith("127.0")
        if has_ipv6:
            addr = socketutil.get_interface("::1")
            assert addr.version == 6
            assert ":" in str(addr)
            assert ":" in str(addr.ip)
            assert ":" in str(addr.network)
    def testUnusedPort(self):
        port1 = socketutil.find_probably_unused_port()
        port2 = socketutil.find_probably_unused_port()
        assert port1 > 0
        assert port1 != port2
        port1 = socketutil.find_probably_unused_port(socktype=socket.SOCK_DGRAM)
        port2 = socketutil.find_probably_unused_port(socktype=socket.SOCK_DGRAM)
        assert port1 > 0
        assert port1 != port2
    def testUnusedPort6(self):
        if not has_ipv6:
            pytest.skip("no ipv6 capability")
        port1 = socketutil.find_probably_unused_port(family=socket.AF_INET6)
        port2 = socketutil.find_probably_unused_port(family=socket.AF_INET6)
        assert port1 > 0
        assert port1 != port2
        port1 = socketutil.find_probably_unused_port(family=socket.AF_INET6, socktype=socket.SOCK_DGRAM)
        port2 = socketutil.find_probably_unused_port(family=socket.AF_INET6, socktype=socket.SOCK_DGRAM)
        assert port1 > 0
        assert port1 != port2
    def testBindUnusedPort(self):
        sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        port1 = socketutil.bind_unused_port(sock1)
        port2 = socketutil.bind_unused_port(sock2)
        assert port1 > 0
        assert port1 != port2
        assert sock1.getsockname() == ("127.0.0.1", port1)
        sock1.close()
        sock2.close()
    def testBindUnusedPort6(self):
        if not has_ipv6:
            pytest.skip("no ipv6 capability")
        sock1 = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        sock2 = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        port1 = socketutil.bind_unused_port(sock1)
        port2 = socketutil.bind_unused_port(sock2)
        assert port1 > 0
        assert port1 != port2
        host, port, _, _ = sock1.getsockname()
        assert ":" in host
        assert port1 == port
        sock1.close()
        sock2.close()
    def testCreateUnboundSockets(self):
        s = socketutil.create_socket()
        assert socket.AF_INET == s.family
        bs = socketutil.create_bc_socket()
        assert socket.AF_INET == bs.family
        try:
            host, port = s.getsockname()
            # can either fail with socket.error or return (host,0)
            assert 0 == port
        except socket.error:
            pass
        try:
            host, port = bs.getsockname()
            # can either fail with socket.error or return (host,0)
            assert 0 == port
        except socket.error:
            pass
        s.close()
        bs.close()
    def testCreateUnboundSockets6(self):
        if not has_ipv6:
            pytest.skip("no ipv6 capability")
        s = socketutil.create_socket(ipv6=True)
        assert socket.AF_INET6 == s.family
        bs = socketutil.create_bc_socket(ipv6=True)
        assert socket.AF_INET6 == bs.family
        try:
            host, port, _, _ = s.getsockname()
            # can either fail with socket.error or return (host,0)
            assert 0 == port
        except socket.error:
            pass
        try:
            host, port, _, _ = bs.getsockname()
            # can either fail with socket.error or return (host,0)
            assert 0 == port
        except socket.error:
            pass
        s.close()
        bs.close()
    def testCreateBoundSockets(self):
        s = socketutil.create_socket(bind=('127.0.0.1', 0))
        assert socket.AF_INET == s.family
        bs = socketutil.create_bc_socket(bind=('127.0.0.1', 0))
        assert '127.0.0.1' == s.getsockname()[0]
        assert '127.0.0.1' == bs.getsockname()[0]
        s.close()
        bs.close()
        # bind and connect are mutually exclusive
        with pytest.raises(ValueError):
            socketutil.create_socket(bind=('localhost', 12345), connect=('localhost', 1234))
    def testCreateBoundSockets6(self):
        if not has_ipv6:
            pytest.skip("no ipv6 capability")
        s = socketutil.create_socket(bind=('::1', 0))
        assert socket.AF_INET6 == s.family
        bs = socketutil.create_bc_socket(bind=('::1', 0))
        assert ':' in s.getsockname()[0]
        assert ':' in bs.getsockname()[0]
        s.close()
        bs.close()
        with pytest.raises(ValueError):
            socketutil.create_socket(bind=('::1', 12345), connect=('::1', 1234))
    def testCreateBoundUnixSockets(self):
        if not hasattr(socket, "AF_UNIX"):
            pytest.skip("no unix domain sockets capability")
        SOCKNAME = "test_unixsocket"
        if os.path.exists(SOCKNAME):
            os.remove(SOCKNAME)
        s = socketutil.create_socket(bind=SOCKNAME)
        assert socket.AF_UNIX == s.family
        assert SOCKNAME == s.getsockname()
        s.close()
        if os.path.exists(SOCKNAME):
            os.remove(SOCKNAME)
        with pytest.raises(ValueError):
            socketutil.create_socket(bind=SOCKNAME, connect=SOCKNAME)
    def testAbstractNamespace(self):
        if not hasattr(socket, "AF_UNIX") and not sys.platform.startswith("linux"):
            pytest.skip("no unix domain sockets capability, and not Linux")
        SOCKNAME = "\0test_unixsocket_abstract_ns"  # mind the \0 at the start
        s = socketutil.create_socket(bind=SOCKNAME)
        assert bytes(SOCKNAME, "ascii") == s.getsockname()
        s.close()
    def testSend(self):
        ss = socketutil.create_socket(bind=("localhost", 0))
        port = ss.getsockname()[1]
        cs = socketutil.create_socket(connect=("localhost", port))
        socketutil.send_data(cs, b"foobar!" * 10)
        cs.shutdown(socket.SHUT_WR)
        a = ss.accept()
        data = socketutil.receive_data(a[0], 5)
        assert b"fooba" == data
        data = socketutil.receive_data(a[0], 5)
        assert b"r!foo" == data
        a[0].close()
        ss.close()
        cs.close()
    def testSendUnix(self):
        if not hasattr(socket, "AF_UNIX"):
            pytest.skip("no unix domain sockets capability")
        SOCKNAME = "test_unixsocket"
        if os.path.exists(SOCKNAME):
            os.remove(SOCKNAME)
        ss = socketutil.create_socket(bind=SOCKNAME)
        cs = socketutil.create_socket(connect=SOCKNAME)
        socketutil.send_data(cs, b"foobar!" * 10)
        cs.shutdown(socket.SHUT_WR)
        a = ss.accept()
        data = socketutil.receive_data(a[0], 5)
        assert b"fooba" == data
        data = socketutil.receive_data(a[0], 5)
        assert b"r!foo" == data
        a[0].close()
        ss.close()
        cs.close()
        if os.path.exists(SOCKNAME):
            os.remove(SOCKNAME)
    def testBroadcast(self):
        ss = socketutil.create_bc_socket((None, 0))
        port = ss.getsockname()[1]
        cs = socketutil.create_bc_socket()
        for bcaddr in config.BROADCAST_ADDRS:
            try:
                cs.sendto(b"monkey", 0, (bcaddr, port))
            except socket.error as x:
                err = getattr(x, "errno", x.args[0])
                # handle some errno that some platforms like to throw
                if err not in socketutil.ERRNO_EADDRNOTAVAIL and err not in socketutil.ERRNO_EADDRINUSE:
                    raise
        data, _ = ss.recvfrom(500)
        assert b"monkey" == data
        cs.close()
        ss.close()
    def testMsgWaitallProblems(self):
        ss = socketutil.create_socket(bind=("localhost", 0), timeout=2)
        port = ss.getsockname()[1]
        cs = socketutil.create_socket(connect=("localhost", port), timeout=2)
        a = ss.accept()
        # test some sizes that might be problematic with MSG_WAITALL and check that they work fine
        for size in [1000, 10000, 32000, 32768, 32780, 41950, 41952, 42000, 65000, 65535, 65600, 80000]:
            socketutil.send_data(cs, b"x" * size)
            data = socketutil.receive_data(a[0], size)
            socketutil.send_data(a[0], data)
            data = socketutil.receive_data(cs, size)
            assert size == len(data)
        a[0].close()
        ss.close()
        cs.close()
    def testMsgWaitallProblems2(self):
        # Echo server runs in a background thread so the main thread can
        # exercise the client side of each round-trip.
        class ReceiveThread(threading.Thread):
            def __init__(self, sock, sizes):
                super(ReceiveThread, self).__init__()
                self.sock = sock
                self.sizes = sizes
            def run(self):
                cs, _ = self.sock.accept()
                for size in self.sizes:
                    data = socketutil.receive_data(cs, size)
                    socketutil.send_data(cs, data)
                cs.close()
        ss = socketutil.create_socket(bind=("localhost", 0))
        SIZES = [1000, 10000, 32000, 32768, 32780, 41950, 41952, 42000, 65000, 65535, 65600, 80000, 999999]
        serverthread = ReceiveThread(ss, SIZES)
        serverthread.setDaemon(True)
        serverthread.start()
        port = ss.getsockname()[1]
        cs = socketutil.create_socket(connect=("localhost", port), timeout=2)
        # test some sizes that might be problematic with MSG_WAITALL and check that they work fine
        for size in SIZES:
            socketutil.send_data(cs, b"x" * size)
            data = socketutil.receive_data(cs, size)
            assert size == len(data)
        serverthread.join()
        ss.close()
        cs.close()
    def testMsgWaitAllConfig(self):
        if platform.system() == "Windows":
            # default config should be False on these platforms even though socket.MSG_WAITALL might exist
            assert not socketutil.USE_MSG_WAITALL
        else:
            # on all other platforms, default config should be True (as long as socket.MSG_WAITALL exists)
            if hasattr(socket, "MSG_WAITALL"):
                assert socketutil.USE_MSG_WAITALL
            else:
                assert not socketutil.USE_MSG_WAITALL
| 2.34375 | 2 |
keno/biased_rng.py | matthewdeanmartin/keno | 0 | 12764136 | # coding=utf-8
"""
Attempt to creat an RNG that picks numbers like humans
# favors date parts (1-31, 1-12, 19/20, 50-99/00-18)
# seeks/avoids patterns (i.e. 1,2,3,4,5 or 2,22,32,42)
# favors past winning numbers
# favors culturally meaningful numbers, 777, 888, etc.
# http://ww2.amstat.org/publications/jse/v13n2/mecklin.html
Past number strategies
Choosing winning combinations from previous draws
Modifying previous winning combinations (e.g. adding 1 to each number in a previous winning combination)
Choosing “hot” or “cold” numbers (a statistically nonsensical strategy suggested in many of the lay books about lotteries)
"Numerology"
factors of 1, 2, 3, etc., eg. 7, 14, 21, etc
Choosing arithmetic progressions (e.g. 1-2-3-4-5-6 or 2-5-8-11-14-17)
Choosing powers of 2 (e.g. 1-2-4-8-16-32)
Choosing perfect squares (e.g. 1-4-9-16-25-36)
Choosing all prime numbers (e.g. 2-3-5-7-11-13)
Choosing Fibonacci numbers (e.g. 1-2-3-5-8-13)
Dates
Choosing only numbers that are less than or equal to 31; many people choose numbers based on birthdays, anniversaries, etc.
"""
from typing import List, Set
import random
from datetime import date, timedelta
class BiasedRng(object):
    """
    Birthday numbers: simulate a player who only ever picks numbers derived
    from birthdays.

    Against an unbiased state RNG this only raises the risk of capped payouts
    from too many people winning the same draw -- every number is otherwise as
    good as any other.  Playing keno against friends (not the state), one
    player could exploit an opponent known to use a BiasedRng.
    """

    def __init__(self) -> None:
        pass

    def dates_only(self) -> List[int]:
        """
        Draw random birthdays until at least 20 distinct keno numbers (1-80)
        have been collected; return them in ascending order.
        :return:
        """
        chosen = set()  # type: Set[int]
        while len(chosen) < 20:
            chosen.update(self.keno_range(self.random_birthday()))
        return sorted(chosen)

    def random_birthday(self) -> date:
        """
        Uniformly random birthday for someone up to 80 years old
        (a year counts as 365 days; leap days are ignored).
        :return:
        """
        span = 365 * 80
        earliest = date.today() - timedelta(days=span)
        return earliest + timedelta(days=random.randint(0, span))

    def keno_range(self, value: date) -> List[int]:
        """
        Split the date into its parts (century, two-digit year, day, month)
        and return the parts that land on the keno board (1 to 80).
        :param value:
        :return:
        """
        century, two_digit_year = divmod(value.year, 100)
        parts = {century, two_digit_year, value.day, value.month} - {0}
        board = {n for n in parts if n <= 80}
        return list(board)
if __name__ == "__main__":
rng = BiasedRng()
bday = rng.random_birthday()
print(bday)
print(rng.keno_range(bday))
print(rng.dates_only())
| 3.734375 | 4 |
Extras/at 17.py | SkaarlK/Learning-Python | 2 | 12764137 | #Ler 80 números e informar quantos estão no intervalo entre 10 (inclusive) e 150 (inclusive).
nums = []
quantia = str(input("Quantos números você deseja inserir (deixe em branco para 80)? "))
def addNumber():
newNumber = int(input("Insira o Número: "))
if newNumber >= 10 and newNumber <= 150:
nums.append(newNumber)
if quantia:
for i in range(int(quantia)):
addNumber()
else:
for i in range(80):
addNumber()
print("No intervalo, houveram %d números. Sendo eles: " % len(nums) + str(nums)) | 3.734375 | 4 |
Python/diagonal-traverse.py | sm2774us/leetcode_interview_prep_2021 | 0 | 12764138 | # Time: O(m * n)
# Space: O(1)
class Solution(object):
def findDiagonalOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
if not matrix or not matrix[0]:
return []
result = []
row, col, d = 0, 0, 0
dirs = [(-1, 1), (1, -1)]
for i in range(len(matrix) * len(matrix[0])):
result.append(matrix[row][col])
row += dirs[d][0]
col += dirs[d][1]
if row >= len(matrix):
row = len(matrix) - 1
col += 2
d = 1 - d
elif col >= len(matrix[0]):
col = len(matrix[0]) - 1
row += 2
d = 1 - d
elif row < 0:
row = 0
d = 1 - d
elif col < 0:
col = 0
d = 1 - d
return result
| 3.25 | 3 |
opp/sharpen_up_upper_test.py | heeryoncho/sensors2018cnnhar | 10 | 12764139 | import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix
from keras.models import load_model
import select_data as sd
import warnings
warnings.simplefilter(action='ignore', category=UserWarning)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
'''
See paper: Sensors 2018, 18(4), 1055; https://doi.org/10.3390/s18041055
"Divide and Conquer-Based 1D CNN Human Activity Recognition Using Test Data Sharpening"
by <NAME> & <NAME>
This code investigates the effects of test data sharpening on
1D CNN UP position activity classification model using UPPER body TEST data.
The performance is measured using X_test, y_test dataset.
See right line graph in Figure 13 (Test Data Recognition Accuracy).
(Sensors 2018, 18(4), 1055, page 16 of 24)
'''
# Python 2 script: evaluate the pre-trained 1D CNN on the raw test set,
# then sweep sharpening hyper-parameters (sigma, alpha) and report the
# accuracy and confusion matrix for each combination.
X_train, y_train, X_valid, y_valid, X_test, y_test = sd.load_data("upper", "up")
print "\n=== COMPARE ACCURACY: NO SHARPEN vs. SHARPENED ==="
print "=== [UPPER body sensors data] UP Class ==="
print "=== 1D CNN MODEL ==="
print "=== Evaluation on TEST DATA ===\n"
# Load model
model = load_model('model/upper_up.hdf5')
# Baseline: accuracy on the unmodified test data.
print ">>> RAW:"
pred = model.predict(np.expand_dims(X_test, axis=2), batch_size=32)
print accuracy_score(y_test, np.argmax(pred, axis=1))
print confusion_matrix(y_test, np.argmax(pred, axis=1)), '\n'
# Grid over sharpening strength (alpha) and smoothing width (sigma).
alpha = np.arange(0.5, 15.5, 0.5)
sigma = np.arange(3, 8, 1)
for s in sigma:
    for a in alpha:
        x_test_sharpen = sd.sharpen(X_test, s, a)
        pred_sharpened = model.predict(np.expand_dims(x_test_sharpen, axis=2), batch_size=32)
        print ">>> SHARPENED: sigma={}, alpha={:.2f}".format(s, a)
        print accuracy_score(y_test, np.argmax(pred_sharpened, axis=1))
        print confusion_matrix(y_test, np.argmax(pred_sharpened, axis=1))
'''
/usr/bin/python2.7 /home/hcilab/Documents/OSS/sensors2018cnnhar/opp/sharpen_up_upper_test.py
/home/hcilab/.local/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
=== COMPARE ACCURACY: NO SHARPEN vs. SHARPENED ===
=== [UPPER body sensors data] UP Class ===
=== 1D CNN MODEL ===
=== Evaluation on TEST DATA ===
>>> RAW:
0.803821517751
[[5190 136]
[1671 2214]]
>>> SHARPENED: sigma=3, alpha=0.50
0.822820540658
[[5128 198]
[1434 2451]]
>>> SHARPENED: sigma=3, alpha=1.00
0.83020301813
[[5113 213]
[1351 2534]]
>>> SHARPENED: sigma=3, alpha=1.50
0.832482900879
[[5097 229]
[1314 2571]]
>>> SHARPENED: sigma=3, alpha=2.00
0.833242861796
[[5092 234]
[1302 2583]]
>>> SHARPENED: sigma=3, alpha=2.50
0.834219954402
[[5088 238]
[1289 2596]]
>>> SHARPENED: sigma=3, alpha=3.00
0.834762783628
[[5087 239]
[1283 2602]]
>>> SHARPENED: sigma=3, alpha=3.50
0.835305612854
[[5087 239]
[1278 2607]]
>>> SHARPENED: sigma=3, alpha=4.00
0.835522744545
[[5084 242]
[1273 2612]]
>>> SHARPENED: sigma=3, alpha=4.50
0.835305612854
[[5083 243]
[1274 2611]]
>>> SHARPENED: sigma=3, alpha=5.00
0.835739876235
[[5084 242]
[1271 2614]]
>>> SHARPENED: sigma=3, alpha=5.50
0.835414178699
[[5081 245]
[1271 2614]]
>>> SHARPENED: sigma=3, alpha=6.00
0.835522744545
[[5081 245]
[1270 2615]]
>>> SHARPENED: sigma=3, alpha=6.50
0.83563131039
[[5080 246]
[1268 2617]]
>>> SHARPENED: sigma=3, alpha=7.00
0.835739876235
[[5080 246]
[1267 2618]]
>>> SHARPENED: sigma=3, alpha=7.50
0.835739876235
[[5080 246]
[1267 2618]]
>>> SHARPENED: sigma=3, alpha=8.00
0.83563131039
[[5079 247]
[1267 2618]]
>>> SHARPENED: sigma=3, alpha=8.50
0.835957007925
[[5079 247]
[1264 2621]]
>>> SHARPENED: sigma=3, alpha=9.00
0.83606557377
[[5079 247]
[1263 2622]]
>>> SHARPENED: sigma=3, alpha=9.50
0.836174139616
[[5079 247]
[1262 2623]]
>>> SHARPENED: sigma=3, alpha=10.00
0.836391271306
[[5079 247]
[1260 2625]]
>>> SHARPENED: sigma=3, alpha=10.50
0.836499837151
[[5079 247]
[1259 2626]]
>>> SHARPENED: sigma=3, alpha=11.00
0.836499837151
[[5079 247]
[1259 2626]]
>>> SHARPENED: sigma=3, alpha=11.50
0.836391271306
[[5078 248]
[1259 2626]]
>>> SHARPENED: sigma=3, alpha=12.00
0.836608402996
[[5079 247]
[1258 2627]]
>>> SHARPENED: sigma=3, alpha=12.50
0.836499837151
[[5079 247]
[1259 2626]]
>>> SHARPENED: sigma=3, alpha=13.00
0.836825534687
[[5080 246]
[1257 2628]]
>>> SHARPENED: sigma=3, alpha=13.50
0.836934100532
[[5080 246]
[1256 2629]]
>>> SHARPENED: sigma=3, alpha=14.00
0.837042666377
[[5080 246]
[1255 2630]]
>>> SHARPENED: sigma=3, alpha=14.50
0.837042666377
[[5080 246]
[1255 2630]]
>>> SHARPENED: sigma=3, alpha=15.00
0.837042666377
[[5080 246]
[1255 2630]]
>>> SHARPENED: sigma=4, alpha=0.50
0.821517750516
[[5126 200]
[1444 2441]]
>>> SHARPENED: sigma=4, alpha=1.00
0.828683096298
[[5107 219]
[1359 2526]]
>>> SHARPENED: sigma=4, alpha=1.50
0.831397242428
[[5092 234]
[1319 2566]]
>>> SHARPENED: sigma=4, alpha=2.00
0.833242861796
[[5088 238]
[1298 2587]]
>>> SHARPENED: sigma=4, alpha=2.50
0.83313429595
[[5083 243]
[1294 2591]]
>>> SHARPENED: sigma=4, alpha=3.00
0.833351427641
[[5080 246]
[1289 2596]]
>>> SHARPENED: sigma=4, alpha=3.50
0.833894256867
[[5077 249]
[1281 2604]]
>>> SHARPENED: sigma=4, alpha=4.00
0.833785691022
[[5073 253]
[1278 2607]]
>>> SHARPENED: sigma=4, alpha=4.50
0.833894256867
[[5071 255]
[1275 2610]]
>>> SHARPENED: sigma=4, alpha=5.00
0.833894256867
[[5069 257]
[1273 2612]]
>>> SHARPENED: sigma=4, alpha=5.50
0.834002822712
[[5069 257]
[1272 2613]]
>>> SHARPENED: sigma=4, alpha=6.00
0.833894256867
[[5069 257]
[1273 2612]]
>>> SHARPENED: sigma=4, alpha=6.50
0.833785691022
[[5068 258]
[1273 2612]]
>>> SHARPENED: sigma=4, alpha=7.00
0.834111388557
[[5068 258]
[1270 2615]]
>>> SHARPENED: sigma=4, alpha=7.50
0.834545651938
[[5069 257]
[1267 2618]]
>>> SHARPENED: sigma=4, alpha=8.00
0.834328520248
[[5069 257]
[1269 2616]]
>>> SHARPENED: sigma=4, alpha=8.50
0.834545651938
[[5069 257]
[1267 2618]]
>>> SHARPENED: sigma=4, alpha=9.00
0.834545651938
[[5068 258]
[1266 2619]]
>>> SHARPENED: sigma=4, alpha=9.50
0.834437086093
[[5068 258]
[1267 2618]]
>>> SHARPENED: sigma=4, alpha=10.00
0.834654217783
[[5068 258]
[1265 2620]]
>>> SHARPENED: sigma=4, alpha=10.50
0.834654217783
[[5068 258]
[1265 2620]]
>>> SHARPENED: sigma=4, alpha=11.00
0.834654217783
[[5067 259]
[1264 2621]]
>>> SHARPENED: sigma=4, alpha=11.50
0.834654217783
[[5066 260]
[1263 2622]]
>>> SHARPENED: sigma=4, alpha=12.00
0.834654217783
[[5066 260]
[1263 2622]]
>>> SHARPENED: sigma=4, alpha=12.50
0.834654217783
[[5066 260]
[1263 2622]]
>>> SHARPENED: sigma=4, alpha=13.00
0.834762783628
[[5066 260]
[1262 2623]]
>>> SHARPENED: sigma=4, alpha=13.50
0.834871349473
[[5067 259]
[1262 2623]]
>>> SHARPENED: sigma=4, alpha=14.00
0.834871349473
[[5067 259]
[1262 2623]]
>>> SHARPENED: sigma=4, alpha=14.50
0.834871349473
[[5067 259]
[1262 2623]]
>>> SHARPENED: sigma=4, alpha=15.00
0.834979915319
[[5067 259]
[1261 2624]]
>>> SHARPENED: sigma=5, alpha=0.50
0.820866355445
[[5122 204]
[1446 2439]]
>>> SHARPENED: sigma=5, alpha=1.00
0.828031701227
[[5103 223]
[1361 2524]]
>>> SHARPENED: sigma=5, alpha=1.50
0.830528715666
[[5087 239]
[1322 2563]]
>>> SHARPENED: sigma=5, alpha=2.00
0.831722939963
[[5080 246]
[1304 2581]]
>>> SHARPENED: sigma=5, alpha=2.50
0.831614374118
[[5072 254]
[1297 2588]]
>>> SHARPENED: sigma=5, alpha=3.00
0.831397242428
[[5072 254]
[1299 2586]]
>>> SHARPENED: sigma=5, alpha=3.50
0.831940071653
[[5069 257]
[1291 2594]]
>>> SHARPENED: sigma=5, alpha=4.00
0.83270003257
[[5068 258]
[1283 2602]]
>>> SHARPENED: sigma=5, alpha=4.50
0.833351427641
[[5068 258]
[1277 2608]]
>>> SHARPENED: sigma=5, alpha=5.00
0.833351427641
[[5066 260]
[1275 2610]]
>>> SHARPENED: sigma=5, alpha=5.50
0.833785691022
[[5066 260]
[1271 2614]]
>>> SHARPENED: sigma=5, alpha=6.00
0.833785691022
[[5066 260]
[1271 2614]]
>>> SHARPENED: sigma=5, alpha=6.50
0.833894256867
[[5066 260]
[1270 2615]]
>>> SHARPENED: sigma=5, alpha=7.00
0.834002822712
[[5065 261]
[1268 2617]]
>>> SHARPENED: sigma=5, alpha=7.50
0.833894256867
[[5064 262]
[1268 2617]]
>>> SHARPENED: sigma=5, alpha=8.00
0.833785691022
[[5064 262]
[1269 2616]]
>>> SHARPENED: sigma=5, alpha=8.50
0.833785691022
[[5062 264]
[1267 2618]]
>>> SHARPENED: sigma=5, alpha=9.00
0.833785691022
[[5061 265]
[1266 2619]]
>>> SHARPENED: sigma=5, alpha=9.50
0.833894256867
[[5061 265]
[1265 2620]]
>>> SHARPENED: sigma=5, alpha=10.00
0.834219954402
[[5061 265]
[1262 2623]]
>>> SHARPENED: sigma=5, alpha=10.50
0.834219954402
[[5060 266]
[1261 2624]]
>>> SHARPENED: sigma=5, alpha=11.00
0.834219954402
[[5059 267]
[1260 2625]]
>>> SHARPENED: sigma=5, alpha=11.50
0.834328520248
[[5060 266]
[1260 2625]]
>>> SHARPENED: sigma=5, alpha=12.00
0.834328520248
[[5060 266]
[1260 2625]]
>>> SHARPENED: sigma=5, alpha=12.50
0.834437086093
[[5060 266]
[1259 2626]]
>>> SHARPENED: sigma=5, alpha=13.00
0.834545651938
[[5060 266]
[1258 2627]]
>>> SHARPENED: sigma=5, alpha=13.50
0.834545651938
[[5060 266]
[1258 2627]]
>>> SHARPENED: sigma=5, alpha=14.00
0.834437086093
[[5059 267]
[1258 2627]]
>>> SHARPENED: sigma=5, alpha=14.50
0.834545651938
[[5059 267]
[1257 2628]]
>>> SHARPENED: sigma=5, alpha=15.00
0.834654217783
[[5059 267]
[1256 2629]]
>>> SHARPENED: sigma=6, alpha=0.50
0.820323526219
[[5122 204]
[1451 2434]]
>>> SHARPENED: sigma=6, alpha=1.00
0.827380306156
[[5103 223]
[1367 2518]]
>>> SHARPENED: sigma=6, alpha=1.50
0.830420149821
[[5090 236]
[1326 2559]]
>>> SHARPENED: sigma=6, alpha=2.00
0.830854413202
[[5082 244]
[1314 2571]]
>>> SHARPENED: sigma=6, alpha=2.50
0.831288676582
[[5077 249]
[1305 2580]]
>>> SHARPENED: sigma=6, alpha=3.00
0.831722939963
[[5073 253]
[1297 2588]]
>>> SHARPENED: sigma=6, alpha=3.50
0.831831505808
[[5071 255]
[1294 2591]]
>>> SHARPENED: sigma=6, alpha=4.00
0.831831505808
[[5070 256]
[1293 2592]]
>>> SHARPENED: sigma=6, alpha=4.50
0.832265769189
[[5070 256]
[1289 2596]]
>>> SHARPENED: sigma=6, alpha=5.00
0.832808598415
[[5069 257]
[1283 2602]]
>>> SHARPENED: sigma=6, alpha=5.50
0.832808598415
[[5069 257]
[1283 2602]]
>>> SHARPENED: sigma=6, alpha=6.00
0.83270003257
[[5066 260]
[1281 2604]]
>>> SHARPENED: sigma=6, alpha=6.50
0.83291716426
[[5066 260]
[1279 2606]]
>>> SHARPENED: sigma=6, alpha=7.00
0.833242861796
[[5065 261]
[1275 2610]]
>>> SHARPENED: sigma=6, alpha=7.50
0.833568559331
[[5065 261]
[1272 2613]]
>>> SHARPENED: sigma=6, alpha=8.00
0.833351427641
[[5062 264]
[1271 2614]]
>>> SHARPENED: sigma=6, alpha=8.50
0.83313429595
[[5060 266]
[1271 2614]]
>>> SHARPENED: sigma=6, alpha=9.00
0.83313429595
[[5060 266]
[1271 2614]]
>>> SHARPENED: sigma=6, alpha=9.50
0.833242861796
[[5060 266]
[1270 2615]]
>>> SHARPENED: sigma=6, alpha=10.00
0.833242861796
[[5060 266]
[1270 2615]]
>>> SHARPENED: sigma=6, alpha=10.50
0.833242861796
[[5060 266]
[1270 2615]]
>>> SHARPENED: sigma=6, alpha=11.00
0.833242861796
[[5060 266]
[1270 2615]]
>>> SHARPENED: sigma=6, alpha=11.50
0.83313429595
[[5059 267]
[1270 2615]]
>>> SHARPENED: sigma=6, alpha=12.00
0.833025730105
[[5057 269]
[1269 2616]]
>>> SHARPENED: sigma=6, alpha=12.50
0.833025730105
[[5058 268]
[1270 2615]]
>>> SHARPENED: sigma=6, alpha=13.00
0.83313429595
[[5058 268]
[1269 2616]]
>>> SHARPENED: sigma=6, alpha=13.50
0.833242861796
[[5058 268]
[1268 2617]]
>>> SHARPENED: sigma=6, alpha=14.00
0.833242861796
[[5058 268]
[1268 2617]]
>>> SHARPENED: sigma=6, alpha=14.50
0.83313429595
[[5057 269]
[1268 2617]]
>>> SHARPENED: sigma=6, alpha=15.00
0.833025730105
[[5057 269]
[1269 2616]]
>>> SHARPENED: sigma=7, alpha=0.50
0.819020736076
[[5124 202]
[1465 2420]]
>>> SHARPENED: sigma=7, alpha=1.00
0.825643252633
[[5105 221]
[1385 2500]]
>>> SHARPENED: sigma=7, alpha=1.50
0.828465964608
[[5095 231]
[1349 2536]]
>>> SHARPENED: sigma=7, alpha=2.00
0.830420149821
[[5086 240]
[1322 2563]]
>>> SHARPENED: sigma=7, alpha=2.50
0.831288676582
[[5084 242]
[1312 2573]]
>>> SHARPENED: sigma=7, alpha=3.00
0.830854413202
[[5076 250]
[1308 2577]]
>>> SHARPENED: sigma=7, alpha=3.50
0.831288676582
[[5072 254]
[1300 2585]]
>>> SHARPENED: sigma=7, alpha=4.00
0.831722939963
[[5071 255]
[1295 2590]]
>>> SHARPENED: sigma=7, alpha=4.50
0.831940071653
[[5071 255]
[1293 2592]]
>>> SHARPENED: sigma=7, alpha=5.00
0.832265769189
[[5071 255]
[1290 2595]]
>>> SHARPENED: sigma=7, alpha=5.50
0.832482900879
[[5071 255]
[1288 2597]]
>>> SHARPENED: sigma=7, alpha=6.00
0.832808598415
[[5071 255]
[1285 2600]]
>>> SHARPENED: sigma=7, alpha=6.50
0.832808598415
[[5070 256]
[1284 2601]]
>>> SHARPENED: sigma=7, alpha=7.00
0.832808598415
[[5070 256]
[1284 2601]]
>>> SHARPENED: sigma=7, alpha=7.50
0.832808598415
[[5070 256]
[1284 2601]]
>>> SHARPENED: sigma=7, alpha=8.00
0.833025730105
[[5071 255]
[1283 2602]]
>>> SHARPENED: sigma=7, alpha=8.50
0.833025730105
[[5068 258]
[1280 2605]]
>>> SHARPENED: sigma=7, alpha=9.00
0.83270003257
[[5065 261]
[1280 2605]]
>>> SHARPENED: sigma=7, alpha=9.50
0.832482900879
[[5064 262]
[1281 2604]]
>>> SHARPENED: sigma=7, alpha=10.00
0.832591466725
[[5064 262]
[1280 2605]]
>>> SHARPENED: sigma=7, alpha=10.50
0.832591466725
[[5064 262]
[1280 2605]]
>>> SHARPENED: sigma=7, alpha=11.00
0.832808598415
[[5064 262]
[1278 2607]]
>>> SHARPENED: sigma=7, alpha=11.50
0.83270003257
[[5063 263]
[1278 2607]]
>>> SHARPENED: sigma=7, alpha=12.00
0.83270003257
[[5063 263]
[1278 2607]]
>>> SHARPENED: sigma=7, alpha=12.50
0.83270003257
[[5063 263]
[1278 2607]]
>>> SHARPENED: sigma=7, alpha=13.00
0.832808598415
[[5062 264]
[1276 2609]]
>>> SHARPENED: sigma=7, alpha=13.50
0.832808598415
[[5062 264]
[1276 2609]]
>>> SHARPENED: sigma=7, alpha=14.00
0.83291716426
[[5061 265]
[1274 2611]]
>>> SHARPENED: sigma=7, alpha=14.50
0.832808598415
[[5060 266]
[1274 2611]]
>>> SHARPENED: sigma=7, alpha=15.00
0.83270003257
[[5059 267]
[1274 2611]]
Process finished with exit code 0
''' | 3.09375 | 3 |
bip_utils/bip/bip38/__init__.py | MIPPLTeam/bip_utils | 0 | 12764140 | from bip_utils.bip.bip38.bip38_addr import Bip38PubKeyModes
from bip_utils.bip.bip38.bip38_ec import Bip38EcKeysGenerator
from bip_utils.bip.bip38.bip38 import Bip38Decrypter, Bip38Encrypter
| 1.0625 | 1 |
test/test_photokeeper.py | virantha/photokeeper | 0 | 12764141 | <reponame>virantha/photokeeper<filename>test/test_photokeeper.py
import photokeeper.photokeeper as P
import pytest
import os
import logging
import smtplib
from mock import Mock
from mock import patch, call
from mock import MagicMock
from mock import PropertyMock
class Testphotokeeper:
    """pytest-style test fixture for photokeeper.PhotoKeeper."""

    def setup(self):
        # pytest calls setup() before each test method, so every test gets
        # a fresh PhotoKeeper instance in self.p.
        self.p = P.PhotoKeeper()
| 1.765625 | 2 |
setup.py | moonbings/synthtiger | 1 | 12764142 | <filename>setup.py
import os
from setuptools import find_packages, setup
ROOT = os.path.abspath(os.path.dirname(__file__))
def read_version():
    """Extract ``__version__`` from synthtiger/_version.py without importing it.

    Executing the file in an isolated namespace avoids importing the package
    (and its dependencies) at build time.
    """
    version_file = os.path.join(ROOT, "synthtiger", "_version.py")
    namespace = {}
    with open(version_file, "r", encoding="utf-8") as fp:
        exec(fp.read(), namespace)
    return namespace["__version__"]
def read_long_description():
    """Return the contents of README.md, used as the PyPI long description."""
    readme = os.path.join(ROOT, "README.md")
    with open(readme, "r", encoding="utf-8") as fp:
        return fp.read()
# Build/packaging metadata for the `synthtiger` distribution.
# `version` and `long_description` are read from the source tree at build time.
setup(
    name="synthtiger",
    version=read_version(),
    description="Synthetic text image generator for OCR model",
    long_description=read_long_description(),
    long_description_content_type="text/markdown",
    author="<NAME>, <NAME>, <NAME>, <NAME>",
    url="https://github.com/clovaai/synthtiger",
    license="MIT",
    packages=find_packages(exclude=["tests"]),
    include_package_data=True,
    python_requires=">=3.6",
    install_requires=[
        "arabic-reshaper",
        "blend-modes",
        "fonttools",
        "imgaug",
        "numpy",
        "opencv-python",
        "pillow>=8.2.0",
        "pygame",
        "python-bidi",
        "pytweening",
        "pyyaml",
        "regex",
        "scipy",
    ],
    # Installs the `synthtiger` command-line entry point.
    entry_points={
        "console_scripts": [
            "synthtiger = synthtiger.main:main",
        ],
    },
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Scientific/Engineering :: Image Processing",
        "Topic :: Scientific/Engineering :: Image Recognition",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| 1.625 | 2 |
programmers/P49994.py | hanjoondev/zb-study | 0 | 12764143 | <gh_stars>0
from time import perf_counter_ns as ns
def solution(dirs):
    """Count distinct road segments walked on an 11x11 grid (coords -5..5).

    Each character of `dirs` moves one step Up/Down/Right/Left; moves that
    would leave the grid are ignored.  A segment between two adjacent points
    counts once regardless of the direction it was traversed in.
    """
    moves = {'U': (1, 0), 'D': (-1, 0), 'R': (0, 1), 'L': (0, -1)}
    visited = set()
    row, col = 0, 0
    for command in dirs:
        delta_r, delta_c = moves[command]
        nxt = (row + delta_r, col + delta_c)
        if abs(nxt[0]) <= 5 and abs(nxt[1]) <= 5:
            # Canonicalize the edge by sorting its endpoints so that
            # A->B and B->A map to the same set entry.
            visited.add(tuple(sorted(((row, col), nxt))))
            row, col = nxt
    return len(visited)
if __name__ == '__main__':
    ITERATIONS = 1_000
    print(f'Running the basic tests {ITERATIONS:,} times...')
    # (input, expected segment count) pairs from the problem statement.
    tests = (
        ("ULURRDLLU", 7),
        ("LULLLLLLU", 7)
    )
    for directions, expected in tests:
        print(f'solution("{directions}") returned', end=' ')
        if (result := solution(directions)) == expected:
            print(f'the expected result {expected}', end=' ')
            # Micro-benchmark: time ITERATIONS runs with perf_counter_ns
            # (imported as `ns`) and report avg/min/max in microseconds.
            fastest = float('inf')
            slowest = total = 0
            for _ in range(ITERATIONS):
                start = ns()
                solution(directions)
                end = ns()
                time = end - start
                fastest, slowest = min(time, fastest), max(time, slowest)
                total += time
            print(f'in an average of {total / ITERATIONS / 1e3:,.2f}μs '
                  f'(min: {fastest / 1e3:,.2f}μs, '
                  f'max: {slowest / 1e3:,.2f}μs)')
        else:
            print(f'a wrong result {result} (expected: {expected})')
| 2.75 | 3 |
maya/app/renderSetup/model/dragAndDropBehavior.py | arjun-namdeo/py_stubs | 0 | 12764144 | """
This module provides the drag and drop base and concrete classes.
They are derived from the API class MPxDragAndDropBehavior and are
used to handle the connections to make when the user drops a node
or an attribute on to another node or attribute.
The interface to implement for each derived class is the following:
- shouldBeUsedFor(sourceNode, destinationNode, sourcePlug, destinationPlug) -> bool
# Returns True if the class should be used for this connection
- connectAttrToAttr(sourcePlug, destinationPlug, force) -> None
# Create all connections needed when sourcePlug is dropped on destinationPlug
- connectAttrToNode(sourcePlug, destinationNode, force) -> None
# Create all connections needed when sourcePlug is dropped on destinationNode
- connectNodeToAttr(sourceNode, destinationPlug, force) -> None
# Create all connections needed when sourceNode is dropped on destinationPlug
- connectNodeToNode(sourceNode, destinationNode, force) -> None
# Create all connections needed when sourceNode is dropped on destinationNode
"""
from maya.app.renderSetup.model.connectionOverride import *
class _MPxDragAndDropBehavior(object):
    """
    This is the base class for user defined drag and drop behaviors.
    """
    # NOTE: auto-generated API stub -- every method body below is `pass`;
    # the real implementations live in Maya's native runtime.  The
    # docstrings are the authoritative description of the override contract.
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def connectAttrToAttr(*args, **kwargs):
        """
        connectAttrToAttr(sourcePlug, destinationPlug, force) -> None
        This method is called by the defaultNavigation command to connect a source attribute to a destination attribute.
        If this method is overidden it should attempt to determine what the user probably wants this connection to be, and set up the connection appropriately. If the force argument is true it is intended to notify the user to break any existing connections to the plug, similar to what the mel command 'connectAttr' -f flag is used for.
        * sourcePlug (MPlug) - Source plug in the connection.
        * destinationPlug (MPlug) - Destination plug in the connection.
        * force (bool) - Tells whether or not to break any existing connections to the destination attribute.
        """
        pass
    def connectAttrToNode(*args, **kwargs):
        """
        connectAttrToNode(sourcePlug, destinationNode, force) -> None
        This method is called by the defaultNavigation command to connect a source attribute to a destination node.
        You should override this method if you can determine from the type of source node and attribute and the type of destination node what the user is trying to do and you know the appropriate connections that must be made for the end result to be what the user expects.
        * sourcePlug (MPlug) - Source plug in the connection.
        * destinationNode (MObject) - Destination node for the connection.
        * force (bool) - Tells whether or not to break any existing connections to the destination node.
        """
        pass
    def connectNodeToAttr(*args, **kwargs):
        """
        connectNodeToAttr(sourceNode, destinationPlug, force) -> None
        This method is called by the defaultNavigation command to connect a source node to a destination attribute.
        You should override this method if you can determine from the type of source node and the type of destination node and attribute what the user is trying to do and you know the appropriate connections that must be made for the end result to be what the user expects.
        * sourceNode (MObject) - Source node in the connection.
        * destinationPlug (MPlug) - Destination plug for the connection.
        * force (bool) - Tells whether or not to break any existing connections to the destination attribute.
        """
        pass
    def connectNodeToNode(*args, **kwargs):
        """
        connectNodeToNode(sourceNode, destinationNode, force) -> None
        This method is called by the defaultNavigation command to connect a source node to a destination node.
        You should override this method if you can determine from the type of source node and the type of destination node what the user is trying to do and you know the appropriate connections that must be made for the end result to be what the user expects.
        * sourceNode (MObject) - Source node in the connection.
        * destinationNode (MObject) - Destination node for the connection.
        * force (bool) - Tells whether or not to break any existing connections to the destination node.
        """
        pass
    def shouldBeUsedFor(*args, **kwargs):
        """
        shouldBeUsedFor(sourceNode, destinationNode, sourcePlug, destinationPlug) -> bool
        This method must be overridden in order to use a drag and drop behavior.
        The overridden method will be called by the defaultNavigation command to determine wether or not to use this drag and drop behavior to finish a connection. If the user would like to handle the connection between sourceNode/Plug and destinationNode/Plug then this routine must pass back true, otherwise the routine must pass back false in order for the default connection mechanism to work between these two nodes. sourcePlug and destinationPlug may be null depending on if there were any attributes given in the drag and drop. Use the isNull() method on MPlug to assure the plugs are valid.
        * sourceNode (MObject) - The source node of the drag and drop or the node being dragged.
        * destinationNode (MObject) - the destination node of the drag and drop or the node being dropped upon.
        * sourcePlug (MPlug) - The source plug of the drag and drop or the plug being dragged (this may be null).
        * destinationPlug (MPlug) - The destination plug of the drag and drop or the plug being dropped upon (this may be null).
        """
        pass
    # Instantiation is disabled in the stub; Maya constructs these objects.
    __new__ = None
class DragAndDropBehavior(_MPxDragAndDropBehavior):
    """
    Base class for drag and drop behavior for render setup nodes.
    """
    # NOTE: auto-generated stub.  The helpers below are declared without
    # `self`, exactly as in the generated source; all bodies are `pass`.
    def connect(sourcePlug, destinationPlug):
        """
        Try to connect two plugs and catch any plug type mismatches.
        """
        pass
    def findCandidatePlug(sourceNode, destinationPlug):
        """
        Return a plug to the first matching attribute in the candidate list.
        If no attribute is found, None is returned.
        """
        pass
    def findNode(node, typeId=4, acceptor=None):
        """
        Find a node of given type in a network, starting with the given node
        and searching downstream if needed. If an acceptor, user defined callable,
        is given we use that to accept or reject nodes during the search.
        The acceptor signature should be: func(MObject) -> bool
        """
        pass
    def isMatchingClass(node, classificationString):
        """
        Returns True if the given node has a matching classification string.
        """
        pass
    def raiseWarning(msg):
        """
        Give a warning message to the user to avoid raising an exception here.
        """
        pass
    __dict__ = None
    __weakref__ = None
    # Class-level data stubs; the real values are populated by the runtime.
    kAttributeCandidates = []
    kErrorMsg_IncompatibleTypes = []
    kErrorMsg_NoAttributeFound = []
    kErrorMsg_NoShadingGroup = []
    kErrorMsg_NoShadingGroupFound = []
    kErrorMsg_NoSurfaceShader = []
    kErrorMsg_NoSurfaceShaderFound = []
class ConnectionOverrideDragAndDrop(DragAndDropBehavior):
    """
    Class handling drag and drop for connection override nodes.
    """
    # NOTE: auto-generated stub; all bodies are `pass`.
    def __init__(self):
        pass
    def connectAttrToAttr(self, sourcePlug, destinationPlug, force):
        """
        Handle connection requests from source attribute to destination attribute.
        """
        pass
    def connectAttrToNode(self, sourcePlug, destinationNode, force):
        """
        Handle connection requests from source attribute to destination node.
        """
        pass
    def connectNodeToAttr(self, sourceNode, destinationPlug, force):
        """
        Handle connection requests from source node to destination attribute.
        """
        pass
    def connectNodeToNode(self, sourceNode, destinationNode, force):
        """
        Handle connection requests from source node to destination node.
        """
        pass
    def shouldBeUsedFor(self, sourceNode, destinationNode, sourcePlug, destinationPlug):
        """
        Return True if the given nodes/plugs are handled by this class.
        """
        pass
    def creator():
        # Factory hook used when registering the behavior with Maya.
        pass
    # Node types skipped while searching shading networks.
    kNodeSearchIgnoreList = []
    # Registered type name of this drag-and-drop behavior.
    kTypeName = 'connectionOverrideDragAndDrop'
| 2.5 | 2 |
practice/coursera/p03_dynamic_prog/p03_2_primitive_calculator.py | deehzee/dsalgo | 0 | 12764145 | # Uses python3
from collections import deque
import sys
### Solution 0: Bruteforce ###
def optimal_sequence_recursive(n):
    """Bruteforce: shortest op sequence 1..n using +1, *2, *3 (exponential).

    Candidates are tried in the order n//3, n//2, n-1; ties keep the
    earliest candidate, matching the original strict-less comparisons.
    """
    if n == 1:
        return [1]
    candidates = []
    if n % 3 == 0:
        candidates.append(n // 3)
    if n % 2 == 0:
        candidates.append(n // 2)
    candidates.append(n - 1)
    best = None
    for m in candidates:
        seq = optimal_sequence_recursive(m)
        if best is None or len(seq) < len(best):
            best = seq
    best.append(n)
    return best
### Solution 2: Memoization ###
def memoize(func):
    """Memoization decorator for single-argument functions.

    Fixes over the original: removes the leftover debug
    ``print('calculating', n)`` that polluted stdout on every cache miss,
    and applies ``functools.wraps`` so the wrapper keeps the wrapped
    function's name and docstring.
    """
    from functools import wraps  # local import: keeps the block self-contained
    cache = {}

    @wraps(func)
    def memoized_func(n):
        # Compute on first request only; later calls hit the cache.
        if n not in cache:
            cache[n] = func(n)
        return cache[n]

    return memoized_func
# optimal_sequence_memoize = memoize(optimal_sequence_recursive)
@memoize
def optimal_sequence_memoize(n):
    """Memoized search for the shortest +1 / *2 / *3 sequence from 1 to n.

    Bug fix: the original did ``opt_seq.append(n)`` on the very list object
    returned by a memoized sub-call, mutating the value stored in the cache
    and corrupting every later lookup of that sub-problem (e.g. the cached
    result for 1 silently became [1, 2]).  The result is now built as a new
    list so cached sub-results are never mutated.
    """
    if n == 1:
        return [1]
    n_ops = n + 1  # upper bound; always beaten by the n-1 branch
    opt_seq = None
    if n % 3 == 0:
        seq = optimal_sequence_memoize(n // 3)
        if len(seq) + 1 < n_ops:
            n_ops = len(seq) + 1
            opt_seq = seq
    if n % 2 == 0:
        seq = optimal_sequence_memoize(n // 2)
        if len(seq) + 1 < n_ops:
            n_ops = len(seq) + 1
            opt_seq = seq
    if n > 1:
        seq = optimal_sequence_memoize(n - 1)
        if len(seq) + 1 < n_ops:
            n_ops = len(seq) + 1
            opt_seq = seq
    # New list: never mutate a cached sub-result.
    return opt_seq + [n]
### Solution 3: Dynamic Programming ###
def optimal_sequence_dynprog(n):
    """Bottom-up DP: tabulate the minimal op count for every value 1..n,
    then backtrack through the table with `unwind` to recover the path."""
    min_ops = [n + 1] * (n + 1)
    min_ops[0] = min_ops[1] = 0
    for value in range(2, n + 1):
        # Best of the three predecessors: value-1, value/2, value/3.
        best = min_ops[value - 1] + 1
        if value % 2 == 0:
            best = min(best, min_ops[value // 2] + 1)
        if value % 3 == 0:
            best = min(best, min_ops[value // 3] + 1)
        min_ops[value] = best
    return unwind(min_ops)
def unwind(nops):
    """Backtrack through the op-count table, returning the value path
    1..n in ascending order (as a reversed-list iterator).

    At each step the first predecessor whose count is exactly one less is
    taken, preferring /3, then /2, then -1.
    """
    path = []
    i = len(nops) - 1
    while i > 0:
        path.append(i)
        if i % 3 == 0 and nops[i] == nops[i // 3] + 1:
            i //= 3
        elif i % 2 == 0 and nops[i] == nops[i // 2] + 1:
            i //= 2
        elif i == 1 or nops[i] == nops[i - 1] + 1:
            i -= 1
    return reversed(path)
### Main ###

# Active implementation; swap in optimal_sequence_recursive or
# optimal_sequence_memoize here to compare the three approaches.
optimal_sequence = optimal_sequence_dynprog


def main():
    """Read n from stdin; print the op count and the ascending sequence."""
    n = int(input())
    sequence = list(optimal_sequence(n))
    print(len(sequence) - 1)
    for x in sequence:
        print(x, end=' ')
    print()


if __name__ == '__main__':
    main()
| 3.734375 | 4 |
tensorflow_datasets/core/_sharded_files_test.py | shikhar2707/datasets | 7 | 12764146 | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for tensorflow_datasets.core._sharded_files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.core import _sharded_files
class GetReadInstructionsTest(testing.TestCase):
  """Tests for `_sharded_files.get_read_instructions`.

  Each instruction dict names a shard file with a (skip, take) pair;
  take == -1 means "read that shard to its end".
  """

  def test_read_all_even_sharding(self):
    # Even sharding
    # Reading the full [0, 12) range over equal shards yields one
    # whole-shard instruction per file.
    res = _sharded_files.get_read_instructions(
        0, 12, ["f1", "f2", "f3"], [4, 4, 4])
    self.assertEqual(res, [
        {"filename": "f1", "skip": 0, "take": -1},
        {"filename": "f2", "skip": 0, "take": -1},
        {"filename": "f3", "skip": 0, "take": -1},
    ])

  def test_read_all_empty_shard(self):
    # Empty shards ("f3") are dropped from the instruction list.
    res = _sharded_files.get_read_instructions(
        0, 12, ["f1", "f2", "f3", "f4"], [4, 4, 0, 4])
    self.assertEqual(res, [
        {"filename": "f1", "skip": 0, "take": -1},
        {"filename": "f2", "skip": 0, "take": -1},
        {"filename": "f4", "skip": 0, "take": -1},
    ])

  def test_from1_to10(self):
    # A sub-range [1, 10) skips into the first shard and truncates the last.
    res = _sharded_files.get_read_instructions(
        1, 10, ["f1", "f2", "f3", "f4"], [4, 4, 0, 4])
    self.assertEqual(res, [
        {"filename": "f1", "skip": 1, "take": -1},
        {"filename": "f2", "skip": 0, "take": -1},
        {"filename": "f4", "skip": 0, "take": 2},
    ])

  def test_nothing_to_read(self):
    # Empty ranges produce no instructions, wherever they fall.
    res = _sharded_files.get_read_instructions(
        0, 0, ["f1", "f2", "f3", "f4"], [0, 3, 0, 2])
    self.assertEqual(res, [])
    res = _sharded_files.get_read_instructions(
        4, 4, ["f1", "f2", "f3", "f4"], [0, 3, 0, 2])
    self.assertEqual(res, [])
    res = _sharded_files.get_read_instructions(
        5, 5, ["f1", "f2", "f3", "f4"], [0, 3, 0, 2])
    self.assertEqual(res, [])
# Allow running this test module directly.
if __name__ == "__main__":
  testing.test_main()
| 2.1875 | 2 |
source/pkgsrc/benchmarks/glmark2/patches/patch-waflib_Tools_c__config.py | Scottx86-64/dotfiles-1 | 1 | 12764147 | $NetBSD: patch-waflib_Tools_c__config.py,v 1.1 2019/12/22 22:21:58 joerg Exp $
When detecting the C++ compiler, force C++ mode for stdin as the wrappers
add -std=c++11 and that breaks for C input.
--- waflib/Tools/c_config.py.orig 2019-12-21 22:11:24.000906920 +0000
+++ waflib/Tools/c_config.py
@@ -632,7 +632,7 @@ def cxx_load_tools(conf):
conf.load('cxx')
@conf
def get_cc_version(conf,cc,gcc=False,icc=False,clang=False):
- cmd=cc+['-dM','-E','-']
+ cmd=cc+(['-x','c++']if cc[0].endswith('+')else[])+['-dM','-E','-']
env=conf.env.env or None
try:
out,err=conf.cmd_and_log(cmd,output=0,input='\n'.encode(),env=env)
| 1.351563 | 1 |
gluonar/data/dataset.py | haoxintong/gluon-audio | 9 | 12764148 | <gh_stars>1-10
# MIT License
#
# Copyright (c) 2019 haoxintong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Audio Recognition Dataset"""
import os
import warnings
import numpy as np
from mxnet import nd
from mxnet.gluon.data import Dataset
# Pick an audio decoder at import time: prefer PyAV, fall back to librosa.
try:
    from av import container

    def _load(path):
        # Decode every frame with PyAV and concatenate the raw sample planes
        # into one 1-D waveform, then wrap it as an mxnet NDArray.
        # NOTE(review): this path does NOT resample -- confirm source files
        # are already at the sampling rate callers expect.
        fin = container.open(path)
        audio_frames = [frame for frame in fin.decode()]
        audios = list(map(lambda x: np.frombuffer(x.planes[0], format_dtypes[x.format.name],
                                                  x.samples), audio_frames))
        audio = np.concatenate(audios, axis=0)
        return nd.array(audio)
except ImportError:
    import librosa as rosa

    def _load(path):
        # Fallback decoder: librosa loads and resamples to 16 kHz mono.
        audio = rosa.load(path, sr=16000)[0]
        return nd.array(audio)
__all__ = ["VoxAudioFolderDataset", "VoxAudioValFolderDataset"]

# PyAV sample-format name -> numpy dtype string (little-endian where sized).
# The trailing-'p' names are planar layouts of the same sample type.
format_dtypes = {
    'dbl': '<f8',
    'dblp': '<f8',
    'flt': '<f4',
    'fltp': '<f4',
    's16': '<i2',
    's16p': '<i2',
    's32': '<i4',
    's32p': '<i4',
    'u8': 'u1',
    'u8p': 'u1',
}

# Full-scale divisors for normalizing integer PCM samples to [-1.0, 1.0].
# NOTE(review): not referenced by the code visible in this module chunk --
# presumably used by callers elsewhere in the package; confirm before removing.
format_scale = {
    'flt': 1,
    'fltp': 1,
    's16': 2 ** 15,
    's16p': 2 ** 15,
    's32': 2 ** 31,
    's32p': 2 ** 31
}
class VoxAudioFolderDataset(Dataset):
    """Folder-structured VoxCeleb-style audio dataset.

    Expects ``root/<speaker>/<session>/<clip>.{wav,m4a}``; each speaker
    folder becomes one integer class label (sorted folder order).

    Parameters
    ----------
    root : string
        path to vox root.
    sr : int, default is 16k.
        target sampling rate
        NOTE(review): used only as a length threshold here; the PyAV
        ``_load`` path does not resample -- confirm files match this rate.
    min_length: int, default is 3.
        min length audio required (seconds).
    transform : callable, default None
        applied as ``transform(audio, label)`` before returning a sample.
    """

    def __init__(self, root, sr=16000, min_length=3, transform=None):
        self._root = os.path.expanduser(root)
        self._sr = sr
        self._transform = transform
        self._min_length = min_length
        self._exts = ['.wav', '.m4a']
        self._list_audios(self._root)
        self.num_classes = len(self.synsets)

    def _list_audios(self, root):
        # Walk root/<speaker>/<session>/<file> and collect (path, label)
        # items; non-directories and unsupported extensions are warned about
        # and skipped.
        self.synsets = []
        self.items = []
        for folder in sorted(os.listdir(root)):
            path = os.path.join(root, folder)
            if not os.path.isdir(path):
                warnings.warn('Ignoring %s, which is not a directory.' % path, stacklevel=3)
                continue
            label = len(self.synsets)
            self.synsets.append(folder)
            for subfolder in sorted(os.listdir(path)):
                subpath = os.path.join(path, subfolder)
                if not os.path.isdir(subpath):
                    warnings.warn('Ignoring %s, which is not a directory.' % subpath, stacklevel=3)
                    continue
                for filename in sorted(os.listdir(subpath)):
                    filename = os.path.join(subpath, filename)
                    ext = os.path.splitext(filename)[1]
                    if ext.lower() not in self._exts:
                        warnings.warn('Ignoring %s of type %s. Only support %s' % (
                            filename, ext, ', '.join(self._exts)))
                        continue
                    self.items.append((filename, label))

    def __getitem__(self, idx):
        # Resample a random index until the decoded clip has at least
        # sr * min_length samples.
        # NOTE(review): loops forever if no clip is long enough -- confirm
        # the dataset always contains sufficiently long clips.
        while True:
            audio = _load(self.items[idx][0])
            if audio.shape[0] < self._sr * self._min_length:
                idx = np.random.randint(low=0, high=len(self))
                continue
            label = self.items[idx][1]
            if self._transform is not None:
                return self._transform(audio, label)
            return audio, label

    def __len__(self):
        return len(self.items)
class VoxAudioValFolderDataset(Dataset):
    """Verification-pair dataset driven by a list file.

    Each line of the list file reads ``<issame> <path_a> <path_b>``; the two
    audio files (relative to *root*) are loaded and returned together with
    the integer same-speaker flag.

    Parameters
    ----------
    lst_path : str. Path of Val Audio list.
    root : str. Path to face folder. Default is '$(HOME)/.mxnet/datasets/sound'
    transform : callable, default None
        A function that takes data and transforms them:
        ::
            transform = lambda data: data.astype(np.float32)/255
    """

    def __init__(self, lst_path, root=os.path.expanduser('~/.mxnet/datasets/sound'),
                 sr=16000, transform=None):
        super().__init__()
        self._sr = sr
        self._transform = transform
        self._items = []
        self._issame_list = []
        with open(lst_path, 'r') as list_file:
            for line in list_file:
                parts = line.strip().split(" ")
                self._issame_list.append(int(parts[0]))
                pair = (os.path.join(root, parts[1]),
                        os.path.join(root, parts[2]))
                self._items.append(pair)

    def __getitem__(self, idx):
        left_path, right_path = self._items[idx]
        audio0 = _load(left_path)
        audio1 = _load(right_path)
        if self._transform is not None:
            audio0 = self._transform(audio0)
            audio1 = self._transform(audio1)
        return (audio0, audio1), self._issame_list[idx]

    def __len__(self):
        return len(self._items)
class TIMITDataset(Dataset):
    """Load the TIMIT corpus laid out as ``root/{TRAIN,TEST}/<dialect>/<speaker>/<clip>``.

    Every speaker folder becomes one class label; every supported audio file
    inside it is one sample.

    Parameters
    ----------
    root : string
        path to the TIMIT root, containing the ``TRAIN`` and ``TEST`` folders.
    is_train : bool, default True
        select the TRAIN split when True, otherwise TEST.
    sr : int, default is 16000.
        target sampling rate (assumed to match what ``_load`` produces --
        TODO confirm against the loader).
    min_length : int, default is 3.
        minimum clip length, in seconds, a sample must have to be returned.
    transform : callable, default None
        applied as ``transform(audio, label)`` when given.
    """

    def __init__(self, root, is_train=True, sr=16000, min_length=3, transform=None):
        self._sr = sr
        self._transform = transform
        self._min_length = min_length
        self._exts = ['.wav', '.m4a']
        _root = os.path.expanduser(root)
        self._root = os.path.join(_root, 'TRAIN' if is_train else 'TEST')
        self._list_audios(self._root)
        self.num_classes = len(self.synsets)

    def _list_audios(self, root):
        """Walk root/<dialect>/<speaker>/<clip> and collect (path, label) pairs."""
        self.synsets = []
        self.items = []
        for folder_dr in sorted(os.listdir(root)):
            path = os.path.join(root, folder_dr)
            if not os.path.isdir(path):
                warnings.warn('Ignoring %s, which is not a directory.' % path, stacklevel=3)
                continue
            for folder_idt in sorted(os.listdir(path)):
                audio_root = os.path.join(path, folder_idt)
                if not os.path.isdir(audio_root):
                    # BUG FIX: previously this warning reported `path` (the
                    # parent dialect folder) instead of the entry being skipped.
                    warnings.warn('Ignoring %s, which is not a directory.' % audio_root, stacklevel=3)
                    continue
                # One label per speaker folder.
                label = len(self.synsets)
                self.synsets.append(folder_idt)
                for fn in sorted(os.listdir(audio_root)):
                    sound_fp = os.path.join(audio_root, fn)
                    ext = os.path.splitext(sound_fp)[-1]
                    if ext.lower() not in self._exts:
                        # Non-audio files are skipped silently (unlike the Vox
                        # datasets, which warn) -- kept as-is deliberately.
                        continue
                    self.items.append((sound_fp, label))

    def __getitem__(self, idx):
        # Re-draw a random index until a clip of at least sr * min_length
        # samples is found, mirroring VoxAudioFolderDataset.
        while True:
            audio = _load(self.items[idx][0])
            if audio.shape[0] < self._sr * self._min_length:
                idx = np.random.randint(low=0, high=len(self))
                continue
            label = self.items[idx][1]
            if self._transform is not None:
                return self._transform(audio, label)
            return audio, label

    def __len__(self):
        return len(self.items)
| 2.015625 | 2 |
rnbgrader/tests/test_tmpdirs.py | matthew-brett/rnbgrader | 0 | 12764149 | <gh_stars>0
""" Test tmpdirs module """
from __future__ import division, print_function, absolute_import
from os import unlink
from os.path import isfile, isdir
from ..tmpdirs import in_dtemp, dtemporize
def test_in_dtemp():
    # The context manager must run the body inside a scratch directory and
    # remove that directory once the context exits.
    with in_dtemp() as scratch_dir:
        with open('test.txt', 'wt') as stream:
            stream.write('Some text')
    assert not isdir(scratch_dir)
def test_dtmeporize():
    # NOTE(review): the function name is a long-standing typo for
    # "dtemporize"; kept so the test retains its historical identifier.
    def write_marker():
        with open('test.txt', 'wt') as stream:
            stream.write('Some text')

    # Same body, but wrapped so it executes inside its own temporary
    # directory instead of the current one.
    write_marker_elsewhere = dtemporize(write_marker)

    with in_dtemp():
        # The plain function writes into the current (temporary) directory.
        write_marker()
        assert isfile('test.txt')
        unlink('test.txt')
        # The decorated version must write somewhere else, leaving the
        # current directory untouched.
        write_marker_elsewhere()
        assert not isfile('test.txt')
| 2.5625 | 3 |
polyaxon_schemas/ops/run/op.py | granularai/polyaxon-schemas | 0 | 12764150 | <reponame>granularai/polyaxon-schemas<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import fields
from polyaxon_schemas.ops.build_job import BuildSchema
from polyaxon_schemas.ops.operation import BaseOpConfig, BaseOpSchema
class BaseRunSchema(BaseOpSchema):
    """Marshmallow schema for run-type operations.

    Extends the base operation schema with an optional nested ``build``
    section (validated by BuildSchema).
    """
    build = fields.Nested(BuildSchema)
    @staticmethod
    def schema_config():
        """Return the config class this schema (de)serializes into."""
        return BaseRunConfig
class BaseRunConfig(BaseOpConfig):
    """Configuration object for run-type operations.

    Mirrors BaseRunSchema: extends BaseOpConfig with an optional ``build``
    section.  All other fields are forwarded unchanged to the base class.
    """
    SCHEMA = BaseRunSchema
    IDENTIFIER = 'run'
    # 'build' is appended to the base class's reduced attributes --
    # presumably excluded from serialized output when unset; confirm the
    # exact semantics in BaseOpConfig.
    REDUCED_ATTRIBUTES = BaseOpConfig.REDUCED_ATTRIBUTES + ['build']
    def __init__(self,
                 version=None,
                 kind=None,
                 logging=None,
                 name=None,
                 description=None,
                 tags=None,
                 environment=None,
                 params=None,
                 declarations=None,
                 inputs=None,
                 outputs=None,
                 build=None,
                 ):
        # Shared fields go to BaseOpConfig verbatim; only ``build`` is
        # stored on this subclass.
        super(BaseRunConfig, self).__init__(
            version=version,
            kind=kind,
            logging=logging,
            name=name,
            description=description,
            tags=tags,
            environment=environment,
            params=params,
            declarations=declarations,
            inputs=inputs,
            outputs=outputs,
        )
        self.build = build
| 1.945313 | 2 |