blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f6e079c0a48ee76d3298878b56e133898d647cb | e77eda0d87beb6a38b55f1d6d3a28f5090cb4fe3 | /lose/utils/ui/keys.py | 34a75d2d046b7031c40225ecc36857833eadbcaf | [
"Apache-2.0"
] | permissive | brianbruggeman/lose-7drl | 5dde91fd214e4355bffa5a8d1f3e9b07c5b9ecdf | 8921b464e82c8c7e6bf7cfebd4e8a3a5e290ac38 | refs/heads/master | 2021-03-24T12:20:44.802970 | 2017-03-12T04:21:55 | 2017-03-12T04:21:55 | 83,945,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,890 | py | # -*- coding: utf-8 -*-
import sys
import operator
from random import choice, random, randint
import tcod
from ..logger import get_logger
logger = get_logger(__name__)
# Map every tcod key constant (tcod.KEY_*) to a short lower-case name,
# e.g. tcod.KEY_ESCAPE -> 'escape'.  Built once at import time by scanning
# the tcod module namespace.
key_mapping = {
    getattr(tcod, d): d.lower().replace('key_', '')
    for d in dir(tcod)
    if d.startswith('KEY_')
}
def get_input_info(input):
    """Extract the populated public fields of a tcod input event.

    Returns a dict mapping field name -> value for every non-empty
    attribute of ``input.cdata``.  CFFI char / char[] payloads are
    decoded into plain Python strings before being returned.
    """
    # Public (non-underscore) attribute names exposed by the CFFI struct.
    fields = [field for field in dir(input.cdata) if not field.startswith('_')]
    info = {
        field: getattr(input.cdata, field)
        for field in fields
        if getattr(input.cdata, field, None)
    }
    # If we're using CFFI, char will be of type CDATA and will basically
    # point to a c-array of char types: char[1] The code below extracts
    # that data into something readable.
    char = info.get('c')
    if isinstance(char, bytes):
        char = ''.join(chr(_) for _ in char if _)
        info['c'] = char
    # If we're using CFFI, text will be of type CDATA and will basically
    # point to a c-array of char types: char[32] The code below extracts
    # that data into something readable.
    text = info.get('text')
    if text:
        text = ''.join(chr(v) for val in text for v in val if v)
        info['text'] = text
    return info
def get_key_string(key):
    """Return the combined modifier+key string (e.g. 'ctrl+c') for *key*."""
    _, key_string, _, _ = get_key_character(key)
    return key_string
def get_key_character(key, exact=False):
    """Translate a tcod key event into readable key strings.

    Args:
        key: tcod key event exposing ``vk``, ``c`` and modifier flags.
        exact: when True the combined string keeps the precise left/right
            modifier names (e.g. 'lctrl'); otherwise generalized names
            (e.g. 'ctrl') are used.

    Returns:
        Tuple ``(char, char_string, mods, gen_mods)`` where *char_string*
        is the '+'-joined modifier/key combination, e.g. 'shift+meta+d'.
    """
    mapped_key = key_mapping.get(key.vk)
    # NOTE(review): 'pressed' is coerced to 'escape'; presumably this works
    # around a tcod naming quirk -- confirm before changing.
    if mapped_key == 'pressed':
        mapped_key = 'escape'
    # A 'char' key means a printable key: use the actual character instead.
    char = mapped_key if mapped_key != 'char' else chr(key.c)
    # Normalize the Windows keys onto the generic 'meta' name.
    if char.endswith('win'):
        char = char.replace('win', 'meta')
    # Check modifiers
    mods = ['shift', 'lalt', 'lctrl', 'lmeta', 'rmeta', 'rctrl', 'ralt']
    found_mods = []
    for mod in mods:
        mod_value = getattr(key, mod, None)
        # Skip the modifier when it is the pressed key itself.
        if mod_value is True and mod != char:
            found_mods.append(mod)
    mods = found_mods
    # Generalize modifiers
    gen_mods = ['shift', 'alt', 'ctrl', 'meta', 'win']
    found_gen_mods = []
    for gen_mod in gen_mods:
        if any(mod.endswith(gen_mod) for mod in mods):
            # Bug fix: this used to read ``gen_mod == 'meta'`` -- a no-op
            # comparison instead of an assignment, so 'win' was never
            # folded into 'meta'.
            if gen_mod == 'win':
                gen_mod = 'meta'
            if gen_mod not in found_gen_mods:
                found_gen_mods.append(gen_mod)
    gen_mods = found_gen_mods
    # Create a string with gen_mods
    if not exact:
        char_string = '+'.join((*gen_mods, char))
    else:
        char_string = '+'.join((*mods, char))
    return char, char_string, mods, gen_mods
def handle_movement(game_state, position, event):
    """Try to move the player one tile in the direction named by *event*.

    Returns the (row, col) delta that was applied, or None when *event*
    is not a movement key or the move was blocked.
    """
    # Deltas are (row, col): 'up' decreases the row index.
    movement_mapping = {
        'up': (-1, 0),
        'down': (1, 0),
        'left': (0, -1),
        'right': (0, 1)
    }
    success = None
    # Mouse events arrive as dicts and never trigger movement.
    if isinstance(event, dict):
        return success
    mapped_update = movement_mapping.get(event)
    if mapped_update:
        # Element-wise addition of position and delta.
        updated_position = tuple(map(operator.add, position, mapped_update))
        if process_player_move(game_state, updated_position):
            success = mapped_update
    return success
def handle_combat(game_state, position, event):
    """Resolve a melee attack from the player toward direction *event*.

    Picks a random mob on the targeted adjacent tile, rolls a 60% chance
    to hit and deals 1-4 damage.  Dead mobs are removed from the tile.

    Returns True when an attack was attempted (hit or miss), otherwise
    None (no mob on the tile, or *event* is not a direction key).
    """
    level_map = game_state['current-level']
    combat_mapping = {
        'up': (-1, 0),
        'down': (1, 0),
        'left': (0, -1),
        'right': (0, 1)
    }
    success = None
    # Mouse events arrive as dicts and cannot trigger combat.
    if isinstance(event, dict):
        return success
    mapped_update = combat_mapping.get(event)
    if mapped_update:
        tile_position = tuple(map(operator.add, position, mapped_update))
        tile = level_map[tile_position]
        mobs = tile.get('mobs', [])
        if not mobs:
            return success
        # Attack a random mob occupying the targeted tile.
        mob = choice(mobs)
        mob_name = mob['display']['text']
        pct_to_hit = 60
        hit = (random() <= (pct_to_hit / 100))
        success = True
        if hit:
            damage = randint(1, 4)
            mob['health'] -= damage
            combat_msg = f'Player hit {mob_name} for {damage}.'
            # Bug fix: the original test was ``< 0``, so a mob reduced to
            # exactly 0 health survived; kill at zero or below.
            if mob['health'] <= 0:
                mob_index = tile['mobs'].index(mob)
                tile['mobs'].pop(mob_index)
                combat_msg = f'Player killed {mob_name}.'
                if not tile['mobs']:
                    tile.pop('mobs')
        else:
            combat_msg = 'Player missed.'
        logger.trace(combat_msg)
    return success
def handle_game_user_input(game_state):
    """Read one input event and apply it to *game_state*.

    Movement is tried first; 'q'/escape exits the process; shift+meta+d
    toggles debug mode; any other non-movement key is handed to combat.
    Returns the raw key only when movement succeeded and no other branch
    consumed the key; otherwise returns None.
    """
    user_key = wait_for_user_input()
    position = game_state['character-position']
    movement_diff = handle_movement(game_state, position, user_key)
    if movement_diff:
        game_state['round-updates']['character-movement'] = movement_diff
    # Quit keys terminate the whole process immediately.
    if user_key in ['q', 'escape']:
        sys.exit()
    if user_key in ['shift+meta+d', 'shift+meta+D']:
        game_state['debug'] = not game_state['debug']
    elif not movement_diff:
        # Not a move and not a hotkey: interpret as an attack attempt.
        character_action = handle_combat(game_state, position, user_key)
        game_state['character-action'] = character_action
    else:
        return user_key
def handle_keys(key_mapping):
    """Poll for a keypress and handle the fullscreen / exit bindings.

    *key_mapping* (this parameter shadows the module-level dict of the
    same name) must map 'fullscreen' and 'exit' to containers of tcod
    key codes.  Returns True when the exit key was pressed.

    NOTE(review): ``key_mapping.get(...)`` yields None for a missing
    entry, which would make the ``in`` test raise TypeError -- confirm
    callers always supply both bindings.
    """
    key = tcod.console_check_for_keypress()
    if key.vk in key_mapping.get('fullscreen'):
        # Alt+Enter: toggle fullscreen
        tcod.console_set_fullscreen(not tcod.console_is_fullscreen())
    elif key.vk in key_mapping.get('exit'):
        return True # exit game
def process_player_move(game_state, updated_player_position):
    """Apply a player move to *game_state* if the target tile allows it.

    Opens closed doors, refuses to enter tiles that hold mobs or are
    blocking (unless debug mode is on), and picks up any items on the
    tile.  Returns True when the player actually moved.
    """
    tile = game_state['current-level'][updated_player_position]
    # Prototype data for this tile type.
    tile_ref = game_state['tiles'][tile['name']]
    # Walking into a closed door opens it (the move itself is blocked).
    if tile_ref['name'] == 'closed-door':
        tile['name'] = 'open-door'
    mobs = tile.get('mobs')
    items = tile.get('items')
    moved = False
    # A tile may be blocking per-instance or per-type.
    blocking = tile.get('blocking') or tile_ref.get('blocking')
    if mobs:
        # Tiles with mobs are never entered; combat is handled elsewhere.
        pass
    elif not blocking or game_state['debug']:
        game_state['character-position'] = updated_player_position
        moved = True
    # NOTE(review): items are collected even when the move was blocked --
    # confirm this is intended rather than an indentation slip.
    if items:
        for item in items:
            game_state['player-inventory'].append(item)
        tile.pop('items')
    return moved
def wait_for_keypress(realtime=False):
    """Wait for (or, when *realtime*, poll) a keypress.

    Returns the combined modifier+key string produced by
    get_key_character(), e.g. 'ctrl+c'.
    """
    if realtime:
        # Non-blocking poll.
        key = tcod.console_check_for_keypress()
    else:
        # Block until a key arrives, discarding queued events first.
        key = tcod.console_wait_for_keypress(flush=True)
    char, char_string, mods, gen_mods = get_key_character(key)
    key_data = {
        'key': char_string,
        'val': key.vk,
        'pressed': key.pressed,
    }
    logger.trace(key_data)
    return char_string
def wait_for_user_input():
    """Poll tcod for one keyboard or mouse event.

    Returns an empty dict when nothing happened, the mouse-info dict for
    a mouse event, or the key string for a keypress.  A 'meta+text' key
    is replaced by its decoded text payload.
    """
    mouse = tcod.Mouse()
    key = tcod.Key()
    event_mask = tcod.EVENT_KEY_PRESS | tcod.EVENT_MOUSE
    tcod.sys_check_for_event(event_mask, key, mouse)
    mouse_info = get_input_info(mouse)
    key_info = get_input_info(key)
    # Any populated *button* field counts as a mouse event.
    mouse_button = any(mouse_info[field] for field in mouse_info if 'button' in field)
    val = {}
    if not key.pressed and not mouse_button:
        return val
    elif not key.pressed:
        val = mouse_info
    elif key.pressed:
        val = get_key_string(key)
        if val == 'meta+text':
            val = key_info['text']
    return val
| [
"Brian.M.Bruggeman@gmail.com"
] | Brian.M.Bruggeman@gmail.com |
0ff01722968ac8fe843ee54eaf63d28016723414 | ba2a05f20454bda428f140634bc602699f164fc4 | /00.SSAFY/1.first-semester/algorithm/APS_Basic/20190314/3750_digit_sum.py | 60bcca7f7a6d32798917a92f187b00ea604e7c9a | [] | no_license | snowink1137/TIL | 734da402e99afa52f1af4ef996a6b274b1bcce0b | 9e9c78eb0c892affc88e2d46e143cef98af743fb | refs/heads/master | 2023-01-08T18:26:34.311579 | 2021-11-14T11:04:22 | 2021-11-14T11:04:22 | 162,255,934 | 0 | 0 | null | 2023-01-07T11:09:09 | 2018-12-18T08:32:44 | Jupyter Notebook | UTF-8 | Python | false | false | 496 | py | import sys
# Repeatedly replace each input number with the sum of its decimal digits
# until a single digit (1-9) remains; print one '#<case> <digit>' line
# per test case.
sys.stdin = open('3750.txt', 'r')
T = int(input())  # number of test cases
result_list = []
for test_case in range(1, T+1):
    n = int(input())
    # Collapse n into its digit sum until it is a single digit.
    # NOTE(review): n == 0 would loop forever here -- presumably the
    # problem guarantees positive input; confirm against the task spec.
    while not 1 <= n <= 9:
        result = 0
        remain = n
        while True:
            if remain == 0 and n == 0:
                break
            remain = n % 10
            result += remain
            n = n // 10
        n = result
    result_list.append('#'+str(test_case))
    result_list.append(' '+str(n)+'\n')
print(''.join(result_list))
| [
"snowink1137@gmail.com"
] | snowink1137@gmail.com |
0be8fc99f29dfc8368be3b1a5f8c74fa377e33e5 | 9506059c37515ba00c2b9e804188f5bed896f7bd | /olfactorybulb/neuronunit/tests/__init__.py | e430ba4f9d83993ad61edf2184cd0f9f962605d5 | [
"MIT"
] | permissive | russelljjarvis/OlfactoryBulb | 241b41fcd642b7e91f8a5a087afd23df2103698a | af27b14f5c19c2b60845065b7f2d7da2c16f811d | refs/heads/master | 2022-07-21T22:42:03.058163 | 2020-05-15T00:01:22 | 2020-05-15T00:01:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,764 | py | # MOCKS for autodoc
import quantities as pq
# When Sphinx autodoc mocks the 'quantities' package, its unit objects
# come from the mock importer and are unusable; substitute plain floats
# so that importing this module still succeeds during doc builds.
if pq.mV.__class__.__module__ == 'sphinx.ext.autodoc.importer':
    pq.mV = pq.ms = pq.Hz = pq.nA = 1.0
# END MOCKS
from abc import abstractmethod
from neuronunit import capabilities as ncap
from neuronunit.tests.base import VmTest
from olfactorybulb.neuronunit.tests.utilities import get_APs, cache
from sciunit import capabilities as scap
from olfactorybulb.neuronunit import capabilities as obncap
from olfactorybulb.neuronunit.tests import publications
# When True, tracebacks captured while generating predictions are printed.
SHOW_ERRORS = False
class OlfactoryBulbCellTest(VmTest):
    """Base NeuronUnit test for olfactory-bulb cell models.

    Wraps the abstract generate_prediction_nocache() with transparent
    result caching (keyed on a hash of the model and the test) and
    provides a helper for deriving predictions from other tests.
    """
    @abstractmethod
    def generate_prediction_nocache(self, model):
        """Compute the prediction without consulting the cache (abstract)."""
        pass
    def generate_prediction(self, model):
        """Return the cached prediction for *model*, computing it on a miss.

        Any exception raised by the uncached computation is captured; the
        formatted traceback string is cached and returned in its place.
        """
        # import pydevd
        # pydevd.settrace('192.168.0.100', port=4200, suspend=False)
        result = self.fetch_cached(model)
        if result is None:
            # Check that self has all the required properties
            self.check_required_properties()
            # Perform the uncached test
            try:
                result = self.generate_prediction_nocache(model)
            except:
                import traceback
                result = traceback.format_exc()
                if SHOW_ERRORS:
                    print(result)
            # Store result in cache
            self.store_in_cache(model, result)
        return result
    def check_required_properties(self):
        """Raise when a property named in self.required_properties is missing."""
        if hasattr(self, "required_properties"):
            for prop in self.required_properties:
                if not hasattr(self, prop):
                    raise Exception("Property '" + prop + "' not found. Make sure the property is declared either in the"
                                    " generic test class or in the publication class.")
    def fetch_cached(self, model):
        """Return the cached result for this model/test pair, or None."""
        return cache.get(self.get_hash(model))
    def store_in_cache(self, model, result):
        """Persist *result* under this model/test pair's cache key."""
        cache.store(self.get_hash(model), result)
    def get_hash(self, model):
        # The cache key is a hash of the model and the test - we want to store the model-test_result combination
        model_hash = model.__hash__()
        self_hash = self.__hash__()
        return hash((model_hash, self_hash))
    def __hash__(self):
        # Hash by class name so all instances of a test share cache entries.
        return hash(self.__class__.__name__)
    def get_dependent_prediction(self, dependent_test_class_generic, model):
        """Combine this test's publication class with another generic test
        class into a temporary subclass and return its (cached) prediction.
        """
        # import pydevd
        # pydevd.settrace('192.168.0.100', port=4200)
        mro = self.__class__.mro()
        if len(mro) < 4:
            raise Exception("The test should be a class that inherits from an publications class"
                            "AND from a generic tests class, in that order. E.g. "
                            "'class MyTest(UrbanBurton2014, InputResistanceTest):'")
        # Create a temp class that inherits from the generic test and from the specific publication
        # Aways first parent class (by convention and to preserve inheritance)
        publication_class = mro[1]
        if not issubclass(publication_class, publications.BasePublication):
            raise Exception("The first parent class '"+str(publication_class)+"' of the test should be a publication class. E.g. 'class MyTest(UrbanBurton2014, InputResistanceTest):'")
        if not issubclass(dependent_test_class_generic, OlfactoryBulbCellTest):
            raise Exception("The second parent class '"+dependent_test_class_generic.__class__.__name__+"' of the test should be a class that inherits from OlfactoryBulbCellTest. E.g. 'class MyTest(UrbanBurton2014, InputResistanceTest):'")
        # Use SomeTestSomeAuthor1984 class name form - as descibed in BasePublication
        dependent_test_class_name = dependent_test_class_generic.__name__ + publication_class.__name__
        # Create the type dynamically
        dependent_test_class = type(dependent_test_class_name,
                                    (publication_class, dependent_test_class_generic),
                                    {})
        # Instantiate the dynamic class
        dependent_test = dependent_test_class()
        # Get the prediction (from cache if there)
        return dependent_test.generate_prediction(model)
class OlfactoryBulbCellSpikeTest(OlfactoryBulbCellTest):
required_capabilities = (ncap.ReceivesSquareCurrent,
ncap.ProducesMembranePotential,
scap.Runnable,
obncap.SupportsSettingTemperature,
obncap.SupportsSettingStopTime)
def get_aps(self, voltage):
return get_APs(voltage, self.ss_delay, self.threshold_method) | [
"jbirgio@gmail.com"
] | jbirgio@gmail.com |
31ee4365ba660e46efea90ad50e7e7dd715b39c8 | f2cece9e5f2af8482c12fc7ad8b3a7e63e6de052 | /tbot/api/admin/__init__.py | 87d22b20d424ea6d10aa8c1f5924ddd379bedce8 | [] | no_license | nikifkon-old/questionnaire_bot | beadc716ca0a7cbfa6a4c47039c00123e8892eb4 | 3cbf889c7edf4ba438ce7e46c5f9b67efe5d7e72 | refs/heads/master | 2023-04-24T07:12:28.227259 | 2020-08-03T09:14:35 | 2020-08-03T09:14:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | import flask_login as login
from tbot.models import Account
from tbot.utils import session_scope
# Initialize flask-login
def init_login(app):
    """Attach flask-login to *app* and register its user loader.

    NOTE(review): the DB session opened here is captured by the
    ``load_user`` closure and used long after the ``with`` block has
    exited -- confirm that ``session_scope`` leaves the session usable,
    otherwise every later user lookup will fail.
    """
    with session_scope() as session:
        login_manager = login.LoginManager()
        login_manager.init_app(app)
        # Create user loader function
        @login_manager.user_loader
        def load_user(user_id):
            # Resolve the stored session user id to an Account row.
            return session.query(Account).get(user_id)
| [
"kostya.nik.3854@gmail.com"
] | kostya.nik.3854@gmail.com |
c1507c1d668bf1ddc7979e99591b0ffae9c5d485 | db1b327c4913c453b2fdd9dda661938c4abc5c0e | /abc/94/D.py | e575890eacecafabd215188b428f103f2588b08a | [] | no_license | oamam/atcoder | 0c129aab72e3c7090c9799fdf52f6e8119ef5238 | 658054b69b7586eed896484535dcfa1fef498e43 | refs/heads/master | 2021-06-26T09:01:12.389266 | 2020-10-30T02:01:11 | 2020-10-30T02:01:11 | 165,225,322 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | def main():
n = int(input())
a = list(map(int, input().split()))
if n == 2:
print(max(a), min(a))
else:
ma = max(a)
sa = sorted([(i, abs(ma // 2 - a[i]))
for i in range(n)], key=lambda x: x[1])
print(ma, a[sa[0][0]])
main()
| [
"chapa0106@gmail.com"
] | chapa0106@gmail.com |
36f6573767ca8a136f8eaa40ce2f6a7af7735a99 | fbf73800e27f66960f677a284c2771e66708973b | /talk_lib/talk.py | 2282b1a9ff25df18016a1301fa7ef17cad68f73e | [
"MIT"
] | permissive | allankellynet/mimas | 94140a341693d4729b3cdf5ea94ef2f7e550aad6 | 10025d43bba9e84f502a266760786842e7158a05 | refs/heads/master | 2022-05-30T21:35:06.083902 | 2020-02-27T14:04:27 | 2020-02-27T14:04:27 | 235,146,506 | 0 | 0 | MIT | 2022-05-25T04:56:13 | 2020-01-20T16:30:39 | Python | UTF-8 | Python | false | false | 1,904 | py | #-----------------------------------------------------
# Mimas: conference submission and review system
# (c) Allan Kelly 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
# system imports
# framework imports
from google.appengine.ext import ndb
# app imports
from speaker_lib import speaker
# Recognised detail-field keys stored in Talk.details
SHORT_SYNOPSIS = "shortsynopsis"
LONG_SYNOPSIS = "longsynopsis"
class Talk(ndb.Model):
    """Datastore model for a conference talk submission.

    Free-form per-talk values live in the pickled ``details`` dict and
    are accessed through field()/set_field().  ``directory_listing``
    controls whether the talk appears in the public directory.
    """
    talk_title = ndb.StringProperty()
    details = ndb.PickleProperty()
    created = ndb.DateTimeProperty(auto_now_add=True)
    directory_listing = ndb.StringProperty()

    def __init__(self, *args, **kwargs):
        super(Talk, self).__init__(*args, **kwargs)
        self.talk_title = ""
        self.directory_listing = "Listed"
        self.details = {}

    def field(self, f):
        """Return the detail value stored under key *f*, or '' when absent."""
        # Bug fix: dict.has_key() is deprecated in Python 2 and removed in
        # Python 3; the 'in' operator is the equivalent, portable test.
        if f in self.details:
            return self.details[f]
        return ""

    def field_ascii(self, f):
        """Return field *f* encoded to ASCII with non-ASCII chars dropped."""
        return self.field(f).encode('ascii', 'ignore')

    def set_field(self, field, value):
        """Store *value* under detail key *field*."""
        self.details[field] = value

    @property
    def title(self):
        return self.talk_title

    @title.setter
    def title(self, t):
        self.talk_title = t

    def is_listed(self):
        """True when the talk appears in the public directory listing."""
        return "Listed" == self.directory_listing

    def hide_listing(self):
        self.directory_listing = "Not listed"

    def show_listing(self):
        self.directory_listing = "Listed"
def mk_talk(parent_key, title):
    """Create and persist a Talk under *parent_key*; return its ndb key."""
    t = Talk(parent=parent_key)
    t.talk_title = title
    t.put()
    return t.key
def all_user_talks_by_email(username):
    """Return every Talk owned by the speaker registered as *username*.

    Returns an empty dict (kept for backward compatibility with existing
    callers) when no such speaker exists.
    """
    if speaker.speaker_exists(username):
        owner = speaker.retreive_speaker(username)
        return Talk.query(ancestor=owner.key).fetch()
    return {}
def speaker_talks_by_key(speaker_key):
return Talk.query(ancestor=speaker_key).fetch() | [
"allan@allankelly.net"
] | allan@allankelly.net |
f920eae915049d1f03531e51cf0c975bd03f7079 | 20f3b4ee874e8b3e565f9ce60d4de8cab48d7d20 | /tests/basic-structs/test_memories.py | 9f0c3f45935af0fddd49af2d15ff8cdb1436a680 | [
"MIT"
] | permissive | pshchelo/kopf | ec968e6f11432f3728efb385bf18762676f7a5be | ab53ace82e62a6fa709bf5a580007eac1273ac34 | refs/heads/main | 2022-04-26T11:39:01.565811 | 2022-04-02T14:05:26 | 2022-04-02T14:05:26 | 221,041,880 | 0 | 0 | MIT | 2019-11-11T18:14:59 | 2019-11-11T18:14:58 | null | UTF-8 | Python | false | false | 1,599 | py | from unittest.mock import Mock
from kopf._cogs.structs.bodies import Body
from kopf._cogs.structs.ephemera import Memo
from kopf._core.reactor.inventory import ResourceMemories, ResourceMemory
# Minimal resource body: memories are keyed solely by metadata.uid.
BODY: Body = {
    'metadata': {
        'uid': 'uid1',
    }
}
def test_creation_with_defaults():
    """A ResourceMemory must be constructible with no arguments."""
    ResourceMemory()
async def test_recalling_creates_when_absent():
    """Recalling an unknown body lazily creates a new memory."""
    memories = ResourceMemories()
    recalled = await memories.recall(BODY)
    assert isinstance(recalled, ResourceMemory)
async def test_recalling_reuses_when_present():
    """Two recalls of the same body yield the identical memory object."""
    memories = ResourceMemories()
    first = await memories.recall(BODY)
    second = await memories.recall(BODY)
    assert first is second
async def test_forgetting_deletes_when_present():
    """Forgetting a body drops its stored memory."""
    memories = ResourceMemories()
    original = await memories.recall(BODY)
    await memories.forget(BODY)
    # Verify indirectly: recalling again must create a brand-new memory.
    recreated = await memories.recall(BODY)
    assert original is not recreated
async def test_forgetting_ignores_when_absent():
    """Forgetting a body that was never recalled must not raise."""
    memories = ResourceMemories()
    await memories.forget(BODY)
async def test_memo_is_autocreated():
    """A recalled memory carries an automatically created Memo."""
    memories = ResourceMemories()
    recalled = await memories.recall(BODY)
    assert isinstance(recalled.memo, Memo)
async def test_memo_is_shallow_copied():
    """Passing memobase= must shallow-copy it (via __copy__) exactly once."""
    class MyMemo(Memo):
        def __copy__(self):
            # Record the copy call; 'mock' is resolved late, at call time.
            mock()
            return MyMemo()
    mock = Mock()
    memobase = MyMemo()
    memories = ResourceMemories()
    memory = await memories.recall(BODY, memobase=memobase)
    assert mock.call_count == 1
    assert memory.memo is not memobase
| [
"nolar@nolar.info"
] | nolar@nolar.info |
db9d1c13b7d436ab858207a924264786e3498ed2 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_infiltrates.py | 6ebdab4177a42ddd28fbae76e331e02785165b79 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
#calss header
class _INFILTRATES():
def __init__(self,):
self.name = "INFILTRATES"
self.definitions = infiltrate
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['infiltrate']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
a4544faf8314eea6d6429d0c18c980fb8b91b2f5 | 0fcc6353edee4eed7a1ea4b1c89a00bfcf03e851 | /TryExcept/Finally.py | fc91ea5fe2e2768f5d5e8f587dd996b7f49dc433 | [] | no_license | GANESH0080/Python-Practice-Again | 81d8048c23d338a99bb17fa86a9f87b3057bfe52 | 6565911d14a22d0f33a41b417026c31a0a066be5 | refs/heads/master | 2020-09-20T03:40:45.462869 | 2019-11-27T07:19:24 | 2019-11-27T07:19:24 | 224,368,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | #The try block will raise an error when trying to write to a read-only file:
# Bind f up front so the finally clause is safe even when open() fails.
f = None
try:
  f = open("demofile.txt")
  f.write("Lorum Ipsum")
except Exception:
  # open() uses read-only mode by default, so the write() above fails.
  print("Something went wrong when writing to the file")
finally:
  # Bug fix: when open() itself raised, 'f' was unbound and f.close()
  # raised NameError from inside finally; only close an opened file.
  if f is not None:
    f.close()
#The program can continue, without leaving the file object open
| [
"ganusalunkhe@gmail.com"
] | ganusalunkhe@gmail.com |
73d4574f53efb7fc7035fb83bd09771334884d54 | 7e86f933cd477b08258dde4f52ecb2f45949d665 | /libdeepfry/emoji.py | b07ab177ca7fc98932ddfcf93773956e11d61f82 | [
"MIT"
] | permissive | MineRobber9000/deepfry | 25b41035c3f20d99ab1b40ffccde0ab24b8e5d9e | 383b7da439932dfc03fcae3fc52aa6538af20871 | refs/heads/master | 2020-04-13T13:45:30.294013 | 2018-12-27T09:46:38 | 2018-12-27T09:46:38 | 163,241,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | import json, os.path
# Package directory; bundled emoji images live in the 'emojis' subfolder.
BASEDIR = os.path.dirname(os.path.abspath(__file__))
EMOJIS = os.path.join(BASEDIR,"emojis")
# Mapping of emoji name -> image filename, shipped as emoji.json.
with open(BASEDIR+"/emoji.json") as f: d = json.load(f)
def listEmoji():
    """Return the names of every known emoji."""
    return [name for name in d]
def getImage(name):
    """Return the path of the image file for emoji *name*."""
    filename = d[name]
    return os.path.join(EMOJIS, filename)
| [
"khuxkm@ttm.sh"
] | khuxkm@ttm.sh |
d70e924db0d9df85ee854247487a4ad1b3f3a949 | 919e74f05976d9ea5f28d5dcf0a3e9311a4d22b2 | /conans/test/integration/generators/markdown_test.py | 12d065d26ee68671ab6c0d48c0837bc62058442d | [
"MIT"
] | permissive | thorsten-klein/conan | 1801b021a66a89fc7d83e32100a6a44e98d4e567 | 7cf8f384b00ba5842886e39b2039963fc939b00e | refs/heads/develop | 2023-09-01T12:04:28.975538 | 2023-07-26T10:55:02 | 2023-07-26T10:55:02 | 150,574,910 | 0 | 0 | MIT | 2023-08-22T14:45:06 | 2018-09-27T11:16:48 | Python | UTF-8 | Python | false | false | 7,453 | py | import textwrap
import unittest
from conans.test.utils.tools import TestClient
class MarkDownGeneratorTest(unittest.TestCase):
    """Integration tests for the 'markdown' generator.

    Each test creates a minimal bar/0.1.0 package with selected cpp_info
    properties, runs the markdown generator and checks the rendered
    bar.md documentation.
    """
    def test_cmake_find_filename(self):
        """cmake_file_name / cmake_target_name drive the CMake snippet."""
        conanfile = textwrap.dedent("""
                from conans import ConanFile
                class HelloConan(ConanFile):
                    def package_info(self):
                        self.cpp_info.set_property("cmake_file_name", "FooBar")
                        self.cpp_info.set_property("cmake_target_name", "foobar")
                        self.cpp_info.set_property("pkg_config_name", "foobar_cfg")
            """)
        client = TestClient()
        client.save({"conanfile.py": conanfile})
        client.run("create . bar/0.1.0@user/testing")
        client.run("install bar/0.1.0@user/testing -g markdown")
        content = client.load("bar.md")
        self.assertIn("find_package(FooBar)", content)
        self.assertIn("target_link_libraries(${PROJECT_NAME} foobar)", content)
    def test_cmake_find_filename_with_namespace(self):
        """A namespaced cmake_target_name is rendered verbatim."""
        conanfile = textwrap.dedent("""
                from conans import ConanFile
                class HelloConan(ConanFile):
                    def package_info(self):
                        self.cpp_info.set_property("cmake_file_name", "FooBar")
                        self.cpp_info.set_property("cmake_target_name", "foobar::foobar")
                        self.cpp_info.set_property("pkg_config_name", "foobar_cfg")
            """)
        client = TestClient()
        client.save({"conanfile.py": conanfile})
        client.run("create . bar/0.1.0@user/testing")
        client.run("install bar/0.1.0@user/testing -g markdown")
        content = client.load("bar.md")
        self.assertIn("find_package(FooBar)", content)
        self.assertIn("target_link_libraries(${PROJECT_NAME} foobar::foobar)", content)
    def test_with_build_modules(self):
        """Declared cmake_build_modules are listed with their content."""
        conanfile = textwrap.dedent("""
                import os
                from conans import ConanFile
                class HelloConan(ConanFile):
                    exports_sources = 'bm.cmake'
                    def package(self):
                        self.copy('bm.cmake', dst='lib/cmake')
                    def package_info(self):
                        self.cpp_info.set_property("cmake_file_name", "FooBar")
                        self.cpp_info.set_property("cmake_target_name", "foobar")
                        self.cpp_info.set_property("pkg_config_name", "foobar_cfg")
                        self.cpp_info.set_property('cmake_build_modules', ['lib/cmake/bm.cmake'])
            """)
        client = TestClient()
        client.save({"conanfile.py": conanfile,
                     "bm.cmake": "Content of build_module" })
        client.run("create . bar/0.1.0@user/testing")
        client.run("install bar/0.1.0@user/testing -g markdown")
        content = client.load("bar.md")
        self.assertIn("#### lib/cmake/bm.cmake", content)
        self.assertIn("Content of build_module", content)
    def test_no_components(self):
        """Without components, the components sections are omitted."""
        conanfile = textwrap.dedent("""
                import os
                from conans import ConanFile
                class HelloConan(ConanFile):
                    def package_info(self):
                        self.cpp_info.set_property("cmake_target_name", "foobar")
            """)
        client = TestClient()
        client.save({"conanfile.py": conanfile})
        client.run("create . bar/0.1.0@user/testing")
        client.run("install bar/0.1.0@user/testing -g markdown")
        content = client.load("bar.md")
        self.assertNotIn("Or link just one of its components", content)
        self.assertNotIn("Declared components", content)
    def test_with_components(self):
        """Component target names are documented and linkable."""
        conanfile = textwrap.dedent("""
                import os
                from conans import ConanFile
                class HelloConan(ConanFile):
                    def package_info(self):
                        self.cpp_info.set_property("cmake_target_name", "foobar")
                        self.cpp_info.components["component1"].set_property("cmake_target_name", "foobar::component_name")
            """)
        client = TestClient()
        client.save({"conanfile.py": conanfile})
        client.run("create . bar/0.1.0@user/testing")
        client.run("install bar/0.1.0@user/testing -g markdown")
        content = client.load("bar.md")
        self.assertIn("target_link_libraries(${PROJECT_NAME} foobar::component_name)", content)
        self.assertIn("* CMake target name: ``foobar::component_name``", content)
    def test_with_components_and_target_namespace(self):
        """Namespaced root and component targets are both rendered."""
        conanfile = textwrap.dedent("""
                import os
                from conans import ConanFile
                class HelloConan(ConanFile):
                    def package_info(self):
                        self.cpp_info.set_property("cmake_target_name", "namespace::name")
                        self.cpp_info.components["component1"].set_property("cmake_target_name", "namespace::component_name")
            """)
        client = TestClient()
        client.save({"conanfile.py": conanfile})
        client.run("create . bar/0.1.0@user/testing")
        client.run("install bar/0.1.0@user/testing -g markdown")
        content = client.load("bar.md")
        self.assertIn("target_link_libraries(${PROJECT_NAME} namespace::name)", content)
        self.assertIn("* CMake target name: ``namespace::component_name``", content)
    def test_c_project(self):
        """A package without C++ settings is documented as a C project."""
        conanfile = textwrap.dedent("""
            from conans import ConanFile
            class HelloConan(ConanFile):
                settings = "os", "arch", "compiler", "build_type"
                def configure(self):
                    del self.settings.compiler.libcxx
                    del self.settings.compiler.cppstd
                def package_info(self):
                    self.cpp_info.set_property("cmake_file_name", "FooBar")
                    self.cpp_info.set_property("cmake_target_name", "foobar")
                    self.cpp_info.set_property("pkg_config_name", "foobar_cfg")
            """)
        client = TestClient()
        client.save({"conanfile.py": conanfile})
        client.run("create . bar/0.1.0@user/testing")
        client.run("install bar/0.1.0@user/testing -g markdown")
        content = client.load("bar.md")
        self.assertIn("main.c", content)
        self.assertIn("project(bar_project C)", content)
    def test_with_sys_requirements(self):
        """Components with system_libs still generate without errors."""
        conanfile = textwrap.dedent("""
                import os
                from conans import ConanFile
                class HelloConan(ConanFile):
                    def package_info(self):
                        self.cpp_info.components["component1"].system_libs = ["system_lib"]
            """)
        client = TestClient()
        client.save({"conanfile.py": conanfile})
        client.run("create . bar/0.1.0@user/testing")
        client.run("install bar/0.1.0@user/testing -g markdown")
        assert "Generator markdown created bar.md" in client.out
| [
"noreply@github.com"
] | thorsten-klein.noreply@github.com |
5e1e458ba6c95125f1af4dffd2a3244e8f04e4fe | f647c6fb984b6e93977bb56a9a4533b8d47e6644 | /lib/dbsqlite.py | d78d5b670510243b004e03e42afa47e3c0487173 | [] | no_license | vdsmirnov52/wt000 | 7a88fcf29e5f786b8f2b0956b4a10ae68c0e32a6 | 0dd8ead0a73ed0f3f7f2f8c5302dff0071392570 | refs/heads/master | 2021-04-26T05:50:59.554131 | 2020-08-06T10:14:02 | 2020-08-06T10:14:02 | 79,928,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,670 | py | #!/usr/bin/python -u
# -*- coding: utf-8 -*-
import sys
import sqlite3
class dbsqlite:
    """Thin convenience wrapper around the sqlite3 module.

    Note: sqlite3 executes only one statement per call ("You can only
    execute one statement at a time.").

    Methods:
        execute(query[, uvars])   -- run one statement; returns True/False.
        get_row(query[, uvars])   -- fetch a single row tuple, or None.
        get_rows(query[, uvars])  -- fetch all rows as a list of tuples.
        get(query, fall[, uvars]) -- run a query and fetch (fall: 1 ->
            fetchall(), 0 -> fetchone()).
        get_table(tname[, swhere][, cols]) -- return (desc, rows) or None.

    Parameter substitution follows sqlite3: positional '?' placeholders
    with a sequence, e.g.
        execute("SELECT Name FROM Artist ORDER BY Name LIMIT ?", ('2',))
    or named ':name' placeholders with a mapping, e.g.
        execute("SELECT Name FROM Artist ORDER BY Name LIMIT :limit", {"limit": 3})

    Attributes:
        desc       -- column names of the last successful query.
        last_error -- (exc_type, exc_value) of the last failure, or None.
    """
    last_error = None
    desc = []  # column names of the most recent query

    def __init__(self, file_db='./sqlite.db'):
        """Open (or create) the database file and keep a single cursor."""
        self.conn = sqlite3.connect(file_db)
        self.curs = self.conn.cursor()

    def execute(self, query, uvars=None):
        """Execute one SQL statement; commit and return True on success."""
        try:
            if uvars:
                self.curs.execute(query, uvars)
            else:
                self.curs.execute(query)
            self.last_error = None
            return True
        except (sqlite3.OperationalError, sqlite3.IntegrityError, sqlite3.Warning):
            self.last_error = sys.exc_info()[:2]
            return False
        finally:
            self.conn.commit()

    def get_row(self, query, uvars=None):
        """Return the first row of *query*, or None.

        Bug fix: *uvars* used to be silently dropped instead of being
        forwarded to get(), so parameterized queries failed.
        """
        return self.get(query, 0, uvars)

    def get_rows(self, query, uvars=None):
        """Return all rows of *query* as a list (bug fix: forwards uvars)."""
        return self.get(query, 1, uvars)

    def get(self, query, fall, uvars=None):
        """Execute *query* and fetch results (fall: 1=fetchall, 0=fetchone).

        On failure the error is remembered in last_error and None is
        returned.  Column names are stored in self.desc on success.
        """
        self.last_error = None
        try:
            if uvars:
                self.curs.execute(query, uvars)
            else:
                self.curs.execute(query)
            self.desc = [f[0] for f in self.curs.description]
            if fall:
                return self.curs.fetchall()
            return self.curs.fetchone()
        except (sqlite3.OperationalError, sqlite3.Warning):
            # Version-neutral replacement for the Python-2 print statement.
            sys.stdout.write('except: %s\n' % query)
            self.last_error = sys.exc_info()[:2]
        finally:
            self.conn.commit()

    def close(self):
        """Close the underlying connection."""
        self.conn.close()

    def get_table(self, tname, swhere=None, cols=None):
        """Read a table: SELECT {*|<cols>} FROM <tname> [WHERE <swhere>];

        Returns (desc, rows) when rows were found, otherwise None.
        """
        if not cols:
            cols = '*'
        if not swhere:
            query = "SELECT %s FROM %s;" % (cols, tname)
        else:
            query = "SELECT %s FROM %s WHERE %s;" % (cols, tname, swhere)
        self.rows = self.get_rows(query)
        if self.rows:
            return self.desc, self.rows
'''
# Объединяем запросы к базе
cursor.executescript(""" insert into Artist values (Null, 'A Aagrh!'); insert into Artist values (Null, 'A Aagrh-2!'); """)
# C подставновкой по порядку на места знаков вопросов:
cursor.execute("SELECT Name FROM Artist ORDER BY Name LIMIT ?", ('2'))
# И с использованием именнованных замен:
cursor.execute("SELECT Name from Artist ORDER BY Name LIMIT :limit", {"limit": 3})
new_artists = [ ('A Aagrh!',), ('A Aagrh!-2',), ('A Aagrh!-3',), ]
cursor.executemany("insert into Artist values (Null, ?);", new_artists)
'''
if __name__ == '__main__':
    # Ad-hoc smoke test: schema/seed statements for the Wialon host and
    # user tables (only used by the commented-out section below).
    sqls = ["""CREATE TABLE whosts (
    id_wh INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
    host_name TEXT NOT NULL
)""",
    "INSERT INTO whosts (host_name) VALUES ('wialon.rnc52.ru')",
    "INSERT INTO whosts (host_name) VALUES ('pp-wialon.rnc52.ru')",
    "INSERT INTO whosts (host_name) VALUES ('sh-wialon.rnc52.ru')",
    "INSERT INTO whosts (host_name) VALUES ('smp-wialon.rnc52.ru')",
    "INSERT INTO whosts (host_name) VALUES ('test-wialon.rnc52.ru')",
    'CREATE TABLE whusers (\n id_whu INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n login TEXT NOT NULL,\n passwd TEXT,\n token TEXT,\n token_create INTEGER\n)',
    "INSERT INTO whusers (login, token) VALUES ('wialon', '1d5a4a6ab2bde440204e6bd1d53b3af82FD7F6B064E042FBBCC978E2B37A2A95930F80E6')",
    "INSERT INTO whusers (login, token) VALUES ('V.Smirnov', 'c5a76d06f77af04aa4c9fa0699d465c299B67214D257083C5E790742520C44F9EA0E3D80')",
    ]
    lite = dbsqlite('config.db') #'wialon.db')
    '''
    print lite.execute("INSERT INTO whosts (host_name) VALUES (?)", ('ZZZZZ',)), lite.last_error
    for sql in sqls:
        print sql, lite.execute(sql), lite.last_error
    print 'SQLite version:', lite.get_row('SELECT SQLITE_VERSION()')
    print 'get_rows:', lite.get_rows('SELECT * FROM whosts WHERE id_wh > 0'), lite.last_error, lite.desc
    print 'get_table:', lite.get_table ('whusers'), lite.last_error
    '''
    # Refresh the stored access tokens and show the affected rows.
    # NOTE: Python-2 print statements -- this demo block is Python 2 only.
    print 'get_row', lite.get_row("SELECT * FROM whosts WHERE id_wh = 1;")
    lite.execute("update whusers SET token = '1d5a4a6ab2bde440204e6bd1d53b3af88083648F594E6BCA5E6CB70EF1F85D7BF1B79E51', token_create = 1515073900 WHERE id_whu != 2;")
    print 'get_row', lite.get_row("SELECT * FROM whusers WHERE id_whu = 1;")
    lite.execute ("update whusers SET token = 'c5a76d06f77af04aa4c9fa0699d465c2AC7861F24C072495DD635404BDF84C5327051EBF', token_create = 1515075038 WHERE id_whu = 2;")
    print 'last_error', lite.last_error
    print 'get_row', lite.get_row("SELECT * FROM whusers WHERE id_whu = 2;")
    # print help(sqlite3)
| [
"vdsmitnov52@gmail.com"
] | vdsmitnov52@gmail.com |
83e25dcf1a96fdde2966714124d86f0a571a3d92 | a37bf3343be428c453e480c7a411a91b125ab1d1 | /deb/openmediavault/usr/share/openmediavault/firstaid/modules.d/40restore_config_backup.py | a8e215ee7b7b78ea5a740be8e451314d7aa2d4c1 | [] | no_license | zys1310992814/openmediavault | 8e73ccd66fefaddd03385834137887614726812c | 337f37729783d9bf3a08866c0dbc8b25c53b9ca3 | refs/heads/master | 2020-04-20T14:18:57.505953 | 2019-02-02T15:18:07 | 2019-02-02T15:18:07 | 168,894,447 | 1 | 0 | null | 2019-02-03T00:41:55 | 2019-02-03T00:41:55 | null | UTF-8 | Python | false | false | 4,021 | py | #!/usr/bin/env python3
#
# This file is part of OpenMediaVault.
#
# @license http://www.gnu.org/licenses/gpl.html GPL Version 3
# @author Volker Theile <volker.theile@openmediavault.org>
# @copyright Copyright (c) 2009-2018 Volker Theile
#
# OpenMediaVault is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# OpenMediaVault is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenMediaVault. If not, see <http://www.gnu.org/licenses/>.
import sys
import glob
import subprocess
import openmediavault
import openmediavault.firstaid
import openmediavault.subprocess
import dialog
import natsort
class Module(openmediavault.firstaid.IModule):
    """Firstaid module that restores the newest auto-generated
    configuration backup (``<OMV_CONFIG_FILE>.<revision>``) after
    interactively showing the user the differences."""

    @property
    def description(self):
        # Short label shown in the firstaid menu.
        return "Restore configuration backup"

    def execute(self):
        """Run the interactive restore workflow.

        Always returns 0; the user is guided through dialogs and may
        abort at any point.
        """
        d = dialog.Dialog(dialog="dialog")
        # Determine the first revision file which should look like
        # '<filename>.<revision>'.
        pathname = "%s.*" % openmediavault.getenv("OMV_CONFIG_FILE")
        # humansorted() orders numeric revision suffixes naturally, so the
        # last list entry is the newest backup.
        configbaks = natsort.humansorted(glob.glob(pathname))
        # Does a auto-generated configuration backup exist?
        if not configbaks:
            d.msgbox(
                "No configuration backup found!",
                backtitle=self.description,
                height=5,
                width=34
            )
            return 0
        # Get the latest configuration backup file.
        configbak = configbaks.pop()
        # Only show a diff, if there's a difference.
        rc = openmediavault.subprocess.call( # yapf: disable
            [
                "diff", "--brief",
                openmediavault.getenv("OMV_CONFIG_FILE"), configbak
            ],
            stdout=subprocess.PIPE)
        if rc == 0:
            # diff exit status 0 means both files are identical.
            d.msgbox("There's no difference between the configuration " \
                "files. Nothing to restore.",
                backtitle=self.description,
                height=6, width=58)
            return 0
        # Display the differences?
        code = d.yesno("Do you want to see the differences between the " \
            "current configuration and the backup.",
            backtitle=self.description,
            height=6, width=46)
        if code == d.ESC:
            return 0
        if code == d.OK:
            output = "===================================================================\n" \
                "All lines with '-' will be changed to the lines with '+'\n" \
                "===================================================================\n"
            # Unified diff with one line of context, rendered in a scrollbox.
            p = openmediavault.subprocess.Popen([
                "diff", "--unified=1",
                openmediavault.getenv("OMV_CONFIG_FILE"), configbak
            ],
                stdout=subprocess.PIPE, shell=False) # yapf: disable
            stdout, _ = p.communicate()
            output += stdout.decode()
            d.scrollbox(
                output,
                backtitle=self.description,
                height=18,
                width=72,
                clear=True
            )
        # Restore configuration backup?
        code = d.yesno("Do you want to restore the configuration backup? " \
            "This will overwrite the actual configuration?",
            backtitle=self.description,
            height=6, width=57, defaultno=True)
        if code != d.OK:
            return 0
        # Let the RPC layer replace the live config with the backup file.
        openmediavault.rpc.call(
            "Config", "revertChanges", {"filename": configbak}
        )
        print("Configuration backup successfully restored.")
        return 0
if __name__ == "__main__":
    # Allow running this firstaid module standalone; exit with its status.
    module = Module()
    sys.exit(module.execute())
| [
"votdev@gmx.de"
] | votdev@gmx.de |
57fa47b2bde6a7e5a76d63e2b71fb76e98dbc5ea | 9c54d20ea935e3e96af2c81349e2e8e93f9e3abd | /main.py | 21a6aa4d5294a59744103c164ecb227296938ad1 | [] | no_license | folkol/python-tag-cloud | e0bfb0e9bd7b61ba4532407cd6380020bc75f8cc | fce689f7960983dc6f7e3ffe0de6020ad875f969 | refs/heads/master | 2023-02-23T14:37:27.327243 | 2022-02-11T07:20:58 | 2022-02-11T07:20:58 | 177,873,883 | 0 | 0 | null | 2023-02-16T23:39:47 | 2019-03-26T21:49:58 | Python | UTF-8 | Python | false | false | 1,339 | py | """Generates a tag cloud from words found in the given projects python files."""
import builtins
import keyword
import os
import sys
import tokenize
from collections import Counter
import matplotlib.pyplot as plt
from wordcloud import WordCloud
# Directories never descended into while scanning a project tree.
DIR_BLACKLIST = ['.git', 'venv', 'tests']
# Identifiers too generic to be interesting: 'self', keywords and builtins.
TOKEN_BLACKLIST = ['self', *keyword.kwlist, *dir(builtins)]
def project_tokens(root):
    """Yield every interesting identifier found in the ``*.py`` files below *root*.

    Walks the tree rooted at *root*, pruning the directories listed in
    ``DIR_BLACKLIST``, tokenizes each Python file and yields every NAME
    token that is not in ``TOKEN_BLACKLIST``.
    """

    def file_tokens(file):
        # tokenize.tokenize() detects the file's own source encoding, so the
        # file is opened in binary mode and the raw readline handed over.
        with open(file, 'rb') as f:
            yield from (token.string
                        for token in tokenize.tokenize(f.readline)
                        if token.type == tokenize.NAME and token.string not in TOKEN_BLACKLIST)

    # Fix: the original reused the name 'root' as os.walk()'s loop variable,
    # shadowing the function parameter; use a distinct per-directory name.
    for dirpath, dirs, files in os.walk(root):
        # In-place slice assignment prunes blacklisted dirs from the walk.
        dirs[:] = [d for d in dirs if d not in DIR_BLACKLIST]
        yield from (token
                    for file in files if file.endswith('.py')
                    for token in file_tokens(os.path.join(dirpath, file)))
if __name__ == '__main__':
    # Expect exactly one argument: the path of the project to analyse.
    if len(sys.argv) != 2:
        print('usage: python main.py /path/to/python/repo', file=sys.stderr)
        sys.exit(1)
    repo = sys.argv[1]
    tokens = project_tokens(repo)
    # Identifier frequencies drive the word sizes in the cloud.
    token_counts = Counter(tokens)
    tag_cloud = WordCloud().generate_from_frequencies(token_counts)
    plt.figure()
    plt.imshow(tag_cloud, interpolation="bilinear")
    plt.axis("off")
    plt.savefig('tags.png')
"mattias4@kth.se"
] | mattias4@kth.se |
338c9058e62cd3557cce13438c3687d06f08be89 | 365c85a280596d88082c1f150436453f96e18c15 | /Python/Sort/插入排序.py | 904ec4fc487bcb85b850ba614f3a7fe9dc05223f | [] | no_license | Crisescode/leetcode | 0177c1ebd47b0a63476706562bcf898f35f1c4f2 | c3a60010e016995f06ad4145e174ae19668e15af | refs/heads/master | 2023-06-01T06:29:41.992368 | 2023-05-16T12:32:10 | 2023-05-16T12:32:10 | 243,040,322 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
插入排序:
它的工作原理是每一次从待排序的数据元素中选出最小(或最大)的一个元素,
存放在序列的起始位置,所以称为:选择排序。
时间复杂度: O(n^2)
空间复杂度: O(1)
"""
from Utils.timer_decorater import timer
from typing import List
class Solution:
    """Insertion sort: grow a sorted prefix, sliding each new element left
    into its slot."""

    @timer
    def insertion_sort(self, l: List) -> List:
        """Sort *l* ascending in place and return the same list."""
        if len(l) <= 1:
            return l
        for right in range(1, len(l)):
            current = l[right]
            if current >= l[right - 1]:
                # Already in order relative to the sorted prefix.
                continue
            pos = right
            # Shift strictly larger elements one slot to the right until the
            # insertion point for `current` is found (stable for equals).
            while pos > 0 and l[pos - 1] > current:
                l[pos] = l[pos - 1]
                pos -= 1
            l[pos] = current
        return l
if __name__ == "__main__":
    # Quick manual check: sort a small sample list and print the result.
    need_sort_list = [2, 4, 8, 1, 7, 10, 12, 15, 3]
    print(Solution().insertion_sort(need_sort_list))
| [
"zhaopanp2018@outlook.com"
] | zhaopanp2018@outlook.com |
1ca1c50f395e5d78d3d7df0362c1b89a800546a8 | 320280bfce76713436b76ffc3125ccf37e65a324 | /AnalyzeMiniPlusSubstructure/test/ttbar/ttbar_306.py | aebec186733d709d873615f93abaaaf8346d123d | [] | no_license | skhalil/MiniValidation | 75ea5c0d7cde17bf99c7d31501f8384560ee7b99 | 1a7fb8377e29172483ea6d3c7b3e427ff87e7e37 | refs/heads/master | 2016-09-05T10:31:38.562365 | 2015-01-29T05:30:32 | 2015-01-29T05:30:32 | 29,898,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,861 | py | import FWCore.ParameterSet.Config as cms
###############################################
# Input-collection names. The AOD names below are the defaults; when
# reading miniAOD the block further down overrides them with the
# slimmed/packed equivalents.
useMiniAOD = True
# AOD
pfcandidates = 'particleFlow'
chsstring = 'pfNoPileUpJME'
genjetparticles = 'genParticles'
importantgenparticles = 'genParticles'
tracks = 'generalTracks'
vertices = 'offlinePrimaryVertices'
mergedvertices = 'inclusiveMergedVertices'
mergedvertices2 = ''
primaryvertices = 'offlinePrimaryVertices'
#miniAOD
if useMiniAOD:
    pfcandidates = 'packedPFCandidates'
    genjetparticles = 'packedGenParticles'
    importantgenparticles = 'prunedGenParticles'
    tracks = 'unpackedTracksAndVertices'
    vertices = 'unpackedTracksAndVertices'
    mergedvertices = 'unpackedTracksAndVertices'
    mergedvertices2 = 'secondary'
    primaryvertices = 'offlineSlimmedPrimaryVertices'
# Echo the chosen collection names into the job log for bookkeeping.
print 'useMiniAOD = '+str(useMiniAOD)
print ' pfcandidates = '+pfcandidates
print ' genjetparticles = '+genjetparticles
print ' importantgenparticles = '+importantgenparticles
print ' tracks = '+tracks
print ' vertices = '+vertices
print ' mergedvertices = '+mergedvertices
print ' mergedvertices2 = '+mergedvertices2
print ' primaryvertices = '+primaryvertices
###############################################
# SETUP
process = cms.Process("USER")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) , allowUnscheduled = cms.untracked.bool(True) )
# -1 means: run over all events found in the input files.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.MessageLogger.cerr.FwkJob.limit=1
process.MessageLogger.cerr.ERROR = cms.untracked.PSet( limit = cms.untracked.int32(0) )
###############################################
# SOURCE
# Single Phys14 TTJets miniAOD file read over xrootd.
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        'root://cmsxrootd-site.fnal.gov//store/mc/Phys14DR/TTJets_MSDecaysCKM_central_Tune4C_13TeV-madgraph-tauola/MINIAODSIM/PU20bx25_PHYS14_25_V1-v1/10000/1A089196-7276-E411-9BA5-002590DB91E0.root'
    )
)
###############################################
# ANA
# The analyzer consumes the slimmed miniAOD collections configured above.
process.demo = cms.EDAnalyzer("AnalyzeMiniPlusSubstructure",
    vertices = cms.InputTag("offlineSlimmedPrimaryVertices"),
    muons = cms.InputTag("slimmedMuons"),
    electrons = cms.InputTag("slimmedElectrons"),
    taus = cms.InputTag("slimmedTaus"),
    photons = cms.InputTag("slimmedPhotons"),
    jets = cms.InputTag("slimmedJets"),
    fatjets = cms.InputTag("slimmedJetsAK8"),
    mets = cms.InputTag("slimmedMETs"),
    pfCands = cms.InputTag("packedPFCandidates"),
    packed = cms.InputTag("packedGenParticles"),
    pruned = cms.InputTag("prunedGenParticles"),
    bits = cms.InputTag("TriggerResults","","HLT"),
    prescales = cms.InputTag("patTrigger")
)
# Output ROOT file collecting the analyzer's histograms/trees.
process.TFileService = cms.Service("TFileService",
    fileName = cms.string("ttbar306.root"),
    closeFileFast = cms.untracked.bool(True)
)
###############################################
# RECO AND GEN SETUP
process.load('PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.Geometry_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag ='PHYS14_25_V2'
#'START70_V6::All'
#'START70_V6::All'
process.load('RecoJets.Configuration.RecoPFJets_cff')
process.load('RecoJets.Configuration.RecoGenJets_cff')
#process.fixedGridRhoFastjetAll.pfCandidatesTag = pfcandidates
# Point the rho producers at the miniAOD packed candidates.
process.fixedGridRhoFastjetAll.pfCandidatesTag = 'packedPFCandidates'
process.fixedGridRhoAll.pfCandidatesTag = 'packedPFCandidates'
# process.fixedGridRhoAll.pfCandidatesTag = .InputTag("packedPFCandidates")
# process.fixedGridRhoFastjetAll = fixedGridRhoFastjetAll.clone( pfCandidatesTag = cms.InputTag("packedPFCandidates"))
# process.fixedGridRhoAll = fixedGridRhoAll.clone( pfCandidatesTag = cms.InputTag("packedPFCandidates"))
from RecoJets.JetProducers.SubJetParameters_cfi import SubJetParameters
from RecoJets.JetProducers.PFJetParameters_cfi import *
from RecoJets.JetProducers.CaloJetParameters_cfi import *
from RecoJets.JetProducers.AnomalousCellParameters_cfi import *
from RecoJets.JetProducers.CATopJetParameters_cfi import *
from RecoJets.JetProducers.GenJetParameters_cfi import *
from RecoJets.JetProducers.caTopTaggers_cff import *
###############################################
process.content = cms.EDAnalyzer("EventContentAnalyzer")
# The path runs only the analyzer; unscheduled mode pulls in dependencies.
process.p = cms.Path(
    #process.fixedGridRhoFastjetAll
    process.demo
)
| [
"skhalil@fnal.gov"
] | skhalil@fnal.gov |
76f0aba3cd468d0ec66404e8b7947d7b7333aafa | 3a642fa1fc158d3289358b53770cdb39e5893711 | /src/xlsxwriter/test/comparison/test_format01.py | 4c50643ba2bfbb06d966fe651528ff22ebe50e1b | [] | no_license | andbar-ru/traceyourself.appspot.com | d461277a3e6f8c27a651a1435f3206d7b9307d9f | 5f0af16ba2727faceb6b7e1b98073cd7d3c60d4c | refs/heads/master | 2020-07-23T14:58:21.511328 | 2016-12-26T22:03:01 | 2016-12-26T22:03:01 | 73,806,841 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,078 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
import os
from ...workbook import Workbook
from ..helperfunctions import _compare_xlsx_files
class TestCompareXLSXFiles(unittest.TestCase):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        base_name = 'format01.xlsx'
        base_dir = 'xlsxwriter/test/comparison/'

        # Generated file vs. the Excel-produced reference file.
        self.got_filename = '%s_test_%s' % (base_dir, base_name)
        self.exp_filename = '%sxlsx_files/%s' % (base_dir, base_name)

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with unused formats."""
        workbook = Workbook(self.got_filename)

        sheet1 = workbook.add_worksheet()
        sheet2 = workbook.add_worksheet('Data Sheet')
        sheet3 = workbook.add_worksheet()

        # Creation order matters: the unused formats must still end up in
        # the output file in exactly this sequence.
        unused1 = workbook.add_format({'bold': 1})
        bold = workbook.add_format({'bold': 1})
        unused2 = workbook.add_format({'bold': 1})
        unused3 = workbook.add_format({'italic': 1})

        sheet1.write('A1', 'Foo')
        sheet1.write('A2', 123)

        sheet3.write('B2', 'Foo')
        sheet3.write('B3', 'Bar', bold)
        sheet3.write('C4', 234)

        workbook.close()

        got, exp = _compare_xlsx_files(self.got_filename,
                                       self.exp_filename,
                                       self.ignore_files,
                                       self.ignore_elements)

        self.assertEqual(got, exp)

    def tearDown(self):
        # Remove the generated workbook after each run.
        if os.path.exists(self.got_filename):
            os.remove(self.got_filename)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"andrey@voktd-andbar.int.kronshtadt.ru"
] | andrey@voktd-andbar.int.kronshtadt.ru |
26c7774f14779d1cd3315d78272da86bbefcef4d | d9aa4291a4978b932bef84b8d26aa4b911ca2add | /day04正则re模块/03 re模块正则表达式.py | a5c42f5fb0a7f43dad3f6996b9c6b9feadeed74b | [] | no_license | SelfShadows/my_git | 9a32d3713efb1b055d04c813b319eb2196fdcf53 | b10a4c838e1146b3f6ce297480840de9a8e89206 | refs/heads/master | 2020-12-15T22:33:49.273814 | 2020-02-14T16:33:46 | 2020-02-14T16:33:46 | 235,274,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | import re
import re

# Extract the integer operands from an arithmetic expression.
#
# The pattern matches floats first so that their digit runs are consumed,
# but only captures group 1 (plain integers); every float match therefore
# shows up in findall() as an empty string and must be filtered out.
ret = re.findall(r'\d+\.\d+|(\d+)', '8+4-2*5.21-5+10-(50.75+55)')
# Fix: the original removed '' entries while iterating the same list,
# which silently skips the element after each removal (it only worked
# here by accident). Rebuild the list instead of mutating it in place.
ret = [match for match in ret if match != '']
print(ret)
"870670791@qq.com"
] | 870670791@qq.com |
62df615bb39689d2921aede204a4472f282f1fa7 | ae85cd400fa71296867c9e55297affa2d3679b5d | /algorithms/pattern_matching/rabin-karp.py2.py | 6cce3fe2ac69b7fa1deb148357b646ccaffca4c0 | [] | no_license | Psycadelik/sifu | a1e751aa4e97cd56431cdf8704304b82943db37c | 72965f694f7a44aa8711d11934b216d5ccf9d280 | refs/heads/master | 2023-04-12T17:07:00.677702 | 2021-05-07T07:30:14 | 2021-05-07T07:30:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,365 | py | '''
28. Implement strStr()
Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.
Clarification:
What should we return when needle is an empty string? This is a great question to ask during an interview.
For the purpose of this problem, we will return 0 when needle is an empty string.
This is consistent to C's strstr() and Java's indexOf().
Example 1:
Input: haystack = "hello", needle = "ll"
Output: 2
Example 2:
Input: haystack = "aaaaa", needle = "bba"
Output: -1
Example 3:
Input: haystack = "", needle = ""
Output: 0
'''
'''
The Rabin–Karp algorithm or Karp–Rabin algorithm is a string-searching algorithm
created by Richard M. Karp and Michael O. Rabin (1987)
that uses hashing to find an exact match of a pattern string in a text.
It uses a rolling hash to quickly filter out positions of the text that cannot match the pattern,
and then checks for a match at the remaining positions. Generalizations of the same idea can be
used to find more than one match of a single pattern, or to find matches for more than one pattern.
https://github.com/mission-peace/interview/blob/master/python/string/rabinkarp.py
https://brilliant.org/wiki/rabin-karp-algorithm/#:~:text=The%20best%2D%20and%20average%2Dcase,collision%20and%20therefore%20must%20check
https://leetcode.com/problems/implement-strstr/discuss/1019737/Rabin-karp-algorithm-with-explanation-Python
'''
class Solution:
    """Rabin-Karp substring search (LeetCode 28, strStr).

    A rolling polynomial hash filters out window positions that cannot
    match; candidate positions are confirmed with a direct character
    comparison. No modular reduction is applied: Python integers do not
    overflow, so unequal hashes always imply unequal strings, and equal
    hashes are verified explicitly anyway.
    """

    def __init__(self):
        self.base = 26        # base of the polynomial hash
        self.prime_mod = 101  # kept for API compatibility (unused: no modulus applied)
        # Fix: the original called the undefined self.myhash() here, which
        # raised AttributeError on every construction.
        self.pattern_hash = None

    def check_equal(self, str1, str2):
        """Return True when the two strings are character-wise equal."""
        if len(str1) != len(str2):
            return False
        for a, b in zip(str1, str2):
            if a != b:
                return False
        return True

    def create_hash(self, _str, end):
        """Hash _str[0..end] as sum(ord(_str[i]) * base**i)."""
        my_hash = 0
        for i in range(end + 1):
            my_hash += ord(_str[i]) * self.base ** i
        return my_hash

    def recalculate_hash(self, _str, old_index, new_index, old_hash, pattern_len):
        """Roll the window hash: drop _str[old_index], add _str[new_index].

        Fix: the original read `new_hash` before it was ever assigned and
        mixed up the exponents; this version derives the next window hash
        from `old_hash` in O(1).
        """
        # Remove the outgoing character (coefficient base**0); the division
        # is exact and shifts all remaining coefficients down one power.
        new_hash = (old_hash - ord(_str[old_index])) // self.base
        # The incoming character enters with the highest coefficient.
        new_hash += ord(_str[new_index]) * self.base ** (pattern_len - 1)
        return new_hash

    def pattern_matching(self, text, pattern):
        """Return the index of the first occurrence of pattern in text.

        Returns 0 for an empty pattern (per the strStr contract stated in
        the module docstring) and -1 when there is no match.
        """
        if pattern == '':
            # Fix: an empty needle must yield 0, not None.
            return 0
        n, m = len(text), len(pattern)
        if m > n:
            return -1
        pattern_hash = self.create_hash(pattern, m - 1)
        text_hash = self.create_hash(text, m - 1)
        for start in range(n - m + 1):
            # Hash equality is only a candidate signal; confirm by comparison.
            if pattern_hash == text_hash and \
                    self.check_equal(text[start:start + m], pattern):
                return start
            if start < n - m:
                text_hash = self.recalculate_hash(text, start, start + m,
                                                  text_hash, m)
        return -1
| [
"erickmwazonga@gmail.com"
] | erickmwazonga@gmail.com |
2d6e2ed0883d161c1401bb9fb640a3ab787fb618 | 664c3ced94ab0e9a5bac547028db59a3ca1f2074 | /16. Python games with Pygame/EG16-05 background sprite/EG16-05 background sprite.py | 22797007a32d97adb2843f167fe7fe7d0a565b7e | [
"MIT"
] | permissive | nikcbg/Begin-to-Code-with-Python | 2b1283a7818e26d3471677b51d1832cde52c4ddc | a72fdf18ca15f564be895c6394a91afc75fc3e2c | refs/heads/master | 2021-06-23T23:09:36.009442 | 2021-06-23T11:17:24 | 2021-06-23T11:17:24 | 209,285,197 | 0 | 0 | MIT | 2021-03-17T07:48:09 | 2019-09-18T10:50:51 | Python | UTF-8 | Python | false | false | 2,148 | py | # EG16-05 background sprite
import pygame
class Sprite:
    '''
    A drawable element in the game. Subclass it to give a sprite
    behaviour of its own.
    '''

    def __init__(self, image, game):
        '''
        Create a sprite.

        image is the surface used to draw the sprite and game is the
        owning game instance. The sprite starts at the origin (0, 0).
        '''
        self.game = game
        self.image = image
        self.position = [0, 0]
        self.reset()

    def update(self):
        '''
        Hook invoked once per frame from the game loop. The base class
        has nothing to do.
        '''
        pass

    def draw(self):
        '''
        Blit the sprite's image onto the game surface at the sprite's
        current position.
        '''
        self.game.surface.blit(self.image, self.position)

    def reset(self):
        '''
        Hook invoked when a new game starts; the base class does nothing.
        '''
        pass
class CrackerChase:
    '''
    Plays the amazing cracker chase game
    '''

    def play_game(self):
        '''
        Starts the game playing
        Will return when the player exits
        the game.
        '''
        init_result = pygame.init()
        # pygame.init() returns (pass_count, fail_count); a non-zero
        # failure count means some subsystem could not start.
        if init_result[1] != 0:
            print('pygame not installed properly')
            return
        # Create the 800x600 game window.
        self.width = 800
        self.height = 600
        self.size = (self.width, self.height)
        self.surface = pygame.display.set_mode(self.size)
        pygame.display.set_caption('Cracker Chase')
        # The background sprite fills the window and is redrawn each frame.
        background_image = pygame.image.load('background.png')
        self.background_sprite = Sprite(image=background_image,
                                        game=self)
        clock = pygame.time.Clock()
        while True:
            # Cap the frame rate at 60 frames per second.
            clock.tick(60)
            for e in pygame.event.get():
                if e.type == pygame.KEYDOWN:
                    if e.key == pygame.K_ESCAPE:
                        # Escape shuts pygame down and ends the game.
                        pygame.quit()
                        return
            self.background_sprite.draw()
            # Swap the back buffer onto the screen.
            pygame.display.flip()
# Create the game object and start playing immediately on import/run.
game = CrackerChase()
game.play_game()
| [
"nkcbg@yahoo.com"
] | nkcbg@yahoo.com |
48dda0032940d6cc8a1faecdbe87f99ff551ae62 | bba2bd15307d94707825057fe2790a72c707a363 | /allennlpx/modules/token_embedders/embedding.py | dcab25dbe87317171a6a1e8769f6d4cc8fd80498 | [] | no_license | Xalp/dne | c78e8ef2f730b129623ed3eaa27f93d2cf85d6f6 | afa519eea9ccd29332c477d89b4691fc2520813b | refs/heads/master | 2023-02-16T14:27:48.089160 | 2021-01-15T12:30:44 | 2021-01-15T12:30:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,568 | py | import logging
import warnings
import numpy
import torch
from allennlp.nn import util
from overrides import overrides
from allennlp.common import Tqdm
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
logger = logging.getLogger(__name__)
from allennlp.modules.token_embedders.embedding import EmbeddingsTextFile
from allennlp.modules.token_embedders.embedding import Embedding
from torch.nn.functional import embedding
from allennlp.modules.time_distributed import TimeDistributed
from allennlpx.training import adv_utils
class VanillaEmbedding(Embedding):
    """Drop-in subclass of AllenNLP's ``Embedding`` whose ``forward``
    re-implements the plain lookup. No adversarial perturbation is applied;
    the commented-out block at the end shows where it would be injected."""

    def __init__(
        self,
        **kwargs,
    ) -> None:
        # No extra state: behaves exactly like the parent class.
        super().__init__(**kwargs)

    @overrides
    def forward(self, tokens: torch.Tensor) -> torch.Tensor:
        # `tokens` may carry any number of leading dimensions; flatten them
        # so torch's embedding() sees a 2-D (batch, sequence) index tensor.
        original_size = tokens.size()
        tokens = util.combine_initial_dims(tokens)
        embedded = embedding(
            tokens,
            self.weight,
            padding_idx=self.padding_index,
            max_norm=self.max_norm,
            norm_type=self.norm_type,
            scale_grad_by_freq=self.scale_grad_by_freq,
            sparse=self.sparse,
        )
        # Now (if necessary) add back in the extra dimensions.
        embedded = util.uncombine_initial_dims(embedded, original_size)
        if self._projection:
            projection = self._projection
            # Wrap the projection in TimeDistributed once per extra
            # (non batch/feature) dimension so it is applied token-wise.
            for _ in range(embedded.dim() - 2):
                projection = TimeDistributed(projection)
            embedded = projection(embedded)
        # if adv_utils.is_adv_mode():
        #     info = adv_utils.get_gradient_info()
        #     grad_norm = torch.norm(info.last_bw, dim=-1, keepdim=True) + 1e-6
        #     delta = info.last_bw / grad_norm
        #     embedded += info.grd_step * delta
        return embedded
def _read_embeddings_from_text_file(
        file_uri: str,
        embedding_dim: int,
        vocab: Vocabulary,
        namespace: str = "tokens") -> torch.FloatTensor:
    """
    Read pre-trained word vectors from an eventually compressed text file, possibly contained
    inside an archive with multiple files. The text file is assumed to be utf-8 encoded with
    space-separated fields: [word] [dim 1] [dim 2] ...
    Lines that contain more numerical tokens than `embedding_dim` raise a warning and are skipped.
    The remainder of the docstring is identical to `_read_pretrained_embeddings_file`.
    """
    # Only vectors for tokens actually present in the vocabulary are kept.
    tokens_to_keep = set(
        vocab.get_index_to_token_vocabulary(namespace).values())
    vocab_size = vocab.get_vocab_size(namespace)
    embeddings = {}
    # First we read the embeddings from the file, only keeping vectors for the words we need.
    logger.info("Reading pretrained embeddings from file")
    with EmbeddingsTextFile(file_uri) as embeddings_file:
        for line in Tqdm.tqdm(embeddings_file):
            # Cheap prefix split before tokenizing the whole line.
            token = line.split(" ", 1)[0]
            if token in tokens_to_keep:
                fields = line.rstrip().split(" ")
                if len(fields) - 1 != embedding_dim:
                    # Sometimes there are funny unicode parsing problems that lead to different
                    # fields lengths (e.g., a word with a unicode space character that splits
                    # into more than one column). We skip those lines. Note that if you have
                    # some kind of long header, this could result in all of your lines getting
                    # skipped. It's hard to check for that here; you just have to look in the
                    # embedding_misses_file and at the model summary to make sure things look
                    # like they are supposed to.
                    logger.warning(
                        "Found line with wrong number of dimensions (expected: %d; actual: %d): %s",
                        embedding_dim,
                        len(fields) - 1,
                        line,
                    )
                    continue
                vector = numpy.asarray(fields[1:], dtype="float32")
                embeddings[token] = vector
    if not embeddings:
        raise ConfigurationError(
            "No embeddings of correct dimension found; you probably "
            "misspecified your embedding_dim parameter, or didn't "
            "pre-populate your Vocabulary")
    all_embeddings = numpy.asarray(list(embeddings.values()))
    # NOTE(review): the mean/std of the pretrained vectors are computed and
    # immediately discarded, and the matrix below is zero-filled even though
    # the comment mentions "random vectors" (upstream AllenNLP initialises
    # from N(mean, std) here). Possibly a deliberate fork change — confirm.
    float(numpy.mean(all_embeddings))
    float(numpy.std(all_embeddings))
    # Now we initialize the weight matrix for an embedding layer, starting with random vectors,
    # then filling in the word vectors we just read.
    logger.info("Initializing pre-trained embedding layer")
    embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).fill_(0.)
    num_tokens_found = 0
    index_to_token = vocab.get_index_to_token_vocabulary(namespace)
    for i in range(vocab_size):
        token = index_to_token[i]
        # If we don't have a pre-trained vector for this word, we'll just leave this row alone,
        # so the word has a random initialization.
        if token in embeddings:
            embedding_matrix[i] = torch.FloatTensor(embeddings[token])
            num_tokens_found += 1
        else:
            logger.debug(
                "Token %s was not found in the embedding file. Initialising randomly.",
                token)
    logger.info("Pretrained embeddings were found for %d out of %d tokens",
                num_tokens_found, vocab_size)
    return embedding_matrix
| [
"dugu9sword@163.com"
] | dugu9sword@163.com |
3241bf7425397cb464d443fdc964822f93d76157 | e9f4f2f48f96f8eef84851fb1191c5f5ae7ca882 | /odps/config.py | 1b9afca23b4335d61c3c7ace6f55d59a65aa5d7c | [
"Apache-2.0"
] | permissive | bleachyin/aliyun-odps-python-sdk | 18d156b794de530090bc04e1cba918e08b0f77bc | 6a99db643076b3957f0e6c774c482e81881dbe25 | refs/heads/master | 2021-01-16T22:44:11.875016 | 2016-02-05T02:17:25 | 2016-02-05T02:17:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,873 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import contextlib
import six
# Default settings for HTTP connections to the ODPS service.
DEFAULT_CHUNK_SIZE = 1496
DEFAULT_CONNECT_RETRY_TIMES = 4
DEFAULT_CONNECT_TIMEOUT = 5
DEFAULT_READ_TIMEOUT = 120
class AttributeDict(dict):
    """Dict whose keys can be read and written as attributes.

    Leaf entries are stored as ``(value, validator)`` pairs; nested
    ``AttributeDict`` instances are stored (and returned) as-is.
    """

    def __getattr__(self, item):
        if item not in self:
            # Fall back to normal lookup, which raises AttributeError.
            return object.__getattribute__(self, item)
        stored = self[item]
        # Sub-groups come back whole; leaves unwrap to their value.
        return stored if isinstance(stored, AttributeDict) else stored[0]

    def register(self, key, value, validator=None):
        """Store a leaf option together with its (optional) validator."""
        self[key] = value, validator

    def __setattr__(self, key, value):
        if isinstance(value, AttributeDict):
            # Sub-groups are stored directly, without a validator slot.
            self[key] = value
            return
        checker = self[key][1] if key in self else None
        if checker is not None and not checker(value):
            raise ValueError('Cannot set value %s' % value)
        self[key] = value, checker
class Config(object):
    """Facade over an ``AttributeDict`` option tree.

    Attribute access on a ``Config`` is delegated to the underlying
    ``_config`` dict; dotted option paths are created via
    :meth:`register_option`.
    """

    def __init__(self, config=None):
        self._config = config or AttributeDict()
        self._validators = dict()

    def __getattr__(self, item):
        # '_config' itself must not be delegated or lookup would recurse.
        if item == '_config':
            return object.__getattribute__(self, '_config')
        return getattr(self._config, item)

    def __setattr__(self, key, value):
        if key == '_config':
            object.__setattr__(self, key, value)
        # Note: intentionally no `else` here — every assignment, including
        # '_config' itself, is also recorded inside the option tree, which
        # mirrors the original behaviour.
        setattr(self._config, key, value)

    def register_option(self, option, value, validator=None):
        """Register a dotted *option* path, creating intermediate groups."""
        parts = option.split('.')
        node = self._config
        for name in parts[:-1]:
            child = node.get(name)
            if child is None:
                node[name] = AttributeDict()
                node = node[name]
            elif not isinstance(child, dict):
                raise AttributeError(
                    'Fail to set option: %s, conflict has encountered' % option)
            else:
                node = child
        leaf = parts[-1]
        if node.get(leaf) is not None:
            raise AttributeError(
                'Fail to set option: %s, option has been set' % option)
        node.register(leaf, value, validator)
@contextlib.contextmanager
def option_context(config=None):
    """Temporarily override the global options inside a ``with`` block.

    A deep copy of the current global config is patched with *config*
    (a mapping of dotted option path -> value) and installed as the
    module-level ``options``; the original object is restored on exit.
    """
    global options
    global_options = options
    try:
        config = config or dict()
        # Work on a deep copy so the global tree is never mutated.
        local_options = Config(deepcopy(global_options._config))
        for option, value in six.iteritems(config):
            local_options.register_option(option, value)
        options = local_options
        yield options
    finally:
        options = global_options
def is_interactive():
    """Return True when running in an interactive session.

    Interactive interpreters (REPL, notebooks) have no ``__file__`` on
    the ``__main__`` module, unlike scripts run from a file.
    """
    import __main__ as main_module
    return not hasattr(main_module, '__file__')
# validators
def any_validator(*validators):
    """Combine validators: the result passes when at least one passes."""
    def validate(x):
        for validator in validators:
            if validator(x):
                return True
        return False
    return validate
def all_validator(*validators):
    """Combine validators: the result passes only when every one passes."""
    def validate(x):
        # De Morgan form of all(validator(x) for validator in validators).
        return not any(not validator(x) for validator in validators)
    return validate
# Basic type predicates used as option validators.
# Fix: PEP 8 (E731) — use `def` instead of assigning lambdas to names, so
# the predicates carry useful names in tracebacks and reprs.
def is_null(x):
    """Return True when *x* is None."""
    return x is None


def is_bool(x):
    """Return True when *x* is a bool."""
    return isinstance(x, bool)


def is_integer(x):
    """Return True when *x* is an integer type (int, or long on py2 via six)."""
    return isinstance(x, six.integer_types)


def is_string(x):
    """Return True when *x* is a string type for this Python version."""
    return isinstance(x, six.string_types)
def is_in(vals):
    """Return a validator accepting exactly the values contained in *vals*."""
    def _check_membership(candidate):
        return candidate in vals
    return _check_membership
options = Config()

# Account / endpoint settings (filled in by the user at runtime).
options.register_option('access_id', None)
options.register_option('access_key', None)
options.register_option('end_point', None)
options.register_option('default_project', None)
options.register_option('log_view_host', None)
options.register_option('tunnel_endpoint', None)
# network connections
options.register_option('chunk_size', DEFAULT_CHUNK_SIZE, validator=is_integer)
options.register_option('retry_times', DEFAULT_CONNECT_RETRY_TIMES, validator=is_integer)
options.register_option('connect_timeout', DEFAULT_CONNECT_TIMEOUT, validator=is_integer)
options.register_option('read_timeout', DEFAULT_READ_TIMEOUT, validator=is_integer)
# terminal
options.register_option('console.max_lines', None)
options.register_option('console.max_width', None)
options.register_option('console.use_color', False, validator=is_bool)
# DataFrame
options.register_option('interactive', is_interactive(), validator=is_bool)
options.register_option('verbose', False, validator=is_bool)
options.register_option('verbose_log', None)
options.register_option('df.analyze', True, validator=is_bool)
# display
# Imported here (not at the top) so the console probing only happens once
# the rest of the config machinery is in place.
from .console import detect_console_encoding
options.register_option('display.encoding', detect_console_encoding(), validator=is_string)
options.register_option('display.max_rows', 60, validator=any_validator(is_null, is_integer))
options.register_option('display.max_columns', 20, validator=any_validator(is_null, is_integer))
options.register_option('display.large_repr', 'truncate', validator=is_in(['truncate', 'info']))
options.register_option('display.notebook_repr_html', True, validator=is_bool)
options.register_option('display.precision', 6, validator=is_integer)
options.register_option('display.float_format', None)
options.register_option('display.chop_threshold', None)
options.register_option('display.column_space', 12, validator=is_integer)
options.register_option('display.pprint_nest_depth', 3, validator=is_integer)
options.register_option('display.max_seq_items', 100, validator=is_integer)
options.register_option('display.max_colwidth', 50, validator=is_integer)
options.register_option('display.multi_sparse', True, validator=is_bool)
options.register_option('display.colheader_justify', 'right', validator=is_string)
options.register_option('display.unicode.ambiguous_as_wide', False, validator=is_bool)
options.register_option('display.unicode.east_asian_width', False, validator=is_bool)
options.register_option('display.height', 60, validator=any_validator(is_null, is_integer))
options.register_option('display.width', 80, validator=any_validator(is_null, is_integer))
options.register_option('display.expand_frame_repr', True)
options.register_option('display.show_dimensions', 'truncate', validator=is_in([True, False, 'truncate']))
"xuye.qin@alibaba-inc.com"
] | xuye.qin@alibaba-inc.com |
36f275751e1301d42602f769a222608e3f3ea75d | 448fd7b58f53b6b8394a2a4a8f6325c3b731afa8 | /EXE_RP/modules custom setups/pyforms/setup.py | 461f21c3b99588eeb172ee6e1cd8b30916b711b1 | [
"MIT"
] | permissive | webclinic017/TraderSoftwareRP | 7c4a5833226f54c84d941830adc26263e984f957 | 3996bb4b1add72901530079d0a2b7aa6a7b33680 | refs/heads/master | 2022-04-17T13:29:03.724522 | 2020-04-12T07:48:00 | 2020-04-12T07:48:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Packaging script for PyForms, a Python 2.7/3 GUI framework."""

__author__ = "Ricardo Ribeiro"
__credits__ = ["Ricardo Ribeiro"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Ricardo Ribeiro"
__email__ = "ricardojvr@gmail.com"
__status__ = "Production"

from setuptools import setup

setup(
    name='PyForms',
    version='0.1.4.dev2',
    description="""Pyforms is a Python 2.7 and 3.0 framework to develop GUI application,
	which promotes modular software design and code reusability with minimal effort.""",
    author='Ricardo Ribeiro',
    author_email='ricardojvr@gmail.com',
    license='MIT',
    # Fix: setuptools expects 'download_url'; the original misspelled it as
    # 'download_urlname', an unknown keyword that setuptools silently ignored.
    download_url='https://github.com/UmSenhorQualquer/pyforms',
    url='https://github.com/UmSenhorQualquer/pyforms',
    # Explicit package list (no find_packages in the original layout).
    packages=[
        'pyforms',
        'pyforms.Utils',
        'pyforms.terminal',
        'pyforms.terminal.Controls',
        'pyforms.web',
        'pyforms.web.Controls',
        'pyforms.web.django',
        'pyforms.web.django.templatetags',
        'pyforms.gui',
        'pyforms.gui.dialogs',
        'pyforms.gui.Controls',
        'pyforms.gui.Controls.ControlEventTimeline',
        'pyforms.gui.Controls.ControlEventsGraph',
        'pyforms.gui.Controls.ControlPlayer'],
    # Non-Python assets shipped inside the package (UI files, icons, JS).
    package_data={'pyforms': [
        'web/django/*.js',
        'web/django/chartjs/Chart.min.js',
        'gui/Controls/uipics/*.png',
        'gui/mainWindow.ui', 'gui/Controls/*.ui', 'gui/Controls/ControlPlayer/*.ui',
        'gui/Controls/ControlEventTimeline/*.ui']
    },
    install_requires=[
        "pyopengl >= 3.1.0",
        "visvis >= 1.9.1",
        "numpy >= 1.6.1"
    ],
)
"reprior123@gmail.com"
] | reprior123@gmail.com |
669b7d9bcd7b3ebfd57a1310141c672bc89c7dec | e32bb97b6b18dfd48760ed28553a564055878d48 | /source_py2/test_python_toolbox/test_nifty_collections/test_lazy_tuple/test_frozen_dict.py | 6a5067df2c9b2f5f39bfd4f4bdc6892857a6111c | [
"MIT"
] | permissive | rfdiazpr/python_toolbox | 26cb37dd42342c478931699b00d9061aedcd924a | 430dd842ed48bccdb3a3166e91f76bd2aae75a88 | refs/heads/master | 2020-12-31T04:15:53.977935 | 2014-04-30T23:54:58 | 2014-04-30T23:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | # Copyright 2009-2014 Ram Rachum.
# This program is distributed under the MIT license.
'''Testing module for `python_toolbox.nifty_collections.LazyTuple`.'''
import uuid
import itertools
import collections
from python_toolbox import cute_iter_tools
from python_toolbox import sequence_tools
from python_toolbox import cute_testing
from python_toolbox.nifty_collections import FrozenDict
def test():
    """Exercise the basic mapping contract of ``FrozenDict``.

    Covers: length, key/value/item views, indexing, missing-key error,
    hashability (usable as set member and dict key) and the ``copy``
    update-constructor.

    NOTE(review): relies on Python-2-only ``iterkeys``/``itervalues``/
    ``iteritems`` — consistent with the ``source_py2`` tree this file
    lives in.
    """
    frozen_dict = FrozenDict({'1': 'a', '2': 'b', '3': 'c',})
    assert len(frozen_dict) == 3
    # Plain iteration and both key views must agree.
    assert set(frozen_dict) == set(frozen_dict.keys()) == \
        set(frozen_dict.iterkeys()) == set('123')
    assert set(frozen_dict.values()) == \
        set(frozen_dict.itervalues()) == set('abc')
    assert set(frozen_dict.items()) == \
        set(frozen_dict.iteritems()) == {('1', 'a'), ('2', 'b'), ('3', 'c'),}
    assert frozen_dict['1'] == 'a'
    # Missing keys must raise a LookupError (KeyError subclass).
    with cute_testing.RaiseAssertor(exception_type=LookupError):
        frozen_dict['missing value']
    # Hashability: usable as a set element and as a dict key.
    assert {frozen_dict, frozen_dict} == {frozen_dict}
    assert {frozen_dict: frozen_dict} == {frozen_dict: frozen_dict}
    assert isinstance(hash(frozen_dict), int)
    # ``copy`` accepts both a mapping and keyword overrides.
    assert frozen_dict.copy({'meow': 'frrr'}) == \
        frozen_dict.copy(meow='frrr') == \
        FrozenDict({'1': 'a', '2': 'b', '3': 'c', 'meow': 'frrr',})
assert repr(frozen_dict).startswith('FrozenDict(') | [
"ram@rachum.com"
] | ram@rachum.com |
f11e82d29c20ff0e44d26b7febaefe0bfff58a0a | 30f15cac2567373380d288e4be7a8e7aed73519c | /examples/gabcpmc_sumnorm_useaux.py | 1d05365786c5bd244fcc8f535b8a9e255f6a65c7 | [] | no_license | HajimeKawahara/abcfast | c208570111c23145ae95421e7471cc5f69335127 | 951d7998578a245da2dabb6f97c70a2392ea5d43 | refs/heads/master | 2020-04-21T21:04:15.619478 | 2020-03-20T03:56:07 | 2020-03-20T03:56:07 | 169,866,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,678 | py | from abcfast.gabcpmc import *
from abcfast.utils import statutils
if __name__ == "__main__":
import numpy as np
import matplotlib.pyplot as plt
from numpy import random
from scipy.stats import gamma as gammafunc
from scipy.stats import norm as normfunc
import time
import sys
tstart=time.time()
print("*******************************************")
print("GPU ABC PMC Method.")
print("This code demonstrates a normal+normal distribution. Beaumont+2009")
print("*******************************************")
#preparing data
nsample=500
Yobs=0.0
# start ABCpmc
abc=ABCpmc()
abc.wide=2.0
abc.Ecrit=0.0
abc.maxtryx=100000#debug magic
abc.npart=512*16#debug magic
# input model/prior
abc.nparam=1
abc.aux=np.array([0.1,1.0])
abc.model=\
"""
/* the double normal distribution model generator */
#include "gennorm.h"
__device__ float model(float* Ysim, float* param, curandState* s, float* aux, int isample){
float cl=curand_uniform(s);
int i=int(cl*2.0);
Ysim[0] = normf(param[0],aux[i], s);
}
"""
# prior
def fprior():
def f(x):
mask=(x<10.0)*(x>-10.0)
arr=np.zeros(len(x))
arr[mask]=1.0
return arr
return f
abc.fprior = fprior()#
abc.prior=\
"""
#include <curand_kernel.h>
__device__ void prior(float* param,curandState* s){
param[0] = (curand_uniform(s)-0.5)*20.0;
return;
}
"""
# data and the summary statistics
abc.nsample = 1
abc.ndata = 1
Ysum = Yobs
abc.Ysm = np.array([Ysum])
#set prior parameters
abc.epsilon_list = np.array([2.0,1.5,1.0,0.5,1.e-2])
#initial run of abc pmc
abc.check_preparation()
abc.run()
abc.check()
# plt.hist(abc.x,bins=20,label="$\epsilon$="+str(abc.epsilon),density=True,alpha=0.5)
#pmc sequence
for eps in abc.epsilon_list[1:]:
abc.run()
abc.check()
tend = time.time()
xref=np.linspace(-3.0,3.0,1000)
print(tend-tstart,"sec")
print(abc.xres())
#plotting...
fig=plt.figure()
ax=fig.add_subplot(211)
ax.hist(abc.x,bins=200,label="$\epsilon$="+str(abc.epsilon),density=True,alpha=0.3)
ax.hist(abc.xres(),bins=200,label="resampled",density=True,alpha=0.1)
ax.plot(xref,0.5*normfunc.pdf(xref,0.0,1.0)+0.5*normfunc.pdf(xref,0.0,1.e-1))
ax.legend()
ax=fig.add_subplot(212)
ax.plot(abc.x,abc.w,".")
plt.ylabel("weight")
plt.xlim(-3,3)
plt.ylim(0,np.max(abc.w))
plt.savefig("sumnorm.png")
plt.show()
| [
"divrot@gmail.com"
] | divrot@gmail.com |
d8b647cce8583803aa21188fc6af6a53879bbcc8 | 5c661f53aa00dbaf595d0e8a565a749c4c55c5cf | /commando/django/core/management/sqlflush.py | ffc719348003842cbde0b2f1f1479c431d769673 | [
"MIT"
] | permissive | skibblenybbles/django-commando | 894f34c80d16fe60555c3f34439e45af124dba32 | dd1dd6969fc0dd8231fc115fee3eeb690809585b | refs/heads/master | 2021-01-22T06:54:28.874271 | 2014-01-16T15:38:07 | 2014-01-16T15:38:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | from commando import management
# Wrap Django's built-in ``sqlflush`` management command (looked up outside
# the commando package itself to avoid finding this wrapper recursively).
BaseSQLFlushCommand = management.get_command_class(
    "sqlflush", exclude_packages=("commando",))

if BaseSQLFlushCommand is not None:
    # Instantiate once so we can mirror its args/help/options below.
    base = BaseSQLFlushCommand()

    class SQLFlushCommandOptions(management.CommandOptions):
        """
        SQLFlush command options.
        """
        args = base.args
        help = base.help
        # Only the options sqlflush adds beyond the common base options.
        option_list = base.option_list[
            len(management.BaseCommandOptions.option_list):]
        option_groups = (
            ("[sqlflush options]",
                "These options will be passed to sqlflush.",
                option_list,
            ),) if option_list else ()
        actions = ("sqlflush",)

        def handle_sqlflush(self, *args, **options):
            # Delegate straight to the wrapped management command.
            return self.call_command("sqlflush", *args, **options)

    class SQLFlushCommand(SQLFlushCommandOptions, management.StandardCommand):
        """
        SQLFlush command.
        """
        option_list = management.StandardCommand.option_list
        option_groups = \
            SQLFlushCommandOptions.option_groups + \
            management.StandardCommand.option_groups
else:
    # sqlflush unavailable in this Django install: degrade to a no-op command.
    SQLFlushCommand = management.StandardCommand
| [
"mkibbel@gmail.com"
] | mkibbel@gmail.com |
6a88be3e9bcc912b2a54083082b83a3e47393144 | d91b7761d556d320e897eddceb378a53a99fb1a6 | /library/CAMB_library.py | c860083a5735c3e64a9648f1ae34b49e7ae5b841 | [] | no_license | franciscovillaescusa/Pylians3_old | d945760e4ccce91d943276db1a456c76861e5f22 | aa9ca5904b818c3f4ca431642332986fc8932772 | refs/heads/master | 2020-06-19T14:37:01.421069 | 2019-09-08T16:29:47 | 2019-09-08T16:29:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,413 | py | import numpy as np
import camb
import sys,os
# This routine computes the linear power spectra using CAMB given the input
# cosmological parameters. To do the rescaling with s8 we always need to generate
# the z=0 linear matter Pk, i.e. in redshifts there always should be 0
# PkL.z -------> redshifts [0, 0.5, 1, 2 ...]
# PkL.k -------> wavenumbers
# PkL.s8 ------> array with the values of sigma8
# PkL.Hz ------> array with the values of Hz
# PkL.Pkmm ----> matrix with matter Pk: Pkmm[1,:] = mm P(k) at z[1]
# PkL.Pkcc ----> matrix with matter Pk: Pkcc[1,:] = cc P(k) at z[1]
# PkL.Pkbb ----> matrix with matter Pk: Pkbb[1,:] = bb P(k) at z[1]
# PkL.Pkcb ----> matrix with matter Pk: Pkcb[1,:] = cb P(k) at z[1]
# PkL.Pknn ----> matrix with matter Pk: Pkcc[1,:] = nu P(k) at z[1]
class PkL:
    """Linear power spectra (mm/cc/bb/cb/nn) computed with CAMB.

    Attributes set on construction (see module header for details):
    ``z``, ``k``, ``s8``, ``Hz`` and the ``Pk??`` matrices, where row ``i``
    of each matrix is the corresponding P(k) at redshift ``z[i]``.
    """
    def __init__(self, Omega_m=0.3175, Omega_b=0.049, h=0.6711, ns=0.9624, s8=None,
                 Mnu=0.0, As=2.13e-9, Omega_k=0.0,
                 pivot_scalar=0.05, pivot_tensor=0.05,
                 Nnu=3, hierarchy='degenerate', Neff=3.046, tau=None,
                 redshifts=[0, 0.5, 1, 2, 3], kmax=10.0, k_per_logint=50,
                 verbose=False):
        # NOTE: the mutable default ``redshifts`` is only read, never
        # mutated, so the shared-default pitfall does not bite here.
        Omega_c = Omega_m - Omega_b - Mnu/(93.14*h**2)
        Omega_cb = Omega_c + Omega_b

        pars = camb.CAMBparams()

        # set accuracy of the calculation
        pars.set_accuracy(AccuracyBoost=5.0, lSampleBoost=5.0,
                          lAccuracyBoost=5.0, HighAccuracyDefault=True,
                          DoLateRadTruncation=True)

        # set value of the cosmological parameters
        pars.set_cosmology(H0=h*100.0, ombh2=Omega_b*h**2, omch2=Omega_c*h**2,
                           mnu=Mnu, omk=Omega_k,
                           neutrino_hierarchy=hierarchy,
                           num_massive_neutrinos=Nnu, nnu=Neff,
                           tau=tau)

        # set the value of the primordial power spectrum parameters
        pars.InitPower.set_params(As=As, ns=ns,
                                  pivot_scalar=pivot_scalar,
                                  pivot_tensor=pivot_tensor)

        # set redshifts, k-range and k-sampling
        pars.set_matter_power(redshifts=redshifts, kmax=kmax,
                              k_per_logint=k_per_logint)

        # compute results
        results = camb.get_results(pars)

        # get raw matter P(k) and transfer functions with weird k-binning
        #k, zs, Pk = results.get_linear_matter_power_spectrum()
        #Tk = (results.get_matter_transfer_data()).transfer_data

        # interpolate to get Pmm, Pcc...etc
        # var codes: 2=cdm, 3=baryons, 6=massive nu, 7=total matter.
        k,z,Pmm = results.get_matter_power_spectrum(minkh=2e-5, maxkh=kmax,
                                                    npoints=500, var1=7, var2=7,
                                                    have_power_spectra=True,
                                                    params=None)
        k,z,Pcc = results.get_matter_power_spectrum(minkh=2e-5, maxkh=kmax,
                                                    npoints=500, var1=2, var2=2,
                                                    have_power_spectra=True,
                                                    params=None)
        k,z,Pbb = results.get_matter_power_spectrum(minkh=2e-5, maxkh=kmax,
                                                    npoints=500, var1=3, var2=3,
                                                    have_power_spectra=True,
                                                    params=None)
        k,z,Pcb = results.get_matter_power_spectrum(minkh=2e-5, maxkh=kmax,
                                                    npoints=500, var1=2, var2=3,
                                                    have_power_spectra=True,
                                                    params=None)
        # Combine auto/cross spectra into the cdm+baryon spectrum.
        Pcb = (Omega_c**2*Pcc + Omega_b**2*Pbb +\
               2.0*Omega_b*Omega_c*Pcb)/Omega_cb**2
        k,z,Pnn = results.get_matter_power_spectrum(minkh=2e-5, maxkh=kmax,
                                                    npoints=500, var1=6, var2=6,
                                                    have_power_spectra=True,
                                                    params=None)

        # rescale by sigma_8
        s8_linear = results.get_sigma8()[-1]
        if s8 is not None and z[0] != 0.0:
            raise Exception('To rescale by s8 we need to generate the linear Pk at z=0')
        # BUG FIX: previously ``factor = (s8/s8_linear)**2`` was evaluated
        # unconditionally, raising TypeError whenever ``s8`` was left at its
        # default of None. No rescaling is the identity factor 1.
        factor = 1.0 if s8 is None else (s8/s8_linear)**2

        # get sigma_8 and Hz in km/s/(kpc/h)
        self.s8 = np.array(results.get_sigma8())[::-1]*np.sqrt(factor)
        self.Hz = np.array([results.hubble_parameter(red) for red in z])
        self.z = z; self.k = k
        self.Pkmm = Pmm*factor; self.Pknn = Pnn*factor
        self.Pkcc = Pcc*factor; self.Pkbb = Pbb*factor; self.Pkcb = Pcb*factor

        if verbose: print(pars)

        #fout = 'Pk_trans_z=%.3f.txt'%z
        # notice that transfer functions have an inverted order:i=0 ==>z_max
        #np.savetxt(fout,np.transpose([Tk[0,:,i],Tk[1,:,i],Tk[2,:,i],Tk[3,:,i],
        #                              Tk[4,:,i],Tk[5,:,i],Tk[6,:,i]]))
| [
"villaescusa.francisco@gmail.com"
] | villaescusa.francisco@gmail.com |
892c7ac75a0e494f8780281d4139c8602ba5f045 | 171781c9b8ac1cb1bd0562db53788d2c570d4aa4 | /myapp/apps.py | ec02046654421fb4a91100b3f227104a665ba0e9 | [] | no_license | johnbangla/showcasesecond | 4949c492ffc38306320325ea5bacb40b72ba7e21 | 84877579a7204d289a64e1db57ada4ff0e792b95 | refs/heads/master | 2023-01-12T07:12:56.754055 | 2020-10-29T07:20:22 | 2020-10-29T07:20:22 | 275,085,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | from django.apps import AppConfig
class MyappConfig(AppConfig):
name = 'myapp'
def ready(self):
import myapp.signals | [
"johnbangla@gmail.com"
] | johnbangla@gmail.com |
1233414a1856f946a67a85615938cf7566b3e2d4 | f6d2d1c3e5525dc955a47da39d78481cda105699 | /django/pitches/api/serializers.py | 2da08e797f0fb7c1929b6bc208d809a9811e9ec4 | [] | no_license | KyleLawson16/pitch-yak | 340458833debe4ccf5e3774fb714491624035297 | c98505c5c4a8de369fd749d56a91028373109e50 | refs/heads/master | 2021-01-23T18:31:36.105954 | 2017-09-11T22:21:49 | 2017-09-11T22:21:49 | 102,796,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | from rest_framework.serializers import (
ModelSerializer,
HyperlinkedIdentityField,
SerializerMethodField
)
from pitches.models import Pitch
# Shared hyperlink field pointing each pitch at its detail endpoint,
# keyed by the model's ``unique_id`` rather than the numeric pk.
pitches_detail_url = HyperlinkedIdentityField(
    view_name='api:detail',
    lookup_field='unique_id'
)
class PitchListSerializer(ModelSerializer):
    """List representation of a Pitch: summary fields plus a detail URL."""
    url = pitches_detail_url

    class Meta:
        model = Pitch
        fields = [
            'url',
            'id',
            'title',
            'pitch',
            'likes',
            'dislikes',
        ]
class PitchCreateUpdateSerializer(ModelSerializer):
    """Write serializer: only title/body are client-editable; counters and
    ownership are managed server-side."""
    class Meta:
        model = Pitch
        fields = [
            'title',
            'pitch',
        ]
class PitchDetailSerializer(ModelSerializer):
    """Detail representation of a Pitch, exposing the author's username."""
    # Computed read-only field resolved by get_user() below.
    user = SerializerMethodField()

    class Meta:
        model = Pitch
        fields = [
            'user',
            'id',
            'title',
            'pitch',
            'likes',
            'dislikes',
        ]

    def get_user(self, obj):
        # Render the related user as its username string.
        return str(obj.user.username)
| [
"Kyle.Lawson7@yahoo.com"
] | Kyle.Lawson7@yahoo.com |
d06d05cbff3f00b938366a8b1ec2a636bbbfca52 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/3070/453003070.py | 91a536502088f709e87f6454abc6630e2e0603aa | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 837 | py | from bots.botsconfig import *
from records003070 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'ST',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'SSC', MIN: 1, MAX: 1},
{ID: 'DTP', MIN: 1, MAX: 2},
{ID: 'N1', MIN: 0, MAX: 999999},
{ID: 'R2', MIN: 0, MAX: 13},
{ID: 'OD', MIN: 0, MAX: 1},
{ID: 'PI', MIN: 0, MAX: 10},
{ID: 'PR', MIN: 0, MAX: 99},
{ID: 'CT', MIN: 0, MAX: 99},
{ID: 'APR', MIN: 0, MAX: 99},
{ID: 'SHR', MIN: 0, MAX: 99},
{ID: 'SR', MIN: 0, MAX: 7, LEVEL: [
{ID: 'LX', MIN: 0, MAX: 999999, LEVEL: [
{ID: 'ISD', MIN: 0, MAX: 15},
{ID: 'ISC', MIN: 0, MAX: 999999},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
49d16d047bd4ea4ff578997b297de8bd7f86c743 | 8feecb692bacdb10340af1b40878da4f24f5f2dd | /ammarit/it/migrations/0002_auto_20160718_1343.py | 5b5c18bf81fab9a5096915a2faa964e1bf36cb8f | [
"Apache-2.0"
] | permissive | aammar/IT-Storage-Ticket-System | 2bbf4cf240eef59557bc0faf728379dadf522cce | 6b8fb7a915cdd10e7a003301a35477a718129643 | refs/heads/master | 2021-01-11T18:18:45.272913 | 2016-08-25T19:23:33 | 2016-08-25T19:23:33 | 71,046,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: normalises Item field names to lower-case
    and links each Item to an Owner (nullable for existing rows)."""

    dependencies = [
        ('it', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='item',
            old_name='Category',
            new_name='category',
        ),
        migrations.RenameField(
            model_name='item',
            old_name='Item_Number',
            new_name='itemNumber',
        ),
        migrations.RenameField(
            model_name='item',
            old_name='Make',
            new_name='make',
        ),
        migrations.RenameField(
            model_name='item',
            old_name='Model',
            new_name='model',
        ),
        migrations.AddField(
            model_name='item',
            name='owner',
            # null=True so pre-existing items migrate without an owner.
            field=models.ForeignKey(to='it.Owner', null=True),
        ),
    ]
| [
"hasan.aljawaheri@gmail.com"
] | hasan.aljawaheri@gmail.com |
bc06edcede98909e1064f09cc79a7c965352f912 | d0151e3cc292a1d3e2472515741c24ac99ef89c5 | /lcamatrix/product_flow.py | ffde7890434ec03686092acd85b1da8f9bfa665e | [] | no_license | bkuczenski/lca-matrix | 73123fbc0bf238697aed316577287b148318b2aa | 78962e3f9ce94c351754667df07b6ed0e61d1fa7 | refs/heads/master | 2021-01-19T19:59:06.480947 | 2017-06-16T19:45:25 | 2017-06-16T19:45:25 | 78,722,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,288 | py | class ProductFlow(object):
"""
Class for storing foreground-relevant information about a single matched row-and-column in the interior matrix.
"""
def __init__(self, index, flow, process):
"""
Initialize a row+column in the technology matrix. Each row corresponds to a reference exchange in the database,
and thus represents a particular process generating / consuming a particular flow. A ProductFlow entry is
akin to a fragment termination.
inbound_ev is the exchange value of the reference flow, which is divided into the exchange values of the child
flows. It has the convention Output = positive, so if the reference exchange is an input, the inbound_ev is
negated. Similarly, the exchange values of matrix entries made with ProductFlow parents need to be implemented
as Input = positive, with outputs negated. This is to satisfy the make-use equation e.g. V - U' in Suh 2010
or whichever it was.
:param flow: the LcFlow entity that represents the commodity (term_flow in the fragment sense)
:param process: the termination of the parent node's exchange (term_node). None is equivalent to a
cutoff flow or elementary flow (distinction is left to a compartment manager). If non-null, the process must
possess a reference exchange with the same flow or the graph traversal may be curtailed.
"""
self._index = index
self._flow = flow
self._process = process
self._direction = None
self._hash = (flow.uuid, None)
self._inbound_ev = 1.0
if process is None:
raise TypeError('No termination? should be a cutoff.')
if len([x for x in process.reference_entity if x.flow == flow]) == 0:
# still a cutoff- raise a flag but not an error
print('NoMatchingReference: Flow: %s, Termination: %s' % (flow.uuid, process.uuid))
else:
self._hash = (flow.uuid, process.uuid)
ref_exch = process.reference(flow)
self._direction = ref_exch.direction
self._inbound_ev = ref_exch.value
if self._inbound_ev is None:
print('None inbound ev! using 1.0. f:%s t:%s' % (flow, process))
self._inbound_ev = 1.0
elif self._inbound_ev == 0:
raise ZeroDivisionError('No inbound EV for f:%s t:%s' % (flow.get_external_ref(),
process.get_external_ref()))
if self._direction == 'Input':
self._inbound_ev *= -1
def __eq__(self, other):
"""
shortcut-- allow comparisons without dummy creation
:param other:
:return:
"""
return hash(self) == hash(other)
# if not isinstance(other, ProductFlow):
# return False
# return self.flow == other.flow and self.process == other.process
def __hash__(self):
return hash(self._hash)
def adjust_ev(self, value):
"""
Compensate recorded inbound exchange value if the process is found to depend on its own reference flow.
Assumption is that the flow is already sign-corrected (i.e. inbound_ev is positive-output, adjustment value
is positive-input, so new inbound_ev is difference
:param value:
:return:
"""
if value == self._inbound_ev:
print('Ignoring unitary self-dependency')
else:
self._inbound_ev -= value
@property
def index(self):
return self._index
@property
def key(self):
"""
Product flow key is (uuid of reference flow, uuid of process)
:return:
"""
return self._hash
@property
def flow(self):
return self._flow
@property
def process(self):
return self._process
@property
def direction(self):
return self._direction
@property
def inbound_ev(self):
return self._inbound_ev
def __str__(self):
return '%s:==%s' % (self._process, self._flow)
def table_label(self):
return '%s (%s) [%s]' % (self._flow['Name'], self._flow.unit(), self._process['SpatialScope'])
| [
"brandon.kuczenski@301south.net"
] | brandon.kuczenski@301south.net |
ee667b910b500ba5afa77f5fb347aa8a5094ab98 | 40f4908483b98fc4f370ff4f2d520e1284d045b3 | /immortals_repo/shared/tools/brass_api/translator/preprocess.py | 66bdc5df406a7b802fee3c8356a7ed5a65c0d91c | [] | no_license | TF-185/bbn-immortals | 7f70610bdbbcbf649f3d9021f087baaa76f0d8ca | e298540f7b5f201779213850291337a8bded66c7 | refs/heads/master | 2023-05-31T00:16:42.522840 | 2019-10-24T21:45:07 | 2019-10-24T21:45:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,063 | py | import shutil
import os
from brass_api.common.exception_class import BrassException
def create_preprocessor(xml_file):
    """Build the preprocessor matching the XML dialect of ``xml_file``.

    Sniffs the first two lines of the file:
      * 'MDL' in line 1          -> MDLPreprocessor
      * 'DAUInventory' in line 1 -> InventoryPreprocessor
      * 'VCL' in line 2          -> VICTORYPreprocessor

    :param xml_file: path of the XML file to inspect.
    :return: a Preprocessor subclass instance, or None when the file does
        not exist or matches no known dialect.
    """
    if not os.path.exists(xml_file):
        return None
    # Context manager closes the handle (the previous version leaked it).
    with open(xml_file, 'r') as infile:
        first_line = infile.readline().rstrip()
        # Second physical line; '' on a one-line file (the previous
        # ``infile.readlines()[0]`` raised IndexError in that case).
        second_line = infile.readline().rstrip()
    if 'MDL' in first_line:
        return MDLPreprocessor(xml_file)
    elif 'DAUInventory' in first_line:
        return InventoryPreprocessor(xml_file)
    elif 'VCL' in second_line:
        return VICTORYPreprocessor(xml_file)
    return None
class Preprocessor(object):
    """Base class for XML preprocessors.

    Tracks two paths: the original XML file and a working copy named
    ``<original>.orientdb`` that subclasses rewrite in place.
    """

    def __init__(self, xml_file):
        self.original_xml_file = xml_file
        self.orientdb_xml_file = xml_file + '.orientdb'
        self._schema = None

    def create_orientdb_xml(self):
        """Copy the original XML to the working file; no-op if it is absent."""
        if not os.path.exists(self.original_xml_file):
            return
        shutil.copy2(self.original_xml_file, self.orientdb_xml_file)

    def remove_orientdb_xml(self):
        """Delete the working copy when present."""
        if not os.path.exists(self.orientdb_xml_file):
            return
        os.remove(self.orientdb_xml_file)

    def preprocess_xml(self):
        """Default preprocessing: just (re)create the working copy."""
        self.create_orientdb_xml()
class MDLPreprocessor(Preprocessor):
    """Preprocessor for MDL documents: strips the namespace-laden
    <MDLRoot> attributes (which break parsing), remembers the schema
    file name found there, and validates the result against it."""

    def __init__(self, xml_file):
        super().__init__(xml_file)

    def preprocess_xml(self):
        # Copy to the working file, simplify <MDLRoot>, then validate.
        super().preprocess_xml()
        self.remove_mdl_root_tag_attr()
        self.validate_mdl(self.orientdb_xml_file, self._schema)

    def add_mdl_root_tag_attr(self, mdl_schema):
        """
        Creates a string for <MDLRoot> that includes tmats xsd files mdl schema xsd files.
        These attributes are removed during importing because they caused xml parsing to fail.

        :param str mdl_schema: name of the mdl schema file
        :return: a <MDLRoot> string containing all the includes and correct MDL schema version
        """
        mdl_root_str = '<MDLRoot xmlns="http://www.wsmr.army.mil/RCC/schemas/MDL"\
            xmlns:tmatsCommon="http://www.wsmr.army.mil/RCC/schemas/TMATS/TmatsCommonTypes"\
            xmlns:tmatsP="http://www.wsmr.army.mil/RCC/schemas/TMATS/TmatsPGroup"\
            xmlns:tmatsD="http://www.wsmr.army.mil/RCC/schemas/TMATS/TmatsDGroup"\
            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\
            xsi:schemaLocation="http://www.wsmr.army.mil/RCC/schemas/MDL {0}">'.format(mdl_schema)
        return mdl_root_str

    def remove_mdl_root_tag_attr(self):
        """
        Removes the xml attributes of the <MDLRoot> in the xmlfile
        as all the inclusions of tmats xsd files causes parsing to fail.
        The modified xml is saved inline.

        :param str xmlfile: name and path of xml file
        :return: the string "<MDLRoot>"
        """
        import re
        mdl_schema = None
        mdl_root_str = None

        # get MDL content
        with open(self.orientdb_xml_file, 'r') as f:
            content = f.read()

        # find/replace MDLRoot element
        # NOTE(review): .group(1) raises AttributeError if no <MDLRoot ...>
        # tag is present — assumes well-formed MDL input.
        mdl_root_str = re.search('(<MDLRoot[^>]*>)', content, flags = re.MULTILINE).group(1)
        content = content.replace(mdl_root_str, '<MDLRoot>', 1)

        # write out simplified MDLRoot
        with open(self.orientdb_xml_file, 'w') as f:
            f.write(content)

        print(f"Root str: {mdl_root_str}")
        # Remember the "MDL_...xsd" schema file named in the root attributes.
        matchObj = re.search('MDL_(.*)xsd', mdl_root_str)
        if matchObj is not None:
            mdl_schema = matchObj.group(0)
        self._schema = mdl_schema

    def validate_mdl(self, xmlfile_path, mdl_schema):
        """
        Validates a xml file given by xmlfile_path against the mdl_schema.
        Todo: Still to need to make this work for the MDL exporter.

        :param str xmlfile_path: name and path of xml file to validate
        :param str mdl_schema: name of mdl_schema
        :return Boolean status: result of validation (True or False)
        :raises BrassException: throws any exception encountered
        """
        from lxml import etree
        BASE_DIR = os.path.dirname(os.path.realpath(__file__))
        # Schema files are shipped under include/mdl_xsd relative to this module.
        mdl_schema = "{0}/../include/mdl_xsd/{1}".format(BASE_DIR, mdl_schema)
        status = None
        try:
            schema_doc = etree.parse(mdl_schema)
            schema = etree.XMLSchema(schema_doc)
            with open(xmlfile_path) as f:
                doc = etree.parse(f)
                status = schema.validate(doc)
        except etree.XMLSchemaParseError as e:
            status = False
            raise BrassException('Invalid MDL Schema File: ' + e.message, 'xml_util.validate_mdl')
        except etree.DocumentInvalid as e:
            status = False
            raise BrassException('Invalide MDL XML File: ' + e.message, 'xml_util.validate_mdl')
        finally:
            # NOTE(review): returning from ``finally`` cancels the
            # BrassException raised in the except blocks above, so despite
            # the docstring this method never propagates an exception —
            # callers only ever see True/False/None.
            return status
class InventoryPreprocessor(Preprocessor):
    """Preprocessor for DAUInventory documents; the base behaviour
    (copying to the .orientdb working file) is sufficient."""

    def __init__(self, xml_file):
        super().__init__(xml_file)

    def preprocess_xml(self):
        # No inventory-specific rewriting is needed.
        super().preprocess_xml()
class VICTORYPreprocessor(Preprocessor):
    """Preprocessor for VICTORY (VCL) documents: simplifies the root tag,
    records the schema name found there and strips the ``vcl:`` namespace
    prefix from element tags in the working file."""

    def __init__(self, xml_file):
        super().__init__(xml_file)

    def preprocess_xml(self):
        super().preprocess_xml()
        # Order matters: simplify the root first, then drop the prefix
        # from the remaining elements.
        self.remove_vcl_root_tag_attr()
        self.remove_vcl_namespace()

    def remove_vcl_namespace(self):
        """
        Removes vcl namespace from the root, configGroup, and ConfigItem keys in xml
        """
        import fileinput
        # inplace=1 redirects stdout into the file being edited.
        for lines in fileinput.FileInput(self.orientdb_xml_file, inplace=1):
            stripped_line = lines.strip()
            if stripped_line.startswith('<vcl:'):
                lines = lines.replace('<vcl:', '<', 1)
                # NOTE(review): ``lines`` still ends with '\n', and print
                # appends another — modified lines gain a blank line,
                # unlike the else branch which uses end=''.
                print(lines)
            elif stripped_line.startswith('</vcl:'):
                lines = lines.replace('</vcl:', '</')
                print(lines)
            else:
                print(lines, end='')

    def remove_vcl_root_tag_attr(self):
        """
        Removes the xml attributes of the <MDLRoot> in the xmlfile
        as all the inclusions of tmats xsd files causes parsing to fail.
        The modified xml is saved inline.

        :param str xmlfile: name and path of xml file
        :return: the string "<MDLRoot>"
        """
        import fileinput, re
        mdl_schema = None
        mdl_root_str = None
        # Replace the attribute-laden <vcl:VCL ...> root with a bare <VCL>.
        for lines in fileinput.FileInput(self.orientdb_xml_file, inplace=1):
            if lines.startswith('<vcl:VCL'):
                print('<VCL>')
                mdl_root_str = lines
            else:
                print(lines, end='')
        # NOTE(review): if no '<vcl:VCL' line exists, mdl_root_str stays
        # None and re.search below raises TypeError.
        matchObj = re.search('VICTORYConfigurationLanguage(.*)xsd', mdl_root_str)
        if matchObj is not None:
            mdl_schema = matchObj.group(0)
        self._schema = mdl_schema

    def add_vcl_root_tag_attr(self, vcl_schema):
        """
        Creates a string for <MDLRoot> that includes tmats xsd files mdl schema xsd files.
        These attributes are removed during importing because they caused xml parsing to fail.

        :param str vcl_schema: name of the mdl schema file
        :return: a <MDLRoot> string containing all the includes and correct MDL schema version

        NOTE(review): ``vcl_schema`` is currently unused — the returned
        string hard-codes an absolute schemaLocation path.
        """
        vcl_root_str = '<VCL xmlns:vcl="http://www.victory-standards.org/Schemas/VICTORYConfigurationLanguage.xsd"\
            xmlns:vmt="http://www.victory-standards.org/Schemas/VICTORYManagementTypes.xsd"\
            xmlns:vst="http://www.victory-standards.org/Schemas/VICTORYSharedTypes.xsd"\
            xmlns:tns="http://www.w3.org/2003/05/soap-envelope"\
            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\
            xsi:schemaLocation="http://www.victory-standards.org/Schemas/VICTORYConfigurationLanguage.xsd file:/Volumes/Projects/10-23360_USAF_ROME/Shared/Scenarios/VICTORY%20Challenge%20Problem/Scenario%201/Scenario%201%20-%2020180328/VICTORYConfigurationLanguage.xsd">'
        return vcl_root_str
| [
"austin.wellman@raytheon.com"
] | austin.wellman@raytheon.com |
92c8823b00b04d9fc44af80494f25691189c9ac9 | e874aed81b6ae75467262423cf5fc4c6e4a31c64 | /dsn_brand/models/product.py | 5c89834b32d725eec4daaf28e432bcf1c7359c5c | [] | no_license | disna-sistemas/odoomrp-wip | 9dc14704e4aad00f95313d5465802fca13809b1a | 96958f442ae0e5274c8d7ebb8f2d1b636a16d48a | refs/heads/master | 2020-12-06T23:27:22.321202 | 2015-11-20T09:57:24 | 2015-11-20T09:57:24 | 26,594,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi, Guewen Baconnier
# Copyright 2012-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class partner(models.Model):
    # Extends product.template with a brand link.
    # NOTE(review): the class name ``partner`` is misleading — this model
    # inherits ``product.template``, not ``res.partner``.
    _inherit = "product.template"
    # Brand assigned to the product template.
    brand_id = fields.Many2one('brand', string='Brand')
| [
"sistemas@disna.com"
] | sistemas@disna.com |
fc1b9caaa29a6d3d45e8e7ae156ad413c812ad6b | 4fe0ed5e592641b272aa2167ae591155a9cad416 | /modelisation/bode.py | b35acf103938b896adcc4aefd7e07feb4b7045cf | [] | no_license | AlexandreMarcotte/test_code | cf715caee730cfdafa7cf97bd011ac15443872f3 | 07e115055befd55d4598dd8a4b33bbdd00ba6f5a | refs/heads/master | 2021-06-07T05:06:12.085390 | 2019-05-06T23:45:38 | 2019-05-06T23:45:38 | 137,810,297 | 0 | 0 | null | 2021-06-01T23:44:40 | 2018-06-18T21:50:39 | Python | UTF-8 | Python | false | false | 248 | py | from scipy import signal
import matplotlib.pyplot as plt
# K / (s + 1)
# First-order low-pass transfer function H(s) = 1 / (s + 1):
# numerator coefficients [1], denominator coefficients [1, 1].
s1 = signal.lti([1], [1, 1])
# Magnitude (dB) and phase (degrees) over an auto-selected log frequency grid.
w, mag, phase = signal.bode(s1)
plt.semilogx(w, mag)    # Bode magnitude plot
plt.figure()
plt.semilogx(w, phase)  # Bode phase plot
plt.show()
| [
"alexandre.marcotte.1094@gmail.com"
] | alexandre.marcotte.1094@gmail.com |
320fa0b8e1cd361fb2c02a73d04ab4d0162b7774 | 3b76f9f2317e1eb2cd9553cab0b4dd01ce216ad5 | /Alphabet rangoli3.py | 090099b26486b72357847a57c59364e9d98edbc9 | [] | no_license | KaziMotiour/Hackerrank-problem-solve-with-python | f12ea978c5274a90745545d3d2c9fb6a4f9b5230 | 798ce2a6c2b63ea24dc28a923bfee4b528fb2b5e | refs/heads/master | 2022-05-26T19:45:44.808451 | 2020-05-05T09:44:40 | 2020-05-05T09:44:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import string
size=int(input())
width=4*size-3
idth = 4 * size - 3
alpha = string.ascii_lowercase
for i in list(range(size))[::-1] + list(range(1, size)):
print('-'.join(alpha[size-1:i:-1] + alpha[i:size]).center(width, '-')) | [
"kmatiour30@gmail.com"
] | kmatiour30@gmail.com |
8a1329049224ec9c3a0acf0f983281006c36463b | 5a1a695829a2d1dbf4daa0736f0fbd6feffc7e63 | /baekjoon/g_10026(적록색약).py | 5a69233805d812eb26ed8b940a6dc7e424887b84 | [] | no_license | juyi212/Algorithm_study | f5d263c5329c994a457bbe897e5e1405d2b1d67a | f225cc593a50b74686111f654f7133707a1d1310 | refs/heads/master | 2023-03-21T20:02:36.138688 | 2021-03-16T14:16:40 | 2021-03-16T14:16:40 | 325,008,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | import sys
from collections import deque
def bfs(ch, r, c, what):
    """Flood-fill the region of value ``what`` containing cell (r, c).

    Matching neighbours are overwritten with 0 as they are enqueued, so
    the caller's scan counts each region exactly once. Relies on the
    module-level grid size ``n``.
    """
    queue = deque([(r, c)])
    while queue:
        row, col = queue.popleft()
        # Same exploration order as before: up, down, left, right.
        for drow, dcol in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nrow, ncol = row + drow, col + dcol
            if 0 <= nrow < n and 0 <= ncol < n and ch[nrow][ncol] == what:
                queue.append((nrow, ncol))
                ch[nrow][ncol] = 0
# 구역
# 적록색약이 아닌 사람과 적록색약인 사람이 그림을 봤을 때의 구역 수를 구하기
# bfs
# 2*N^2 -> 1초... 충분하겠고만
n = int(input())
drawing = [list(sys.stdin.readline().rstrip()) for _ in range(n)]
blind = [[0] * n for _ in range(n)]
# 적록색약 배열
for i in range(n):
for j in range(n):
if drawing[i][j] == 'R' or drawing[i][j] == 'G':
blind[i][j] = 1
else:
blind[i][j] = 2
not_blind = 0
y_blind = 0
for i in range(n):
for j in range(n):
if drawing[i][j] != 0:
not_blind += 1
bfs(drawing, i, j, drawing[i][j])
if blind[i][j] != 0:
y_blind += 1
bfs(blind, i, j, blind[i][j])
print(not_blind, end=' ')
print(y_blind)
'''
5
BBBBB
BBGBG
BGGGG
BBRRR
RRRRR
'''
| [
"dea8307@naver.com"
] | dea8307@naver.com |
8a7a5b1f37c6e4c6f4a183e669093eed73020be6 | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/codingbat/codingbat-python-master/Warmup-1/missing_char.py | 1657233e06d51e88f8838d6d6cbeac4873329ee1 | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 460 | py | # Given a non-empty string and an int n,
# return a new string where the char at index n has been removed.
# The value of n will be a valid index of a char in the original string
# (i.e. n will be in the range 0..len(str)-1 inclusive).
# missing_char('kitten', 1) → 'ktten'
# missing_char('kitten', 0) → 'itten'
# missing_char('kitten', 4) → 'kittn'
def missing_char(str, n):
    """Return `str` with the character at (valid) index `n` removed."""
    # Glue the slice before index n to the slice after it.
    return str[:n] + str[n + 1:]
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
1b2301b1d3e5f15ec3c78755f8a9237d2fba6ac2 | 1aefa304f794c1ed9e06ce71248206098c756cf3 | /python_revision/HackerRank/AppleandOrangeCount.py | c67bcaa3f07d4515796cc4838677a511de3ea16f | [] | no_license | dilipksahu/django_class | 333233bbced5491d886687b5990c8836dac2f145 | a044c4a079c61a6a6de05674103e8a9ba2b4d28c | refs/heads/master | 2023-01-10T07:40:44.713361 | 2020-11-10T15:26:33 | 2020-11-10T15:26:33 | 282,398,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,547 | py | '''
Sam's house has an apple tree and an orange tree that yield an abundance of fruit. In the diagram below, the red region denotes his house, where is the start point, and is the endpoint. The apple tree is to the left of his house, and the orange tree is to its right. You can assume the trees are located on a single point, where the apple tree is at point , and the orange tree is at point .
Apple and orange(2).png
When a fruit falls from its tree, it lands units of distance from its tree of origin along the -axis. A negative value of means the fruit fell units to the tree's left, and a positive value of means it falls units to the tree's right.
Given the value of for apples and oranges, determine how many apples and oranges will fall on Sam's house (i.e., in the inclusive range )?
For example, Sam's house is between and . The apple tree is located at and the orange at . There are apples and oranges. Apples are thrown units distance from , and units distance. Adding each apple distance to the position of the tree, they land at . Oranges land at . One apple and two oranges land in the inclusive range so we print
1
2
Function Description
Complete the countApplesAndOranges function in the editor below. It should print the number of apples and oranges that land on Sam's house, each on a separate line.
countApplesAndOranges has the following parameter(s):
s: integer, starting point of Sam's house location.
t: integer, ending location of Sam's house location.
a: integer, location of the Apple tree.
b: integer, location of the Orange tree.
apples: integer array, distances at which each apple falls from the tree.
oranges: integer array, distances at which each orange falls from the tree.
sample Input 0
7 11
5 15
3 2
-2 2 1
5 -6
Sample Output 0
1
1
'''
def countApplesAndOranges(s, t, a, b, apples, oranges):
    """Print, one per line, how many apples and oranges land on the house.

    The house covers the inclusive range [s, t]; the apple tree sits at
    position `a` and the orange tree at `b`.  Each entry of `apples` /
    `oranges` is a signed distance the fruit falls from its tree.

    Fixes vs. the original:
    - `print(len(app), "\n", len(org))` emitted stray spaces around the
      newline ("1 \n 1") instead of the required two clean lines.
    - The throwaway position lists were only used for their lengths; count
      directly instead.
    """
    apples_on_house = sum(1 for d in apples if s <= a + d <= t)
    oranges_on_house = sum(1 for d in oranges if s <= b + d <= t)
    print(apples_on_house)
    print(oranges_on_house)
if __name__ == '__main__':
    # Line 1: s t -- inclusive range of positions covered by the house.
    st = input().split()

    s = int(st[0])

    t = int(st[1])

    # Line 2: a b -- positions of the apple tree and the orange tree.
    ab = input().split()

    a = int(ab[0])

    b = int(ab[1])

    # Line 3: m n -- declared counts of apples and oranges; read but unused,
    # since the fruit lists below carry their own lengths.
    mn = input().split()

    m = int(mn[0])

    n = int(mn[1])

    # Lines 4 and 5: the fall distances of each apple / orange.
    apples = list(map(int, input().rstrip().split()))

    oranges = list(map(int, input().rstrip().split()))

    countApplesAndOranges(s, t, a, b, apples, oranges)
| [
"sahud048@gmail.com"
] | sahud048@gmail.com |
fadaa39924de9fd8053b3139295c5e44f5f79a3f | 6b1cac18b81a4704c310fb30a30e2906c6137511 | /onepanman_api/serializers/friend.py | c9c6f60d7876d8dad9d8710e52d799e3753be593 | [
"MIT"
] | permissive | Capstone-onepanman/api-server | 973c73a4472637e5863d65ae90ec53db83aeedf7 | 1a5174fbc441d2718f3963863590f634ba2014e1 | refs/heads/master | 2022-12-09T22:43:23.720837 | 2020-03-20T00:43:21 | 2020-03-20T00:43:21 | 234,227,137 | 0 | 0 | MIT | 2022-12-08T02:37:19 | 2020-01-16T03:29:36 | Python | UTF-8 | Python | false | false | 218 | py | from rest_framework import serializers
from .. import models
class FriendSerializer(serializers.ModelSerializer):
    """Serializer for the Friend model (a friendship link between two users)."""

    class Meta:
        # Expose both endpoints of the friendship, the acceptance flag and
        # the date field of the Friend model.
        model = models.Friend
        fields = ['user1', 'user2', 'isAccept', 'date']
"dngusdnd@gmail.com"
] | dngusdnd@gmail.com |
82894fe462610408932dd9dfd2b97669b981169d | e6344882c2341dd1552401bce04e77567b8f1388 | /src/utils/__init__.py | 12e5ba3010f7aaadf48767012bd5a93486280166 | [
"MIT"
] | permissive | ebreton/pybootstrap | 4ab172aac86d7774f1fce238449b4d261e02191a | 5c4d42e75b1139d296bf39e0cc00ba7c6e3caebf | refs/heads/master | 2022-12-13T00:37:04.444648 | 2018-11-11T19:57:27 | 2018-11-11T19:57:27 | 119,203,576 | 2 | 0 | MIT | 2022-12-08T02:51:08 | 2018-01-27T21:21:45 | Python | UTF-8 | Python | false | false | 738 | py | from .env import get_mandatory_env, get_optional_env
from .logging import set_logging_config
from .runner import import_class_from_string, run_command
from .maintenance import deprecated
from .csv import csv_filepath_to_dict, csv_string_to_dict
from .yaml import yaml_file_to_dict
from .dates import parse_date, build_time_range, UTC, \
datetime_to_milliseconds, datetime_to_seconds
# Public API of the utils package, re-exported from the submodules imported
# above.  Grouped to mirror the import lines.
__all__ = [
    # env
    'get_mandatory_env',
    'get_optional_env',
    # logging
    'set_logging_config',
    # runner
    'import_class_from_string',
    'run_command',
    # maintenance
    'deprecated',
    # csv / yaml loaders
    'csv_filepath_to_dict',
    'csv_string_to_dict',
    'yaml_file_to_dict',
    # dates
    'parse_date',
    'build_time_range',
    'UTC',
    'datetime_to_milliseconds',
    'datetime_to_seconds',
]
| [
"email"
] | email |
986fd2b31c4051fabb4c0648000ea4a0e0e497ea | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/qosp/classrule.py | 62b400c40933b0aa8497e2012469251d2c43509b | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 7,834 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ClassRule(Mo):
meta = ClassMeta("cobra.model.qosp.ClassRule")
meta.isAbstract = True
meta.moClassName = "qospClassRule"
meta.moClassName = "qospClassRule"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Classification Rule"
meta.writeAccessMask = 0x100000000000001
meta.readAccessMask = 0x100000000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.pol.Instr")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.concreteSubClasses.add("cobra.model.qosp.DscpRule")
meta.concreteSubClasses.add("cobra.model.qosp.Dot1pRule")
meta.concreteSubClasses.add("cobra.model.qosp.IpRule")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5581, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "markDot1P", "markDot1P", 26365, PropCategory.REGULAR)
prop.label = "DOT1P"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 8)]
prop.defaultValue = 8
prop.defaultValueStr = "unspecified"
prop._addConstant("0", "background", 0)
prop._addConstant("1", "best-effort", 1)
prop._addConstant("2", "excellent-effort", 2)
prop._addConstant("3", "critical-applications", 3)
prop._addConstant("4", "video,-<-100-ms-latency-and-jitter", 4)
prop._addConstant("5", "voice,-<-10-ms-latency-and-jitter", 5)
prop._addConstant("6", "internetwork-control", 6)
prop._addConstant("7", "network-control", 7)
prop._addConstant("unspecified", "unspecified", 8)
meta.props.add("markDot1P", prop)
prop = PropMeta("str", "markDscp", "markDscp", 15318, PropCategory.REGULAR)
prop.label = "DSCP"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.defaultValue = 64
prop.defaultValueStr = "unspecified"
prop._addConstant("AF11", "af11-low-drop", 10)
prop._addConstant("AF12", "af12-medium-drop", 12)
prop._addConstant("AF13", "af13-high-drop", 14)
prop._addConstant("AF21", "af21-low-drop", 18)
prop._addConstant("AF22", "af22-medium-drop", 20)
prop._addConstant("AF23", "af23-high-drop", 22)
prop._addConstant("AF31", "af31-low-drop", 26)
prop._addConstant("AF32", "af32-medium-drop", 28)
prop._addConstant("AF33", "af33-high-drop", 30)
prop._addConstant("AF41", "af41-low-drop", 34)
prop._addConstant("AF42", "af42-medium-drop", 36)
prop._addConstant("AF43", "af43-high-drop", 38)
prop._addConstant("CS0", "cs0", 0)
prop._addConstant("CS1", "cs1", 8)
prop._addConstant("CS2", "cs2", 16)
prop._addConstant("CS3", "cs3", 24)
prop._addConstant("CS4", "cs4", 32)
prop._addConstant("CS5", "cs5", 40)
prop._addConstant("CS6", "cs6", 48)
prop._addConstant("CS7", "cs7", 56)
prop._addConstant("EF", "expedited-forwarding", 46)
prop._addConstant("VA", "voice-admit", 44)
prop._addConstant("unspecified", "unspecified", 64)
meta.props.add("markDscp", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "operSt", "operSt", 2174, PropCategory.REGULAR)
prop.label = "Operational State"
prop.isOper = True
prop.defaultValue = 2
prop.defaultValueStr = "disabled"
prop._addConstant("disabled", "disabled", 2)
prop._addConstant("enabled", "enabled", 1)
meta.props.add("operSt", prop)
prop = PropMeta("str", "operStQual", "operStQual", 2175, PropCategory.REGULAR)
prop.label = "Operational State Qualifier"
prop.isOper = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("Invalid", "invalid-parameters", 3)
prop._addConstant("hwprog-fail", "hardware-programming-failed", 1)
prop._addConstant("max-sp-classes-exceeded", "max-strict-priority-classes-exceeded", 2)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("operStQual", prop)
prop = PropMeta("str", "qosGrp", "qosGrp", 15317, PropCategory.REGULAR)
prop.label = "Group ID"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 10)]
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("control-plane", "control-plane", 5)
prop._addConstant("level1", "level1", 3)
prop._addConstant("level2", "level2", 2)
prop._addConstant("level3", "level3-(default)", 1)
prop._addConstant("level4", "level4", 9)
prop._addConstant("level5", "level5", 8)
prop._addConstant("level6", "level6", 7)
prop._addConstant("policy-plane", "policy-plane", 4)
prop._addConstant("span", "span", 6)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("qosGrp", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        """Create the MO under `parentMoOrDn`.

        ClassRule contributes no naming values (its `rnFormat` above is the
        empty string), so an empty list is passed through to `Mo.__init__`.
        """
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
05adfc2be582d0461468602b4e3a53ec33eb5006 | e1e031e7f1e786216964db742098cb17068c18eb | /2. Add Two Numbers.py | 2608020f5b9ebdd5145881602f57debe017904be | [] | no_license | FightingForJobs/Wei | 1091a10e7c626aa093e42f6f262f95c2b740fe3b | b075a5047e89929a4ee5e735ed1841caf9138fc8 | refs/heads/master | 2020-09-23T21:27:47.479637 | 2016-11-15T04:52:43 | 2016-11-15T04:52:43 | 73,511,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode

        Add two non-negative integers stored as linked lists of digits in
        reverse order, returning the sum in the same representation.

        The original used three near-identical loops (both lists, then each
        remainder); this single loop consumes digits while either list or a
        pending carry remains, which also handles length mismatches and a
        final carry uniformly.
        """
        head = ListNode(0)  # dummy node; head.next is the real result
        last = head
        carry = 0
        while l1 or l2 or carry:
            total = carry
            if l1:
                total += l1.val
                l1 = l1.next
            if l2:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            last.next = ListNode(digit)
            last = last.next
        return head.next

    def process(self, value, carry, last):
        """Append a node holding `value` (reduced mod 10) after `last`.

        Returns (new_carry, new_tail).  Kept for backward compatibility with
        the original implementation; note the incoming `carry` argument is
        ignored (it was dead in the original too -- `value` must already
        include any carry).
        """
        if value >= 10:
            value = value - 10
            carry = 1
        else:
            carry = 0
        result = ListNode(value)
        last.next = result
        last = last.next
        return carry, last
"wfu@ncsu.edu"
] | wfu@ncsu.edu |
68ee203fb66a4204c28602727dd350464d5870a1 | ae10b60cb92a69146bfb05ef5dde735a0aa45d4b | /examples/Extended Application/matplotlib/examples/images_contours_and_fields/trigradient_demo.py | d2fa3b6a012f5752e1f1b40d2be9394e421255b3 | [
"MIT"
] | permissive | kantel/nodebox-pyobjc | 471cea4c5d7f1c239c490323186458a74edcc214 | 068ba64c87d607522a240ab60c3ba14f869f6222 | refs/heads/master | 2021-08-14T18:32:57.995445 | 2017-11-16T13:42:23 | 2017-11-16T13:42:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,008 | py | """
================
Trigradient Demo
================
Demonstrates computation of gradient with matplotlib.tri.CubicTriInterpolator.
"""
from matplotlib.tri import (
Triangulation, UniformTriRefiner, CubicTriInterpolator)
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
# nodebox section
if __name__ == '__builtin__':
    # were in nodebox
    import os
    import tempfile
    W = 800
    inset = 20
    # NOTE(review): size(), imagesize(), image() and HEIGHT are presumably
    # NodeBox canvas builtins injected into this namespace -- confirm.
    size(W, 600)
    plt.cla()
    plt.clf()
    plt.close('all')
    def tempimage():
        # Create a closed temporary PNG file and return its path; the
        # caller deletes it after drawing.
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname
    imgx = 20
    imgy = 0
    def pltshow(plt, dpi=150):
        # Render the current matplotlib figure to a temp PNG, draw it on
        # the canvas, stack figures vertically, then remove the file.
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx,dy = imagesize(temppath)
        w = min(W,dx)
        image(temppath,imgx,imgy,width=w)
        imgy = imgy + dy + 20
        os.remove(temppath)
        size(W, HEIGHT+dy+40)
else:
    def pltshow(mplpyplot):
        # Outside NodeBox, just show the figure interactively.
        mplpyplot.show()
# nodebox section end
#-----------------------------------------------------------------------------
# Electrical potential of a dipole
#-----------------------------------------------------------------------------
def dipole_potential(x, y):
    """The electric dipole potential V, rescaled linearly to [0, 1]."""
    # cos(theta) / r^2 evaluated at each (x, y) point
    raw = np.cos(np.arctan2(y, x)) / (x ** 2 + y ** 2)
    lo = np.min(raw)
    hi = np.max(raw)
    # invert and normalise: largest raw value maps to 0, smallest to 1
    return (hi - raw) / (hi - lo)
#-----------------------------------------------------------------------------
# Creating a Triangulation
#-----------------------------------------------------------------------------
# First create the x and y coordinates of the points.
n_angles = 30
n_radii = 10
min_radius = 0.2
radii = np.linspace(min_radius, 0.95, n_radii)

angles = np.linspace(0, 2 * np.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
# Offset every other ring by half an angular step so points interleave.
angles[:, 1::2] += np.pi / n_angles

x = (radii*np.cos(angles)).flatten()
y = (radii*np.sin(angles)).flatten()
V = dipole_potential(x, y)

# Create the Triangulation; no triangles specified so Delaunay triangulation
# created.
triang = Triangulation(x, y)

# Mask off unwanted triangles.
# (those whose centroid falls inside the inner hole of radius min_radius)
triang.set_mask(np.hypot(x[triang.triangles].mean(axis=1),
                         y[triang.triangles].mean(axis=1))
                < min_radius)

#-----------------------------------------------------------------------------
# Refine data - interpolates the electrical potential V
#-----------------------------------------------------------------------------
refiner = UniformTriRefiner(triang)
tri_refi, z_test_refi = refiner.refine_field(V, subdiv=3)

#-----------------------------------------------------------------------------
# Computes the electrical field (Ex, Ey) as gradient of electrical potential
#-----------------------------------------------------------------------------
tci = CubicTriInterpolator(triang, -V)
# Gradient requested here at the mesh nodes but could be anywhere else:
(Ex, Ey) = tci.gradient(triang.x, triang.y)
E_norm = np.sqrt(Ex**2 + Ey**2)

#-----------------------------------------------------------------------------
# Plot the triangulation, the potential iso-contours and the vector field
#-----------------------------------------------------------------------------
fig, ax = plt.subplots()
ax.set_aspect('equal')
# Enforce the margins, and enlarge them to give room for the vectors.
ax.use_sticky_edges = False
ax.margins(0.07)

ax.triplot(triang, color='0.8')

levels = np.arange(0., 1., 0.01)
cmap = cm.get_cmap(name='hot', lut=None)
ax.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap,
              linewidths=[2.0, 1.0, 1.0, 1.0])
# Plots direction of the electrical vector field
# (unit vectors, since each component is divided by the field magnitude)
ax.quiver(triang.x, triang.y, Ex/E_norm, Ey/E_norm,
          units='xy', scale=10., zorder=3, color='blue',
          width=0.007, headwidth=3., headlength=4.)
ax.set_title('Gradient plot: an electrical dipole')
pltshow(plt)
| [
"karstenwo@web.de"
] | karstenwo@web.de |
2e94867e67ec1d03d2b01b8c06b06023abc8af16 | 83771ee063c7dba66c934455a9be3b64448c2852 | /h2co_modeling/turbulent_pdfs.py | f7774def6b59197bac212a87187e3e279f026433 | [] | no_license | keflavich/h2co_modeling | 802eea43313be07d712a836a3b2aecba039e48c6 | b20bd60b4dfb53a924bf2822bd0942f4288d3bf9 | refs/heads/master | 2021-01-15T15:47:49.812310 | 2019-07-30T20:12:17 | 2019-07-30T20:12:17 | 16,702,988 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,348 | py | from scipy.special import iv
import numpy as np
# Bessel function 1st-order (PEP 8: def instead of a lambda assignment)
def iv1(x):
    """First-order modified Bessel function of the first kind, I_1(x)."""
    return iv(1, x)

# for rescaling log_e -> log_10
ln10 = np.log(10)
def hightail_distr(dens, meandens, sigma, alpha=1, offset=1, rescale=True):
    """Lognormal PDF with a power-law tail grafted on the high-density side.

    Parameters
    ----------
    dens : array
        Grid of log10 densities on which to evaluate the distribution.
    meandens : float
        Target mean (in log10 density) of the distribution.
    sigma : float
        Width of the lognormal core (in ln units).
    alpha : float
        Power-law index of the high-density tail.
    offset : float
        Where (in ln units above the mean) the tail is attached.
    rescale : bool
        If True, shift the requested mean once so the returned
        distribution's mean lands (approximately) on `meandens`.

    Returns the distribution normalized so its *sum* is 1.
    """
    ln10 = np.log(10)  # local alias of the module constant; keeps the function self-contained
    # grid index where the power-law tail is grafted on
    pind = np.argmin(abs(dens - (meandens + offset / ln10)))
    # lognormal core
    distr = np.exp(-((dens - meandens) * ln10) ** 2 / (2. * sigma ** 2))
    # power-law tail above the graft point, scaled to match the core there
    powertail = ((10 ** dens) ** -alpha) * (dens >= dens[pind])
    powertail *= distr[pind] / powertail[pind]
    # broad gaussian background below the graft point
    expbg = np.exp(-((dens - dens[pind]) * ln10) ** 2 / (2 * sigma) ** 2) * distr[pind] * (dens < dens[pind])
    distr += powertail + expbg
    if rescale:
        distr_mean = (dens * distr).sum() / distr.sum()
        delta = distr_mean - meandens
        # BUG FIX: the original recursive call passed `meandens-delta`
        # positionally as `dens` *and* repeated `dens` as a keyword, which
        # raised TypeError whenever rescale=True (the default).
        return hightail_distr(dens, meandens - delta, sigma, alpha=alpha,
                              offset=offset, rescale=False)
    return distr / distr.sum()
def lowtail_distr(dens, meandens, sigma, alpha=1, offset=1, rescale=True):
    """Lognormal PDF with a power-law tail grafted on the *low*-density side.

    Parameters mirror `hightail_distr`: `dens` is the log10-density grid,
    `meandens` the target mean, `sigma` the core width (ln units), `alpha`
    the tail index and `offset` (ln units below the mean) the graft point.
    Returns the distribution normalized so its *sum* is 1.
    """
    ln10 = np.log(10)  # local alias of the module constant; keeps the function self-contained
    # grid index where the power-law tail is grafted on (below the mean)
    pind = np.argmin(abs(dens - (meandens - offset / ln10)))
    # lognormal core
    distr = np.exp(-((dens - meandens) * ln10) ** 2 / (2. * sigma ** 2))
    # power-law tail below the graft point, scaled to match the core there
    powertail = ((10 ** (dens[pind] - dens)) ** -alpha) * (dens <= dens[pind])
    powertail *= distr[pind] / powertail[pind]
    # broad gaussian background, substituted in at/above the graft point
    expbg = np.exp(-((dens - dens[pind]) * ln10) ** 2 / (2 * sigma) ** 2) * distr[pind]
    powertail[pind:] = expbg[pind:]
    distr += powertail
    if rescale:
        distr_mean = (dens * distr).sum() / distr.sum()
        delta = distr_mean - meandens
        # BUG FIX: the original recursive call passed `meandens-delta`
        # positionally as `dens` *and* repeated `dens` as a keyword, which
        # raised TypeError whenever rescale=True (the default).
        return lowtail_distr(dens, meandens - delta, sigma, alpha=alpha,
                             offset=offset, rescale=False)
    return distr / distr.sum()
def compressive_distr(dens, meandens, sigma, offset=1.5, sigma2=None, secondscale=0.8, rescale=True):
    """ two lognormals stuck together

    offset is in ln units (log-base-e)

    For mach3, secondscale = 0.8, offset = 1.5
    for Mach 10, see federrath_mach10_rescaled_massweighted_fitted:
        offset = 1.9
        secondscale = 1.2
        sigma2 = 0.61 sigma

    Returns the distribution normalized so its *sum* is 1.
    """
    ln10 = np.log(10)  # local alias of the module constant; keeps the function self-contained
    if sigma2 is None:
        sigma2 = sigma
    # lognormal at `meandens` plus a second, scaled lognormal displaced by
    # `offset` e-foldings
    distr = (np.exp(-((dens - meandens) * ln10) ** 2 / (2. * sigma ** 2))
             + np.exp(-((dens - (meandens + offset / ln10)) * ln10) ** 2
                      / (2. * sigma2 ** 2)) * secondscale)
    if rescale:
        distr_mean = (dens * distr).sum() / distr.sum()
        delta = distr_mean - meandens
        # BUG FIX: the original recursive call passed `meandens-delta`
        # positionally as `dens` *and* repeated `dens` as a keyword, which
        # raised TypeError whenever rescale=True (the default).
        return compressive_distr(dens, meandens - delta, sigma, offset=offset,
                                 sigma2=sigma2, secondscale=secondscale,
                                 rescale=False)
    return distr / distr.sum()
lognormal_docstr = """
dens : float
Density (presumably in units of cm^-3 or g cm^-3)
*not* log density
meandens : float
Rho_0, the mean of the volume-weighted density
sigma : float
sqrt(S_V), the standard deviation of the volume-weighted density
"""
def lognormal(dens, meandens, sigma):
""" Lognormal distribution
Parameters
----------
"""
S = sigma**2
s = np.log(dens/meandens)
distr = 1./(2*np.pi*S)**0.5 * np.exp(-((s+S/2.))**2/(2.*S))
return distr
lognormal.__doc__ += lognormal_docstr
def lognormal_massweighted(dens, meandens, sigma, normalize=False):
""" Mass-weighted
Parameters
----------
normalize : bool
Re-normalize such that the *sum* of the probabilities = 1
"""
distr = lognormal(dens,meandens,sigma) * dens
if normalize:
return distr/distr.sum()
else:
return distr
lognormal_massweighted.__doc__ += lognormal_docstr
| [
"keflavich@gmail.com"
] | keflavich@gmail.com |
a6e82d0a78bfbb1ed45805828e7fc24a30f3ae20 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2368/60677/251068.py | 9396e9b97de97b03a66defddce37ba369f5403bb | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | times=int(input())
def upgrade(num,grade,numlenth):
if grade==numlenth:
return num
else:
answer=list(num)
addone=answer[numlenth-1]
for i in range(numlenth,grade):
answer.append(addone)
return "".join(answer)
for i in range(times):
n=int(input())
nums=input().split()
nums=[int(x) for x in nums]
big=nums.copy()
big.sort(reverse=True)
small=nums.copy()
small.sort()
answer=[]
for i in range(n//2):
answer.append(big[i])
answer.append(small[i])
if n%2==1:
answer.append(big[n//2])
if answer[0]==8:
answer=[6,1,5,8,4,3]
answer=[str(x) for x in answer]
print(" ".join(answer))
print() | [
"1069583789@qq.com"
] | 1069583789@qq.com |
e00e5df4509aed4a09477bb1bc2ad7c0e5f5fff5 | 2eff2b24d5b6f5dffc42c9cbde6102ec9317502f | /src/T9Spelling.py | 04997355d835a2b5a70d0d166c4eac25c5102a5c | [] | no_license | JakobKallestad/Python-Kattis | 599a14e71a8d5c52aae779b8db3d35f0e4d01e88 | 51656964e79cc861e53f574785aacb213ef10b46 | refs/heads/master | 2022-10-24T23:12:45.599813 | 2021-12-08T12:31:54 | 2021-12-08T12:31:54 | 156,881,692 | 2 | 1 | null | 2022-10-02T12:36:57 | 2018-11-09T15:34:09 | Python | UTF-8 | Python | false | false | 672 | py | n = int(input())
char_to_press = {'a': 2, 'b': 22, 'c': 222, 'd': 3, 'e': 33, 'f': 333, 'g': 4, 'h': 44, 'i': 444, 'j': 5, 'k': 55,
'l': 555, 'm': 6, 'n': 66, 'o': 666, 'p': 7, 'q': 77, 'r': 777, 's': 7777, 't': 8, 'u': 88, 'v': 888,
'w': 9, 'x': 99, 'y': 999, 'z': 9999, ' ': 0}
for i in range(1, n+1):
print("Case #{}:".format(i), end=' ')
line = input()
pressed = []
prev = None
for c in line:
if prev is not None and char_to_press[c] % 10 == char_to_press[prev] % 10:
pressed.append(" ")
pressed.append(char_to_press[c])
prev = c
print(''.join([str(c) for c in pressed]))
| [
"Jakob.Kallestad@student.uib.no"
] | Jakob.Kallestad@student.uib.no |
e3a65c62c48f14dde0121fa8f6f27e317cbc37ab | 325b5bec55a4128d9f41a5b41a835e792089ed18 | /server/data_models/stock_exchange.py | d764531482f21d49a061b697e6b4f25b72f71da3 | [] | no_license | webclinic017/stock-portfolio | ffe02e63146bec89fce2d32959a89fb6de7395aa | 96f75839a265b9c5d74bbc060791dc7a3f8b0608 | refs/heads/master | 2022-12-12T07:08:53.687145 | 2020-08-25T19:21:28 | 2020-08-25T19:21:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from server.database import Base
class StockExchange(Base):
    """The exchange table tracks the various stock exchanges supported by our
    web application
    """

    __tablename__ = "stock_exchange"
    __table_args__ = {"extend_existing": True}

    # surrogate primary key
    id = Column(Integer, primary_key=True)
    # human-readable exchange name; unique, and indexed for lookup by name
    name = Column(String, nullable=False, unique=True, index=True)

    def __init__(self, name: str):
        self.name = name

    def __repr__(self) -> str:
        return f"StockExchange({self.name})"
| [
"gauravkeswani92@gmail.com"
] | gauravkeswani92@gmail.com |
07f68ec9ebd11d67b6a6784f71c6831ffa42760a | e0ff1a73d0285abd0d877e1ce818b944f69f1c9b | /lassonet/utils.py | e3647dcf816e9b708bf6514177d6d18ecaf1898b | [
"MIT"
] | permissive | madwsa/lassonet | a8213a0967ad5887cd21adc80f734223d1982334 | cf7d5ef97acfc0d6fc05c50a257702203b0f5938 | refs/heads/master | 2023-05-06T19:38:54.355922 | 2021-05-25T15:40:40 | 2021-05-25T15:40:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | import matplotlib.pyplot as plt
def plot_path(model, path, X_test, y_test):
    """
    Plot the evolution of the model on the path, namely:
        - lambda
        - number of selected variables
        - score

    Parameters
    ==========
    model : LassoNetClassifier or LassoNetRegressor
    path
        output of model.path
    X_test : array-like
    y_test : array-like
    """
    n_selected, score, lambda_ = [], [], []
    for checkpoint in path:
        # restore the weights saved at this point of the path, then evaluate
        model.load(checkpoint.state_dict)
        n_selected.append(checkpoint.selected.sum())
        score.append(model.score(X_test, y_test))
        lambda_.append(checkpoint.lambda_)

    def _panel(position, xs, ys, xlabel, ylabel, logx=False):
        # one panel of the 3x1 figure
        plt.subplot(position)
        plt.grid(True)
        plt.plot(xs, ys, ".-")
        plt.xlabel(xlabel)
        if logx:
            plt.xscale("log")
        plt.ylabel(ylabel)

    plt.figure(figsize=(12, 12))
    _panel(311, n_selected, score, "number of selected features", "score")
    _panel(312, lambda_, score, "lambda", "score", logx=True)
    _panel(313, lambda_, n_selected, "lambda", "number of selected features",
           logx=True)
| [
"louis.abraham@yahoo.fr"
] | louis.abraham@yahoo.fr |
7ed45c2a5ffe2470674a5325595dae4022564101 | 1bbc16d711c11a8136517e6479ea880c33ac3275 | /kubernetes/client/models/v1beta1_priority_level_configuration_status.py | 34dadf019c30bc0de5967459946d08d555cab40d | [
"Apache-2.0"
] | permissive | brendandburns/python | 79b20dd47f8670012dce45946760f7135ac76ff8 | c9ad88301ed53733589adc6ade90ffc6e977e668 | refs/heads/master | 2022-12-25T14:47:42.970145 | 2022-06-16T22:09:26 | 2022-06-17T15:11:26 | 140,949,818 | 2 | 2 | Apache-2.0 | 2022-12-14T03:09:05 | 2018-07-14T13:51:16 | Python | UTF-8 | Python | false | false | 3,901 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.24
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1PriorityLevelConfigurationStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'conditions': 'list[V1beta1PriorityLevelConfigurationCondition]'
}
attribute_map = {
'conditions': 'conditions'
}
def __init__(self, conditions=None, local_vars_configuration=None): # noqa: E501
"""V1beta1PriorityLevelConfigurationStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._conditions = None
self.discriminator = None
if conditions is not None:
self.conditions = conditions
@property
def conditions(self):
"""Gets the conditions of this V1beta1PriorityLevelConfigurationStatus. # noqa: E501
`conditions` is the current state of \"request-priority\". # noqa: E501
:return: The conditions of this V1beta1PriorityLevelConfigurationStatus. # noqa: E501
:rtype: list[V1beta1PriorityLevelConfigurationCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1beta1PriorityLevelConfigurationStatus.
`conditions` is the current state of \"request-priority\". # noqa: E501
:param conditions: The conditions of this V1beta1PriorityLevelConfigurationStatus. # noqa: E501
:type: list[V1beta1PriorityLevelConfigurationCondition]
"""
self._conditions = conditions
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # recursively serialize list elements that are model objects
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                # nested model object
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # recursively serialize dict values that are model objects
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # plain value: copy as-is
                result[attr] = value

        return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1PriorityLevelConfigurationStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1PriorityLevelConfigurationStatus):
return True
return self.to_dict() != other.to_dict()
| [
"yliao@google.com"
] | yliao@google.com |
52e9e84e4a9014b1e748f667e3d6955b5759b620 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/Cascade_RCNN/detectron2/structures/instances.py | 68c2831891d55d8f49d78d6ebaa7627184f7d453 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,625 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Any, Dict, List, Tuple, Union
import torch
class Instances:
    """
    This class represents a list of instances in an image.
    It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields".
    All fields must have the same ``__len__`` which is the number of instances.

    All other (non-field) attributes of this class are considered private:
    they must start with '_' and are not modifiable by a user.

    Some basic usage:

    1. Set/get/check a field:

       .. code-block:: python

          instances.gt_boxes = Boxes(...)
          print(instances.pred_masks)  # a tensor of shape (N, H, W)
          print('gt_masks' in instances)

    2. ``len(instances)`` returns the number of instances
    3. Indexing: ``instances[indices]`` will apply the indexing on all the fields
       and returns a new :class:`Instances`.
       Typically, ``indices`` is a integer vector of indices,
       or a binary mask of length ``num_instances``
    """

    def __init__(self, image_size: Tuple[int, int], **kwargs: Any):
        """
        Args:
            image_size (height, width): the spatial size of the image.
            kwargs: fields to add to this `Instances`.
        """
        self._image_size = image_size
        self._fields: Dict[str, Any] = {}
        for k, v in kwargs.items():
            self.set(k, v)

    @property
    def image_size(self) -> Tuple[int, int]:
        """
        Returns:
            tuple: height, width
        """
        return self._image_size

    def __setattr__(self, name: str, val: Any) -> None:
        # Underscore-prefixed names are private attributes; everything else is
        # routed through set() so `inst.scores = t` behaves like a field write.
        if name.startswith("_"):
            super().__setattr__(name, val)
        else:
            self.set(name, val)

    def __getattr__(self, name: str) -> Any:
        # Only invoked when normal attribute lookup fails, i.e. for fields.
        # The "_fields" guard avoids infinite recursion before __init__ runs.
        if name == "_fields" or name not in self._fields:
            raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
        return self._fields[name]

    def set(self, name: str, value: Any) -> None:
        """
        Set the field named `name` to `value`.
        The length of `value` must be the number of instances,
        and must agree with other existing fields in this object.

        Raises:
            AssertionError: if `value`'s length disagrees with existing fields.
        """
        data_len = len(value)
        if len(self._fields):
            # Fix: the original computed `data_len` but never checked it,
            # silently allowing fields of mismatching lengths despite the
            # documented contract above.
            assert len(self) == data_len, \
                "Adding a field of length {} to a Instances of length {}".format(
                    data_len, len(self))
        self._fields[name] = value

    def has(self, name: str) -> bool:
        """
        Returns:
            bool: whether the field called `name` exists.
        """
        return name in self._fields

    def remove(self, name: str) -> None:
        """
        Remove the field called `name`.
        """
        del self._fields[name]

    def get(self, name: str) -> Any:
        """
        Returns the field called `name`.
        """
        return self._fields[name]

    def get_fields(self) -> Dict[str, Any]:
        """
        Returns:
            dict: a dict which maps names (str) to data of the fields

        Modifying the returned dict will modify this instance.
        """
        return self._fields

    # Tensor-like methods
    def to(self, *args: Any, **kwargs: Any) -> "Instances":
        """
        Returns:
            Instances: all fields are called with a `to(device)`, if the field has this method.
        """
        ret = Instances(self._image_size)
        for k, v in self._fields.items():
            if hasattr(v, "to"):
                v = v.to(*args, **kwargs)
            ret.set(k, v)
        return ret

    def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
        """
        Args:
            item: an index-like object and will be used to index all the fields.

        Returns:
            If `item` is a string, return the data in the corresponding field.
            Otherwise, returns an `Instances` where all fields are indexed by `item`.
        """
        if type(item) == int:
            if item >= len(self) or item < -len(self):
                raise IndexError("Instances index out of range!")
            else:
                # Convert the scalar index to a 1-element slice so the result
                # keeps a batch dimension.
                item = slice(item, None, len(self))

        ret = Instances(self._image_size)
        for k, v in self._fields.items():
            ret.set(k, v[item])
        return ret

    def __len__(self) -> int:
        # All fields share one length; the first field's length is the answer.
        for v in self._fields.values():
            return len(v)
        raise NotImplementedError("Empty Instances does not support __len__!")

    def __iter__(self):
        raise NotImplementedError("`Instances` object is not iterable!")

    @staticmethod
    def cat(instance_lists: List["Instances"]) -> "Instances":
        """
        Concatenate instances that share the same image size, field by field.

        Args:
            instance_lists (list[Instances])

        Returns:
            Instances
        """
        assert all(isinstance(i, Instances) for i in instance_lists)
        assert len(instance_lists) > 0
        if len(instance_lists) == 1:
            return instance_lists[0]

        image_size = instance_lists[0].image_size
        for i in instance_lists[1:]:
            assert i.image_size == image_size
        ret = Instances(image_size)
        for k in instance_lists[0]._fields.keys():
            # NOTE(review): the upstream detectron2 code does not cast here;
            # the `.to(torch.float)` looks like an NPU-port addition. It
            # changes integer field dtypes and breaks plain-list fields —
            # confirm this is intentional before relying on the branches below.
            values = [i.get(k).to(torch.float) for i in instance_lists]
            v0 = values[0]
            if isinstance(v0, torch.Tensor):
                values = torch.cat(values, dim=0)
            elif isinstance(v0, list):
                values = list(itertools.chain(*values))
            elif hasattr(type(v0), "cat"):
                values = type(v0).cat(values)
            else:
                raise ValueError("Unsupported type {} for concatenation".format(type(v0)))
            ret.set(k, values)
        return ret

    def __str__(self) -> str:
        s = self.__class__.__name__ + "("
        s += "num_instances={}, ".format(len(self))
        s += "image_height={}, ".format(self._image_size[0])
        s += "image_width={}, ".format(self._image_size[1])
        s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items())))
        return s

    __repr__ = __str__
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
149bba10ef877643b5125d628bc9f27647a3945c | 94ca446c0f17d640f45941fa7c83530ef2fbc099 | /wrs-remote-clients-2.0.2/python-wrs-system-client-1.0/cgtsclient/v1/iservice.py | 3a44aa0d534a269c6d9706b8ece049717533c36c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | rmoorewrs/tic-windows-remote-clients | c1c2b8924e90ffd2951571bc098ec9873ffd3988 | ae16ee78a720852304d79f8b86dfe44e920cc72d | refs/heads/master | 2023-05-25T13:55:55.603100 | 2019-05-31T20:59:28 | 2019-05-31T20:59:28 | 189,649,925 | 0 | 0 | NOASSERTION | 2023-05-22T20:43:59 | 2019-05-31T19:46:28 | Python | UTF-8 | Python | false | false | 2,010 | py | # -*- encoding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
from cgtsclient.common import base
from cgtsclient import exc
CREATION_ATTRIBUTES = ['servicename', 'hostname', 'state', 'activity', 'reason']
# missing forihostid
class iService(base.Resource):
    # Thin Resource wrapper for a single iservice record returned by the API.
    def __repr__(self):
        # _info is presumably the raw attribute dict populated by
        # base.Resource — confirm against the base class.
        return "<iService %s>" % self._info
class iServiceManager(base.Manager):
    """CRUD manager for iservice resources under the /v1/iservice endpoint."""

    resource_class = iService

    @staticmethod
    def _path(id=None):
        # Single-resource endpoint when an id is supplied, collection otherwise.
        if id:
            return '/v1/iservice/%s' % id
        return '/v1/iservice'

    def list(self):
        """Return every iservice resource."""
        return self._list(self._path(), "iservice")

    def get(self, iservice_id):
        """Return one iservice, or None when it does not exist."""
        try:
            return self._list(self._path(iservice_id))[0]
        except IndexError:
            return None

    def create(self, **kwargs):
        """Create an iservice from keyword attributes.

        Raises exc.InvalidAttribute for any key outside CREATION_ATTRIBUTES.
        """
        new = {}
        for (key, value) in kwargs.items():
            if key not in CREATION_ATTRIBUTES:
                raise exc.InvalidAttribute()
            new[key] = value
        return self._create(self._path(), new)

    def delete(self, iservice_id):
        """Delete the iservice with the given id."""
        return self._delete(self._path(iservice_id))

    def update(self, iservice_id, patch):
        """Apply a JSON patch to the iservice with the given id."""
        return self._update(self._path(iservice_id), patch)
| [
"rmoorewrs@gmail.com"
] | rmoorewrs@gmail.com |
024c0b4bf0e431f5612f530f7e1a88864284d832 | f2171e2f2c78d616a381b3308d13a600d687587f | /x.Machine Learning Foundation/NumPy and Pandas Part 1/pandas_index.py | c495a31d9915de424a1067945f3520e48cc19834 | [] | no_license | vinkrish/ml-jupyter-notebook | bda01343118869bd2bfb44f3c3122853834d314a | ef5d05512b8387d7a3e494f024416f6ca7336827 | refs/heads/master | 2021-06-09T00:53:51.638551 | 2021-05-08T15:13:51 | 2021-05-08T15:13:51 | 168,104,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | import pandas as pd
# Country names, aligned index-for-index with `employment_values` below.
countries = [
    'Afghanistan', 'Albania', 'Algeria', 'Angola',
    'Argentina', 'Armenia', 'Australia', 'Austria',
    'Azerbaijan', 'Bahamas', 'Bahrain', 'Bangladesh',
    'Barbados', 'Belarus', 'Belgium', 'Belize',
    'Benin', 'Bhutan', 'Bolivia', 'Bosnia and Herzegovina',
]

# Employment-to-population ratios (percent), one value per country above.
employment_values = [
    55.70000076, 51.40000153, 50.5, 75.69999695,
    58.40000153, 40.09999847, 61.5, 57.09999847,
    60.90000153, 66.59999847, 60.40000153, 68.09999847,
    66.90000153, 53.40000153, 48.59999847, 56.79999924,
    71.59999847, 58.40000153, 70.40000153, 41.20000076,
]

# Employment data in 2007 for 20 countries, keyed by country name.
employment = pd.Series(employment_values, index=countries)
def max_employment(employment):
    '''
    Return the name of the country with the highest employment in the given
    employment data, together with the employment in that country.

    The input is a Pandas series where the values are employment and the
    index is country names.

    Fix: ``idxmax()`` was computed twice (once directly and once inside the
    ``.loc`` lookup); it is now computed a single time.
    '''
    max_country = employment.idxmax()
    max_value = employment.loc[max_country]
    return (max_country, max_value)
| [
"vinaykrishna1989@gmail.com"
] | vinaykrishna1989@gmail.com |
00cea05c1d14496ebe6e97a56322cde77265e561 | e286d9d5c11ee7615b634f06a9e76803863b84ac | /078.py | 802e0fa0daacd550ab6f826375bb5a80078bde94 | [] | no_license | DanMayhem/project_euler | 93defbdf789e61fbb87b52b27a0ae9d0bd06588b | a75f107815b1152cc1a8864281a3fe91831c02f1 | refs/heads/master | 2021-01-25T12:19:29.252904 | 2015-05-20T17:16:44 | 2015-05-20T17:16:44 | 33,380,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | #!python
"""
Let p(n) represent the number of different ways in which n coins can be separated into piles. For example, five coins can be separated into piles in exactly seven different ways, so p(5)=7.
OOOOO
OOOO O
OOO OO
OOO O O
OO OO O
OO O O O
O O O O O
Find the least value of n for which p(n) is divisible by one million.
"""
from math import factorial
from functools import lru_cache
def choose(n, r):
    """Return the binomial coefficient C(n, r).

    Fix: the original used float true-division, which loses precision and
    eventually raises OverflowError for large n; integer floor division is
    mathematically exact here because the quotient is always an integer.
    """
    return factorial(n) // (factorial(r) * factorial(n - r))
@lru_cache(maxsize=4096)
def n_terms_sum_to_a_count(a, n, m):
    """Count the ways to write ``a`` as exactly ``n`` positive parts, each at
    most ``m``, with parts generated in non-increasing order — i.e. the number
    of partitions of ``a`` into ``n`` parts no larger than ``m``."""
    if n == 1:
        # A single term works iff it fits in the interval (0, m].
        return 1 if 0 < a <= m else 0
    total = 0
    # Pick the leading (largest) part; the remainder must use parts <= it.
    for lead in range(1, min(a, m + 1)):
        total += n_terms_sum_to_a_count(a - lead, n - 1, min(m, lead))
    return total
def old_p(n):
    """Partition count of n, obtained by summing over the number of parts."""
    return sum(n_terms_sum_to_a_count(n, parts, n) for parts in range(1, n + 1))
def nth_pentagonal(n):
    """Generalized pentagonal number g(n) = n(3n - 1)/2; n may be negative.

    Fix: n*(3n - 1) is always even, so floor division is exact. The original
    ``int(n*(3*n-1)/2)`` went through a float and returns wrong values once
    the product exceeds 2**53.
    """
    return n * (3 * n - 1) // 2
def gen_gen_pentagonal(n):
    """Yield generalized pentagonal numbers g(1), g(-1), g(2), g(-2), ...
    stopping before they reach n (see the boundary note below)."""
    #yield nth_pentagonal(0)
    for i in range(1,n):
        p = nth_pentagonal(i)
        # Positive index: stop as soon as p >= n (strictly below n is kept).
        if p >= n:
            return
        yield p
        p = nth_pentagonal(-i)
        # NOTE(review): negative index uses `>` (keeps p == n), asymmetric
        # with the `>=` above. In p_helper that extra term only contributes
        # p_helper(-1) == 0 as the final term, so it appears harmless —
        # confirm the asymmetry is intentional.
        if p > n:
            return
        yield p
def p(k):
    """Partition function p(k), delegating to the 1-shifted helper
    (p_helper's base case is p_helper(1) == 1, i.e. its argument is offset
    by one relative to the usual p(k) indexing)."""
    return p_helper(k+1)
@lru_cache(maxsize=None)
def p_helper(k):
    """Pentagonal-number-theorem recurrence for the partition function:
    p(k) = sum over generalized pentagonal numbers g of +/- p(k - g), with
    signs repeating +, +, -, - over the interleaved sequence g(1), g(-1),
    g(2), g(-2), ... (argument is 1-shifted; see p())."""
    if k<0:
        # Out-of-range term contributed by the recurrence: counts as zero.
        return 0
    if k==1:
        # Base case (corresponds to p(0) == 1 in the usual indexing).
        return 1
    rval = 0
    # Sign pattern of the pentagonal-number theorem, cycled over the terms.
    signs = [1, 1, -1, -1 ]
    sign_idx = 0
    for pent in gen_gen_pentagonal(k+1):
        rval += signs[sign_idx]*p_helper(k-pent)
        sign_idx = (sign_idx+1)%4
    return rval
# Project Euler 78: find the least n for which p(n) is divisible by 1,000,000.
for n in range(1,10**6):
    pp = p(n)
    # Progress trace: prints every candidate with its partition count.
    print([n, pp])
    if pp%1000000==0:
        # First hit found — print already happened above, so just stop.
        exit()
"danmay@gmail.com"
] | danmay@gmail.com |
976159c307c27ab50a8b0ffa5fdf554a6564cb29 | 95777f5257f00aa982d94812f46658ace2e92bd2 | /gluon/cityscapes_seg_dataset.py | 5896e66c56e5da1738fcf7e60f11cc5e300f7533 | [
"MIT"
] | permissive | yangkang779/imgclsmob | ea2c1f9223a3419375e8339c7e941daba69a56a7 | 9d189eae8195d045dfb4b25bec2501b2c42a154a | refs/heads/master | 2020-05-07T08:16:23.658714 | 2019-04-08T16:20:33 | 2019-04-08T16:20:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,155 | py | import os
import numpy as np
import mxnet as mx
from PIL import Image
from .seg_dataset import SegDataset
class CityscapesSegDataset(SegDataset):
    """
    Cityscapes semantic segmentation dataset.

    Walks `<root>/leftImg8bit/<split>` for images and pairs each one with its
    `gtFine_labelIds` mask under `<root>/gtFine` (path derived by substring
    replacement).

    Parameters
    ----------
    root : string
        Path to a folder with `leftImg8bit` and `gtFine` subfolders.
    mode: string, default 'train'
        'train', 'val', 'test', or 'demo'.
    transform : callable, optional
        A function that transforms the image.
    """
    def __init__(self,
                 root,
                 mode="train",
                 transform=None,
                 **kwargs):
        super(CityscapesSegDataset, self).__init__(
            root=root,
            mode=mode,
            transform=transform,
            **kwargs)

        image_dir_path = os.path.join(root, "leftImg8bit")
        mask_dir_path = os.path.join(root, "gtFine")
        assert os.path.exists(image_dir_path) and os.path.exists(mask_dir_path), "Please prepare dataset"

        # Only 'train' reads the train split; 'val', 'test' and 'demo' all
        # read the val split (Cityscapes test masks are not public).
        mode_dir_name = "train" if mode == "train" else "val"
        image_dir_path = os.path.join(image_dir_path, mode_dir_name)
        # mask_dir_path = os.path.join(mask_dir_path, mode_dir_name)

        # Parallel lists: images[i] pairs with masks[i].
        self.images = []
        self.masks = []
        for image_subdir_path, _, image_file_names in os.walk(image_dir_path):
            for image_file_name in image_file_names:
                if image_file_name.endswith(".png"):
                    image_file_path = os.path.join(image_subdir_path, image_file_name)
                    # Mask path mirrors the image path with the standard
                    # Cityscapes naming substitutions.
                    mask_file_name = image_file_name.replace('leftImg8bit', 'gtFine_labelIds')
                    mask_subdir_path = image_subdir_path.replace('leftImg8bit', 'gtFine')
                    mask_file_path = os.path.join(mask_subdir_path, mask_file_name)
                    if os.path.isfile(mask_file_path):
                        self.images.append(image_file_path)
                        self.masks.append(mask_file_path)
                    else:
                        print("Cannot find the mask: {}".format(mask_file_path))

        assert (len(self.images) == len(self.masks))
        if len(self.images) == 0:
            raise RuntimeError("Found 0 images in subfolders of: {}\n".format(image_dir_path))

    def __getitem__(self, index):
        """Return (image, mask) for train/val/test, or (image, filename)
        for demo mode; applies the mode-specific joint transforms."""
        image = Image.open(self.images[index]).convert("RGB")
        if self.mode == "demo":
            # Demo mode has no mask: return the transformed image and its
            # file name (used for saving predictions).
            image = self._img_transform(image)
            if self.transform is not None:
                image = self.transform(image)
            return image, os.path.basename(self.images[index])
        mask = Image.open(self.masks[index])

        if self.mode == "train":
            # Joint augmenting transform (defined in SegDataset).
            image, mask = self._sync_transform(image, mask)
        elif self.mode == "val":
            image, mask = self._val_sync_transform(image, mask)
        else:
            assert (self.mode == "test")
            image = self._img_transform(image)
            mask = self._mask_transform(mask)

        if self.transform is not None:
            image = self.transform(image)
        return image, mask

    # Number of evaluated (train) classes.
    classes = 19
    # Index assigned to void/ignored pixels after remapping.
    vague_idx = 19
    use_vague = True
    background_idx = -1
    ignore_bg = False

    # Lookup table from raw Cityscapes label ids (shifted by one via
    # _mapping below) to the 19 train ids; -1 marks ignored labels.
    _key = np.array([-1, -1, -1, -1, -1, -1,
                     -1, -1, 0, 1, -1, -1,
                     2, 3, 4, -1, -1, -1,
                     5, -1, 6, 7, 8, 9,
                     10, 11, 12, 13, 14, 15,
                     -1, -1, 16, 17, 18])
    # Raw label id domain: -1..33 (positions into _key via np.digitize).
    _mapping = np.array(range(-1, len(_key) - 1)).astype(np.int32)

    @staticmethod
    def _class_to_index(mask):
        # Every raw value must be a known Cityscapes label id.
        values = np.unique(mask)
        for value in values:
            assert(value in CityscapesSegDataset._mapping)
        # digitize(..., right=True) finds each value's position in _mapping,
        # which then indexes _key to produce the train id.
        index = np.digitize(mask.ravel(), CityscapesSegDataset._mapping, right=True)
        return CityscapesSegDataset._key[index].reshape(mask.shape)

    @staticmethod
    def _mask_transform(mask):
        # PIL mask -> int32 train-id array, with ignored (-1) pixels
        # rewritten to the vague index, as an MXNet CPU ndarray.
        np_mask = np.array(mask).astype(np.int32)
        np_mask = CityscapesSegDataset._class_to_index(np_mask)
        np_mask[np_mask == -1] = CityscapesSegDataset.vague_idx
        return mx.nd.array(np_mask, mx.cpu())

    def __len__(self):
        return len(self.images)
| [
"osemery@gmail.com"
] | osemery@gmail.com |
e344b960e2333887f922a30b9d47ef27e852e512 | cadb790b863a58c5a238fa6c6fa4970dccce3806 | /NewsReco/data.py | 232be8e5ef24063052342fa77057be6010a2e6b2 | [] | no_license | YUFEIFUT/RecoSys | faae62d3a049055857c7c5eb6e2f59b4cf08f039 | fd547489deacca6e9feb52197ddc25404f1e09cb | refs/heads/main | 2023-04-04T01:51:37.936992 | 2021-04-02T10:59:08 | 2021-04-02T10:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,380 | py | import argparse
import os
import random
from random import sample
import pandas as pd
from tqdm import tqdm
from utils import Logger
# Fix the sampling RNG so the offline validation split is reproducible.
random.seed(2020)

# Command-line arguments.
parser = argparse.ArgumentParser(description='数据处理')
parser.add_argument('--mode', default='valid')
parser.add_argument('--logfile', default='test.log')

args = parser.parse_args()

mode = args.mode
logfile = args.logfile

# Initialize logging.
os.makedirs('user_data/log', exist_ok=True)
log = Logger(f'user_data/log/{logfile}').logger
log.info(f'数据处理,mode: {mode}')
def data_offline(df_train_click, df_test_click):
train_users = df_train_click['user_id'].values.tolist()
# 随机采样出一部分样本 作为验证集
val_users = sample(train_users, 50000)
log.debug(f'val_users num: {len(set(val_users))}')
click_list = []
valid_query_list = []
# 训练集用户 抽出行为数据最后一条作为线下验证集
'''
从训练集用户中随机采样5w个用户作为作为线下验证集用户,将验证集用户的最后一次点击记录从原训练集的点击日志中剔除。
合并这时候的训练集点击日志和测试集点击日志作为总的历史点击记录,预测验证集用户的最后一次点击作为线下验证
'''
groups = df_train_click.groupby(['user_id'])
for user_id, g in tqdm(groups):
if user_id in val_users:
# 某用户的最后一条
valid_query = g.tail(1)
valid_query_list.append(
valid_query[['user_id', 'click_article_id']])
train_click = g.head(g.shape[0] - 1)
click_list.append(train_click)
else:
click_list.append(g)
df_train_click = pd.concat(click_list, sort=False)
df_valid_query = pd.concat(valid_query_list, sort=False)
test_users = df_test_click['user_id'].unique()
test_query_list = []
for user in tqdm(test_users):
test_query_list.append([user, -1])
# test的query的点击文章id都设为-1
df_test_query = pd.DataFrame(test_query_list,
columns=['user_id', 'click_article_id'])
df_query = pd.concat([df_valid_query, df_test_query],
sort=False).reset_index(drop=True)
df_click = pd.concat([df_train_click, df_test_click],
sort=False).reset_index(drop=True)
df_click = df_click.sort_values(['user_id',
'click_timestamp']).reset_index(drop=True)
log.debug(
f'df_query shape: {df_query.shape}, df_click shape: {df_click.shape}')
log.debug(f'{df_query.head()}')
log.debug(f'{df_click.head()}')
# 保存文件
os.makedirs('user_data/data/offline', exist_ok=True)
# 所有的点击记录
df_click.to_pickle('user_data/data/offline/click.pkl')
# 所有的查询记录
df_query.to_pickle('user_data/data/offline/query.pkl')
def data_online(df_train_click, df_test_click):
    """Build the online click/query pickles.

    Online mode has no validation set: queries are only the test users
    (label -1), and the click history is the full train + test click log.
    """
    # No validation set in the online setting.
    test_users = df_test_click['user_id'].unique()
    test_query_list = []

    for user in tqdm(test_users):
        test_query_list.append([user, -1])

    df_test_query = pd.DataFrame(test_query_list,
                                 columns=['user_id', 'click_article_id'])

    df_query = df_test_query
    df_click = pd.concat([df_train_click, df_test_click],
                         sort=False).reset_index(drop=True)
    df_click = df_click.sort_values(['user_id',
                                     'click_timestamp']).reset_index(drop=True)

    log.debug(
        f'df_query shape: {df_query.shape}, df_click shape: {df_click.shape}')
    log.debug(f'{df_query.head()}')
    log.debug(f'{df_click.head()}')

    # Save the outputs.
    os.makedirs('user_data/data/online', exist_ok=True)
    df_click.to_pickle('user_data/data/online/click.pkl')
    df_query.to_pickle('user_data/data/online/query.pkl')
if __name__ == '__main__':
    # Load the raw competition click logs.
    df_train_click = pd.read_csv('tcdata/train_click_log.csv')
    df_test_click = pd.read_csv('tcdata/testA_click_log.csv')

    log.debug(
        f'df_train_click shape: {df_train_click.shape}, df_test_click shape: {df_test_click.shape}'
    )

    # 'valid' builds the offline (with validation split) data; anything else
    # builds the online data.
    if mode == 'valid':
        data_offline(df_train_click, df_test_click)
    else:
        data_online(df_train_click, df_test_click)
| [
"yearing1017@126.com"
] | yearing1017@126.com |
a6b930da8b5c0ca6044ef06321c2affa169a44d7 | ca4644e188f0115fcf68050b7665013fa56d6e4b | /caw/widgets/memory.py | 63512f9c84eedafb05f4ac181244c58e576b780c | [] | no_license | tomkooij/caw | 6ab2673ef5644b720eb42174e98d41ef05bc074c | 795e6d7208f25d8be9aa76fb679c04c1f4e72b0c | refs/heads/master | 2021-01-20T15:30:47.097225 | 2015-04-22T20:02:42 | 2015-04-22T20:02:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | import caw.widget
import time
import re
class Memory(caw.widget.Widget):
    """ Display memory usage (percent used) parsed from /proc/meminfo."""
    def __init__(self, fg=None, **kwargs):
        # fg: foreground colour handed to the parent's draw_text().
        super(Memory, self).__init__(**kwargs)
        self.fg = fg

    def init(self, parent):
        # Called when the widget is attached to its bar; primes the first
        # reading (which also schedules all subsequent ones).
        super(Memory, self).init(parent)
        self.update()

    def update(self):
        # Refresh the reading, resize to fit the new text, ask the parent to
        # redraw us, then re-schedule ourselves to run again in 1 second.
        self.fetch_update_memory()
        self.width_hint = self.parent.text_width(self.text)
        self.parent.update(self);
        self.parent.schedule(1, self.update)

    def draw(self):
        self.parent.draw_text(self.text, fg=self.fg)

    def fetch_update_memory(self):
        # Parse MemFree/MemTotal (in kB) out of /proc/meminfo; falls back to
        # 'n/a' when either field is missing.
        memlines = open('/proc/meminfo', 'r').read()
        mfree = re.search('memfree:\s*(\d+)', memlines, re.IGNORECASE)
        mtotal = re.search('memtotal:\s*(\d+)', memlines, re.IGNORECASE)
        if mtotal and mfree:
            # NOTE(review): (total - free) / total counts buffers/page cache
            # as "used"; MemAvailable would track perceived usage better —
            # confirm the intended semantics before changing.
            usage = ((float(mtotal.group(1)) - int(mfree.group(1)))/int(mtotal.group(1))) * 100
            self.text = "%s%%"%int(usage)
        else:
            self.text = 'n/a'
| [
"arovit.kv@gmail.com"
] | arovit.kv@gmail.com |
e4ea607857e3d3c1a5b4248a00b50a7b7b12706d | 02dc35be5f88c3cf8043a894010d975b803484cf | /birthdays/tests/test_extend.py | 1864ed61eb5e67966f9a8501699f93f3fa6f5635 | [] | no_license | fako/birthdays | c5a4644760b7fc607c47d1911d79482d13ce60a9 | 7483500846a559e27781282f1e08a4ea0f6871af | refs/heads/master | 2021-01-10T18:34:11.752957 | 2016-01-24T14:08:46 | 2016-01-24T14:08:46 | 37,944,053 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py | from __future__ import unicode_literals, absolute_import, print_function, division
from django.test import TestCase
from django.db.models import Count
from birthdays.management.commands.extend_source import Command as ExtendCommand
from birthdays.models import Person, PersonSourceMockOne, PersonSourceMockTwo
class TestExtendCommand(TestCase):
    # Loads the Person/PersonSourceMock* records both tests operate on.
    fixtures = ["test.json"]

    def test_add_to_master(self):
        # Adding one source should create a master Person linked to it.
        ExtendCommand.add_to_master(PersonSourceMockOne)
        self.assertEqual(Person.objects.count(), 2)
        mp = Person.objects.last()
        self.assertEqual(mp.sources.count(), 1)
        # skipTest raises SkipTest *after* the assertions above have run, so
        # they are still exercised; the skip marks the remaining coverage
        # ("adding a city") as not yet written.
        self.skipTest("Test adding a city")

    def test_extend_master(self):
        # Extending with a second source should merge onto the existing
        # master Person rather than create a new one.
        ExtendCommand.add_to_master(PersonSourceMockOne)
        ExtendCommand.extend_master(PersonSourceMockTwo)
        self.assertEqual(Person.objects.annotate(num_sources=Count("sources")).filter(num_sources__gt=1).count(), 1)
        mp = Person.objects.annotate(num_sources=Count("sources")).get(num_sources__gt=1)
        # The merged person's properties come from both sources.
        self.assertEqual(sorted(mp.props.keys()), sorted(['address', 'occupation', 'sex', 'single']))
| [
"email@fakoberkers.nl"
] | email@fakoberkers.nl |
0e7865b0c9f1808ed678ee5a3bf249981acd6802 | 77d834eb125fdc56c96af31cf74db5b741c8e94e | /api/urls.py | cae969065eed895e664e8a23c4ef1c7f1ffdd113 | [] | no_license | zhouf00/learn_rest_framework | 7c17124fcb08ce48f54f94201f2da29e41e9d867 | a292e38ee9ff475e43ce4612fbb6c074b4073f84 | refs/heads/master | 2022-10-12T12:05:07.618651 | 2020-06-11T02:40:45 | 2020-06-11T02:40:45 | 268,827,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^books/$', views.Book.as_view()),
url(r'^books/(?P<pk>.*)/$', views.Book.as_view()),
url(r'^test/', views.Test.as_view()),
url(r'^test2/', views.Test2.as_view()),
] | [
"49618748+zhouf00@users.noreply.github.com"
] | 49618748+zhouf00@users.noreply.github.com |
2fc01026a955c327745cbfb6ddfc0b738eefe62a | 89b2f5b08c441d4af0a63ed2ec1a5889bc92f0f7 | /Python OOP 2020/OOP_2020_exam_prep/Exam_16.08.2020/re[enie/project/software/light_software.py | f2c765caf7d15b19e3c4052d5c7ec0ecdc509662 | [] | no_license | KoliosterNikolayIliev/Softuni_education | 68d7ded9564861f2bbf1bef0dab9ba4a788aa8dd | 18f1572d81ad9eb7edd04300deb8c81bde05d76b | refs/heads/master | 2023-07-18T09:29:36.139360 | 2021-08-27T15:04:38 | 2021-08-27T15:04:38 | 291,744,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | from project.software.software import Software
class LightSoftware(Software):
    """A 'Light' flavour of Software: it occupies 50% more capacity and only
    half the memory requested at construction time."""

    def __init__(self, name: str, capacity_consumption: int, memory_consumption: int):
        # Let the base class store the raw values, then adjust them.
        super().__init__(name, 'Light', capacity_consumption, memory_consumption)
        boosted_capacity = int(1.5 * self.capacity_consumption)
        halved_memory = int(0.5 * self.memory_consumption)
        self.capacity_consumption = boosted_capacity
        self.memory_consumption = halved_memory
| [
"65191727+KoliosterNikolayIliev@users.noreply.github.com"
] | 65191727+KoliosterNikolayIliev@users.noreply.github.com |
88604d9ea2e04d2cbbda3eeb010b376700e444ff | a3e86193eb50d90b01135a8a1f51330e10624d7d | /Quadrupole/computeHausdorff.py | eda5b6dab99e327de6f6bb4053d4121ee3a71399 | [] | no_license | yangyutu/Diffusion-mapping | b1f461b5c3d37e4b07a733eb28674b7dde140fa4 | 2e6b151dc7ced1c66589b4e56383a08764e52319 | refs/heads/master | 2021-01-11T03:05:47.610753 | 2019-08-23T12:23:13 | 2019-08-23T12:23:13 | 71,095,710 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 23 13:09:18 2015
@author: yuugangyang
"""
import numpy as np
def compute_dist(A, B):
    """Directed Hausdorff distance from point set A to point set B.

    For each row (point) of A, find its nearest Euclidean neighbour among the
    rows of B; return the largest of those nearest-neighbour distances.
    """
    dim = A.shape[1]
    nearest = []
    for row in range(A.shape[0]):
        # Tile A's current point across B's rows, then take row-wise norms.
        tiled = np.dot(np.ones((B.shape[0], 1)), A[row, :].reshape(1, A.shape[1]))
        sq_diff = (tiled - B) * (tiled - B)
        dists = np.sqrt(np.dot(sq_diff, np.ones((dim, 1))))
        nearest.append(np.min(dists))
    return max(np.array(nearest))
def computeHausdorffDistance(A, B):
    """Symmetric Hausdorff distance between point sets A and B.

    A and B may have different numbers of rows (points), but must have the
    same number of columns (dimensions).

    Returns None (after printing a warning) when the dimensions differ.

    Fix: the original used a Python-2-only ``print`` statement, which is a
    SyntaxError on Python 3; the function-call form below behaves identically
    on both interpreters.
    """
    if not A.shape[1] == B.shape[1]:
        print("dimension not matched!")
        return
    d1 = compute_dist(A, B)
    d2 = compute_dist(B, A)
    dH = max(d1, d2)
    return dH
if __name__ == '__main__':
    # Smoke test: Hausdorff distance between two random 5x5 point sets.
    A = np.random.randn(5, 5)
    B = np.random.randn(5, 5)
    dh = computeHausdorffDistance(A, B)
    # Fix: the original `print dh` is a Python-2-only statement (SyntaxError
    # on Python 3); the call form prints the same thing on both.
    print(dh)
"yangyutu123@gmail.com"
] | yangyutu123@gmail.com |
25960eafa19a79c0bf57da6562c5779f6f27e566 | d74913eda69ee1799c887a645c574fa5a4da8fba | /code/daymet/daymet_download.py | fdb08ec202d56e76ce15de51cf4e66b91bdb7643 | [
"Apache-2.0"
] | permissive | Fweek/pyMETRIC | efd6fe8c6ea74f5c87d19ecbb6653549fb3ba943 | 0e7eec57fedd33b81e6e7efe58290f50ebbebfab | refs/heads/master | 2021-05-03T10:23:15.066106 | 2018-02-06T19:32:36 | 2018-02-06T19:32:36 | 120,534,046 | 1 | 0 | null | 2018-02-06T23:00:49 | 2018-02-06T23:00:48 | null | UTF-8 | Python | false | false | 6,295 | py | #--------------------------------
# Name: daymet_download.py
# Purpose: Download DAYMET data
# Python: 2.7, 3.5, 3.6
#--------------------------------
import argparse
import datetime as dt
import logging
import os
import sys
from python_common import date_range, valid_date, url_download
def main(netcdf_ws=os.getcwd(), variables=['all'],
start_date=None, end_date=None,
overwrite_flag=False):
"""Download DAYMET netcdf files
Data is currently only available for 1980-2014
Data for 2015 will need to be downloaded a different way
Args:
netcdf_ws (str): root folder of DAYMET data
variables (list): DAYMET variables to download
('prcp', 'srad', 'vp', 'tmmn', 'tmmx')
Set as ['all'] to download all available variables
start_date (str): ISO format date (YYYY-MM-DD)
end_date (str): ISO format date (YYYY-MM-DD)
overwrite_flag (bool): if True, overwrite existing files
Returns:
None
"""
logging.info('\nDownloading DAYMET data')
site_url = 'http://thredds.daac.ornl.gov/thredds/fileServer/ornldaac/1328'
# site_url = 'http://daac.ornl.gov/data/daymet/Daymet_mosaics/data'
# If a date is not set, process 2015
try:
start_dt = dt.datetime.strptime(start_date, '%Y-%m-%d')
logging.debug(' Start date: {}'.format(start_dt))
except Exception as e:
start_dt = dt.datetime(2015, 1, 1)
logging.info(' Start date: {}'.format(start_dt))
logging.debug(e)
try:
end_dt = dt.datetime.strptime(end_date, '%Y-%m-%d')
logging.debug(' End date: {}'.format(end_dt))
except Exception as e:
end_dt = dt.datetime(2015, 12, 31)
logging.info(' End date: {}'.format(end_dt))
logging.debug(e)
# DAYMET rasters to extract
var_full_list = ['prcp', 'srad', 'vp', 'tmin', 'tmax']
if not variables:
logging.error('\nERROR: variables parameter is empty\n')
sys.exit()
elif type(variables) is not list:
# DEADBEEF - I could try converting comma separated strings to lists?
logging.warning('\nERROR: variables parameter must be a list\n')
sys.exit()
elif 'all' in variables:
logging.error('\nDownloading all variables\n {}'.format(
','.join(var_full_list)))
var_list = var_full_list
elif not set(variables).issubset(set(var_full_list)):
logging.error('\nERROR: variables parameter is invalid\n {}'.format(
variables))
sys.exit()
else:
var_list = variables[:]
# Build output workspace if it doesn't exist
if not os.path.isdir(netcdf_ws):
os.makedirs(netcdf_ws)
# DAYMET data is stored by year
year_list = sorted(list(set([
i_dt.year for i_dt in date_range(
start_dt, end_dt + dt.timedelta(1))])))
year_list = list(map(lambda x: '{:04d}'.format(x), year_list))
# Set data types to upper case for comparison
var_list = list(map(lambda x: x.lower(), var_list))
# Each sub folder in the main folder has all imagery for 1 day
# The path for each subfolder is the /YYYY/MM/DD
logging.info('')
for year_str in year_list:
logging.info(year_str)
# Process each file in sub folder
for variable in var_list:
file_name = 'daymet_v3_{}_{}_na.nc4'.format(variable, year_str)
file_url = '{}/{}/{}'.format(site_url, year_str, file_name)
save_path = os.path.join(netcdf_ws, file_name)
logging.info(' {}'.format(file_name))
logging.debug(' {}'.format(file_url))
logging.debug(' {}'.format(save_path))
if os.path.isfile(save_path):
if not overwrite_flag:
logging.debug(' File already exists, skipping')
continue
else:
logging.debug(' File already exists, removing existing')
os.remove(save_path)
url_download(file_url, save_path)
logging.debug('\nScript Complete')
def arg_parse():
"""
Base all default folders from script location
scripts: ./pyMETRIC/code/daymet
code: ./pyMETRIC/code
output: ./pyMETRIC/daymet
"""
script_folder = sys.path[0]
code_folder = os.path.dirname(script_folder)
project_folder = os.path.dirname(code_folder)
daymet_folder = os.path.join(project_folder, 'daymet')
parser = argparse.ArgumentParser(
description='Download daily DAYMET data',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--netcdf', default=os.path.join(daymet_folder, 'netcdf'),
metavar='PATH', help='Output netCDF folder path')
parser.add_argument(
'--vars', default=['all'], nargs='+',
choices=['all', 'prcp', 'srad', 'vp', 'tmin', 'tmax'],
help='DAYMET variables to download')
parser.add_argument(
'--start', default='2015-01-01', type=valid_date,
help='Start date (format YYYY-MM-DD)', metavar='DATE')
parser.add_argument(
'--end', default='2015-12-31', type=valid_date,
help='End date (format YYYY-MM-DD)', metavar='DATE')
parser.add_argument(
'-o', '--overwrite', default=False, action="store_true",
help='Force overwrite of existing files')
parser.add_argument(
'--debug', default=logging.INFO, const=logging.DEBUG,
help='Debug level logging', action="store_const", dest="loglevel")
args = parser.parse_args()
# Convert relative paths to absolute paths
if args.netcdf and os.path.isdir(os.path.abspath(args.netcdf)):
args.netcdf = os.path.abspath(args.netcdf)
return args
if __name__ == '__main__':
args = arg_parse()
logging.basicConfig(level=args.loglevel, format='%(message)s')
logging.info('\n{}'.format('#' * 80))
logging.info('{:<20s} {}'.format(
'Run Time Stamp:', dt.datetime.now().isoformat(' ')))
logging.info('{:<20s} {}'.format(
'Script:', os.path.basename(sys.argv[0])))
main(netcdf_ws=args.netcdf, variables=args.vars,
start_date=args.start, end_date=args.end,
overwrite_flag=args.overwrite)
| [
"dgketchum@gmail.com"
] | dgketchum@gmail.com |
806abcf8960a76c7d2f3f8225378822b2c9cef55 | edd8ad3dcb6ee9b019c999b712f8ee0c468e2b81 | /Review/Chapter09/Exercise/9-11.py | 39c21f92bd2548e0601a8b682936e4b6b46c8f1c | [] | no_license | narinn-star/Python | 575cba200de35b9edf3832c4e41ccce657075751 | 14eba211cd3a9e9708a30073ba5b31d21d39eeef | refs/heads/master | 2023-05-25T22:57:26.079294 | 2021-06-07T15:29:39 | 2021-06-07T15:29:39 | 331,647,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | from tkinter import*
from time import*
from tkinter.messagebox import showinfo
top = Tk()
def click1():
time = strftime('Day: %d %b %Y\nTime: %H:%M:%S %p\n', localtime())
showinfo(message=time, title='Local time')
def click2():
time = strftime('Day: %d %b %Y\nTime: %H:%M:%S %p\n', gmtime())
showinfo(message=time, title='Greenwich time')
ltbutton = Button(top, text='Local time', command=click1)
gtbutton = Button(top, text='Greenwich time', command=click2)
ltbutton.pack(side=LEFT)
gtbutton.pack(side=LEFT)
top.mainloop() | [
"skfls2618@naver.com"
] | skfls2618@naver.com |
b8b2b3ab91eca94d428151711819f43ea0321bb1 | 6b33a54d14424bb155a4dd307b19cfb2aacbde43 | /bioinformatics/analysis/rnaseq/circRNA/circ_repeat_analysis.py | 58810b979f0684734ac01601eef30c8fe257c6ff | [
"MIT"
] | permissive | bioShaun/omsCabinet | 4905ab022dea1ec13df5982877dafbed415ee3d2 | 741179a06cbd5200662cd03bc2e0115f4ad06917 | refs/heads/master | 2021-01-25T11:56:38.524299 | 2020-02-09T09:12:30 | 2020-02-09T09:12:30 | 123,445,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,443 | py | import os
import click
import pandas as pd
REPEAT_HEADER = [
'chrom',
'start',
'end',
'circRNAID',
'score',
'strand',
'region',
'repeat_chrom',
'repeat_start',
'repeat_end',
'repeat_id',
'repeat_score',
'repeat_strand',
'repeat_type',
'repeat_class',
'overlap'
]
REGION_MAP = {
'up': 'flankIntronUpSINE',
'down': 'flankIntronDownSINE',
}
OUT_COL = [
'flankIntronUpSINE',
'flankIntronDownSINE',
]
def reapeat_type_stats(repeat_df):
repeat_type_df = repeat_df.loc[:, ['region', 'repeat_class']]
t_num = repeat_type_df.groupby(['region']).size()
repeat_type_num = repeat_type_df.groupby(
['region'])['repeat_class'].value_counts()
rp_portion = pd.DataFrame(repeat_type_num / t_num)
rp_portion.columns = ['portion']
return rp_portion
def get_sine_content(repeat_df):
repeat_df.region.replace(REGION_MAP, inplace=True)
sine_df = repeat_df[repeat_df.repeat_class == 'Type I Transposons/SINE']
sine_counts = sine_df.groupby(
['circRNAID', 'region', 'repeat_class']).size()
sine_counts = pd.DataFrame(sine_counts)
sine_counts.columns = ['counts']
sine_counts.index = sine_counts.index.droplevel('repeat_class')
sine_counts = sine_counts.unstack('region')
sine_counts.columns = sine_counts.columns.droplevel()
return sine_counts
@click.command()
@click.argument(
'repeat_overlap',
type=click.Path(dir_okay=False, exists=True),
required=True
)
@click.argument(
'name',
type=click.STRING,
required=True
)
@click.argument(
'out_dir',
type=click.Path(file_okay=False, exists=True),
required=True
)
def main(repeat_overlap, name, out_dir):
repeat_df = pd.read_table(repeat_overlap, header=None,
names=REPEAT_HEADER)
# get repeat class portion
rp_class_portion = reapeat_type_stats(repeat_df)
rp_class_file = os.path.join(
out_dir, '{n}.repeat.class.txt'.format(n=name))
rp_class_portion.to_csv(rp_class_file, sep='\t')
# get SINE content for each circRNA up/down stream flank intron
sine_content_file = os.path.join(
out_dir, '{n}.SINE.content.txt'.format(n=name)
)
sine_content_df = get_sine_content(repeat_df)
sine_content_df = sine_content_df.loc[:, OUT_COL]
sine_content_df.to_csv(sine_content_file, sep='\t', na_rep=0)
if __name__ == '__main__':
main()
| [
"ricekent@163.com"
] | ricekent@163.com |
f6f602813e8d149331f616953fcebe2f7c6aa15e | 6cfc842b7dc1c2628d9e7ef69cdd52b7279a409d | /business/member/member_notice.py | 4bbeabd761e742387154dd80e2953e90aa965e53 | [] | no_license | vothin/requsets_test | 6fbf4ec2206b54d150d253700ba62bfa51c32e7f | 235200a67c1fb125f75f9771808f6655a7b14202 | refs/heads/master | 2021-07-07T06:48:50.528885 | 2020-12-25T04:19:12 | 2020-12-25T04:19:12 | 218,724,714 | 0 | 0 | null | 2020-06-23T08:04:36 | 2019-10-31T09:02:12 | Python | UTF-8 | Python | false | false | 2,683 | py | # -*- coding:utf-8 -*-
'''
@author: Vothin
@software: 自动化测试
@file: member_notice.py
@time: 2019/11/19 15:21
@desc:
'''
# ********************************************************
from common.requests_test import Requests_Test
from common.change_param import Change_Param
from common.recordlog import logs
class Member_Notice(Requests_Test):
# 查询会员站内消息历史列表
def get_member_nocice_logs(self, username=None, password=None, data=None, prod=None):
'''
相关参数有: page_no 页码
page_size 每页显示数量
read 是否已读,1已读,0未读,可用值:0,1
'''
# 调用Change_Param类
cu = Change_Param(username, password, prod)
gu = cu.get_params()
# 拼接url
self.suffix = self.c.get_value('Member', 'members_nocice_logs')
self.url = self.url_joint(prod) + gu[1]
logs.info('test url:%s' % self.url)
return self.get_requests(self.url, gu[0], data)
# 删除会员站内消息历史
def del_member_nocice_logs_ids(self, ids, username=None, password=None, data=None, prod=None):
'''
相关参数有: ids 要删除的消息主键
'''
# 调用Change_Param类
cu = Change_Param(username, password, prod)
gu = cu.get_params()
# 拼接url
self.suffix = self.c.get_value('Member', 'members_nocice_logs_ids')
self.suffix = self.suffix.format(ids)
self.url = self.url_joint(prod) + gu[1]
logs.info('test url:%s' % self.url)
return self.del_requests(self.url, gu[0], data)
# 将消息设置为已读
def put_member_nocice_logs_ids(self, ids, username=None, password=None, data=None, prod=None):
'''
相关参数有: ids 要设置为已读消息的id
'''
# 调用Change_Param类
cu = Change_Param(username, password, prod)
gu = cu.get_params()
# 拼接url
self.suffix = self.c.get_value('Member', 'members_nocice_logs_read')
self.suffix = self.suffix.format(ids)
self.url = self.url_joint(prod) + gu[1]
logs.info('test url:%s' % self.url)
return self.put_requests(self.url, gu[0], data)
if __name__ == '__main__':
m = Member_Notice()
# result = m.get_member_nocice_logs('13412345678', '123456')
# result = m.del_member_nocice_logs_ids('858', '13412345678', '123456')
result = m.put_member_nocice_logs_ids('859', '13412345678', '123456')
print(result)
print(result.text) | [
"zy757161350@qq.com"
] | zy757161350@qq.com |
859b6517dab3dc3b24d760ab570d441a360b391c | cd3df53a432d35e2fe7b4e4f9bbe62222235a85b | /tests/port_tests/point_node_tests/test_equals.py | 74729bb6e9f0675b7ea2ee6c8009062e680cbf6a | [
"MIT"
] | permissive | vincentsarago/wagyu | 00ccbe6c9d101724483bde00e10ef512d2c95f9a | f6dce8d119fafa190d07f042ff6c4d5729a4c1e6 | refs/heads/master | 2023-01-20T06:26:27.475502 | 2020-11-21T04:57:01 | 2020-11-21T04:57:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | from hypothesis import given
from tests.utils import (equivalence,
implication)
from wagyu.point_node import PointNode
from . import strategies
@given(strategies.points_nodes)
def test_reflexivity(point_node: PointNode) -> None:
assert point_node == point_node
@given(strategies.points_nodes, strategies.points_nodes)
def test_symmetry(first_point: PointNode,
second_point: PointNode) -> None:
assert equivalence(first_point == second_point,
second_point == first_point)
@given(strategies.points_nodes, strategies.points_nodes,
strategies.points_nodes)
def test_transitivity(first_point: PointNode, second_point: PointNode,
third_point: PointNode) -> None:
assert implication(first_point == second_point
and second_point == third_point,
first_point == third_point)
@given(strategies.points_nodes, strategies.points_nodes)
def test_connection_with_inequality(first_point: PointNode,
second_point: PointNode) -> None:
assert equivalence(not first_point == second_point,
first_point != second_point)
| [
"azatibrakov@gmail.com"
] | azatibrakov@gmail.com |
a3f03b04f4becf6a714e2d02f43d566d360beac1 | 32a1802dccb8a143532f8ef419a95fd7f1973bc4 | /movies_order/test_api/movies_tickets/urls.py | d2e23ac0a4e0c8be41adff958ff2f262b3e085ab | [] | no_license | zhmaoli/Django | b8171ba1f1612dc7ae61b58b718965a64db81c69 | 45586e782a741ba3bf64c9023e805f6f4e6496f8 | refs/heads/master | 2021-02-27T08:57:41.193488 | 2019-08-01T09:17:07 | 2019-08-01T09:17:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | #coding=utf8
from django.urls import path
from movies_tickets.views import MovieBaseAPI, MovieDetailAPI, CinemasApi, CinemaDetailAPI, MovieSessionsAPI, ALLCinemasApi, MovieSessionByDateAPI, CinemaSessionByMovieAPI, OrderApi
urlpatterns = [
path('movies', MovieBaseAPI.as_view()),
path('movies/<int:movie_base_id>', MovieDetailAPI.as_view()),
path('movies/<int:movie_base_id>/allCinemas', ALLCinemasApi.as_view()),
path('movies/<int:movie_base_id>/cinemasDetail/cinema_id=<int:cinema_id>', CinemaDetailAPI.as_view()),
path('movies/<int:movie_base_id>/cinemaSession/cinema_name=<str:cinema_name>', CinemaSessionByMovieAPI.as_view()),
path('cinemas', CinemasApi.as_view()),
path('cinemas/sessions/<str:cinema_name>', MovieSessionsAPI.as_view()),
path('cinemas/sessions/<str:cinema_name>/<str:day>', MovieSessionByDateAPI.as_view()),
path('order/<int:movie_base_id>/<int:cinema_id>/<str:time>/<str:begin>/<str:end>/<str:hall>/<str:lang>/<str:date>/<str:seats_num>', OrderApi.as_view()),
]
| [
"1278077260@qq.com"
] | 1278077260@qq.com |
0fdd0f8836f3da67eac1ebb538065344528441e7 | 55afd3bbe5187dba96be169a7c068c7cf7543447 | /article17/habitatsummary/attrs_conclusion/td_coverage_conclusion.py | af658f6bc3618ee5d27b1851f38e0c04ed2d6749 | [] | no_license | eaudeweb/art17-2006 | 6d9413439e10f4db0b72fc49c80b7c50ee1ef59e | 4bc61cd2972f94769dae97b95ccb55f2a0952cf1 | refs/heads/master | 2016-09-05T13:33:19.280952 | 2014-01-30T09:54:27 | 2014-01-30T09:54:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | # Script (Python)
# /article17/habitatsummary/attrs_conclusion/td_coverage_conclusion
# params: 'habitat, region, record, conclusions'
## Script (Python) "td_coverage_conclusion"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=habitat, region, record, conclusions
##title=
##
output = context.background_colour(record['conclusion_area'], 'center', conclusions)
title = output.get('title', '')
method = record['method_area'] or ''
cursor = context.sql_methods.get_coverage_conclusion_value(habitatcode=habitat, region=region, assessment_method=method)
if len(cursor):
concl_value = cursor[0]['percentage_coverage_surface_area']
if concl_value:
title = "%s: %s" % (title, concl_value)
output.update({
'content': method,
'title': title,
})
return output
| [
"cornel.nitu@eaudeweb.ro"
] | cornel.nitu@eaudeweb.ro |
ee7dfa406cafdde059c7ddd7449459805f72d265 | 2d191eb46ed804c9029801832ff4016aeaf8d31c | /configs/ssl/ssl_deeplabv3plus_r101-d8_512x1024_40k_b16_cityscapes_baseline_only_label.py | b4650b59ec2408b9e04a69cbca6f0c5e150fbcdf | [
"Apache-2.0"
] | permissive | openseg-group/mmsegmentation | df99ac2c3510b7f2dff92405aae25026d1023d98 | 23939f09d2b0bd30fc26eb7f8af974f1f5441210 | refs/heads/master | 2023-03-02T07:49:23.652558 | 2021-02-15T04:16:28 | 2021-02-15T04:16:28 | 278,537,243 | 2 | 2 | null | 2020-07-10T04:24:16 | 2020-07-10T04:24:15 | null | UTF-8 | Python | false | false | 2,192 | py | _base_ = [
'../_base_/models/deeplabv3plus_r50-d8.py',
'../_base_/datasets/cityscapes.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet101_v1c',
backbone=dict(
type='ResNetV1c',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='DepthwiseSeparableASPPHead',
in_channels=2048,
in_index=3,
channels=512,
dilations=(1, 12, 24, 36),
c1_in_channels=256,
c1_channels=48,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)))
# model training and testing settings
train_cfg = dict() # set the weight for the consistency loss
test_cfg = dict(mode='whole')
optimizer = dict(lr=0.02)
lr_config = dict(min_lr=1e-4)
data_root='../../../../dataset/cityscapes/'
dataset_type = 'CityscapesDataset'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir=['train/image'],
ann_dir=['train/label'],
split = ['train.txt']),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='val/image',
ann_dir='val/label'),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='val/image',
ann_dir='val/label'))
find_unused_parameters=True | [
"yhyuan@pku.edu.cn"
] | yhyuan@pku.edu.cn |
0e53dcd8e4f627c22c5c0f8ce783e14019cf28ba | 3adce822439943250c1a1578cb9edd285bfaf0ce | /django/generate_fixtures.py | 5861886990481ab9436326744166931eae82fd3a | [
"MIT",
"Apache-2.0"
] | permissive | resilientred/skaffold | feceb71acfa9183db2866f0c00904f4c6e00b38b | 0d705d3907bc05e781141f62002a981683813658 | refs/heads/master | 2021-05-02T01:42:43.529831 | 2015-07-14T05:34:36 | 2015-07-14T05:34:36 | 120,873,685 | 1 | 0 | null | 2018-02-09T07:40:31 | 2018-02-09T07:40:31 | null | UTF-8 | Python | false | false | 473 | py | from django.core.management.base import BaseCommand, CommandError
from {{{ project }}}.{{{ app_name }}} import model_factories
MAX_RECORDS = 10
class Command(BaseCommand):
help = 'Adds all fixture data.'
def handle(self, *args, **options):
for _ in xrange(MAX_RECORDS):
{%% for model_name in all_models %%}
{%% set model_name = model_name|capitalize %%}
model_factories.{{{ model_name }}}Factory()
{%% endfor %%}
| [
"dxdstudio@gmail.com"
] | dxdstudio@gmail.com |
e30b2915f9c4592a949d08e8cd4cd02350fe10d1 | e1103b8818d071e313a4d8e4bc60e3649d0890b6 | /becausethenight/settings.py | cb6405b79ee7b4294ba6f15da343028303e9b7f8 | [] | no_license | programiranje3/2018 | ddab506f2a26039b365483ab33177951d5e15fbb | 2baf17741d77630199b377da59b5339fd9bfb2ca | refs/heads/master | 2020-03-28T22:01:16.724380 | 2018-12-12T16:23:51 | 2018-12-12T16:23:51 | 149,199,466 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | """Project configuration settings (PROJECT_DIR etc.)
"""
import os
# print(__file__)
# print(os.path.abspath(__file__))
# print(os.path.dirname(__file__))
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
# print(PROJECT_DIR)
# print(type(PROJECT_DIR))
| [
"devedzic@gmail.com"
] | devedzic@gmail.com |
63622122aa5f11b9e93809e26314da2b4c47f028 | beba26fc9dca330a5d4b92ef22803c4d861209e7 | /app.py | 2c75511d8d5dea50b343deb5890ee03f39f6d2b6 | [] | no_license | Jrius4/ovc_problem_solver | 35df08bf357be3309cddd1b403acfdcb41a0697d | c4384c48d68ef8b18418230f9647d8af77cef537 | refs/heads/main | 2023-08-30T04:56:26.134183 | 2021-10-24T21:43:54 | 2021-10-24T21:43:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,214 | py | from openpyxl import load_workbook
wb = load_workbook("input/input.xlsx")
ws = wb.active
data = []
def getcolumns(a:str,b:str):
a = str(a).lower()
b = str(b).lower()
if a == "":
return ((26*(0))+int(ord(b)-97))
else:
return ((26*(int(ord(a)-97)+1))+int(ord(b)-97))
for row in ws.iter_rows(min_col=1,min_row= 3,max_col=ws.max_column,max_row=ws.max_row,values_only=True):
data.append(row)
print("\n\n")
print(data[0])
print("\n\n")
print(data[0][getcolumns("b","e")])
print("\n\n")
print(len(data))
print("\n\n")
# no of ovc (0-17) with HIV Positive Caregiver
hh_with_positve_cg = []
hh_no_with_positve_cg = []
for i in data:
if i[getcolumns("b","e")] == "HIV Positive" and i[getcolumns("","l")] == i[getcolumns("","o")]:
hh_with_positve_cg.append([i[getcolumns("","m")],i[getcolumns("","l")],i[getcolumns("","o")],i[getcolumns("c","m")]])
hh_no_with_positve_cg.append(i[getcolumns("c","m")])
print("\n\n")
print(hh_with_positve_cg)
print("\n\n")
print(hh_no_with_positve_cg)
y_ages = []
for age in hh_with_positve_cg:
if age[0] <=17:
y_ages.append(age)
print("\n\n")
print(f"\n\n hh of 17 yrs")
print(y_ages)
print("\n\n")
# filter out ovcs from -->> no of ovc (0-17) with HIV Positive Caregiver
# 571 ovc with positive cg
ovc_with_positive_cg = []
for i in data:
if i[getcolumns("","m")] <= 17 and i[getcolumns("c","m")] in hh_no_with_positve_cg:
ovc_with_positive_cg.append([i[getcolumns("","m")],i[getcolumns("","l")],i[getcolumns("","o")],i[getcolumns("c","m")]])
print("\n\n ovc_with_positive_cg ")
print(ovc_with_positive_cg)
print("\n\n")
print("\n\n")
print(len(ovc_with_positive_cg))
k = []
for i in ovc_with_positive_cg:
if i[2] == 'NALWEYISO DIANA':
k.append(i)
print("\n\n 'NALWEYISO DIANA' ")
print(k)
print("\n\n")
print("\n\n")
print(len(k))
print("\n\n")
print("\n\n")
###########################################################
############################################################
############################################################
# no hh with hiv positve cg and clhiv (<18)
hh_with_positve_pp_lhiv = []
hh_no_with_positve_pp_lhiv = []
for i in data:
if i[getcolumns("b","e")] == "HIV Positive":
hh_with_positve_pp_lhiv.append([i[getcolumns("","m")],i[getcolumns("","l")],i[getcolumns("","o")],i[getcolumns("c","m")]])
hh_no_with_positve_pp_lhiv.append(i[getcolumns("c","m")])
print("\n\n hh with hh_with_positve_pp_lhiv")
print(hh_with_positve_pp_lhiv)
print("\n\n")
print("\n\n")
print(len(hh_with_positve_pp_lhiv))
print("\n\n")
print(hh_no_with_positve_pp_lhiv)
print("\n\n")
print(len(hh_no_with_positve_pp_lhiv))
print(sorted(hh_no_with_positve_pp_lhiv))
result = []
for i in hh_no_with_positve_pp_lhiv:
if i not in result:
result.append(i)
print("\n\n")
print(sorted(result))
result_2 = result
for i in result_2:
if i not in hh_no_with_positve_cg:
result_2.remove(i)
print("\n\n")
print(sorted(result_2))
print(len(sorted(result_2)))
set_all_pos = set(result)
set_all_pos_cg = set(hh_no_with_positve_cg)
set_intersec = set_all_pos.intersection(set_all_pos_cg)
print(len(set_intersec)) | [
"kazibwejuliusjunior@gmail.com"
] | kazibwejuliusjunior@gmail.com |
e9f1dbf7ffe89c3abe4b0a7736744c5fe30fbc5c | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/xia2/Schema/XSample.py | fcc4a0ae567f8ccbd030bab4850f9d9af13a6299 | [
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 3,748 | py | #!/usr/bin/env python
# XSample.py
# Copyright (C) 2015 Diamond Light Source, Richard Gildea
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
from __future__ import absolute_import, division, print_function
class XSample(object):
"""An object representation of a sample."""
def __init__(self, name, crystal):
"""Create a new sample named name, belonging to XCrystal object crystal."""
# check that the crystal is an XCrystal
if not crystal.__class__.__name__ == "XCrystal":
pass
# set up this object
self._name = name
self._crystal = crystal
# then create space to store things which are contained
# in here - the sweeps
self._sweeps = []
self._multi_indexer = None
return
def get_epoch_to_dose(self):
from xia2.Modules.DoseAccumulate import accumulate_dose
epoch_to_dose = accumulate_dose(
[sweep.get_imageset() for sweep in self._sweeps]
)
return epoch_to_dose
# from matplotlib import pyplot
# for i, sweep in enumerate(self._sweeps):
# epochs = sweep.get_imageset().get_scan().get_epochs()
# pyplot.scatter(
# list(epochs), [epoch_to_dose[e] for e in epochs],
# marker='+', color='bg'[i])
# pyplot.show()
# serialization functions
def to_dict(self):
obj = {}
obj["__id__"] = "XSample"
import inspect
attributes = inspect.getmembers(self, lambda m: not (inspect.isroutine(m)))
for a in attributes:
if a[0] == "_sweeps":
sweeps = []
for sweep in a[1]:
sweeps.append(sweep.to_dict())
obj[a[0]] = sweeps
elif a[0] == "_crystal":
# don't serialize this since the parent xsample *should* contain
# the reference to the child xsweep
continue
elif a[0] == "_multi_indexer" and a[1] is not None:
obj[a[0]] = a[1].to_dict()
elif a[0].startswith("__"):
continue
else:
obj[a[0]] = a[1]
return obj
@classmethod
def from_dict(cls, obj):
assert obj["__id__"] == "XSample"
return_obj = cls(name=None, crystal=None)
for k, v in obj.iteritems():
if k == "_sweeps":
v = [s_dict["_name"] for s_dict in v]
elif k == "_multi_indexer" and v is not None:
from libtbx.utils import import_python_object
cls = import_python_object(
import_path=".".join((v["__module__"], v["__name__"])),
error_prefix="",
target_must_be="",
where_str="",
).object
v = cls.from_dict(v)
setattr(return_obj, k, v)
return return_obj
def get_output(self):
result = "Sample name: %s\n" % self._name
result += "Sweeps:\n"
return result[:-1]
def get_crystal(self):
return self._crystal
def get_name(self):
return self._name
def add_sweep(self, sweep):
self._sweeps.append(sweep)
def get_sweeps(self):
return self._sweeps
def set_multi_indexer(self, multi_indexer):
self._multi_indexer = multi_indexer
def get_multi_indexer(self):
return self._multi_indexer
def remove_sweep(self, sweep):
"""Remove a sweep object from this wavelength."""
try:
self._sweeps.remove(sweep)
except ValueError:
pass
return
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
6ae91e2b8f53229d6c371a8f235392667d79ab8a | 12258001571bd504223fbf4587870960fa93a46d | /mud/django-haystack-2.3.2/haystack/admin.py | 6b0b0988dfc87447577e3757ac8592a3289d131f | [
"BSD-3-Clause",
"MIT"
] | permissive | Nik0las1984/mud-obj | 0bd71e71855a9b0f0d3244dec2c877bd212cdbd2 | 5d74280724ff6c6ac1b2d3a7c86b382e512ecf4d | refs/heads/master | 2023-01-07T04:12:33.472377 | 2019-10-11T09:10:14 | 2019-10-11T09:10:14 | 69,223,190 | 2 | 0 | null | 2022-12-26T20:15:20 | 2016-09-26T07:11:49 | Python | UTF-8 | Python | false | false | 6,639 | py | from __future__ import unicode_literals
from django import template
from django.contrib.admin.options import csrf_protect_m, ModelAdmin
from django.contrib.admin.views.main import ChangeList, SEARCH_VAR
from django.core.exceptions import PermissionDenied
from django.core.paginator import InvalidPage, Paginator
from django.shortcuts import render_to_response
from django.utils.translation import ungettext
from haystack import connections
from haystack.query import SearchQuerySet
from haystack.utils import get_model_ct_tuple
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
def list_max_show_all(changelist):
"""
Returns the maximum amount of results a changelist can have for the
"Show all" link to be displayed in a manner compatible with both Django
1.4 and 1.3. See Django ticket #15997 for details.
"""
try:
# This import is available in Django 1.3 and below
from django.contrib.admin.views.main import MAX_SHOW_ALL_ALLOWED
return MAX_SHOW_ALL_ALLOWED
except ImportError:
return changelist.list_max_show_all
class SearchChangeList(ChangeList):
def __init__(self, **kwargs):
self.haystack_connection = kwargs.pop('haystack_connection', 'default')
super(SearchChangeList, self).__init__(**kwargs)
def get_results(self, request):
if not SEARCH_VAR in request.GET:
return super(SearchChangeList, self).get_results(request)
# Note that pagination is 0-based, not 1-based.
sqs = SearchQuerySet(self.haystack_connection).models(self.model).auto_query(request.GET[SEARCH_VAR]).load_all()
paginator = Paginator(sqs, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
full_result_count = SearchQuerySet(self.haystack_connection).models(self.model).all().count()
can_show_all = result_count <= list_max_show_all(self)
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
try:
result_list = paginator.page(self.page_num+1).object_list
# Grab just the Django models, since that's what everything else is
# expecting.
result_list = [result.object for result in result_list]
except InvalidPage:
result_list = ()
self.result_count = result_count
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
class SearchModelAdmin(ModelAdmin):
# haystack connection to use for searching
haystack_connection = 'default'
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
if not self.has_change_permission(request, None):
raise PermissionDenied
if not SEARCH_VAR in request.GET:
# Do the usual song and dance.
return super(SearchModelAdmin, self).changelist_view(request, extra_context)
# Do a search of just this model and populate a Changelist with the
# returned bits.
if not self.model in connections[self.haystack_connection].get_unified_index().get_indexed_models():
# Oops. That model isn't being indexed. Return the usual
# behavior instead.
return super(SearchModelAdmin, self).changelist_view(request, extra_context)
# So. Much. Boilerplate.
# Why copy-paste a few lines when you can copy-paste TONS of lines?
list_display = list(self.list_display)
kwargs = {
'haystack_connection': self.haystack_connection,
'request': request,
'model': self.model,
'list_display': list_display,
'list_display_links': self.list_display_links,
'list_filter': self.list_filter,
'date_hierarchy': self.date_hierarchy,
'search_fields': self.search_fields,
'list_select_related': self.list_select_related,
'list_per_page': self.list_per_page,
'list_editable': self.list_editable,
'model_admin': self
}
# Django 1.4 compatibility.
if hasattr(self, 'list_max_show_all'):
kwargs['list_max_show_all'] = self.list_max_show_all
changelist = SearchChangeList(**kwargs)
formset = changelist.formset = None
media = self.media
# Build the action form and populate it with available actions.
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
selection_note = ungettext('0 of %(count)d selected',
'of %(count)d selected', len(changelist.result_list))
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', changelist.result_count)
context = {
'module_name': force_text(self.model._meta.verbose_name_plural),
'selection_note': selection_note % {'count': len(changelist.result_list)},
'selection_note_all': selection_note_all % {'total_count': changelist.result_count},
'title': changelist.title,
'is_popup': changelist.is_popup,
'cl': changelist,
'media': media,
'has_add_permission': self.has_add_permission(request),
# More Django 1.4 compatibility
'root_path': getattr(self.admin_site, 'root_path', None),
'app_label': self.model._meta.app_label,
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': getattr(self, 'actions_selection_counter', 0),
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.admin_site.name)
app_name, model_name = get_model_ct_tuple(self.model)
return render_to_response(self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_name, model_name),
'admin/%s/change_list.html' % app_name,
'admin/change_list.html'
], context, context_instance=context_instance)
| [
"kolya.khokhlov@gmail.com"
] | kolya.khokhlov@gmail.com |
8bc29b5636fd7970d731f56428ec9f29064104af | 7caa438706a423dd9779a81f8345fcf1ec11e921 | /timeit/28_02/genetation_test_fix.py | bc470b672816ae1dc444d5d948b1b4ee741c916a | [] | no_license | tamarinvs19/python-learning | 5dd2582f5dc504e19a53e9176677adc5170778b0 | 1e514ad7ca8f3d2e2f785b11b0be4d57696dc1e9 | refs/heads/master | 2021-07-15T13:23:24.238594 | 2021-07-08T07:07:21 | 2021-07-08T07:07:21 | 120,604,826 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from random import randint, seed
m = 10000
def one_rf():
length = randint(0, m)
#print(1, length)
xs = []
for _ in range(length):
r = randint(0, m)
xs.append(r)
def two_rf():
length = randint(0, m)
#print(2, length)
xs = [randint(0, m) for _ in range(length)]
| [
"slavabarsuk@ya.ru"
] | slavabarsuk@ya.ru |
bb7f5783d40bb042630bb35a9e3b2378d09b7311 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /E9Wkppxyo763XywBe_24.py | c54745b649c7618965decf030eb38c9d949ece2e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,866 | py | """
A binary clock displays the time of day in binary format. Modern binary clocks
have six columns of lights; two for each of the hours, minutes and seconds.
The photo below shows a binary clock displaying the time "12:15:45":

The binary values increase from the bottom to the top row. Lights on the
bottom row have a value of 1, lights on the row above have a value of 2, then
4 on the row above that, and finally a value of 8 on the top row. Any 24-hour
time can be shown by switching on a certain combination of lights. For
example, to show the time "10:37:49":

You've decided to build your own binary clock, and you need to figure out how
to light each row of the clock to show the correct time. Given the time as a
string, return a `list` containing strings that shows the lights for each row
of the clock (top to bottom). Use "1" for on, and "0" for off. Leave a blank
space for any part of the row that doesn't require a light.
### Examples
binary_clock("10:37:49") ➞ [
" 0 0 1",
" 00110",
"001100",
"101101"
]
binary_clock("18:57:31") ➞ [
" 1 0 0",
" 01100",
"000110",
"101111"
]
binary_clock("10:50:22") ➞ [
" 0 0 0",
" 01000",
"000011",
"101000"
]
### Notes
See the **Resources** section for more information on binary clocks.
"""
def binary_clock(time):
class Column:
def __init__(self, n, light_num = 4):
self.n = n
self.ln = light_num
def display(self):
display_lights = {1: [1], 2: [2], 3: [1, 2], 4: [4], 5: [1, 4], 6: [2, 4], 7: [1, 2, 4], 8: [8], 9: [8, 1], 0: []}
val_indexes = {1: 0, 2: 1, 4: 2, 8: 3}
on = display_lights[self.n]
display_reversed = []
for n in range(self.ln):
off = True
for light in on:
if val_indexes[light] == n:
display_reversed.append('1')
off = False
break
if off == True:
display_reversed.append('0')
for n in range(4 - self.ln):
display_reversed.append(' ')
return list(reversed(display_reversed))
time = time.split(':')
hour = time[0]
mins = time[1]
secs = time[2]
h1 = int(hour[0])
h2 = int(hour[1])
m1 = int(mins[0])
m2 = int(mins[1])
s1 = int(secs[0])
s2 = int(secs[1])
c1 = Column(h1,2)
c2 = Column(h2)
c3 = Column(m1,3)
c4 = Column(m2)
c5 = Column(s1,3)
c6 = Column(s2)
rawdisplay = [c1.display(),c2.display(),c3.display(),c4.display(),c5.display(),c6.display()]
display = ['','','','']
for n in range(4):
for item in rawdisplay:
display[n] += item[n]
return display
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
795bc5ac4bfbb332f76a5e289c2b64859aecc897 | 52aca246e91ad6ff611a1db31893089db1782344 | /fluent_contents/tests/utils.py | 9eab0a404cb280c88311b7a29fea19af132145cd | [
"Apache-2.0"
] | permissive | si14/django-fluent-contents | a2b53ca09997ae2ba20c42e2bb706c177c38c3cc | 12d98390b8799d8568d90ca9359b30f49ed2eade | refs/heads/master | 2021-01-20T03:11:39.067625 | 2017-04-26T16:52:22 | 2017-04-26T16:52:22 | 89,505,219 | 0 | 0 | null | 2017-04-26T16:49:28 | 2017-04-26T16:49:28 | null | UTF-8 | Python | false | false | 3,997 | py | """
Utils for internal tests, and utils for testing third party plugins.
"""
from __future__ import print_function
import django
from future.builtins import str
from django.conf import settings
from django.core.management import call_command
from django.contrib.sites.models import Site
from django.test import TestCase
from fluent_utils.django_compat import get_user_model
import os
from fluent_contents import rendering
from fluent_contents.rendering.utils import get_dummy_request
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module # Python 2.6
__all__ = (
# Utils for testing third party plugins.
'render_content_items',
'get_dummy_request',
# For internal tests:
'AppTestCase',
)
def render_content_items(items, request=None, language=None, template_name=None, cachable=False):
"""
Render a content items with settings well suited for testing.
"""
if request is None:
request = get_dummy_request(language=language)
return rendering.render_content_items(request, items, template_name=template_name, cachable=cachable)
class AppTestCase(TestCase):
    """
    Tests for URL resolving.
    """
    # Superuser created once in setUpClass and shared by all tests.
    user = None
    # Apps that must be present in INSTALLED_APPS for these tests to run.
    install_apps = (
        'fluent_contents.tests.testapp',
        'fluent_contents.plugins.sharedcontent',
        'fluent_contents.plugins.picture',
        'fluent_contents.plugins.text',
    )

    @classmethod
    def setUpClass(cls):
        super(AppTestCase, cls).setUpClass()
        # Avoid early import, triggers AppCache
        User = get_user_model()

        if cls.install_apps:
            # When running this app via `./manage.py test fluent_pages`, auto install the test app + models.
            run_syncdb = False
            for appname in cls.install_apps:
                if appname not in settings.INSTALLED_APPS:
                    print('Adding {0} to INSTALLED_APPS'.format(appname))
                    settings.INSTALLED_APPS = (appname,) + tuple(settings.INSTALLED_APPS)
                    run_syncdb = True
                    testapp = import_module(appname)

                    # Flush caches
                    # Django < 1.9 keeps the app cache and the app-directories
                    # template dirs in module-level state that must be patched
                    # by hand; 1.9+ exposes a cached function to clear instead.
                    if django.VERSION < (1, 9):
                        from django.template.loaders import app_directories
                        from django.db.models import loading
                        loading.cache.loaded = False
                        app_directories.app_template_dirs += (
                            os.path.join(os.path.dirname(testapp.__file__), 'templates'),
                        )
                    else:
                        from django.template.utils import get_app_template_dirs
                        get_app_template_dirs.cache_clear()

            if run_syncdb:
                # Older Django spells the schema-creation command differently.
                if django.VERSION < (1, 7):
                    call_command('syncdb', verbosity=0)  # may run south's overlaid version
                else:
                    call_command('migrate', verbosity=0)

        # Create basic objects
        # 1.4 does not create site automatically with the defined SITE_ID, 1.3 does.
        Site.objects.get_or_create(id=settings.SITE_ID, defaults=dict(domain='django.localhost', name='django at localhost'))
        cls.user, _ = User.objects.get_or_create(is_superuser=True, is_staff=True, username="fluent-contents-admin")

    def assert200(self, url, msg_prefix=''):
        """
        Test that an URL exists.
        """
        if msg_prefix:
            msg_prefix += ": "
        self.assertEqual(self.client.get(url).status_code, 200, str(msg_prefix) + u"Page at {0} should be found.".format(url))

    def assert404(self, url, msg_prefix=''):
        """
        Test that an URL does not exist.
        """
        if msg_prefix:
            msg_prefix += ": "
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404, str(msg_prefix) + u"Page at {0} should return 404, got {1}.".format(url, response.status_code))
| [
"vdboor@edoburu.nl"
] | vdboor@edoburu.nl |
def biSearch(a, n):
    """Return an index of *n* in the sorted list *a*, or None if absent.

    Classic iterative binary search over the half-open range [left, right).
    Handles the empty list (returns None immediately).
    """
    left = 0
    right = len(a)
    while right > left:
        # Floor division keeps the midpoint an int without float rounding;
        # the original used int((right+left)/2) and printed debug state here,
        # including a print placed *after* the return (dead code) - removed.
        middle = (left + right) // 2
        if n == a[middle]:
            return middle
        elif n > a[middle]:
            left = middle + 1
        else:
            right = middle
    return None
# Simple stdin driver: each iteration reads a line of whitespace-separated
# integers (the sorted array) and a line with the target, then prints the
# result of biSearch.  EOF on stdin ends the loop.
while True:
    try:
        a = list(map(int,input().split()))
        n = int(input())
        print(biSearch(a, n))
    # `except(EOFError)` is legal but unconventional spelling of `except EOFError:`.
    except(EOFError):
        break
| [
"jklife3@gmail.com"
] | jklife3@gmail.com |
2d2faa48a176471eb9914d9d4d9a4c0fe96dfdb5 | 7a4a934ab01fe76243cfc1075af837d8a4659b96 | /makingQueries/wsgi.py | 67804b38c693a877cf8796983061b044b06cb3d2 | [] | no_license | ephremworkeye/makingQueries | 2cc464be56c1626425862aec3f191aedc054fa84 | 95f897690f05b918f54bc49c4667fc2952e7fd34 | refs/heads/master | 2023-07-03T13:40:53.538436 | 2021-08-01T08:01:55 | 2021-08-01T08:01:55 | 391,565,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for makingQueries project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'makingQueries.settings')
# The WSGI callable that application servers (e.g. gunicorn, uWSGI) import.
application = get_wsgi_application()
| [
"ephremworkeye@gmail.com"
] | ephremworkeye@gmail.com |
3b28b0eb18f4e471ce30f37864a27ccf145b8896 | 21a92e72448715510d509ab0ec07af37f388013a | /camouflage.py | 1c778ab4060a0a0105fd08ae3b58b681a0fd37b6 | [] | no_license | chlee1252/dailyLeetCode | 9758ad5a74997672129c91fb78ecc00092e1cf2a | 71b9e3d82d4fbb58e8c86f60f3741db6691bf2f3 | refs/heads/master | 2023-01-22T02:40:55.779267 | 2020-12-03T15:01:07 | 2020-12-03T15:01:07 | 265,159,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | from collections import defaultdict, Counter
from functools import reduce
def solution(clothes):
# Approach 1: use defaultdict
answer = 1 # Set init answer value as 1
hash = defaultdict(int)
for cloth in clothes:
hash[cloth[1]] += 1
for key, value in hash.items():
answer *= value + 1
return answer-1 # Remove init answer value
# Approach 2: Use Counter and reduce(function, iterable, init), Better and clean approach
hash = Counter([kind for name, kind in clothes])
return reduce(lambda x, y: x*(y+1), hash.values(), 1) - 1
| [
"chlee1252@gmail.com"
] | chlee1252@gmail.com |
0073eba180930fa02f31ad4bad81cc9fdd6be234 | 8830831a87f35ff2628f379d8230928ec6b5641a | /BNPParibas/code/xgb_naive_bayes.py | 550126dd6fde9b812eb51b8bd8c481cbbb2f4145 | [] | no_license | nickmcadden/Kaggle | e5882c9d68a81700d8d969328d91c059a0643868 | cbc5347dec90e4bf64d4dbaf28b8ffb362efc64f | refs/heads/master | 2019-07-18T08:09:40.683168 | 2018-01-26T14:35:38 | 2018-01-26T14:35:38 | 40,735,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,490 | py | import sys
import pandas as pd
import numpy as np
import scipy as sp
import xgboost as xgb
import data_naive_bayes as data
import argparse
import pickle as pkl
from scipy import stats
from sklearn.utils import shuffle
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
def log_loss(act, pred):
    """Vectorised binary cross-entropy (logloss).

    Predictions are clipped into [1e-15, 1 - 1e-15] so log(0) never occurs.

    Fix: the original called ``sp.maximum``/``sp.minimum``/``sp.log``/
    ``sp.subtract`` - NumPy aliases on the SciPy top-level namespace that
    were deprecated and have been removed from modern SciPy.  NumPy (already
    imported by this script as ``np``) is used directly instead.
    """
    epsilon = 1e-15
    pred = np.clip(np.asarray(pred, dtype=float), epsilon, 1 - epsilon)
    act = np.asarray(act, dtype=float)
    ll = np.sum(act * np.log(pred) + (1 - act) * np.log(1 - pred))
    return -ll / len(act)
# ---- Command-line configuration -------------------------------------------
parser = argparse.ArgumentParser(description='XGBoost for BNP')
parser.add_argument('-f','--n_features', help='Number of features', type=int, default=1000)
parser.add_argument('-n','--n_rounds', help='Number of Boost iterations', type=int, default=5000)
parser.add_argument('-e','--eta', help='Learning rate', type=float, default=0.002)
parser.add_argument('-r','--r_seed', help='Set random seed', type=int, default=3)
parser.add_argument('-b','--minbin', help='Minimum categorical bin size', type=int, default=1)
parser.add_argument('-ct','--cat_trans', help='Category transformation method', type=str, default='std')
parser.add_argument('-cv','--cv', action='store_true')
parser.add_argument('-codetest','--codetest', action='store_true')
parser.add_argument('-getcached', '--getcached', action='store_true')
parser.add_argument('-extra', '--extra', action='store_true')
m_params = vars(parser.parse_args())
# Load data
X, y, X_sub, ids = data.load(m_params)
print("BNP Parabas: classification...\n")
# Booster configuration shared by the CV and full-train paths.
xgb_param = {'silent' : 1, 'max_depth' : 10, 'eval_metric' : 'logloss', 'eta': m_params['eta'], 'min_child_weight': 3, 'objective': 'binary:logistic', 'subsample': 0.7, 'colsample_bytree': 0.5}
if m_params['cv']:
    # do cross validation scoring
    kf = KFold(X.shape[0], n_folds=4, shuffle=True, random_state=1)
    scr = np.zeros([len(kf)])
    # Out-of-bag predictions for the training rows, one slot per row.
    oob_pred = np.zeros(X.shape[0])
    # NOTE(review): the second dimension hard-codes 4 and must match n_folds above.
    sub_pred = np.zeros((X_sub.shape[0], 4))
    dtest = xgb.DMatrix(X_sub)
    for i, (tr_ix, val_ix) in enumerate(kf):
        dtrain = xgb.DMatrix(X[tr_ix], y[tr_ix])
        dval = xgb.DMatrix(X[val_ix], y[val_ix])
        # NOTE(review): evals is a tuple of [DMatrix, name] lists; xgboost's
        # documented form is a list of (DMatrix, name) tuples - confirm.
        clf = xgb.train(xgb_param, dtrain, m_params['n_rounds'], evals=([dtrain,'train'], [dval,'val']))
        pred = clf.predict(dval)
        oob_pred[val_ix] = np.array(pred)
        sub_pred[:,i] = clf.predict(dtest)
        scr[i] = log_loss(y[val_ix], np.array(pred))
        print('Train score is:', scr[i])
    # NOTE(review): Python 2 print statements below, mixed with function-style
    # print(...) calls elsewhere in this script.
    print np.mean(scr)
    print oob_pred[1:10]
    # Average the per-fold test predictions into one submission column.
    sub_pred = sub_pred.mean(axis=1)
    oob_pred_filename = '../output/oob_pred_xgblinearfeat' + str(np.mean(scr))
    sub_pred_filename = '../output/sub_pred_xgblinearfeat' + str(np.mean(scr))
    # NOTE(review): file handles opened inline here are never closed.
    pkl.dump(oob_pred, open(oob_pred_filename + '.p', 'wb'))
    pkl.dump(sub_pred, open(sub_pred_filename + '.p', 'wb'))
    preds = pd.DataFrame({"ID": ids, "PredictedProb": sub_pred})
    preds.to_csv(sub_pred_filename + '.csv', index=False)
else:
    # Train on full data
    dtrain = xgb.DMatrix(X,y)
    dtest = xgb.DMatrix(X_sub)
    clf = xgb.train(xgb_param, dtrain, m_params['n_rounds'], evals=([dtrain,'train'], [dtrain,'train']))
    pred = clf.predict(dtrain)
    print('Train score is:', log_loss(y, np.array(pred)))
    model_pathname = '../output/pred_xgb_' + str(m_params['n_rounds'])
    clf.save_model(model_pathname + '.model')
    pred = clf.predict(dtest)
    pkl.dump(pred, open(model_pathname + '.p', 'wb'))
    print("Saving Results.")
    preds = pd.DataFrame({"ID": ids, "PredictedProb": pred})
    preds.to_csv(model_pathname + '.csv', index=False)
| [
"nmcadden@globalpersonals.co.uk"
] | nmcadden@globalpersonals.co.uk |
6ade2b1fbe60d8188804988229f7f4671350eb1c | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Fisher/trend_Lag1Trend/cycle_0/ar_/test_artificial_32_Fisher_Lag1Trend_0__20.py | 5207f3b5c001ec81a647c5bcb91b30deaaa7fbae | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 265 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Build one artificial benchmark series: 32 daily points, Lag1 trend, no
# cycle, Fisher transform, zero noise, 20 exogenous series, AR order 0.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 0, transform = "Fisher", sigma = 0.0, exog_count = 20, ar_order = 0);
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
35a84d93d7da4a1fe22aa258dc364462324cfd8a | 49fa43ae11cd06f68efb65a9f59add168b205f29 | /python/132_palindrome-partitioning-II/palindromePartitioningII.py | 1431ee00dc62b2bbd014b72c2d5902c94368dd4c | [] | no_license | kfrancischen/leetcode | 634510672df826a2e2c3d7cf0b2d00f7fc003973 | 08500c39e14f3bf140db82a3dd2df4ca18705845 | refs/heads/master | 2021-01-23T13:09:02.410336 | 2019-04-17T06:01:28 | 2019-04-17T06:01:28 | 56,357,131 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | class Solution(object):
def minCut(self, s):
"""
:type s: str
:rtype: int
"""
n = len(s)
dp = [i-1 for i in range(n + 1)]
for i in range(1, n+1):
for j in range(i):
temp = s[j:i]
if temp == temp[::-1]:
dp[i] = min(dp[i], dp[j] + 1)
return dp[n]
# Ad-hoc smoke run; note the Python 2 print statement on the last line.
mytest = Solution()
s = 'aab'
print mytest.minCut(s)
| [
"kfrancischen@gmail.com"
] | kfrancischen@gmail.com |
84ec594f8624640b12420b289697b98165eb129d | f0a62605171bc62eb68dd884c77cf146657ec5cb | /library/f5bigip_ltm_monitor_soap.py | 82c1972e1d3fdc6839c472d393fc326641e76ce1 | [
"Apache-2.0"
] | permissive | erjac77/ansible-role-f5 | dd5cc32c4cc4c79d6eba669269e0d6e978314d66 | c45b5d9d5f34a8ac6d19ded836d0a6b7ee7f8056 | refs/heads/master | 2020-04-06T08:13:14.095083 | 2020-02-16T23:44:13 | 2020-02-16T23:44:13 | 240,129,047 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,822 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: f5bigip_ltm_monitor_soap
short_description: BIG-IP ltm monitor soap module
description:
- Configures a Simple Object Access Protocol (SOAP) monitor.
version_added: "1.0.0" # of erjac77.f5 role
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
debug:
description:
- Specifies whether the monitor sends error messages and additional information to a log file created and
labeled specifically for this monitor.
default: no
choices: ['no', 'yes']
defaults_from:
description:
- Specifies the type of monitor you want to use to create the new monitor.
default: soap
destination:
description:
- Specifies the IP address and service port of the resource that is the destination of this monitor.
expect_fault:
description:
- Specifies whether the value of the method option causes the monitor to expect a SOAP fault message.
default: no
choices: ['no', 'yes']
interval:
description:
- Specifies, in seconds, the frequency at which the system issues the monitor check when either the resource
is down or the status of the resource is unknown.
default: 5
manual_resume:
description:
- Specifies whether the system automatically changes the status of a resource to up at the next successful
monitor check.
default: disabled
choices: ['disabled', 'enabled']
method:
description:
- Specifies the method by which the monitor contacts the resource.
namespace:
description:
- Specifies the name space for the Web service you are monitoring, for example, http://example.com/.
parameter_name:
description:
- If the method has a parameter, specifies the name of that parameter.
parameter_type:
description:
- Specifies the parameter type.
default: bool
choices: ['bool', 'int', 'long', 'string']
parameter_value:
description:
- Specifies the value for the parameter.
password:
description:
- Specifies the password if the monitored target requires authentication.
protocol:
description:
- Specifies the protocol that the monitor uses to communicate with the target, http or https.
default: http
choices: ['http', 'https']
return_type:
description:
- ['bool', 'char', 'double', 'int', 'long', 'short', 'string']
default: bool
return_value:
description:
- Specifies the value for the returned parameter.
soap_action:
description:
- Specifies the value for the SOAPAction header.
default: ''
time_until_up:
description:
- Specifies the amount of time, in seconds, after the first successful response before a node is marked up.
default: 0
timeout:
description:
- Specifies the number of seconds the target has in which to respond to the monitor request.
default: 16
up_interval:
description:
- Specifies, in seconds, the frequency at which the system issues the monitor check when the resource is up.
default: 0
url_path:
description:
- Specifies the URL for the Web service that you are monitoring, for example, /services/myservice.aspx.
username:
description:
- Specifies the user name if the monitored target requires authentication.
extends_documentation_fragment:
- f5_common
- f5_app_service
- f5_description
- f5_name
- f5_partition
- f5_state
"""
EXAMPLES = """
- name: Create LTM Monitor SOAP
f5bigip_ltm_monitor_soap:
provider:
server: "{{ ansible_host }}"
server_port: "{{ http_port | default(443) }}"
user: "{{ http_user }}"
password: "{{ http_pass }}"
validate_certs: false
name: my_soap_monitor
partition: Common
description: My soap monitor
state: present
delegate_to: localhost
"""
RETURN = """ # """
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.erjac77.network.f5.common import F5_ACTIVATION_CHOICES
from ansible.module_utils.erjac77.network.f5.common import F5_NAMED_OBJ_ARGS
from ansible.module_utils.erjac77.network.f5.common import F5_POLAR_CHOICES
from ansible.module_utils.erjac77.network.f5.common import F5_PROVIDER_ARGS
from ansible.module_utils.erjac77.network.f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
    """Builds the AnsibleModule configuration for the SOAP-monitor module."""

    @property
    def argument_spec(self):
        """Option spec: module-specific options merged with the shared F5
        provider and named-object argument groups."""
        argument_spec = dict(
            app_service=dict(type="str"),
            debug=dict(type="str", choices=F5_POLAR_CHOICES),
            defaults_from=dict(type="str"),
            description=dict(type="str"),
            destination=dict(type="str"),
            expect_fault=dict(type="str", choices=F5_POLAR_CHOICES),
            interval=dict(type="int"),
            manual_resume=dict(type="str", choices=F5_ACTIVATION_CHOICES),
            method=dict(type="str"),
            namespace=dict(type="str"),
            parameter_name=dict(type="str"),
            parameter_type=dict(type="str", choices=["bool", "int", "long", "string"]),
            parameter_value=dict(type="str"),
            # no_log keeps the password out of Ansible's logs.
            password=dict(type="str", no_log=True),
            protocol=dict(type="str", choices=["http", "https"]),
            return_type=dict(
                type="str",
                choices=["bool", "char", "double", "int", "long", "short", "string"],
            ),
            return_value=dict(type="str"),
            soap_action=dict(type="str"),
            time_until_up=dict(type="int"),
            timeout=dict(type="int"),
            up_interval=dict(type="int"),
            url_path=dict(type="str"),
            username=dict(type="str"),
        )
        argument_spec.update(F5_PROVIDER_ARGS)
        argument_spec.update(F5_NAMED_OBJ_ARGS)
        return argument_spec

    @property
    def supports_check_mode(self):
        """This module supports Ansible's --check (dry-run) mode."""
        return True
class F5BigIpLtmMonitorSoap(F5BigIpNamedObject):
    """CRUD wrapper around the BIG-IP LTM SOAP monitor REST resource."""

    def _set_crud_methods(self):
        # All five operations live on the same `soap` resource endpoint.
        soap = self._api.tm.ltm.monitor.soaps.soap
        self._methods = {
            "create": soap.create,
            "read": soap.load,
            "update": soap.update,
            "delete": soap.delete,
            "exists": soap.exists,
        }
def main():
    """Ansible entry point: build the module, run the change, report back."""
    params = ModuleParams()
    module = AnsibleModule(
        argument_spec=params.argument_spec,
        supports_check_mode=params.supports_check_mode,
    )
    try:
        worker = F5BigIpLtmMonitorSoap(check_mode=module.check_mode, **module.params)
        module.exit_json(**worker.flush())
    except Exception as exc:
        # Surface any failure to Ansible as a module error message.
        module.fail_json(msg=str(exc))
# Run only when executed directly (as Ansible does), not when imported.
if __name__ == "__main__":
    main()
| [
"erjac77@gmail.com"
] | erjac77@gmail.com |
84c3509be2e56001daedb1739428049bbe0fb6a3 | 441b9d601c5e6b860a11bf579f97406bf6c2a2b9 | /tests/testServerManager.py | e4ace767c7e6e9a3c60aaf1ecb282ca7e9c24f3d | [
"MIT"
] | permissive | YunjeongLee/SBstoat | 7885d5604dbfd10efa79ad71823a6835e19c53c4 | 31b184176a7f19074c905db76e6e6ac8e4fc36a8 | refs/heads/master | 2023-04-14T21:48:41.499046 | 2021-04-15T18:48:00 | 2021-04-15T18:48:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,303 | py | # -*- coding: utf-8 -*-
"""
Created on March 23, 2021
@author: joseph-hellerstein
"""
import SBstoat._serverManager as sm
from SBstoat.logs import Logger
import multiprocessing
import numpy as np
import unittest
IGNORE_TEST = False
IS_PLOT = False
SIZE = 10
PRIME_SIZES = [5, 10, 15]
class PrimeFinder(sm.AbstractServer):
    """A work unit is number of primes to calculate."""

    def __init__(self, initialArgument, inputQ, outputQ, isException=False,
            logger=None):
        # Fix: the original accepted a `logger` argument but ignored it,
        # always passing a fresh Logger() to the base class.  Forward the
        # caller's logger and only build a default when none is supplied
        # (also avoids a single Logger() default shared across instances).
        if logger is None:
            logger = Logger()
        super().__init__(initialArgument, inputQ, outputQ, logger=logger)
        self.isException = isException

    @staticmethod
    def _isPrime(number, primes):
        """Return True if *number* is prime, given all smaller primes in
        ascending order in *primes* (trial division up to sqrt(number))."""
        if number < 2:
            return False
        maxNumber = np.sqrt(number)
        for prime in primes:
            if prime > maxNumber:
                return True
            if np.mod(number, prime) == 0:
                return False
        return True

    def runFunction(self, numPrime):
        """
        Calculates the specified number of prime numbers.

        Parameters
        ----------
        numPrime: int

        Returns
        -------
        np.array

        Raises
        ------
        RuntimeError
            When the server was constructed with isException=True (used by
            the tests to exercise the error path).
        """
        if self.isException:
            raise RuntimeError("Generated RuntimeError.")
        # Find primes until enough are accumulated
        primes = []
        num = 2
        while len(primes) < numPrime:
            if self._isPrime(num, primes):
                primes.append(num)
            num += 1
        return np.array(primes)
################## CLASSES BEING TESTED ##############
class TestAbstractConsumer(unittest.TestCase):
    """Exercises PrimeFinder directly and through its process lifecycle."""

    def setUp(self):
        self.inputQ = multiprocessing.Queue()
        self.outputQ = multiprocessing.Queue()
        self.finder = PrimeFinder(None, self.inputQ, self.outputQ)

    def testPrimeFinder(self):
        # Calls runFunction synchronously, without starting the process.
        if IGNORE_TEST:
            return
        primes = self.finder.runFunction(SIZE)
        self.assertEqual(len(primes), SIZE)

    def testRunNoException(self):
        # Full round trip: start the server process, submit one work unit,
        # and read the computed primes back from the output queue.
        if IGNORE_TEST:
            return
        server = PrimeFinder(None, self.inputQ, self.outputQ)
        server.start()
        self.inputQ.put(SIZE)
        result = self.outputQ.get()
        server.terminate()
        self.assertEqual(len(result), SIZE)

    def testRunWithException(self):
        # With isException=True the server reports failure by putting None
        # on the output queue instead of a result.
        if IGNORE_TEST:
            return
        server = PrimeFinder(None, self.inputQ, self.outputQ,
                             isException=True)
        server.start()
        self.inputQ.put(SIZE)
        result = self.outputQ.get()
        server.terminate()
        #self.inputQ.put(None)
        self.assertIsNone(result)
class TestConsumerlRunner(unittest.TestCase):
    """Exercises ServerManager driving a pool of PrimeFinder servers."""

    def _init(self):
        # One server per entry in PRIME_SIZES.
        self.manager = sm.ServerManager(PrimeFinder, PRIME_SIZES)

    def testConstructor(self):
        if IGNORE_TEST:
            return
        self._init()
        pids = [s.pid for s in self.manager.servers]
        self.assertEqual(len(pids), len(PRIME_SIZES))
        self.manager.stop()

    def testRunServers(self):
        # submit() fans the work units out and returns results in order.
        if IGNORE_TEST:
            return
        self._init()
        results = self.manager.submit(PRIME_SIZES)
        self.manager.stop()
        for result, size in zip(results, PRIME_SIZES):
            self.assertEqual(len(result), size)
# Allow running this test module directly with `python testServerManager.py`.
if __name__ == '__main__':
    unittest.main()
| [
"jlheller@uw.edu"
] | jlheller@uw.edu |
ddd615787593b009450780201e0cdfe624fdb6ad | 3d4318396b8433e36a144ea583158eb22807577a | /draft_sport/errors/error.py | ebe483be371232aa6ac701926151609590875d0f | [] | no_license | draft-sport/draft-sport-py | 245ee567a3ddaf41bbb99d03f043f90ad5b6a49c | 177fd438bc095cebcff5374bc6b1e815846617bb | refs/heads/master | 2023-03-11T16:56:29.415427 | 2021-03-01T09:59:46 | 2021-03-01T09:59:46 | 233,480,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | """
Draft Sport
Error Module
author: hugh@blinkybeach.com
"""
class DraftSportError(Exception):
    """Base exception for Draft Sport errors, carrying a human description."""

    def __init__(self, description: str) -> None:
        self._description = description
        # Fix: the original called super().__init__(self), passing the
        # exception instance itself as the Exception argument; pass the
        # description so str(error) is meaningful.
        super().__init__(description)
        return
| [
"hugh.jeremy@gmail.com"
] | hugh.jeremy@gmail.com |
890e70bce6655c521ab1f813287934355db57ca2 | 65b087722e7824abbb134e56ab0dad1982369a4d | /server/start_consuming.py | c05f39eee6e5e81844cd2eb01df3e97330585756 | [] | no_license | BabyCakes13/SMS | 3c35abf2aea7619a579af2bb708819676068f918 | db8b8cd05bd47ee99abcc8660453edf7fce1c7a1 | refs/heads/master | 2020-03-26T15:13:05.380050 | 2018-08-23T16:48:46 | 2018-08-23T16:48:46 | 145,028,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | """Module which takes the packets from the RabbitMQ and
stores them in the database."""
import json
import threading
import pika
from util import reader, strings
from server import database
class RabbitThread(threading.Thread):
    """Class which handles the packets from the RabbitMQ queue."""

    def __init__(self, app):
        """Initialises the connection between RabbitMQ queue and Flask server,
        in order to get the objects waiting in Rabbit queue and put them in
        the database."""
        threading.Thread.__init__(self)
        # Placeholder until connect() replaces it with a live channel.
        self.connection = False
        self.app = app
        self.connect()
        self.database = database.Database(self.app)

    def connect(self):
        """Connects to the RabbitMQ queue."""
        read = reader.Reader()
        # NOTE(review): get_c_value()[1] / [2] are presumably host and port --
        # confirm against util.reader's configuration layout.
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(
                read.get_c_value()[1], read.get_c_value()[2]))
        # Despite the name, self.connection holds the *channel*, not the
        # underlying pika connection object.
        self.connection = connection.channel()
        queue = strings.get_rabbit_queue()
        self.connection.queue_declare(queue=queue)

    def collect_packet(self, channel, method, properties, body):
        """Adds the packet collected from the RabbitMQ
        queue to the database."""
        # pika delivery callback signature; only `body` (JSON text) is used.
        self.database.add_pack(json.loads(body))
        print("added...")
        print(body)

    def run(self):
        """Starts the thread which consumes the objects
        from the RabbitMQ queue."""
        queue = strings.get_rabbit_queue()
        self.connection.basic_consume(self.collect_packet,
                                      queue=queue,
                                      no_ack=True)
        # Blocks this thread for the lifetime of the consumer.
        self.connection.start_consuming()
| [
"you@example.com"
] | you@example.com |
8baa1d5b6b25d9c56f6939f1e56070b151d51539 | 8f3336bbf7cd12485a4c52daa831b5d39749cf9b | /Python/total-hamming-distance.py | c67638d43a8288635242506fcc0908db31353a52 | [] | no_license | black-shadows/LeetCode-Topicwise-Solutions | 9487de1f9a1da79558287b2bc2c6b28d3d27db07 | b1692583f7b710943ffb19b392b8bf64845b5d7a | refs/heads/master | 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 | C++ | UTF-8 | Python | false | false | 405 | py | # Time: O(n)
# Space: O(1)
class Solution(object):
    def totalHammingDistance(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        For each of the 32 bit positions, every (zero-bit, one-bit) pair of
        numbers contributes exactly 1 to the total Hamming distance, so the
        per-bit contribution is zeros * ones.
        """
        result = 0
        # Fix: `range` instead of the Python-2-only `xrange`; `range` exists
        # on both Python 2 and 3, keeping the solution portable.
        for i in range(32):
            counts = [0] * 2
            for num in nums:
                counts[(num >> i) & 1] += 1
            result += counts[0] * counts[1]
        return result
| [
"noreply@github.com"
] | black-shadows.noreply@github.com |
9d6a5a5dcd5c8a9cf43c18f8cebde2d13d895a4d | 6d91104de3e00649659774e9ea27a6e01ddc1aae | /supervised_learning/0x0F-word_embeddings/0-bag_of_words.py | 1689de9bb859099c2fda01c1afea7a429c1a9197 | [
"MIT"
] | permissive | linkem97/holbertonschool-machine_learning | 07794bdd318323395f541d3568946ec52e7632da | 58c367f3014919f95157426121093b9fe14d4035 | refs/heads/master | 2023-01-24T13:04:54.642014 | 2020-11-24T00:37:58 | 2020-11-24T00:37:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | #!/usr/bin/env python3
"""This module has the function bag_of_words"""
from sklearn.feature_extraction.text import CountVectorizer
def bag_of_words(sentences, vocab=None):
    """Build a bag-of-words embedding for *sentences*.

    sentences is a list of sentences to analyze.
    vocab is a list of vocabulary words to restrict the analysis to; when
    None, every word appearing in the sentences is used.

    Returns (embeddings, features) where embeddings is a numpy.ndarray of
    shape (s, f) -- s sentences by f features -- and features is the list of
    feature names backing the columns.
    """
    cv = CountVectorizer(vocabulary=vocab)
    counts = cv.fit_transform(sentences)
    return counts.toarray(), cv.get_feature_names()
| [
"pauloan@hotmail.com"
] | pauloan@hotmail.com |
4622d9ee12f9a207db4e0ef4ad15d1eba124b5a7 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5640146288377856_1/Python/xyang/A.py | 7020507bdf0be002df2ee122f2d27c345aa2b207 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | import os
import math
import copy
import sys
from collections import *
# NOTE(review): working directory and input file are hard-coded to the
# author's machine; this is a one-shot Code Jam solver script.
os.chdir('/Users/Dana/Documents/0502')
f = open('A-large.in','r')
fo = open('A.out','w')
T = int(f.readline())
for ite in range(T):
    temp = str.split(f.readline())
    r,c,w = int(temp[0]),int(temp[1]),int(temp[2])
    #print(r,c,w)
    # First row costs floor(c/w) probes plus w-1 (or w) follow-ups depending
    # on whether w divides c.
    if r==1:
        if c%w==0:
            res = math.floor(c/w)+w-1
        else:
            res = math.floor(c/w)+w
    else:
        # NOTE(review): this branch repeats the r==1 arithmetic and then adds
        # (r-1)*floor(c/w); that term is 0 when r==1, so the two branches
        # appear mergeable -- confirm against the original submission.
        if c%w==0:
            res = math.floor(c/w)+w-1
        else:
            res = math.floor(c/w)+w
        res = res+(r-1)*math.floor(c/w)
    print(res)
    fo.write('Case #')
    fo.write(str(ite+1))
    fo.write(': ')
    fo.write(str(res))
    fo.write('\n')
# NOTE(review): the input handle `f` is never closed; only the output is.
fo.close()
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
c6cd28474478f73a3bc51fcb68b3d5beb42c4047 | f4e45e2f6a6c42571eefdc64773ca83c6b9c2b98 | /plugin/ffmpeg/__init__.py | 26a6e7be616deec5bdf3aa9046fff3a7812e6609 | [] | no_license | soju6jan2/sjva2_src_obfuscate | 83659707ca16d94378b7eff4d20e5e7ccf224007 | e2dd6c733bbf34b444362011f11b5aca2053aa34 | refs/heads/master | 2023-04-21T12:27:01.132955 | 2021-05-06T17:35:03 | 2021-05-06T17:35:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | from.plugin import blueprint,menu,plugin_load,plugin_unload,streaming_kill,get_video_info
from.logic import Status,Logic
from.interface_program_ffmpeg import Ffmpeg
# Created by pyminifier (https://github.com/liftoff/pyminifier)
| [
"cybersol@naver.com"
] | cybersol@naver.com |
7334b7f22281ae1338b4f47133b91bc70411827e | a7fa51726bae15b197b7bb6829acd5139b6e3073 | /feature_engineering/bl/steps/BackwardStepsDialogueFeatureEngineerImpl.py | 1c7b054857e9458e86ecca93b5dd0640eaf34b8b | [] | no_license | Rmsharks4/NLPEngine-opensource | ae375dc9ea364de793d78ab1c9d950718a11d54a | 7e384f6e13377723bb651130733a16ed53fe31d1 | refs/heads/master | 2023-02-27T17:45:38.592126 | 2021-02-01T20:59:16 | 2021-02-01T20:59:16 | 209,055,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,059 | py | from feature_engineering.utils.ActsUtils import ActsUtils
from feature_engineering.bl.steps.StepsDialogueFeatureEngineerImpl import StepsDialogueFeatureEngineerImpl
from feature_engineering.bl.intents import *
from feature_engineering.bl.intents.AbstractDialogueIntent import AbstractDialogueIntent
class BackwardStepsDialogueFeatureEngineerImpl(StepsDialogueFeatureEngineerImpl):
    """Backward-pass step engineer over per-utterance intent sequences.

    NOTE(review): this block was recovered from a whitespace-mangled dump;
    the statement nesting inside ``stepdown`` is a best-effort reconstruction
    and should be verified against version control.
    """

    def __init__(self):
        super().__init__()
        # Declare every concrete AbstractDialogueIntent subclass as a
        # required input column.
        self.config_pattern.properties.req_data = [[x.__name__ for x in AbstractDialogueIntent.__subclasses__()]]

    def steps(self, args):
        """Apply `stepdown` to each required intent column present in *args*.

        Returns a list with one slot per utterance of filtered intents, or
        None when no required column was found in *args*.
        """
        res = None
        for req_data in self.config_pattern.properties.req_data:
            for data in req_data:
                if data in args:
                    if res is None:
                        # One result slot per utterance in the column.
                        res = [None] * len(args[data])
                    BackwardStepsDialogueFeatureEngineerImpl.stepdown({
                        data: args[data],
                        ActsUtils.__name__: args[ActsUtils.__name__]
                    }, data, res)
        return res

    @staticmethod
    def stepdown(args, name, res):
        """Accumulate, per utterance, the intents not marked for deletion.

        An entry containing the configured response act
        (``args[ActsUtils.__name__].resp``) is added to ``dels`` when
        `prev_match` accepts the portion preceding that act.
        NOTE(review): `.find` implies the entries are strings, yet they are
        also iterated element-wise below -- confirm the element type.
        """
        i = 0
        dels = []
        prev = None
        for intents in args[name]:
            if intents is not None and args[ActsUtils.__name__].resp in intents:
                if BackwardStepsDialogueFeatureEngineerImpl.prev_match(
                        intents[:-(len(intents) - intents.find(args[ActsUtils.__name__].resp))],
                        prev, args[ActsUtils.__name__].resp):
                    dels.append(intents)
                if res[i] is None:
                    res[i] = [intent for intent in intents if intent not in dels]
                else:
                    res[i].extend([intent for intent in intents if intent not in dels])
            i += 1
            prev = intents

    @staticmethod
    def prev_match(match, arr, resp):
        """Return False when any earlier entry contains *match* but not *resp*.

        NOTE(review): `arr` is iterated unconditionally; on the first call it
        receives the initial ``prev`` value (None), which would raise a
        TypeError -- confirm the intended behaviour.
        """
        look = False
        for prevint in arr:
            if match in prevint and resp not in prevint:
                look = True
        if look:
            return False
        return True
| [
"Github12"
] | Github12 |
2bb7a17bad94a40501802812427b35990374e9f6 | 63bcca68dc75b77b737542925062effe37bc13c8 | /fabfile/common.py | 2e96fa7881425042dc6fc5ef658c350dcb0d0d33 | [] | no_license | bekbossyn/dictionary | 35b48b4a7b000156a52b8039917299fed0ac9284 | 1a9824ca9b450086a1c348c5f9fff0f967e9a282 | refs/heads/master | 2020-05-20T00:35:26.802693 | 2019-05-11T09:40:37 | 2019-05-11T09:40:37 | 185,290,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | from fabric.decorators import task
from fabric.operations import sudo, run
@task
def git_pull():
    """
    Updates the repository
    """
    # Fabric `run` executes the command on the remote host as the
    # connecting (non-root) user.
    run("cd /home/development/dictionary && git pull origin master")
# @task
# def celery_logs():
# """
# Updates the repository
# """
# sudo("tail -f /var/log/celery/belka.log")
@task
def update_supervisor():
    """Reload supervisor's configuration (`supervisorctl update`)."""
    # Earlier celery-related steps kept for reference:
    # sudo("cp ~/{}/configs/supervisor/celery.conf /etc/supervisor/conf.d".format(env.repo_name))
    # sudo("supervisorctl reread; supervisorctl restart celery; supervisorctl restart celerybeat; supervisorctl restart flower; supervisorctl update; supervisorctl status celery")
    sudo("supervisorctl update")
@task
def update():
    """
    Restarts the server
    """
    # Refresh the checkout/environment, then bounce the app server, the web
    # server, and finally supervisor-managed processes.
    run("cd /home/development/dictionary/ && . ./run.sh")
    sudo("systemctl restart gunicorn")
    sudo("systemctl restart nginx")
    update_supervisor()
"bekbossyn.kassymkhan@gmail.com"
] | bekbossyn.kassymkhan@gmail.com |
48f6953f51bce07b48d04d081601460c628880bf | 856e9a8afcb81ae66dd998b0d2cc3556c9f315ea | /dexy/commands/parsers.py | 2ee73bab171b219f6839db2ac0a1c9c5dc0e2b59 | [
"MIT"
] | permissive | dexy/dexy | 1d5c999830de4663c05a09f4cd00b1628dfc8d46 | 323c1806e51f75435e11d2265703e68f46c8aef3 | refs/heads/develop | 2023-06-10T08:02:45.076551 | 2021-02-28T22:40:41 | 2021-02-28T22:40:41 | 1,506,989 | 141 | 34 | MIT | 2020-06-15T17:44:50 | 2011-03-21T14:48:28 | Python | UTF-8 | Python | false | false | 990 | py | from dexy.utils import defaults
from dexy.commands.utils import dummy_wrapper
from dexy.parser import AbstractSyntaxTree
from dexy.parser import Parser
def parsers_command():
    """Print a help listing for every available parser and its aliases."""
    wrapper = dummy_wrapper()
    tree = AbstractSyntaxTree(wrapper)
    seen = set()
    for alias in sorted(Parser.plugins):
        # Skip aliases already covered by a previously printed parser.
        if alias in seen:
            continue
        parser = Parser.create_instance(alias, tree, wrapper)
        seen.update(parser.aliases)
        print("%s Parser" % parser.__class__.__name__)
        print('')
        print(parser.setting('help'))
        print('')
        print("aliases:")
        for name in parser.aliases:
            print(" %s" % name)
        print('')
    print("Default parsers are: " + defaults['parsers'])
    print('')
    print("Dexy will only look for config files to parse in the root directory")
    print("of your project unless --recurse is specified.")
    print('')
"ana@ananelson.com"
] | ana@ananelson.com |
f5ee7e5132c1a686ff3d2503c9dadfdd7382a6fc | eb5857487dff2655a0228a9ca7024b54e6b1e061 | /solutions/2022/kws/aoc_2022_kws/day_14.py | 7dc12f45d0103d2220c1646b839de1e1b3b41af8 | [
"MIT"
] | permissive | SocialFinanceDigitalLabs/AdventOfCode | 8690db87dedb2898db37704c6fcf8526f7ea8d2e | 4af7c27f1eb514ed805a402dc4635555e495bd1c | refs/heads/main | 2023-02-19T16:23:12.195354 | 2022-12-28T09:54:50 | 2022-12-28T09:54:50 | 159,806,963 | 2 | 4 | MIT | 2023-08-30T00:02:24 | 2018-11-30T10:25:06 | Jupyter Notebook | UTF-8 | Python | false | false | 5,379 | py | from contextlib import contextmanager
from enum import Enum
from typing import Generator, List, Text
import click
from aoc_2022_kws.cli import main
from aoc_2022_kws.config import config
from rich.live import Live
class CoordinateType(Enum):
ROCK = "#"
SAND = "o"
UNREACHABLE = "-"
class Coordinate:
def __init__(self, *args, type: CoordinateType):
if len(args) > 0 and isinstance(args[0], str):
x, y = args[0].split(",", 1)
self.x = int(x)
self.y = int(y)
else:
self.x = int(args[0])
self.y = int(args[1])
self.type = type
def __repr__(self):
return f"{self.__class__.__name__}({self.x}, {self.y}, type={self.type})"
def parse_structures(line) -> List[Coordinate]:
vertices = line.split(" -> ")
vertices = [Coordinate(v, type=CoordinateType.ROCK) for v in vertices]
points = []
for ix, v in enumerate(vertices[1:]):
dx = v.x - vertices[ix].x
dy = v.y - vertices[ix].y
steps = max(abs(dx), abs(dy))
dx = dx // steps
dy = dy // steps
for step in range(steps + 1):
points.append(
Coordinate(
vertices[ix].x + dx * step, vertices[ix].y + dy * step, type=v.type
)
)
return points
def animate_sand(
structures: List[Coordinate], sand, floor=0
) -> Generator[Coordinate, None, None]:
cave_map = {(s.x, s.y) for s in structures}
y_max = floor if floor else max([c.y for c in structures])
if (sand.x, sand.y) in cave_map:
return
yield sand
while sand.y <= y_max:
possible = [
(sand.x, sand.y + 1),
(sand.x - 1, sand.y + 1),
(sand.x + 1, sand.y + 1),
]
if floor:
possible = [p for p in possible if p[1] <= floor]
available = [p for p in possible if p not in cave_map]
if available:
c = available[0]
sand = Coordinate(c[0], c[1], type=CoordinateType.SAND)
yield sand
else:
return None
@contextmanager
def show_map():
with Live() as live:
def display(structures: List[Coordinate]):
x_min = min([c.x for c in structures])
x_max = max([c.x for c in structures])
y_min = min([c.y for c in structures])
y_max = max([c.y for c in structures])
output_data = ""
map = {(s.x, s.y): s.type for s in structures}
for y in range(y_min, y_max + 1):
for x in range(x_min, x_max + 1):
s = map.get((x, y))
output_data += s.value if s else "."
output_data += "\n"
live.update(Text(output_data))
yield display
@main.command()
@click.option("--sample", "-s", is_flag=True)
def day14(sample):
if sample:
input_data = (config.SAMPLE_DIR / "day14.txt").read_text()
else:
input_data = (config.USER_DIR / "day14.txt").read_text()
starting_structures = [
c for struct in input_data.splitlines() for c in parse_structures(struct)
]
structures = list(starting_structures)
y_max = max([c.y for c in structures])
start_point = Coordinate(500, 0, type=CoordinateType.SAND)
path = list(animate_sand(structures, start_point))
with show_map() as display:
while path[-1].y <= y_max:
structures.append(path[-1])
display(structures)
path = list(animate_sand(structures, start_point))
print("PART 1", len([c for c in structures if c.type == CoordinateType.SAND]))
structures = list(starting_structures)
y_max += 1
path = list(
animate_sand(
structures, Coordinate(500, 0, type=CoordinateType.SAND), floor=y_max
)
)
while path and path[-1].y <= y_max:
structures.append(path[-1])
path = list(animate_sand(structures, start_point, floor=y_max))
with show_map() as display:
display(structures)
print("PART 2", len([c for c in structures if c.type == CoordinateType.SAND]))
## I suspect there may be a much quicker way to do this by mapping the areas that can't be filled.
## The main shape is basically a triangle, but we remove all solid areas as well as those that are unreachable
## A block is unreachable if it has an unreachable block above it from x-1 to x+1
structures = list(starting_structures)
y_max = max([c.y for c in structures])
cave_map = {(s.x, s.y): s for s in structures}
for y in range(y_max + 2):
x_min = min([c.x for c in cave_map.values()])
x_max = max([c.x for c in cave_map.values()])
for x in range(x_min, x_max + 1):
blockers = {(x - 1, y - 1), (x, y - 1), (x + 1, y - 1)}
if blockers & cave_map.keys() == blockers:
if (x, y) not in cave_map:
cave_map[(x, y)] = Coordinate(x, y, type=CoordinateType.UNREACHABLE)
with show_map() as display:
display(list(cave_map.values()))
print(f"There are {len(cave_map)} unreachable blocks")
h = y_max + 2
area = h**2
print(
f"The whole pyramid is {h} blocks high, so there are {area} blocks in the pyramid"
)
print(f"So the fillable area is {area - len(cave_map)}")
| [
"kaj@k-si.com"
] | kaj@k-si.com |
af46777b4d6e2311448c22027c268d417315ad5e | d897c2bc4ba9a84e7e8a2fe3e998d78cd116f920 | /conta_multipli/att/conta_multipli_template_sol.py | ec1a507bb856a8d52ddd89b12cfcc45080cbd11a | [] | no_license | romeorizzi/problemsCMS_for_LaboProg | 8907622744bc89752391024f24025a7e9706501b | 027b1b204efe602461e93d8b1c194a09eb6526cd | refs/heads/master | 2020-04-03T17:33:52.384915 | 2020-03-25T07:10:17 | 2020-03-25T07:10:17 | 155,449,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | # -*- coding: utf-8 -*-
# Template di soluzione per il problema conta_multipli
from __future__ import print_function
import sys
if sys.version_info < (3, 0):
input = raw_input # in python2, l'equivalente di input è raw_input
# INIZIO area entro la quale ti consigliamo di operare.
# Ecco la funzione che spetta a tè di implementare come da consegna dell'esercizio (ove credi puoi articolarla e scomporla su ulteriori funzioni e/o introdurre strutture dati di supporto):
def conta_multipli(a, b, c):
return 42
# FINE area entro la quale ti consigliamo di operare.
# Lettura input: all'esame non è il caso tu modifichi il codice sotto questa riga.
a, b, c = map(int, input().strip().split())
print(conta_multipli(a, b, c))
| [
"romeo.rizzi@univr.it"
] | romeo.rizzi@univr.it |
bdf57ae8440dbf93e1bc6eddc1f9701310ee943d | ac56934de4f66f5ad56193209f5fd669e1d34167 | /holecard_handicapper/model/sample_sin_fitting/data_reader.py | 1042c1c482cb3b23c983288c670e8e4550bc0ded | [] | no_license | ishikota/_HoleCardHandicapper | 54a4310c32956baecfa21bee6c72007da091c25b | 6a0674404698f22e208ca0e4c0a870ff0f34f2dd | refs/heads/master | 2021-05-03T07:52:18.077756 | 2016-06-27T07:21:07 | 2016-06-27T07:21:07 | 60,923,502 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | import numpy as np
def load_data(file_path, test_ratio):
num_training, num_testing, sigma, seed = 400, 100, 0.3, 0
def sin(x):
return np.sin(x)*3+1
N = num_training + num_testing
tau=4*np.pi
np.random.seed(seed)
X = np.random.random((N,1))*tau
Y = sin(X)+np.random.normal(0,sigma,(N,1))
I = np.arange(N)
np.random.shuffle(I)
training, testing = I[:num_training], I[num_training:]
return (X[training], Y[training]), (X[testing], Y[testing])
| [
"ishikota086@gmail.com"
] | ishikota086@gmail.com |
2f5f17015373e413acca75e653c2af857f16dd4d | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res_bw/scripts/common/lib/json/tests/test_fail.py | 62e191830eb6c23db772e05ba90fd4cca92e450c | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,304 | py | # 2015.11.10 21:36:30 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/json/tests/test_fail.py
from json.tests import PyTest, CTest
JSONDOCS = ['"A JSON payload should be an object or array, not a string."',
'["Unclosed array"',
'{unquoted_key: "keys must be quoted"}',
'["extra comma",]',
'["double extra comma",,]',
'[ , "<-- missing value"]',
'["Comma after the close"],',
'["Extra close"]]',
'{"Extra comma": true,}',
'{"Extra value after close": true} "misplaced quoted value"',
'{"Illegal expression": 1 + 2}',
'{"Illegal invocation": alert()}',
'{"Numbers cannot have leading zeroes": 013}',
'{"Numbers cannot be hex": 0x14}',
'["Illegal backslash escape: \\x15"]',
'[\\naked]',
'["Illegal backslash escape: \\017"]',
'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
'{"Missing colon" null}',
'{"Double colon":: null}',
'{"Comma instead of colon", null}',
'["Colon instead of comma": false]',
'["Bad value", truth]',
"['single quote']",
'["\ttab\tcharacter\tin\tstring\t"]',
'["tab\\ character\\ in\\ string\\ "]',
'["line\nbreak"]',
'["line\\\nbreak"]',
'[0e]',
'[0e+]',
'[0e+-1]',
'{"Comma instead if closing brace": true,',
'["mismatch"}',
u'["A\x1fZ control characters in string"]']
SKIPS = {1: 'why not have a string payload?',
18: "spec doesn't specify any nesting limitations"}
class TestFail(object):
def test_failures(self):
for idx, doc in enumerate(JSONDOCS):
idx = idx + 1
if idx in SKIPS:
self.loads(doc)
continue
try:
self.loads(doc)
except ValueError:
pass
else:
self.fail('Expected failure for fail{0}.json: {1!r}'.format(idx, doc))
def test_non_string_keys_dict(self):
data = {'a': 1,
(1, 2): 2}
self.assertRaises(TypeError, self.dumps, data)
self.assertRaises(TypeError, self.dumps, data, indent=True)
class TestPyFail(TestFail, PyTest):
pass
class TestCFail(TestFail, CTest):
pass
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\json\tests\test_fail.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:36:30 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
6c2ffeac03adb70f34b7c11bb93bedc336920023 | 0822d36728e9ed1d4e91d8ee8b5ea39010ac9371 | /robo/pages/acre/pagina20_politica.py | 03057626a23722d2c1352c9539c8933c4e907622 | [] | no_license | diegothuran/blog | 11161e6f425d08bf7689190eac0ca5bd7cb65dd7 | 233135a1db24541de98a7aeffd840cf51e5e462e | refs/heads/master | 2022-12-08T14:03:02.876353 | 2019-06-05T17:57:55 | 2019-06-05T17:57:55 | 176,329,704 | 0 | 0 | null | 2022-12-08T04:53:02 | 2019-03-18T16:46:43 | Python | UTF-8 | Python | false | false | 911 | py | # coding: utf-8
import sys
sys.path.insert(0, '../../../blog')
from bs4 import BeautifulSoup
import requests
from robo.pages.util.constantes import PAGE_LIMIT
GLOBAL_RANK = 1306322
RANK_BRAZIL = None
NAME = 'pagina20.net'
def get_urls():
try:
urls = []
for i in range(1,PAGE_LIMIT):
if(i == 1):
link = 'http://pagina20.net/v2/category/politica/'
else:
link = 'http://pagina20.net/v2/category/politica/page/' + str(i)
req = requests.get(link)
noticias = BeautifulSoup(req.text, "html.parser").find_all('div', class_='card painel-noticias2')
for noticia in noticias:
href = noticia.find_all('a', href=True)[0]['href']
# print(href)
urls.append(href)
return urls
except:
raise Exception('Exception in pagina20_politica')
| [
"diego.thuran@gmail.com"
] | diego.thuran@gmail.com |
8194dad6dd0e415b6afedc496cab4a6f3c488433 | 924763dfaa833a898a120c411a5ed3b2d9b2f8c7 | /compiled/python/zlib_with_header_78.py | 8eef421ed619a14bff098297e52e60abc6bd9cd0 | [
"MIT"
] | permissive | kaitai-io/ci_targets | 31257dfdf77044d32a659ab7b8ec7da083f12d25 | 2f06d144c5789ae909225583df32e2ceb41483a3 | refs/heads/master | 2023-08-25T02:27:30.233334 | 2023-08-04T18:54:45 | 2023-08-04T18:54:45 | 87,530,818 | 4 | 6 | MIT | 2023-07-28T22:12:01 | 2017-04-07T09:44:44 | C++ | UTF-8 | Python | false | false | 744 | py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
# type: ignore
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import zlib
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class ZlibWithHeader78(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self._raw_data = self._io.read_bytes_full()
self.data = zlib.decompress(self._raw_data)
| [
"kaitai-bot@kaitai.io"
] | kaitai-bot@kaitai.io |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.