| column | dtype | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
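The last four columns are statistics over `content`. As a minimal sketch of how they relate to the text, assuming the conventional definitions (size in bytes, per-line character counts, a character-level alphanumeric fraction); the `row` dict below is hypothetical and simply keyed by the column names in the table:

```python
# Sketch only: recompute the content-derived columns for one hypothetical row.
def derived_columns(row: dict) -> dict:
    content = row["content"]
    lines = content.splitlines() or [""]
    return {
        "size": len(content.encode("utf-8")),                       # assumed: bytes of content
        "avg_line_length": sum(len(l) for l in lines) / len(lines),
        "max_line_length": max(len(l) for l in lines),
        "alphanum_fraction": (
            sum(c.isalnum() for c in content) / len(content) if content else 0.0
        ),
    }

if __name__ == "__main__":
    print(derived_columns({"content": "import numpy as np\nprint(np.pi)\n"}))
```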
hexsha: 0fd3f38639b58bfaa2d13a8e15df20cc8f1aedbf | size: 1,675 | ext: py | lang: Python
max_stars: path=keckcode/esiredux/esi1d.py, repo=cdfassnacht/keck_code, head=a952b3806b3e64eef70deec2b2d1352e6ef6dfa0, licenses=["MIT"], count=null, stars_event=null to null
max_issues: path=keckcode/esiredux/esi1d.py, repo=cdfassnacht/keck_code, head=a952b3806b3e64eef70deec2b2d1352e6ef6dfa0, licenses=["MIT"], count=null, issues_event=null to null
max_forks: path=keckcode/esiredux/esi1d.py, repo=cdfassnacht/keck_code, head=a952b3806b3e64eef70deec2b2d1352e6ef6dfa0, licenses=["MIT"], count=1, forks_event=2020-07-15T23:16:36.000Z to 2020-07-15T23:16:36.000Z
content:
import numpy as np
from astropy.table import Table
from specim.specfuncs import echelle1d
"""
============================== Esi1d class ==============================
"""
class Esi1d(echelle1d.Ech1d):
"""
A class for ESI 1D spectra, which have been extracted by the Esi2d
methods, but have not yet been combined into one final output spectrum.
Therefore, there are 10 extracted 1d spectra, one for each order.
These 10 extracted spectra will be stored in an array of Spec1d instances.
"""
def __init__(self, inspec, informat='text', summary=True, verbose=True):
"""
Initializes an Esi1d instance, essentially by initializing an
Ech1d instance with the ESI order information
"""
"""
Define the information pertaining to the ESI echelle orders
Note that the pixmin and pixmax are not used at this point since
any trimming of the orders should have been done in previous
steps.
"""
dtype = [('order', int), ('pixmin', int), ('pixmax', int)]
oinfo = np.array([
(1, 0, -1),
(2, 0, -1),
(3, 0, -1),
(4, 0, -1),
(5, 0, -1),
(6, 0, -1),
(7, 0, -1),
(8, 0, -1),
(9, 0, -1),
(10, 0, -1),
], dtype=dtype)
ordinfo = Table(oinfo)
# ordinfo = None
""" Initialize by calling the parent class """
super(Esi1d, self).__init__(inspec, informat=informat, ordinfo=ordinfo,
summary=summary, verbose=verbose)
avg_line_length: 31.603774 | max_line_length: 79 | alphanum_fraction: 0.519403
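For orientation, a rough usage sketch of the `Esi1d` class above, using only the constructor signature shown in the sample; the import path is inferred from the repo path, the file name is made up, and neither `keckcode` nor `specim` is verified here:

```python
# Hypothetical usage of the Esi1d sample above (not taken from the source repo).
from keckcode.esiredux.esi1d import Esi1d  # import path inferred from keckcode/esiredux/esi1d.py

# 'myobj_esi_1d.txt' is a placeholder for an already-extracted ESI spectrum file.
spec = Esi1d('myobj_esi_1d.txt', informat='text', summary=True, verbose=True)
# Per the docstring, the object now holds 10 extracted Spec1d spectra, one per ESI order,
# still to be combined into a single output spectrum in a later step.
```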
hexsha: 6542bb7cfb613ccd399cc99b96409fbf75f0166c | size: 347 | ext: py | lang: Python
max_stars: path=python/example_code/iam/iam_basics/test/conftest.py, repo=gabehollombe-aws/aws-doc-sdk-examples, head=dfc0e06ebe1762ab127f3ef5f425507644c6a99c, licenses=["Apache-2.0"], count=5,166, stars_event=2016-09-02T08:48:38.000Z to 2022-03-31T19:12:43.000Z
max_issues: path=python/example_code/iam/iam_basics/test/conftest.py, repo=gabehollombe-aws/aws-doc-sdk-examples, head=dfc0e06ebe1762ab127f3ef5f425507644c6a99c, licenses=["Apache-2.0"], count=1,186, issues_event=2016-09-28T23:05:19.000Z to 2022-03-31T18:07:47.000Z
max_forks: path=python/example_code/iam/iam_basics/test/conftest.py, repo=gabehollombe-aws/aws-doc-sdk-examples, head=dfc0e06ebe1762ab127f3ef5f425507644c6a99c, licenses=["Apache-2.0"], count=4,003, forks_event=2016-08-29T19:51:40.000Z to 2022-03-31T16:40:02.000Z
content:
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Contains common test fixtures used to run AWS Identity and Access Management (IAM)
tests.
"""
import sys
# This is needed so Python can find test_tools on the path.
sys.path.append('../../..')
from test_tools.fixtures.common import *
avg_line_length: 26.692308 | max_line_length: 82 | alphanum_fraction: 0.740634
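The `conftest.py` above makes a shared `test_tools` package importable by appending a relative parent directory to `sys.path`, then star-imports its fixtures. A small, runnable illustration of that path trick (directory layout and names here are illustrative, not the AWS repo's):

```python
# Illustration of the sys.path pattern used in the conftest above.
import os
import sys

# Resolve the ancestor directory that holds the shared helpers (three levels up here).
parent = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
if parent not in sys.path:
    sys.path.append(parent)

# Any package living directly under `parent` is now importable from the tests.
print(parent in sys.path)  # True
```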
hexsha: 0621d34561a1d8ea0b4e9dca5a8889df597c87e3 | size: 505 | ext: py | lang: Python
max_stars: path=logging/logger_mongo.py, repo=flyhigher139/flask_example, head=9a109d10616f6c63e8b3e0f119e9ec7c3c90d703, licenses=["MIT"], count=3, stars_event=2016-09-10T15:00:36.000Z to 2020-05-27T09:14:18.000Z
max_issues: path=logging/logger_mongo.py, repo=flyhigher139/flask_example, head=9a109d10616f6c63e8b3e0f119e9ec7c3c90d703, licenses=["MIT"], count=null, issues_event=null to null
max_forks: path=logging/logger_mongo.py, repo=flyhigher139/flask_example, head=9a109d10616f6c63e8b3e0f119e9ec7c3c90d703, licenses=["MIT"], count=4, forks_event=2017-08-31T02:00:22.000Z to 2021-01-13T05:20:22.000Z
content:
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import logging
from log4mongo.handlers import MongoHandler
logger = logging.getLogger('mongo_example')
mon = MongoHandler(host='localhost', database_name='mongo_logs')
mon.setLevel(logging.WARNING)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
logger.addHandler(mon)
logger.addHandler(ch)
logger.debug('debug message')
logger.info('info message')
logger.warn('warn message')
logger.error('error message')
logger.critical('critical message')
avg_line_length: 21.956522 | max_line_length: 64 | alphanum_fraction: 0.768317
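One behavioural note on the sample above: the two handlers get levels (WARNING for Mongo, ERROR for the stream handler), but the logger itself is never given one, so its effective level falls back to the root default of WARNING and the `debug`/`info` calls are discarded before any handler sees them. A standard-library-only sketch of that interaction (no MongoDB needed):

```python
import logging

logger = logging.getLogger('level_demo')
handler = logging.StreamHandler()
handler.setLevel(logging.WARNING)
logger.addHandler(handler)

logger.info('dropped: effective logger level defaults to WARNING')

# To see lower-severity records, lower both the logger and the handler levels.
logger.setLevel(logging.DEBUG)
handler.setLevel(logging.INFO)
logger.info('emitted: the logger and the handler now both allow INFO')
```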
hexsha: e0b235e1818e2eba00700c075e90262a048ad565 | size: 2,934 | ext: py | lang: Python
max_stars: path=model_basics.py, repo=kondiz/casme, head=52cd5769bb7c1089934dd528a331afaf3882eff2, licenses=["BSD-3-Clause"], count=79, stars_event=2018-06-05T21:26:55.000Z to 2022-03-12T13:44:15.000Z
max_issues: path=model_basics.py, repo=kondiz/casme, head=52cd5769bb7c1089934dd528a331afaf3882eff2, licenses=["BSD-3-Clause"], count=1, issues_event=2019-10-30T12:51:54.000Z to 2019-11-06T11:51:28.000Z
max_forks: path=model_basics.py, repo=kondiz/casme, head=52cd5769bb7c1089934dd528a331afaf3882eff2, licenses=["BSD-3-Clause"], count=13, forks_event=2018-06-06T03:31:11.000Z to 2021-03-09T11:38:17.000Z
content:
import numpy as np
import scipy.ndimage
import torch
import torch.nn.functional as F
import archs
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def load_model(casm_path):
name = casm_path.split('/')[-1].replace('.chk','')
print("\n=> Loading model localized in '{}'".format(casm_path))
classifier = archs.resnet50shared()
checkpoint = torch.load(casm_path)
classifier.load_state_dict(checkpoint['state_dict_classifier'])
classifier.eval().to(device)
decoder = archs.decoder()
decoder.load_state_dict(checkpoint['state_dict_decoder'])
decoder.eval().to(device)
print("=> Model loaded.")
return {'classifier': classifier, 'decoder': decoder, 'name': name}
def get_masks_and_check_predictions(input, target, model):
with torch.no_grad():
input, target = torch.tensor(input), torch.tensor(target)
mask, output = get_mask(input, model, get_output=True)
rectangular = binarize_mask(mask.clone())
for id in range(mask.size(0)):
if rectangular[id].sum() == 0:
continue
rectangular[id] = get_rectangular_mask(rectangular[id].squeeze().numpy())
target = target.to(device)
_, max_indexes = output.data.max(1)
isCorrect = target.eq(max_indexes)
return mask.squeeze().cpu().numpy(), rectangular.squeeze().cpu().numpy(), isCorrect.cpu().numpy()
def get_mask(input, model, get_output=False):
with torch.no_grad():
input = input.to(device)
output, layers = model['classifier'](input)
if get_output:
return model['decoder'](layers), output
return model['decoder'](layers)
def binarize_mask(mask):
with torch.no_grad():
avg = F.avg_pool2d(mask, 224, stride=1).squeeze()
flat_mask = mask.cpu().view(mask.size(0), -1)
binarized_mask = torch.zeros_like(flat_mask)
for i in range(mask.size(0)):
kth = 1 + int((flat_mask[i].size(0) - 1) * (1 - avg[i].item()) + 0.5)
th, _ = torch.kthvalue(flat_mask[i], kth)
th.clamp_(1e-6, 1 - 1e-6)
binarized_mask[i] = flat_mask[i].gt(th).float()
binarized_mask = binarized_mask.view(mask.size())
return binarized_mask
def get_largest_connected(m):
mask, num_labels = scipy.ndimage.label(m)
largest_label = np.argmax(np.bincount(
mask.reshape(-1), weights=m.reshape(-1)))
largest_connected = (mask == largest_label)
return largest_connected
def get_bounding_box(m):
x = m.any(1)
y = m.any(0)
xmin = np.argmax(x)
xmax = np.argmax(np.cumsum(x))
ymin = np.argmax(y)
ymax = np.argmax(np.cumsum(y))
with torch.no_grad():
box_mask = torch.zeros(224, 224).to(device)
box_mask[xmin:xmax+1, ymin:ymax+1] = 1
return box_mask
def get_rectangular_mask(m):
return get_bounding_box(get_largest_connected(m))
avg_line_length: 32.241758 | max_line_length: 106 | alphanum_fraction: 0.639741
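In `binarize_mask` above, each soft mask is thresholded so that roughly its own mean fraction of pixels survives: the per-image mean (the full-window `avg_pool2d`) fixes how many pixels to keep, `torch.kthvalue` finds the cut-off value, and everything above it becomes 1. A small standalone sketch of that kthvalue thresholding on toy-sized masks (shapes and names here are illustrative, not the repo's):

```python
import torch

torch.manual_seed(0)
masks = torch.rand(2, 1, 8, 8)            # two soft masks in [0, 1] (toy size, not 224x224)
flat = masks.view(masks.size(0), -1)

binary = torch.zeros_like(flat)
for i in range(flat.size(0)):
    keep_frac = flat[i].mean().item()      # plays the role of the avg_pool2d mean above
    # 1-based rank of the largest value that is still excluded from the kept set
    kth = 1 + int((flat[i].numel() - 1) * (1 - keep_frac) + 0.5)
    threshold, _ = torch.kthvalue(flat[i], kth)
    binary[i] = flat[i].gt(threshold).float()

binary = binary.view_as(masks)
print(binary.mean(dim=(1, 2, 3)))          # kept fraction per image, close to keep_frac
```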
hexsha: 59c5276f5d38df4166a83b84b5b9460bf8a6f727 | size: 1,759 | ext: py | lang: Python
max_stars: path=unit/test_bootstrap_vshard.py, repo=FizikRoot/ansible-cartridge, head=ad06411ec701b68fbf5b8ed5e184a47ffb0ac70f, licenses=["BSD-2-Clause"], count=17, stars_event=2019-09-02T15:31:56.000Z to 2022-03-29T18:49:59.000Z
max_issues: path=unit/test_bootstrap_vshard.py, repo=FizikRoot/ansible-cartridge, head=ad06411ec701b68fbf5b8ed5e184a47ffb0ac70f, licenses=["BSD-2-Clause"], count=171, issues_event=2019-10-24T15:34:34.000Z to 2022-03-29T09:18:46.000Z
max_forks: path=unit/test_bootstrap_vshard.py, repo=FizikRoot/ansible-cartridge, head=ad06411ec701b68fbf5b8ed5e184a47ffb0ac70f, licenses=["BSD-2-Clause"], count=14, forks_event=2019-12-23T08:27:06.000Z to 2021-07-06T15:53:49.000Z
content:
import sys
import unittest
import module_utils.helpers as helpers
from unit.instance import Instance
sys.modules['ansible.module_utils.helpers'] = helpers
from library.cartridge_bootstrap_vshard import bootstrap_vshard
def call_bootstrap_vshard(console_sock):
return bootstrap_vshard({
'console_sock': console_sock,
})
class TestBootstrapVshard(unittest.TestCase):
def setUp(self):
self.instance = Instance()
self.console_sock = self.instance.console_sock
self.cookie = self.instance.cluster_cookie
self.instance.start()
def test_can_not_bootstrap_vshard(self):
self.instance.set_variable('can_bootstrap_vshard', False)
res = call_bootstrap_vshard(self.console_sock)
self.assertFalse(res.failed, msg=res.msg)
self.assertFalse(res.changed)
self.assertEqual(len(self.instance.get_calls('bootstrap_vshard')), 0)
def test_bootstrap_successfully(self):
self.instance.set_variable('can_bootstrap_vshard', True)
res = call_bootstrap_vshard(self.console_sock)
self.assertFalse(res.failed, msg=res.msg)
self.assertTrue(res.changed)
self.assertEqual(len(self.instance.get_calls('bootstrap_vshard')), 1)
def test_bootstrap_fails(self):
self.instance.set_variable('can_bootstrap_vshard', True)
self.instance.set_fail_on('bootstrap_vshard')
res = call_bootstrap_vshard(self.console_sock)
self.assertTrue(res.failed)
self.assertIn('Vshard bootstrap failed', res.msg)
self.assertIn('cartridge err', res.msg)
self.assertEqual(len(self.instance.get_calls('bootstrap_vshard')), 1)
def tearDown(self):
self.instance.stop()
del self.instance
avg_line_length: 30.859649 | max_line_length: 77 | alphanum_fraction: 0.715179
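The tests above depend on the repo's own `Instance` stub, which records calls like `bootstrap_vshard` and exposes switches such as `can_bootstrap_vshard`. As a rough analogue of the same assertion style using only the standard library (this swaps in `unittest.mock` and does not reflect the cartridge or Tarantool APIs):

```python
import unittest
from unittest import mock


class TestCallCountPattern(unittest.TestCase):
    """Check a changed flag together with how often a call was made."""

    def test_bootstrap_called_once_when_allowed(self):
        backend = mock.Mock()
        backend.can_bootstrap.return_value = True

        # The code under test would normally live in its own module; inlined here.
        changed = False
        if backend.can_bootstrap():
            backend.bootstrap()
            changed = True

        self.assertTrue(changed)
        self.assertEqual(backend.bootstrap.call_count, 1)


if __name__ == '__main__':
    unittest.main()
```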
hexsha: 08c8c27acd0761df667428a39c3da702ec3d50f8 | size: 12,704 | ext: py | lang: Python
max_stars: path=char_gen.py, repo=turpenar/dion, head=48ce4e664441c107e39293ebc4aae4d1e88faf13, licenses=["0BSD"], count=null, stars_event=null to null
max_issues: path=char_gen.py, repo=turpenar/dion, head=48ce4e664441c107e39293ebc4aae4d1e88faf13, licenses=["0BSD"], count=null, issues_event=null to null
max_forks: path=char_gen.py, repo=turpenar/dion, head=48ce4e664441c107e39293ebc4aae4d1e88faf13, licenses=["0BSD"], count=null, forks_event=null to null
content:
import tkinter as tk
import pathlib as pathlib
import pickle as pickle
import player as player
import world as world
global terminal_output
def link_terminal(terminal):
global terminal_output
terminal_output = terminal
class General(tk.Frame):
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.first_name_label = tk.Label(self, text="First Name")
self.first_name_label.grid(row=0, column=0)
self.first_name_entry = tk.Entry(self, width=25)
self.first_name_entry.grid(row=1, column=0)
self.last_name_label = tk.Label(self, text="Last Name")
self.last_name_label.grid(row=2, column=0)
self.last_name_entry = tk.Entry(self, width=25)
self.last_name_entry.grid(row=3, column=0)
self.gender_label = tk.Label(self, text="Gender")
self.gender_label.grid(row=4, column=0)
self.genderVar = tk.StringVar(self.parent)
gender_choices = ['None', 'Female', 'Male']
self.genderVar.set('None')
self.gender_entry = tk.OptionMenu(self, self.genderVar, *gender_choices)
self.gender_entry.grid(row=5, column=0)
class Profession(tk.Frame):
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.profession_label = tk.Label(self, text="Profession")
self.profession_label.grid(row=0, column=0)
self.professionVar = tk.StringVar(self.parent)
profession_choices = ['None', 'Clairvoyant', 'Enchanter', 'Illusionist', 'Paladin', 'Ranger', 'Rogue', 'Inyanga', 'Warror']
self.professionVar.set('None')
self.profession_entry = tk.OptionMenu(self, self.professionVar, *profession_choices)
self.profession_entry.grid(row=1, column=0)
class Stats(tk.Frame):
def __init__(self, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.all_entries = {}
self.remainingVar = tk.IntVar()
self.remainingVar.set(528)
self.remaining_points_label = tk.Label(self, text="Remaining Points = ")
self.remaining_points_label.grid(row=0, column=0)
self.remaining_points = tk.Label(self, textvariable=self.remainingVar)
self.remaining_points.grid(row=0, column=1)
self.strength_label = tk.Label(self, text="Strength")
self.strength_label.grid(row=1, column=0)
validatecommand = (self.register(self.check_entry), '%d', '%i', '%P', '%s', '%S')
self.strength_entry = tk.Entry(self, width=8, validate='key', validatecommand=validatecommand)
self.strength_entry.grid(row=1, column=1)
self.all_entries['Strength'] = self.strength_entry
self.constitution_label = tk.Label(self, text="Constitution")
self.constitution_label.grid(row=2, column=0)
self.constitution_entry = tk.Entry(self, width=8, validate='key', validatecommand=validatecommand)
self.constitution_entry.grid(row=2, column=1)
self.all_entries['Constitution'] = self.constitution_entry
self.dexterity_label = tk.Label(self, text="Dexterity")
self.dexterity_label.grid(row=3, column=0)
self.dexterity_entry = tk.Entry(self, width=8, validate='key', validatecommand=validatecommand)
self.dexterity_entry.grid(row=3, column=1)
self.all_entries['Dexterity'] = self.dexterity_entry
self.agility_label = tk.Label(self, text="Agility")
self.agility_label.grid(row=4, column=0)
self.agility_entry = tk.Entry(self, width=8, validate='key', validatecommand=validatecommand)
self.agility_entry.grid(row=4, column=1)
self.all_entries['Agility'] = self.agility_entry
self.intelligence_label = tk.Label(self, text="Intelligence")
self.intelligence_label.grid(row=1, column=3)
self.intelligence_entry = tk.Entry(self, width=8, validate='key', validatecommand=validatecommand)
self.intelligence_entry.grid(row=1, column=4)
self.all_entries['Intelligence'] = self.intelligence_entry
self.wisdom_label = tk.Label(self, text="Wisdom")
self.wisdom_label.grid(row=2, column=3)
self.wisdom_entry = tk.Entry(self, width=8, validate='key', validatecommand=validatecommand)
self.wisdom_entry.grid(row=2, column=4)
self.all_entries['Wisdom'] = self.wisdom_entry
self.logic_label = tk.Label(self, text="Logic")
self.logic_label.grid(row=3, column=3)
self.logic_entry = tk.Entry(self, width=8, validate='key', validatecommand=validatecommand)
self.logic_entry.grid(row=3, column=4)
self.all_entries['Logic'] = self.logic_entry
self.spirit_label = tk.Label(self, text="Spirit")
self.spirit_label.grid(row=4, column=3)
self.spirit_entry = tk.Entry(self, width=8, validate='key', validatecommand=validatecommand)
self.spirit_entry.grid(row=4, column=4)
self.all_entries['Spirit'] = self.spirit_entry
def check_entry(self, action_type, index, value_post, value_prior, addition):
if value_prior == '':
value_prior = 0
if value_post == '':
val_post = 0
val_prior = int(value_prior)
val_change = val_post - val_prior
self.reset_remaining(val_change)
return True
if not value_post.isdigit():
return False
val_post = int(value_post)
val_prior = int(value_prior)
        if val_post < 0 or val_post > 100:
return False
val_change = val_post - val_prior
if val_change > self.remainingVar.get():
return False
self.reset_remaining(val_change)
return True
def reset_remaining(self, change):
old_remaining = self.remainingVar.get()
new_remaining = old_remaining - change
self.remainingVar.set(new_remaining)
self.master.update_idletasks()
class CharacterGenerator:
def __init__(self, parent, character_created_var):
self.parent = parent
self.frame = tk.Frame(self.parent)
self.character_created_var = character_created_var
self.general = General(self.parent)
self.general.grid(row=0, column=0)
self.profession = Profession(self.parent)
self.profession.grid(row=1, column=0)
self.stats = Stats(self.parent)
self.stats.grid(row=3, column=0)
self.label = tk.Label(self.frame)
self.label.grid(row=4, column=0)
self.button1 = tk.Button(self.frame, text="Create Character", command=self.create_character)
self.button1.grid(row=5, column=0)
self.frame.grid()
def create_character(self):
total_stats = 0
available_stat_points = 528
base_training_points = 25
if self.general.first_name_entry.get() == '':
self.popupmsg("Please create a first name.")
return
if self.general.last_name_entry.get() == '':
self.popupmsg("Please create a last name.")
return
if self.general.genderVar.get() == 'None':
self.popupmsg("You need to select a gender.")
return
if self.profession.professionVar.get() == 'None':
self.popupmsg("You need to select a profession.")
return
for entry in self.stats.all_entries:
if not self.stats.all_entries[entry].get():
self.popupmsg(entry + " has no value!")
return
total_stats += int(self.stats.all_entries[entry].get())
if total_stats != available_stat_points:
self.popupmsg("Your stats total does not equal " + str(available_stat_points) + ".")
return
world.load_tiles()
player.create_character('new_player')
player.character.name = self.general.first_name_entry.get()
player.character.first_name = self.general.first_name_entry.get()
player.character.last_name = self.general.last_name_entry.get()
player.character.gender = self.general.genderVar.get()
player.character.profession = self.profession.professionVar.get()
player.character.strength = int(self.stats.strength_entry.get())
player.character.constitution = int(self.stats.constitution_entry.get())
player.character.dexterity = int(self.stats.dexterity_entry.get())
player.character.agility = int(self.stats.agility_entry.get())
player.character.intelligence = int(self.stats.intelligence_entry.get())
player.character.wisdom = int(self.stats.wisdom_entry.get())
player.character.logic = int(self.stats.logic_entry.get())
player.character.spirit = int(self.stats.spirit_entry.get())
player.character.physical_points = round(base_training_points + (player.character.strength
+ player.character.constitution
+ player.character.dexterity
+ player.character.agility) / 20, 0)
player.character.mental_points = round(base_training_points + (player.character.intelligence
+ player.character.wisdom
+ player.character.logic
+ player.character.spirit) / 20, 0)
player.character.save()
terminal_output.print_text('''
You have created {} {}
<Press Enter>
\
'''.format(player.character.first_name,
player.character.last_name))
self.character_created_var.set(True)
self.close_window()
def popupmsg(self, msg):
self.popup = tk.Tk()
self.popup.wm_title("Whoops!")
label = tk.Label(self.popup, text=msg)
label.pack(side="top", fill="x", pady=10)
B1 = tk.Button(self.popup, text="Okay", command=self.popup.destroy)
B1.pack()
self.popup.mainloop()
def close_window(self):
self.parent.destroy()
class CharacterLoader:
def __init__(self, parent, character_created_var):
self.parent = parent
self.frame = tk.Frame(self.parent)
self.character_created_var = character_created_var
self.saved_characters = []
self.gender_label = tk.Label(self.frame, text="Select a character")
self.gender_label.grid(row=1, column=0)
self.charVar = tk.StringVar(self.frame)
self.char_choices = self.get_characters()
self.charVar.set("Choose Character")
self.gender_entry = tk.OptionMenu(self.frame, self.charVar, *self.char_choices)
self.gender_entry.grid(row=2, column=0)
self.label = tk.Label(self.frame)
self.label.grid(row=3, column=0)
self.button1 = tk.Button(self.frame, text="Load Character", command=self.load_character)
self.button1.grid(row=4, column=0)
self.frame.grid()
def load_character(self):
char_name = self.charVar.get()
world.load_tiles()
for char_data in self.saved_characters:
if char_data['first_name'] == char_name:
player.create_character("new_player")
player.character.load(state=char_data)
terminal_output.print_text('''
You have loaded {} {}
*** Type HELP for a list of commands available to you. Type HELP <command> for assistance with a particular
command usage. ***
<Press Enter>
\
'''.format(player.character.first_name,
player.character.last_name))
self.character_created_var.set(True)
self.close_window()
def get_characters(self):
path_load = pathlib.Path.cwd() / 'Profiles'
filenames = path_load.glob('*.p')
        for filename in filenames:
            path_load_file = path_load / filename
            # Use a context manager so each profile file is closed after loading
            with open(path_load_file.absolute().as_posix(), 'rb') as f:
                self.saved_characters.append(pickle.load(f))
saved_character_names = []
for character in self.saved_characters:
saved_character_names.append(character['first_name'])
return saved_character_names
def close_window(self):
self.parent.destroy()
avg_line_length: 37.037901 | max_line_length: 131 | alphanum_fraction: 0.623898
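`Stats.check_entry` above is a Tkinter `validatecommand` callback: it is wrapped with `self.register(...)` together with the `%d %i %P %s %S` percent substitutions, runs on every keystroke, and vetoes the edit by returning False. A minimal self-contained sketch of that mechanism that restricts an Entry to integers from 0 to 100 (illustrative only, not part of the game code):

```python
import tkinter as tk


def check_entry(proposed, prior):
    """validatecommand callback: %P is the would-be text, %s the current text."""
    if proposed == '':
        return True                       # allow clearing the field
    if not proposed.isdigit():
        return False                      # reject non-numeric keystrokes
    return 0 <= int(proposed) <= 100      # keep the value in range


if __name__ == '__main__':
    root = tk.Tk()
    vcmd = (root.register(check_entry), '%P', '%s')
    entry = tk.Entry(root, width=8, validate='key', validatecommand=vcmd)
    entry.pack(padx=10, pady=10)
    root.mainloop()
```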
hexsha: 804ff3b804987b14f0561b15872a534bfae36db1 | size: 5,976 | ext: py | lang: Python
max_stars: path=jasmin/bin/dlrlookupd.py, repo=s4mu3lbk/jasmin, head=ed37cd7258b1cebc3c27543e33f6e14eaf58e5c7, licenses=["Apache-2.0"], count=1, stars_event=2021-06-09T15:26:31.000Z to 2021-06-09T15:26:31.000Z
max_issues: path=jasmin/bin/dlrlookupd.py, repo=s4mu3lbk/jasmin, head=ed37cd7258b1cebc3c27543e33f6e14eaf58e5c7, licenses=["Apache-2.0"], count=1, issues_event=2020-11-20T12:41:08.000Z to 2020-11-20T12:41:08.000Z
max_forks: path=jasmin/bin/dlrlookupd.py, repo=s4mu3lbk/jasmin, head=ed37cd7258b1cebc3c27543e33f6e14eaf58e5c7, licenses=["Apache-2.0"], count=1, forks_event=2021-09-01T19:07:41.000Z to 2021-09-01T19:07:41.000Z
content:
#!/usr/bin/python
import os
import signal
import sys
import syslog
import ntpath
from lockfile import FileLock, LockTimeout, AlreadyLocked
from twisted.internet import reactor, defer
from twisted.python import usage
from jasmin.managers.configs import DLRLookupConfig
from jasmin.managers.dlr import DLRLookup
from jasmin.queues.configs import AmqpConfig
from jasmin.queues.factory import AmqpFactory
from jasmin.redis.client import ConnectionWithConfiguration
from jasmin.redis.configs import RedisForJasminConfig
ROOT_PATH = os.getenv('ROOT_PATH', '/')
CONFIG_PATH = os.getenv('CONFIG_PATH', '%s/etc/jasmin/' % ROOT_PATH)
class Options(usage.Options):
optParameters = [
['config', 'c', '%s/dlrlookupd.cfg' % CONFIG_PATH,
'Jasmin dlrlookupd configuration file'],
['id', 'i', 'master',
'Daemon id, need to be different for each dlrlookupd daemon'],
]
optFlags = [
]
class DlrlookupDaemon:
def __init__(self, opt):
self.options = opt
self.components = {}
@defer.inlineCallbacks
def startRedisClient(self):
"""Start AMQP Broker"""
RedisForJasminConfigInstance = RedisForJasminConfig(self.options['config'])
self.components['rc'] = yield ConnectionWithConfiguration(RedisForJasminConfigInstance)
# Authenticate and select db
if RedisForJasminConfigInstance.password is not None:
yield self.components['rc'].auth(RedisForJasminConfigInstance.password)
yield self.components['rc'].select(RedisForJasminConfigInstance.dbid)
def stopRedisClient(self):
"""Stop AMQP Broker"""
return self.components['rc'].disconnect()
def startAMQPBrokerService(self):
"""Start AMQP Broker"""
AMQPServiceConfigInstance = AmqpConfig(self.options['config'])
self.components['amqp-broker-factory'] = AmqpFactory(AMQPServiceConfigInstance)
self.components['amqp-broker-factory'].preConnect()
# Add service
self.components['amqp-broker-client'] = reactor.connectTCP(
AMQPServiceConfigInstance.host,
AMQPServiceConfigInstance.port,
self.components['amqp-broker-factory'])
def stopAMQPBrokerService(self):
"""Stop AMQP Broker"""
return self.components['amqp-broker-client'].disconnect()
@defer.inlineCallbacks
def startDLRLookupService(self):
"""Start DLRLookup"""
DLRLookupConfigInstance = DLRLookupConfig(self.options['config'])
# This is a separate process: do not log to same log_file as Jasmin sm-listener
# Refs #629
DLRLookupConfigInstance.log_file = '%s/dlrlookupd-%s' % ntpath.split(DLRLookupConfigInstance.log_file)
self.components['dlrlookup'] = DLRLookup(DLRLookupConfigInstance, self.components['amqp-broker-factory'],
self.components['rc'])
yield self.components['dlrlookup'].subscribe()
@defer.inlineCallbacks
def start(self):
"""Start Dlrlookupd daemon"""
syslog.syslog(syslog.LOG_INFO, "Starting Dlrlookup Daemon ...")
########################################################
# Connect to redis server
try:
yield self.startRedisClient()
except Exception as e:
syslog.syslog(syslog.LOG_ERR, " Cannot start RedisClient: %s" % e)
else:
syslog.syslog(syslog.LOG_INFO, " RedisClient Started.")
########################################################
# Start AMQP Broker
try:
yield self.startAMQPBrokerService()
yield self.components['amqp-broker-factory'].getChannelReadyDeferred()
except Exception as e:
syslog.syslog(syslog.LOG_ERR, " Cannot start AMQP Broker: %s" % e)
else:
syslog.syslog(syslog.LOG_INFO, " AMQP Broker Started.")
try:
# [optional] Start DLR Lookup
self.startDLRLookupService()
except Exception as e:
syslog.syslog(syslog.LOG_ERR, " Cannot start DLRLookup: %s" % e)
else:
syslog.syslog(syslog.LOG_INFO, " DLRLookup Started.")
@defer.inlineCallbacks
def stop(self):
"""Stop Dlrlookup daemon"""
syslog.syslog(syslog.LOG_INFO, "Stopping Dlrlookup Daemon ...")
if 'amqp-broker-client' in self.components:
yield self.stopAMQPBrokerService()
syslog.syslog(syslog.LOG_INFO, " AMQP Broker disconnected.")
if 'rc' in self.components:
yield self.stopRedisClient()
syslog.syslog(syslog.LOG_INFO, " RedisClient stopped.")
reactor.stop()
def sighandler_stop(self, signum, frame):
"""Handle stop signal cleanly"""
syslog.syslog(syslog.LOG_INFO, "Received signal to stop Jasmin DlrlookupDaemon")
return self.stop()
if __name__ == '__main__':
lock = None
try:
options = Options()
options.parseOptions()
# Must not be executed simultaneously (c.f. #265)
lock = FileLock("/tmp/dlrlookupd-%s" % options['id'])
# Ensure there are no paralell runs of this script
lock.acquire(timeout=2)
# Prepare to start
ja_d = DlrlookupDaemon(options)
# Setup signal handlers
signal.signal(signal.SIGINT, ja_d.sighandler_stop)
signal.signal(signal.SIGTERM, ja_d.sighandler_stop)
# Start DlrlookupDaemon
ja_d.start()
reactor.run()
except usage.UsageError as errortext:
print('%s: %s' % (sys.argv[0], errortext))
print('%s: Try --help for usage details.' % (sys.argv[0]))
except LockTimeout:
print("Lock not acquired ! exiting")
except AlreadyLocked:
        print("There's another instance of dlrlookupd running, exiting.")
finally:
# Release the lock
if lock is not None and lock.i_am_locking():
lock.release()
avg_line_length: 34.543353 | max_line_length: 113 | alphanum_fraction: 0.635877
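The daemon above prevents parallel runs by taking a `lockfile.FileLock` on `/tmp/dlrlookupd-<id>` with a 2 second timeout and releasing it in `finally` only if it still owns it. The same single-instance guard in isolation, using only the `lockfile` calls already present in the sample (the lock path below is made up; the third-party `lockfile` package is required, as in the sample):

```python
from lockfile import FileLock, LockTimeout, AlreadyLocked

lock = FileLock('/tmp/my-daemon')   # illustrative lock path
try:
    lock.acquire(timeout=2)         # give up if another run already holds the lock
    print('lock acquired, doing work ...')
except LockTimeout:
    print('Lock not acquired within the timeout, exiting')
except AlreadyLocked:
    print('Another instance is already running, exiting')
finally:
    if lock.i_am_locking():
        lock.release()
```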
hexsha: ab54430129d0ef44fb637109a49c6b34a5b8ceca | size: 1,271 | ext: py | lang: Python
max_stars: path=sdk/python/kfp/v2/compiler_cli_tests/test_data/pipeline_with_reused_component.py, repo=tomar27/pipelines, head=fc6a2761b3770cb3b854115b841c1a50876665c3, licenses=["Apache-2.0"], count=1, stars_event=2021-11-22T12:22:14.000Z to 2021-11-22T12:22:14.000Z
max_issues: path=sdk/python/kfp/v2/compiler_cli_tests/test_data/pipeline_with_reused_component.py, repo=tomar27/pipelines, head=fc6a2761b3770cb3b854115b841c1a50876665c3, licenses=["Apache-2.0"], count=4, issues_event=2020-06-07T18:45:09.000Z to 2021-02-12T16:03:58.000Z
max_forks: path=sdk/python/kfp/v2/compiler_cli_tests/test_data/pipeline_with_reused_component.py, repo=tomar27/pipelines, head=fc6a2761b3770cb3b854115b841c1a50876665c3, licenses=["Apache-2.0"], count=1, forks_event=2020-10-29T04:24:27.000Z to 2020-10-29T04:24:27.000Z
content:
# Copyright 2020 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from kfp.v2 import components
from kfp.v2 import compiler
from kfp.v2 import dsl
test_data_dir = pathlib.Path(__file__).parent / 'component_yaml'
add_op = components.load_component_from_file(
str(test_data_dir / 'add_component.yaml'))
@dsl.pipeline(name='add-pipeline', pipeline_root='dummy_root')
def my_pipeline(
a: int = 2,
b: int = 5,
):
first_add_task = add_op(a, 3)
second_add_task = add_op(first_add_task.outputs['sum'], b)
third_add_task = add_op(second_add_task.outputs['sum'], 7)
if __name__ == '__main__':
compiler.Compiler().compile(
pipeline_func=my_pipeline,
package_path=__file__.replace('.py', '.json'))
avg_line_length: 31.775 | max_line_length: 74 | alphanum_fraction: 0.736428
hexsha: e40e7999b64d153918e89ea1bddd1a2a9a2fe4c5 | size: 80,336 | ext: py | lang: Python
max_stars: path=Lib/test/test_asyncio/test_base_events.py, repo=eldipa/cpython, head=0d6bd1ca7c683137d52041194f3a2b02219f225a, licenses=["0BSD"], count=5, stars_event=2021-06-22T19:41:37.000Z to 2022-01-03T18:49:33.000Z
max_issues: path=Lib/test/test_asyncio/test_base_events.py, repo=eldipa/cpython, head=0d6bd1ca7c683137d52041194f3a2b02219f225a, licenses=["0BSD"], count=50, issues_event=2020-01-07T19:11:11.000Z to 2022-03-01T14:40:03.000Z
max_forks: path=Lib/test/test_asyncio/test_base_events.py, repo=thomboroboto/cpyth, head=7375b42fe8ac3562f5179ca5a6edcffda578ce35, licenses=["0BSD"], count=4, forks_event=2018-07-13T08:20:36.000Z to 2020-09-28T18:02:05.000Z
content:
"""Tests for base_events.py"""
import concurrent.futures
import errno
import math
import socket
import sys
import threading
import time
import unittest
from unittest import mock
import asyncio
from asyncio import base_events
from asyncio import constants
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
from test.support import os_helper
from test.support import socket_helper
MOCK_ANY = mock.ANY
PY34 = sys.version_info >= (3, 4)
def tearDownModule():
asyncio.set_event_loop_policy(None)
def mock_socket_module():
m_socket = mock.MagicMock(spec=socket)
for name in (
'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP',
'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton'
):
if hasattr(socket, name):
setattr(m_socket, name, getattr(socket, name))
else:
delattr(m_socket, name)
m_socket.socket = mock.MagicMock()
m_socket.socket.return_value = test_utils.mock_nonblocking_socket()
m_socket.getaddrinfo._is_coroutine = False
return m_socket
def patch_socket(f):
return mock.patch('asyncio.base_events.socket',
new_callable=mock_socket_module)(f)
class BaseEventTests(test_utils.TestCase):
def test_ipaddr_info(self):
UNSPEC = socket.AF_UNSPEC
INET = socket.AF_INET
INET6 = socket.AF_INET6
STREAM = socket.SOCK_STREAM
DGRAM = socket.SOCK_DGRAM
TCP = socket.IPPROTO_TCP
UDP = socket.IPPROTO_UDP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP))
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP))
# Socket type STREAM implies TCP protocol.
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0))
# Socket type DGRAM implies UDP protocol.
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0))
# No socket type.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0))
if socket_helper.IPV6_ENABLED:
# IPv4 address with family IPv6.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP))
# IPv6 address with family IPv4.
self.assertIsNone(
base_events._ipaddr_info('::3', 1, INET, STREAM, TCP))
# IPv6 address with zone index.
self.assertIsNone(
base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP))
def test_port_parameter_types(self):
# Test obscure kinds of arguments for "port".
INET = socket.AF_INET
STREAM = socket.SOCK_STREAM
TCP = socket.IPPROTO_TCP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP))
@patch_socket
def test_ipaddr_info_no_inet_pton(self, m_socket):
del m_socket.inet_pton
self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1,
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP))
class BaseEventLoopTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = base_events.BaseEventLoop()
self.loop._selector = mock.Mock()
self.loop._selector.select.return_value = ()
self.set_event_loop(self.loop)
def test_not_implemented(self):
m = mock.Mock()
self.assertRaises(
NotImplementedError,
self.loop._make_socket_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_ssl_transport, m, m, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_datagram_transport, m, m)
self.assertRaises(
NotImplementedError, self.loop._process_events, [])
self.assertRaises(
NotImplementedError, self.loop._write_to_self)
self.assertRaises(
NotImplementedError,
self.loop._make_read_pipe_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
with self.assertRaises(NotImplementedError):
gen.send(None)
def test_close(self):
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = self.loop.create_future()
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test__add_callback_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertIn(h, self.loop._ready)
def test__add_callback_cancelled_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
h.cancel()
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
class DummyExecutor(concurrent.futures.ThreadPoolExecutor):
def submit(self, fn, *args, **kwargs):
raise NotImplementedError(
'cannot submit into a dummy executor')
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
executor = DummyExecutor()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
def test_set_default_executor_deprecation_warnings(self):
executor = mock.Mock()
with self.assertWarns(DeprecationWarning):
self.loop.set_default_executor(executor)
# Avoid cleaning up the executor mock
self.loop._default_executor = None
def test_call_soon(self):
def cb():
pass
h = self.loop.call_soon(cb)
self.assertEqual(h._callback, cb)
self.assertIsInstance(h, asyncio.Handle)
self.assertIn(h, self.loop._ready)
def test_call_soon_non_callable(self):
self.loop.set_debug(True)
with self.assertRaisesRegex(TypeError, 'a callable object'):
self.loop.call_soon(1)
def test_call_later(self):
def cb():
pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
def test_call_later_negative_delays(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop._process_events = mock.Mock()
self.loop.call_later(-1, cb, 'a')
self.loop.call_later(-2, cb, 'b')
test_utils.run_briefly(self.loop)
self.assertEqual(calls, ['b', 'a'])
def test_time_and_call_at(self):
def cb():
self.loop.stop()
self.loop._process_events = mock.Mock()
delay = 0.1
when = self.loop.time() + delay
self.loop.call_at(when, cb)
t0 = self.loop.time()
self.loop.run_forever()
dt = self.loop.time() - t0
# 50 ms: maximum granularity of the event loop
self.assertGreaterEqual(dt, delay - 0.050, dt)
# tolerate a difference of +800 ms because some Python buildbots
# are really slow
self.assertLessEqual(dt, 0.9, dt)
def check_thread(self, loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an event loop other "
"than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def test_check_thread(self):
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = base_events.BaseEventLoop()
try:
asyncio.set_event_loop(loop2)
self.check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
self.check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = loop.create_future()
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test__run_once(self):
h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
self.loop, None)
h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
self.loop, None)
h1.cancel()
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h1)
self.loop._scheduled.append(h2)
self.loop._run_once()
t = self.loop._selector.select.call_args[0][0]
self.assertTrue(9.5 < t < 10.5, t)
self.assertEqual([h2], self.loop._scheduled)
self.assertTrue(self.loop._process_events.called)
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
def test__run_once_schedule_handle(self):
handle = None
processed = False
def cb(loop):
nonlocal processed, handle
processed = True
handle = loop.call_soon(lambda: True)
h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
self.loop, None)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
self.assertTrue(processed)
self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
self.assertTrue(
0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
def cb():
pass
# Set up one "blocking" event that will not be cancelled to
# ensure later cancelled events do not make it to the head
# of the queue and get cleaned.
not_cancelled_count = 1
self.loop.call_later(3000, cb)
# Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
# cancelled handles, ensure they aren't removed
cancelled_count = 2
for x in range(2):
h = self.loop.call_later(3600, cb)
h.cancel()
# Add some cancelled events that will be at head and removed
cancelled_count += 2
for x in range(2):
h = self.loop.call_later(100, cb)
h.cancel()
# This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
self.assertLessEqual(cancelled_count + not_cancelled_count,
base_events._MIN_SCHEDULED_TIMER_HANDLES)
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.loop._run_once()
cancelled_count -= 2
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
# Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
# so that deletion of cancelled events will occur on next _run_once
add_cancel_count = int(math.ceil(
base_events._MIN_SCHEDULED_TIMER_HANDLES *
base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
add_cancel_count, 0)
# Add some events that will not be cancelled
not_cancelled_count += add_not_cancel_count
for x in range(add_not_cancel_count):
self.loop.call_later(3600, cb)
# Add enough cancelled events
cancelled_count += add_cancel_count
for x in range(add_cancel_count):
h = self.loop.call_later(3600, cb)
h.cancel()
# Ensure all handles are still scheduled
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
self.loop._run_once()
# Ensure cancelled events were removed
self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
# Ensure only uncancelled events remain scheduled
self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
def test_run_until_complete_type_error(self):
self.assertRaises(TypeError,
self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = self.loop.create_future()
other_loop = self.new_test_loop()
self.addCleanup(other_loop.close)
self.assertRaises(ValueError,
other_loop.run_until_complete, task)
def test_run_until_complete_loop_orphan_future_close_loop(self):
class ShowStopper(SystemExit):
pass
async def foo(delay):
await asyncio.sleep(delay)
def throw():
raise ShowStopper
self.loop._process_events = mock.Mock()
self.loop.call_soon(throw)
with self.assertRaises(ShowStopper):
self.loop.run_until_complete(foo(0.1))
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_subprocess_exec_invalid_args(self):
args = [sys.executable, '-c', 'pass']
# missing program parameter (empty args)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol)
# expected multiple arguments, not a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, args)
# program arguments must be strings, not int
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, sys.executable, 123)
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
# expected a string, not an int or a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 123)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1/0
# Test call_soon (events.Handle)
with mock.patch('asyncio.base_events.logger') as log:
fut = self.loop.create_future()
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
# Test call_later (events.TimerHandle)
with mock.patch('asyncio.base_events.logger') as log:
fut = self.loop.create_future()
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
async def zero_error_coro():
await asyncio.sleep(0.01)
1/0
# Test Future.__del__
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
fut = None # Trigger Future.__del__ or futures._TracebackLogger
support.gc_collect()
if PY34:
# Future.__del__ in Python 3.4 logs error with
# an actual exception context
log.error.assert_called_with(
test_utils.MockPattern('.*exception was never retrieved'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
else:
# futures._TracebackLogger logs only textual traceback
log.error.assert_called_with(
test_utils.MockPattern(
'.*exception was never retrieved.*ZeroDiv'),
exc_info=False)
def test_set_exc_handler_invalid(self):
with self.assertRaisesRegex(TypeError, 'A callable object or None'):
self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
def zero_error():
1/0
def run_loop():
handle = self.loop.call_soon(zero_error)
self.loop._run_once()
return handle
self.loop.set_debug(True)
self.loop._process_events = mock.Mock()
self.assertIsNone(self.loop.get_exception_handler())
mock_handler = mock.Mock()
self.loop.set_exception_handler(mock_handler)
self.assertIs(self.loop.get_exception_handler(), mock_handler)
handle = run_loop()
mock_handler.assert_called_with(self.loop, {
'exception': MOCK_ANY,
'message': test_utils.MockPattern(
'Exception in callback.*zero_error'),
'handle': handle,
'source_traceback': handle._source_traceback,
})
mock_handler.reset_mock()
self.loop.set_exception_handler(None)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
assert not mock_handler.called
def test_set_exc_handler_broken(self):
def run_loop():
def zero_error():
1/0
self.loop.call_soon(zero_error)
self.loop._run_once()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Unhandled error in exception handler'),
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
_context = None
class Loop(base_events.BaseEventLoop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
1/0
loop.call_soon(zero_error)
loop._run_once()
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError, 'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
async def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio',
'loop = asyncio.get_event_loop()',
'print(loop.get_debug())'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'False')
# -X dev
sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
'-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_create_task(self):
class MyTask(asyncio.Task):
pass
async def test():
pass
class EventLoop(base_events.BaseEventLoop):
def create_task(self, coro):
return MyTask(coro, loop=loop)
loop = EventLoop()
self.set_event_loop(loop)
coro = test()
task = asyncio.ensure_future(coro, loop=loop)
self.assertIsInstance(task, MyTask)
# make warnings quiet
task._log_destroy_pending = False
coro.close()
def test_create_named_task_with_default_factory(self):
async def test():
pass
loop = asyncio.new_event_loop()
task = loop.create_task(test(), name='test_task')
try:
self.assertEqual(task.get_name(), 'test_task')
finally:
loop.run_until_complete(task)
loop.close()
def test_create_named_task_with_custom_factory(self):
def task_factory(loop, coro):
return asyncio.Task(coro, loop=loop)
async def test():
pass
loop = asyncio.new_event_loop()
loop.set_task_factory(task_factory)
task = loop.create_task(test(), name='test_task')
try:
self.assertEqual(task.get_name(), 'test_task')
finally:
loop.run_until_complete(task)
loop.close()
def test_run_forever_keyboard_interrupt(self):
# Python issue #22601: ensure that the temporary task created by
# run_forever() consumes the KeyboardInterrupt and so don't log
# a warning
async def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
self.loop.close()
support.gc_collect()
self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
# Python issue #22429: run_until_complete() must not schedule a pending
# call to stop() if the future raised a BaseException
async def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
def func():
self.loop.stop()
func.called = True
func.called = False
try:
self.loop.call_soon(func)
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.assertTrue(func.called)
def test_single_selecter_event_callback_after_stopping(self):
# Python issue #25593: A stopped event loop may cause event callbacks
# to run more than once.
event_sentinel = object()
callcount = 0
doer = None
def proc_events(event_list):
nonlocal doer
if event_sentinel in event_list:
doer = self.loop.call_soon(do_event)
def do_event():
nonlocal callcount
callcount += 1
self.loop.call_soon(clear_selector)
def clear_selector():
doer.cancel()
self.loop._selector.select.return_value = ()
self.loop._process_events = proc_events
self.loop._selector.select.return_value = (event_sentinel,)
for i in range(1, 3):
with self.subTest('Loop %d/2' % i):
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(callcount, 1)
def test_run_once(self):
# Simple test for test_utils.run_once(). It may seem strange
# to have a test for this (the function isn't even used!) but
        # it's a de facto standard API for library tests. This tests
# the idiom: loop.call_soon(loop.stop); loop.run_forever().
count = 0
def callback():
nonlocal count
count += 1
self.loop._process_events = mock.Mock()
self.loop.call_soon(callback)
test_utils.run_once(self.loop)
self.assertEqual(count, 1)
def test_run_forever_pre_stopped(self):
# Test that the old idiom for pre-stopping the loop works.
self.loop._process_events = mock.Mock()
self.loop.stop()
self.loop.run_forever()
self.loop._selector.select.assert_called_once_with(0)
async def leave_unfinalized_asyncgen(self):
# Create an async generator, iterate it partially, and leave it
# to be garbage collected.
# Used in async generator finalization tests.
# Depends on implementation details of garbage collector. Changes
# in gc may break this function.
status = {'started': False,
'stopped': False,
'finalized': False}
async def agen():
status['started'] = True
try:
for item in ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR']:
yield item
finally:
status['finalized'] = True
ag = agen()
ai = ag.__aiter__()
async def iter_one():
try:
item = await ai.__anext__()
except StopAsyncIteration:
return
if item == 'THREE':
status['stopped'] = True
return
asyncio.create_task(iter_one())
asyncio.create_task(iter_one())
return status
def test_asyncgen_finalization_by_gc(self):
# Async generators should be finalized when garbage collected.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
support.gc_collect()
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
def test_asyncgen_finalization_by_gc_in_other_thread(self):
# Python issue 34769: If garbage collector runs in another
# thread, async generators will not finalize in debug
# mode.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
self.loop.set_debug(True)
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
self.loop.run_until_complete(
self.loop.run_in_executor(None, support.gc_collect))
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
class MyProto(asyncio.Protocol):
done = None
def __init__(self, create_future=False):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.get_running_loop().create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, create_future=False, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
@mock.patch('socket.getnameinfo')
def test_getnameinfo(self, m_gai):
m_gai.side_effect = lambda *args: 42
r = self.loop.run_until_complete(self.loop.getnameinfo(('abc', 123)))
self.assertEqual(r, 42)
@patch_socket
def test_create_connection_multiple_errors(self, m_socket):
class MyProto(asyncio.Protocol):
pass
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
idx = -1
errors = ['err1', 'err2']
def _socket(*args, **kw):
nonlocal idx, errors
idx += 1
raise OSError(errors[idx])
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@patch_socket
def test_create_connection_timeout(self, m_socket):
# Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
def getaddrinfo(*args, **kw):
fut = self.loop.create_future()
addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
('127.0.0.1', 80))
fut.set_result([addr])
return fut
self.loop.getaddrinfo = getaddrinfo
with mock.patch.object(self.loop, 'sock_connect',
side_effect=asyncio.TimeoutError):
coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_connection(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_server(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_server(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with sock:
coro = self.loop.create_server(lambda: None, sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'no IPv6 support')
def test_create_server_ipv6(self):
async def main():
srv = await asyncio.start_server(lambda: None, '::1', 0)
try:
self.assertGreater(len(srv.sockets), 0)
finally:
srv.close()
await srv.wait_closed()
try:
self.loop.run_until_complete(main())
except OSError as ex:
if (hasattr(errno, 'EADDRNOTAVAIL') and
ex.errno == errno.EADDRNOTAVAIL):
self.skipTest('failed to bind to ::1')
else:
raise
def test_create_datagram_endpoint_wrong_sock(self):
sock = socket.socket(socket.AF_INET)
with sock:
coro = self.loop.create_datagram_endpoint(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A UDP Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_connection_no_host_port_sock(self):
coro = self.loop.create_connection(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_no_getaddrinfo(self):
async def getaddrinfo(*args, **kw):
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
@patch_socket
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
if addr[0] == '0.0.0.1':
err = OSError('Err')
err.strerror = 'Err'
raise err
m_socket.socket.return_value.bind = bind
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
self.assertTrue(m_socket.socket.return_value.close.called)
def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton):
# Test the fallback code, even if this system has inet_pton.
if not allow_inet_pton:
del m_socket.inet_pton
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('1.2.3.4', 80))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
if socket_helper.IPV6_ENABLED:
sock.family = socket.AF_INET6
coro = self.loop.create_connection(asyncio.Protocol, '::1', 80)
t, p = self.loop.run_until_complete(coro)
try:
# Without inet_pton we use getaddrinfo, which transforms
# ('::1', 80) to ('::1', 80, 0, 0). The last 0s are flow info,
# scope id.
[address] = sock.connect.call_args[0]
host, port = address[:2]
self.assertRegex(host, r'::(0\.)*1')
self.assertEqual(port, 80)
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
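    # Illustrative note, added for clarity and not part of the original test
    # suite: the getaddrinfo() fallback referred to above expands an IPv6
    # host/port pair into a 4-tuple sockaddr, e.g.
    #
    #   socket.getaddrinfo('::1', 80, socket.AF_INET6, socket.SOCK_STREAM)
    #   # -> [(AF_INET6, SOCK_STREAM, proto, '', ('::1', 80, 0, 0))]
    #
    # which is why the assertions above compare only the host and port parts.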
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'no IPv6 support')
@unittest.skipIf(sys.platform.startswith('aix'),
"bpo-25545: IPv6 scope id and getaddrinfo() behave differently on AIX")
@patch_socket
def test_create_connection_ipv6_scope(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
sock.family = socket.AF_INET6
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, 'fe80::1%1', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('fe80::1', 80, 0, 1))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@patch_socket
def test_create_connection_ip_addr(self, m_socket):
self._test_create_connection_ip_addr(m_socket, True)
@patch_socket
def test_create_connection_no_inet_pton(self, m_socket):
self._test_create_connection_ip_addr(m_socket, False)
@patch_socket
def test_create_connection_service_name(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
for service, port in ('http', 80), (b'http', 80):
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('127.0.0.1', port))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
for service in 'nonsense', b'nonsense':
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
def test_create_connection_no_local_addr(self):
async def getaddrinfo(host, *args, **kw):
if host == 'example.com':
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
else:
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_connection_bluetooth(self, m_socket):
        # See http://bugs.python.org/issue27136: fall back to getaddrinfo when
        # we can't tell that an address is already resolved, e.g. a Bluetooth
        # address.
addr = ('00:01:02:03:04:05', 1)
def getaddrinfo(host, port, *args, **kw):
assert (host, port) == addr
return [(999, 1, 999, '', (addr, 1))]
m_socket.getaddrinfo = getaddrinfo
sock = m_socket.socket()
coro = self.loop.sock_connect(sock, addr)
self.loop.run_until_complete(coro)
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
def mock_getaddrinfo(*args, **kwds):
f = self.loop.create_future()
f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
socket.SOL_TCP, '', ('1.2.3.4', 80))])
return f
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.return_value = self.loop.create_future()
self.loop.sock_connect.return_value.set_result(None)
self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock:
_sock = None
def get_extra_info(self, key):
return mock.Mock()
def close(self):
self._sock.close()
def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
**kwds):
waiter.set_result(None)
transport = _SelectorTransportMock()
transport._sock = sock
return transport
self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
ANY = mock.ANY
handshake_timeout = object()
# First try the default server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='python.org',
ssl_handshake_timeout=handshake_timeout)
# Next try an explicit server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
# Finally try an explicit empty server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
def test_create_connection_no_ssl_server_hostname_errors(self):
# When not using ssl, server_hostname must be None.
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='python.org')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
# When using ssl, server_hostname may be None if host is non-empty.
coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
sock = socket.socket()
coro = self.loop.create_connection(MyProto, None, None,
ssl=True, sock=sock)
self.addCleanup(sock.close)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
def test_create_server_empty_host(self):
        # if host is an empty string, use None instead
host = object()
async def getaddrinfo(*args, **kw):
nonlocal host
host = args[0]
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertIsNone(host)
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
MyProto, '0.0.0.0', 0, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_host_port_sock(self):
fut = self.loop.create_server(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
getaddrinfo = self.loop.getaddrinfo = mock.Mock()
getaddrinfo.return_value = self.loop.create_future()
getaddrinfo.return_value.set_result(None)
f = self.loop.create_server(MyProto, 'python.org', 0)
self.assertRaises(OSError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_nosoreuseport(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_soreuseport_only_defined(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.return_value = mock.Mock()
m_socket.SO_REUSEPORT = -1
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_cant_bind(self, m_socket):
class Err(OSError):
strerror = 'error'
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
m_socket.getaddrinfo._is_coroutine = False
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
@patch_socket
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
m_socket.getaddrinfo.return_value = []
m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr='localhost')
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 1, 2, 3))
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_allow_broadcast(self):
protocol = MyDatagramProto(create_future=True, loop=self.loop)
self.loop.sock_connect = sock_connect = mock.Mock()
sock_connect.return_value = []
coro = self.loop.create_datagram_endpoint(
lambda: protocol,
remote_addr=('127.0.0.1', 0),
allow_broadcast=True)
transport, _ = self.loop.run_until_complete(coro)
self.assertFalse(sock_connect.called)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_socket_err(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_setblk_err(self, m_socket):
m_socket.socket.return_value.setblocking.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
self.assertTrue(
m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_cant_bind(self, m_socket):
class Err(OSError):
pass
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_datagram_endpoint(
MyDatagramProto,
local_addr=('127.0.0.1', 0), family=socket.AF_INET)
self.assertRaises(Err, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
def test_create_datagram_endpoint_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
sock=sock)
transport, protocol = self.loop.run_until_complete(fut)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_sock_unix(self):
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(fut)
assert transport._sock.family == socket.AF_UNIX
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@socket_helper.skip_unless_bind_unix_socket
def test_create_datagram_endpoint_existing_sock_unix(self):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX, type=socket.SOCK_DGRAM)
sock.bind(path)
sock.close()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
path, family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(coro)
transport.close()
self.loop.run_until_complete(protocol.done)
def test_create_datagram_endpoint_sock_sockopts(self):
class FakeSock:
type = socket.SOCK_DGRAM
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, family=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, proto=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, flags=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_port=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, allow_broadcast=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
@unittest.skipIf(sys.platform == 'vxworks',
"SO_BROADCAST is enabled by default on VxWorks")
def test_create_datagram_endpoint_sockopts(self):
# Socket options should not be applied unless asked for.
# SO_REUSEPORT is not available on all platforms.
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0))
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
if reuseport_supported:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_port=reuseport_supported,
allow_broadcast=True)
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
def test_create_datagram_endpoint_reuse_address_error(self):
# bpo-37228: Ensure that explicit passing of `reuse_address=True`
# raises an error, as it is not safe to use SO_REUSEADDR when using UDP
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=True)
with self.assertRaises(ValueError):
self.loop.run_until_complete(coro)
def test_create_datagram_endpoint_reuse_address_warning(self):
# bpo-37228: Deprecate *reuse_address* parameter
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=False)
with self.assertWarns(DeprecationWarning):
transport, protocol = self.loop.run_until_complete(coro)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_nosoreuseport(self, m_socket):
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_ip_addr(self, m_socket):
def getaddrinfo(*args, **kw):
self.fail('should not have called getaddrinfo')
m_socket.getaddrinfo = getaddrinfo
m_socket.socket.return_value.bind = bind = mock.Mock()
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('1.2.3.4', 0),
reuse_port=reuseport_supported)
t, p = self.loop.run_until_complete(coro)
try:
bind.assert_called_with(('1.2.3.4', 0))
m_socket.socket.assert_called_with(family=m_socket.AF_INET,
proto=m_socket.IPPROTO_UDP,
type=m_socket.SOCK_DGRAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
def test_accept_connection_retry(self):
sock = mock.Mock()
sock.accept.side_effect = BlockingIOError()
self.loop._accept_connection(MyProto, sock)
self.assertFalse(sock.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
self.loop._remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
self.loop._accept_connection(MyProto, sock)
self.assertTrue(m_log.error.called)
self.assertFalse(sock.close.called)
self.loop._remove_reader.assert_called_with(10)
self.loop.call_later.assert_called_with(
constants.ACCEPT_RETRY_DELAY,
# self.loop._start_serving
mock.ANY,
MyProto, sock, None, None, mock.ANY, mock.ANY)
def test_call_coroutine(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def simple_coroutine():
pass
self.loop.set_debug(True)
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
with self.assertRaises(TypeError):
self.loop.call_soon(func)
with self.assertRaises(TypeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(TypeError):
self.loop.call_later(60, func)
with self.assertRaises(TypeError):
self.loop.call_at(self.loop.time() + 60, func)
with self.assertRaises(TypeError):
self.loop.run_until_complete(
self.loop.run_in_executor(None, func))
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
async def stop_loop_coro(loop):
loop.stop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.0
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> "
"took .* seconds$")
# slow task
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> "
"took .* seconds$")
class RunningLoopTests(unittest.TestCase):
def test_running_loop_within_a_loop(self):
async def runner(loop):
loop.run_forever()
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
class BaseLoopSockSendfileTests(test_utils.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
def connection_made(self, transport):
self.started = True
self.transport = transport
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
self.transport = None
async def wait_closed(self):
await self.fut
@classmethod
def setUpClass(cls):
cls.__old_bufsize = constants.SENDFILE_FALLBACK_READBUFFER_SIZE
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 16
with open(os_helper.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = cls.__old_bufsize
os_helper.unlink(os_helper.TESTFN)
super().tearDownClass()
def setUp(self):
from asyncio.selector_events import BaseSelectorEventLoop
# BaseSelectorEventLoop() has no native implementation
self.loop = BaseSelectorEventLoop()
self.set_event_loop(self.loop)
self.file = open(os_helper.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def make_socket(self, blocking=False):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(blocking)
self.addCleanup(sock.close)
return sock
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
def prepare(self):
sock = self.make_socket()
proto = self.MyProto(self.loop)
server = self.run_loop(self.loop.create_server(
lambda: proto, socket_helper.HOST, 0, family=socket.AF_INET))
addr = server.sockets[0].getsockname()
for _ in range(10):
try:
self.run_loop(self.loop.sock_connect(sock, addr))
except OSError:
self.run_loop(asyncio.sleep(0.5))
continue
else:
break
else:
# One last try, so we get the exception
self.run_loop(self.loop.sock_connect(sock, addr))
def cleanup():
server.close()
self.run_loop(server.wait_closed())
sock.close()
if proto.transport is not None:
proto.transport.close()
self.run_loop(proto.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test__sock_sendfile_native_failure(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
0, None))
self.assertEqual(proto.data, b'')
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_no_fallback(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop.sock_sendfile(sock, self.file,
fallback=False))
self.assertEqual(self.file.tell(), 0)
self.assertEqual(proto.data, b'')
def test_sock_sendfile_fallback(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, len(self.DATA))
self.assertEqual(self.file.tell(), len(self.DATA))
self.assertEqual(proto.data, self.DATA)
def test_sock_sendfile_fallback_offset_and_count(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file,
1000, 2000))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, 2000)
self.assertEqual(self.file.tell(), 3000)
self.assertEqual(proto.data, self.DATA[1000:3000])
def test_blocking_socket(self):
self.loop.set_debug(True)
sock = self.make_socket(blocking=True)
with self.assertRaisesRegex(ValueError, "must be non-blocking"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_nonbinary_file(self):
sock = self.make_socket()
with open(os_helper.TESTFN, 'r') as f:
with self.assertRaisesRegex(ValueError, "binary mode"):
self.run_loop(self.loop.sock_sendfile(sock, f))
def test_nonstream_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(False)
self.addCleanup(sock.close)
with self.assertRaisesRegex(ValueError, "only SOCK_STREAM type"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_notint_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, 'count'))
def test_negative_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, -1))
def test_notint_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 'offset'))
def test_negative_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, -1))
class TestSelectorUtils(test_utils.TestCase):
def check_set_nodelay(self, sock):
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertFalse(opt)
base_events._set_nodelay(sock)
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertTrue(opt)
@unittest.skipUnless(hasattr(socket, 'TCP_NODELAY'),
'need socket.TCP_NODELAY')
def test_set_nodelay(self):
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
self.check_set_nodelay(sock)
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
sock.setblocking(False)
self.check_set_nodelay(sock)
if __name__ == '__main__':
unittest.main()
| 37.141008
| 91
| 0.612652
|
df9f3c7a7799245e7c007d9baaa8e14834437ef7
| 4,513
|
py
|
Python
|
python/test/lonestar/assortativity.py
|
bowu/katana
|
85bef7f17a1d294c1de2bda627a9be22bac0bbc5
|
[
"BSD-3-Clause"
] | 1
|
2021-07-06T15:51:14.000Z
|
2021-07-06T15:51:14.000Z
|
python/test/lonestar/assortativity.py
|
bowu/katana
|
85bef7f17a1d294c1de2bda627a9be22bac0bbc5
|
[
"BSD-3-Clause"
] | 2
|
2020-08-15T23:41:58.000Z
|
2020-08-29T04:46:35.000Z
|
python/test/lonestar/assortativity.py
|
bowu/katana
|
85bef7f17a1d294c1de2bda627a9be22bac0bbc5
|
[
"BSD-3-Clause"
] | null | null | null |
from enum import Enum
from math import sqrt
from test.lonestar.calculate_degree import calculate_degree
import numpy as np
from katana import do_all, do_all_operator
from katana.local import Graph
from katana.local.atomic import ReduceSum
class DegreeType(Enum):
IN = 1
OUT = 2
@do_all_operator()
def sum_degree_operator(
graph: Graph,
source_degree,
sum_source: ReduceSum[np.uint64],
destination_degree,
sum_destination: ReduceSum[np.uint64],
nid,
):
for edge in graph.edge_ids(nid):
sum_source.update(source_degree[nid])
dst = graph.get_edge_dest(edge)
sum_destination.update(destination_degree[dst])
def average_degree(graph: Graph, num_edges: int, source_degree, destination_degree):
"""
    Calculate the average in- or out-degree of the source and destination nodes,
    averaged over the edges of the graph rather than over its nodes.
    Returns the result as a tuple in the form (average degree for source, average degree for destination).
"""
sum_source_degrees = ReduceSum[np.uint64](0)
sum_destination_degrees = ReduceSum[np.uint64](0)
do_all(
range(graph.num_nodes()),
sum_degree_operator(graph, source_degree, sum_source_degrees, destination_degree, sum_destination_degrees),
steal=True,
)
return (sum_source_degrees.reduce() / num_edges, sum_destination_degrees.reduce() / num_edges)
@do_all_operator()
def degree_assortativity_coefficient_operator(
graph: Graph,
source_degree,
source_average,
destination_degree,
destination_average,
product_of_dev: ReduceSum[float],
square_of_source_dev: ReduceSum[float],
square_of_destination_dev: ReduceSum[float],
nid,
):
# deviation of source node from average
source_dev = source_degree[nid] - source_average
for edge in graph.edge_ids(nid):
dst = graph.get_edge_dest(edge)
destination_dev = destination_degree[dst] - destination_average
product_of_dev.update(source_dev * destination_dev)
square_of_source_dev.update(source_dev * source_dev)
square_of_destination_dev.update(destination_dev * destination_dev)
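# Note added for clarity (not part of the original module): the accumulators
# filled by the operator above are the pieces of a Pearson correlation taken
# over edges. Writing s_e and d_e for the source/destination degrees of edge e,
# and s_bar, d_bar for their edge-averaged means, the function below returns
#
#     r = sum_e (s_e - s_bar) * (d_e - d_bar)
#         / sqrt( sum_e (s_e - s_bar)**2 * sum_e (d_e - d_bar)**2 )
#
# so r lies in [-1, 1], with positive values meaning that high-degree sources
# tend to be connected to high-degree destinations.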
def degree_assortativity_coefficient(
graph: Graph,
source_degree_type: DegreeType = DegreeType.OUT,
destination_degree_type: DegreeType = DegreeType.IN,
weight=None,
):
"""
Calculates and returns the degree assortativity of a given graph.
    Parameters:
* graph: the Graph to be analyzed
* source_degree_type: description of degree type to consider for the source node on an edge
expected values are DegreeType.IN or DegreeType.OUT
    * destination_degree_type: description of the degree type to consider for the destination node on an edge
expected values are DegreeType.IN or DegreeType.OUT
* weight (optional): edge property to use if using weighted degrees
"""
# get the tables associated with the degree types of the source and destination nodes
calculate_degree(graph, "temp_DegreeType.IN", "temp_DegreeType.OUT", weight)
source_degree = graph.get_node_property("temp_" + str(source_degree_type))
destination_degree = graph.get_node_property("temp_" + str(destination_degree_type))
try:
# Calculate the average in and out degrees of graph
# (with respect to number of edges, not number of nodes)
num_edges = graph.num_edges()
source_average, destination_average = average_degree(graph, num_edges, source_degree, destination_degree)
# Calculate the numerator (product of deviation from mean)
# and the factors of the denominator (square deviation from mean)
product_of_dev = ReduceSum[float](0)
square_of_source_dev = ReduceSum[float](0)
square_of_destination_dev = ReduceSum[float](0)
do_all(
range(graph.num_nodes()),
degree_assortativity_coefficient_operator(
graph,
source_degree,
source_average,
destination_degree,
destination_average,
product_of_dev,
square_of_source_dev,
square_of_destination_dev,
),
steal=True,
loop_name="degree assortativity coefficient calculation",
)
return product_of_dev.reduce() / sqrt(square_of_source_dev.reduce() * square_of_destination_dev.reduce())
finally:
graph.remove_node_property("temp_DegreeType.IN")
graph.remove_node_property("temp_DegreeType.OUT")
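# Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes a katana Graph instance named `graph` has already been
# loaded elsewhere -- the loading step is omitted because it depends on the
# katana I/O API:
#
#   r = degree_assortativity_coefficient(
#       graph,
#       source_degree_type=DegreeType.OUT,
#       destination_degree_type=DegreeType.IN,
#   )
#   print(f"degree assortativity: {r:.4f}")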
| 37.608333
| 115
| 0.70729
|
7d12fa65c97dbc1661e65f9f6e6f6a1a2923219f
| 1,713
|
py
|
Python
|
tests/test_handler_instance_mixin.py
|
debox-dev/wormhole
|
631bf8dd123178fb371096456afa4bb545f70931
|
[
"MIT"
] | null | null | null |
tests/test_handler_instance_mixin.py
|
debox-dev/wormhole
|
631bf8dd123178fb371096456afa4bb545f70931
|
[
"MIT"
] | null | null | null |
tests/test_handler_instance_mixin.py
|
debox-dev/wormhole
|
631bf8dd123178fb371096456afa4bb545f70931
|
[
"MIT"
] | null | null | null |
import pytest
import redis
from tests.test_objects import Vector3MixinHandler, Vector3Message
from wormhole.channel import WormholeRedisChannel
from wormhole.async_implementations.async_gevent import GeventWormhole
from typing import *
from wormhole.error import WormholeHandlingError, WormholeWaitForReplyError
from wormhole.utils import wait_all
if TYPE_CHECKING:
from wormhole.channel import AbstractWormholeChannel
class TestWormholeHandlerInstanceMixin:
TEST_REDIS = "redis://localhost:6379/1"
wormhole: Optional[GeventWormhole]
wormhole_channel: Optional["AbstractWormholeChannel"]
def setup_method(self):
rdb = redis.Redis.from_url(self.TEST_REDIS)
rdb.flushdb()
rdb.close()
self.wormhole_channel = WormholeRedisChannel(self.TEST_REDIS, max_connections=10)
self.wormhole = GeventWormhole(self.wormhole_channel)
self.wormhole.process_async(max_parallel=10)
def teardown_method(self):
self.wormhole.stop(wait=True)
self.wormhole_channel.close()
self.wormhole = None
self.wormhole_channel = None
def test_simple_setup(self):
instance = Vector3MixinHandler(self.wormhole)
with pytest.raises(WormholeWaitForReplyError):
Vector3Message(1, 5, 3).send(wormhole=self.wormhole).wait(timeout=1)
wait_all(instance.activate_all_handlers())
wait_all([Vector3Message(1, 5, 3).send(wormhole=self.wormhole) for _ in range(50)])
wait_all(instance.deactivate_all_handlers())
with pytest.raises(WormholeWaitForReplyError):
Vector3Message(1, 5, 3).send(wormhole=self.wormhole).wait(timeout=1)
| 38.066667
| 92
| 0.725628
|
42ac988486469ffcdb421044da2c13b706261a7c
| 6,741
|
py
|
Python
|
lib/job_outcome.py
|
angelonakos/aws-build-accumulator
|
8768bb60dfda13d1f8b3ca334a2d0c4d84eea2bb
|
[
"Apache-2.0"
] | 24
|
2020-07-31T14:23:54.000Z
|
2022-03-04T23:58:05.000Z
|
lib/job_outcome.py
|
angelonakos/aws-build-accumulator
|
8768bb60dfda13d1f8b3ca334a2d0c4d84eea2bb
|
[
"Apache-2.0"
] | 68
|
2020-09-11T17:36:33.000Z
|
2022-03-30T18:14:45.000Z
|
lib/job_outcome.py
|
angelonakos/aws-build-accumulator
|
8768bb60dfda13d1f8b3ca334a2d0c4d84eea2bb
|
[
"Apache-2.0"
] | 11
|
2020-10-18T20:13:26.000Z
|
2022-02-03T18:49:09.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import dataclasses
import json
import logging
import os
import lib.validation
################################################################################
# Decider classes
# ``````````````````````````````````````````````````````````````````````````````
# Each of these classes has a get_job_fields method. The class evaluates what
# the result of a single Litani job is and returns that result in the dict.
################################################################################
@dataclasses.dataclass
class OutcomeTableDecider:
"""Decide what the result of a job is based on an outcome table.
An 'outcome table' is a mapping from 'outcomes'---like return codes,
timeouts, or a wildcard---to whether or not the job is successful. This
class takes a user-specified or default outcome table, and decides what the
    result of a single Litani job is by iterating through the table.
"""
table: dict
return_code: int
timeout_reached: bool
loaded_from_file: bool
def _get_wildcard_outcome(self):
for outcome in self.table["outcomes"]:
if outcome["type"] == "wildcard":
return outcome["action"]
raise UserWarning(
"Outcome table contains no wildcard rule: %s" % json.dumps(
self.table, indent=2))
def _get_timeout_outcome(self):
for outcome in self.table["outcomes"]:
if outcome["type"] == "timeout":
return outcome["action"]
return None
def _get_return_code_outcome(self, return_code):
for outcome in self.table["outcomes"]:
if outcome["type"] == "return-code" and \
outcome["value"] == return_code:
return outcome["action"]
return None
def get_job_fields(self):
return {
"outcome": self.get_outcome(),
"loaded_outcome_dict":
self.table if self.loaded_from_file else None
}
def get_outcome(self):
timeout_outcome = self._get_timeout_outcome()
if self.timeout_reached:
if timeout_outcome:
return timeout_outcome
return self._get_wildcard_outcome()
rc_outcome = self._get_return_code_outcome(self.return_code)
if rc_outcome:
return rc_outcome
return self._get_wildcard_outcome()
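# Illustrative example (added for clarity; not part of the original source).
# A user-supplied outcome table that treats return code 10 as an ignored
# failure could look like this once loaded from JSON or YAML:
#
#   table = {"outcomes": [
#       {"type": "return-code", "value": 0, "action": "success"},
#       {"type": "return-code", "value": 10, "action": "fail_ignored"},
#       {"type": "timeout", "action": "fail"},
#       {"type": "wildcard", "action": "fail"},
#   ]}
#
#   OutcomeTableDecider(table, return_code=10, timeout_reached=False,
#                       loaded_from_file=True).get_outcome()
#   # -> "fail_ignored"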
################################################################################
# Utilities
################################################################################
def _get_default_outcome_dict(args):
"""Litani's default behavior if the user does not specify an outcome table.
This is not a constant dict as it also depends on whether the user passed in
command-line flags that affect how the result is decided, like
--ignore-returns etc.
"""
outcomes = []
if args.timeout_ok:
outcomes.append({
"type": "timeout",
"action": "success",
})
elif args.timeout_ignore:
outcomes.append({
"type": "timeout",
"action": "fail_ignored",
})
if args.ok_returns:
for rc in args.ok_returns:
outcomes.append({
"type": "return-code",
"value": int(rc),
"action": "success",
})
if args.ignore_returns:
for rc in args.ignore_returns:
outcomes.append({
"type": "return-code",
"value": int(rc),
"action": "fail_ignored",
})
outcomes.extend([{
"type": "return-code",
"value": 0,
"action": "success",
}, {
"type": "wildcard",
"action": "fail",
}])
return {"outcomes": outcomes}
def _get_outcome_table_job_decider(args, return_code, timeout_reached):
if args.outcome_table:
_, ext = os.path.splitext(args.outcome_table)
with open(args.outcome_table) as handle:
if ext == ".json":
outcome_table = json.load(handle)
elif ext == ".yaml":
import yaml
outcome_table = yaml.safe_load(handle)
else:
raise UserWarning("Unsupported outcome table format (%s)" % ext)
loaded_from_file = True
else:
loaded_from_file = False
outcome_table = _get_default_outcome_dict(args)
logging.debug("Using outcome table: %s", json.dumps(outcome_table, indent=2))
lib.validation.validate_outcome_table(outcome_table)
return OutcomeTableDecider(
outcome_table, return_code, timeout_reached,
loaded_from_file=loaded_from_file)
################################################################################
# Entry point
################################################################################
def fill_in_result(runner, job_data, args):
"""Add fields pertaining to job result to job_data dict
    The 'result' of a job can be evaluated in several ways. The simplest
mechanism, where a return code of 0 means success and anything else is a
failure, is encoded by the "default outcome table". Users can also supply
their own outcome table as a JSON file, and other mechanisms could be
available in the future.
Depending on how we are to evaluate the result, we construct an instance of
one of the Decider classes in this module, and use the Decider to evaluate
the result of the job. The result is a dict, whose keys and values we add to
the job's dict.
"""
job_data["complete"] = True
job_data["timeout_reached"] = runner.reached_timeout()
job_data["command_return_code"] = runner.get_return_code()
job_data["memory_trace"] = runner.get_memory_trace()
# These get set by the deciders
job_data["loaded_outcome_dict"] = None
decider = _get_outcome_table_job_decider(
args, runner.get_return_code(), runner.reached_timeout())
fields = decider.get_job_fields()
for k, v in fields.items():
job_data[k] = v
job_data["wrapper_return_code"] = 1 if job_data["outcome"] == "fail" else 0
| 32.723301
| 81
| 0.584928
|
037ce7e6eb3a9a3ee738cb89fcd2007aab02dfef
| 5,774
|
py
|
Python
|
src/ebay_rest/api/sell_marketing/models/delete_ad_request.py
|
matecsaj/ebay_rest
|
dd23236f39e05636eff222f99df1e3699ce47d4a
|
[
"MIT"
] | 3
|
2021-12-12T04:28:03.000Z
|
2022-03-10T03:29:18.000Z
|
src/ebay_rest/api/sell_marketing/models/delete_ad_request.py
|
jdavv/ebay_rest
|
20fc88c6aefdae9ab90f9c1330e79abddcd750cd
|
[
"MIT"
] | 33
|
2021-06-16T20:44:36.000Z
|
2022-03-30T14:55:06.000Z
|
src/ebay_rest/api/sell_marketing/models/delete_ad_request.py
|
jdavv/ebay_rest
|
20fc88c6aefdae9ab90f9c1330e79abddcd750cd
|
[
"MIT"
] | 7
|
2021-06-03T09:30:23.000Z
|
2022-03-08T19:51:33.000Z
|
# coding: utf-8
"""
Marketing API
<p>The <i>Marketing API </i> offers two platforms that sellers can use to promote and advertise their products:</p> <ul><li><b>Promoted Listings</b> is an eBay ad service that lets sellers set up <i>ad campaigns </i> for the products they want to promote. eBay displays the ads in search results and in other marketing modules as <b>SPONSORED</b> listings. If an item in a Promoted Listings campaign sells, the seller is assessed a Promoted Listings fee, which is a seller-specified percentage applied to the sales price. For complete details, see <a href=\"/api-docs/sell/static/marketing/promoted-listings.html\">Promoted Listings</a>.</li> <li><b>Promotions Manager</b> gives sellers a way to offer discounts on specific items as a way to attract buyers to their inventory. Sellers can set up discounts (such as \"20% off\" and other types of offers) on specific items or on an entire customer order. To further attract buyers, eBay prominently displays promotion <i>teasers</i> throughout buyer flows. For complete details, see <a href=\"/api-docs/sell/static/marketing/promotions-manager.html\">Promotions Manager</a>.</li></ul> <p><b>Marketing reports</b>, on both the Promoted Listings and Promotions Manager platforms, give sellers information that shows the effectiveness of their marketing strategies. The data gives sellers the ability to review and fine tune their marketing efforts.</p> <p class=\"tablenote\"><b>Important!</b> Sellers must have an active eBay Store subscription, and they must accept the <b>Terms and Conditions</b> before they can make requests to these APIs in the Production environment. There are also site-specific listings requirements and restrictions associated with these marketing tools, as listed in the \"requirements and restrictions\" sections for <a href=\"/api-docs/sell/marketing/static/overview.html#PL-requirements\">Promoted Listings</a> and <a href=\"/api-docs/sell/marketing/static/overview.html#PM-requirements\">Promotions Manager</a>.</p> <p>The table below lists all the Marketing API calls grouped by resource.</p> # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DeleteAdRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'listing_id': 'str'
}
attribute_map = {
'listing_id': 'listingId'
}
def __init__(self, listing_id=None): # noqa: E501
"""DeleteAdRequest - a model defined in Swagger""" # noqa: E501
self._listing_id = None
self.discriminator = None
if listing_id is not None:
self.listing_id = listing_id
@property
def listing_id(self):
"""Gets the listing_id of this DeleteAdRequest. # noqa: E501
        A unique eBay-assigned ID for a listing that is generated when the listing is created. <p class=\"tablenote\"><b>Note:</b> This request accepts both listing IDs, as generated by the Inventory API, and item IDs, as used in the eBay Traditional API set (e.g., the Trading and Finding APIs).</p> # noqa: E501
:return: The listing_id of this DeleteAdRequest. # noqa: E501
:rtype: str
"""
return self._listing_id
@listing_id.setter
def listing_id(self, listing_id):
"""Sets the listing_id of this DeleteAdRequest.
        A unique eBay-assigned ID for a listing that is generated when the listing is created. <p class=\"tablenote\"><b>Note:</b> This request accepts both listing IDs, as generated by the Inventory API, and item IDs, as used in the eBay Traditional API set (e.g., the Trading and Finding APIs).</p> # noqa: E501
:param listing_id: The listing_id of this DeleteAdRequest. # noqa: E501
:type: str
"""
self._listing_id = listing_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DeleteAdRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteAdRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
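# Illustrative usage (comment added for clarity; not part of the generated
# module). The listing ID below is a made-up placeholder:
#
#   request = DeleteAdRequest(listing_id="1234567890")
#   request.to_dict()  # -> {'listing_id': '1234567890'}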
| 51.097345
| 2,092
| 0.656044
|
ffeda97fbd31a76722ed34c8ec46d511da3368cc
| 4,365
|
py
|
Python
|
openstack_dashboard/dashboards/admin/networks/ports/views.py
|
2020human/horizon
|
fab662a19c02318c10c69efced0fac43c28d95f9
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/admin/networks/ports/views.py
|
2020human/horizon
|
fab662a19c02318c10c69efced0fac43c28d95f9
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/admin/networks/ports/views.py
|
2020human/horizon
|
fab662a19c02318c10c69efced0fac43c28d95f9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.networks.ports \
import forms as ports_forms
from openstack_dashboard.dashboards.admin.networks.ports \
import tables as ports_tables
from openstack_dashboard.dashboards.admin.networks.ports \
import tabs as ports_tabs
from openstack_dashboard.dashboards.project.networks.ports \
import views as project_views
class CreateView(forms.ModalFormView):
form_class = ports_forms.CreatePort
form_id = "create_port_form"
submit_label = _("Create Port")
submit_url = "horizon:admin:networks:addport"
page_title = _("Create Port")
template_name = 'admin/networks/ports/create.html'
url = 'horizon:admin:networks:detail'
def get_success_url(self):
return reverse(self.url,
args=(self.kwargs['network_id'],))
@memoized.memoized_method
def get_object(self):
try:
network_id = self.kwargs["network_id"]
return api.neutron.network_get(self.request, network_id)
except Exception:
redirect = reverse(self.url,
args=(self.kwargs['network_id'],))
msg = _("Unable to retrieve network.")
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
context['network'] = self.get_object()
args = (self.kwargs['network_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
context['cancel_url'] = reverse(self.url, args=args)
return context
def get_initial(self):
network = self.get_object()
return {"network_id": self.kwargs['network_id'],
"network_name": network.name}
class DetailView(project_views.DetailView):
tab_group_class = ports_tabs.PortDetailTabs
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
port = context["port"]
network_url = "horizon:admin:networks:detail"
subnet_url = "horizon:admin:networks:subnets:detail"
port.network_url = reverse(network_url, args=[port.network_id])
for ip in port.fixed_ips:
ip['subnet_url'] = reverse(subnet_url, args=[ip['subnet_id']])
table = ports_tables.PortsTable(self.request,
network_id=port.network_id)
# TODO(robcresswell) Add URL for "Ports" crumb after bug/1416838
breadcrumb = [
(_("Networks"), self.get_redirect_url()),
((port.network_name or port.network_id), port.network_url),
(_("Ports"), None)
]
context["custom_breadcrumb"] = breadcrumb
context["url"] = \
reverse('horizon:admin:networks:ports_tab', args=[port.network_id])
context["actions"] = table.render_row_actions(port)
return context
@staticmethod
def get_redirect_url():
return reverse('horizon:admin:networks:index')
class UpdateView(project_views.UpdateView):
form_class = ports_forms.UpdatePort
template_name = 'admin/networks/ports/update.html'
context_object_name = 'port'
submit_url = "horizon:admin:networks:editport"
success_url = 'horizon:admin:networks:detail'
def get_initial(self):
initial = super(UpdateView, self).get_initial()
port = self._get_object()
initial['binding__host_id'] = port['binding__host_id']
return initial
| 38.289474
| 79
| 0.678121
|
f5fdba6692b2feff53ee4db39ea770af81a42f00
| 150,273
|
py
|
Python
|
Data-Analysis/venv_macos/lib/python3.8/site-packages/IPython/core/interactiveshell.py
|
Qiaozhi94/Python-Projects
|
aefc6cf49c1f4f2cc9beba8dbe80cfa826ba75c4
|
[
"MIT"
] | null | null | null |
Data-Analysis/venv_macos/lib/python3.8/site-packages/IPython/core/interactiveshell.py
|
Qiaozhi94/Python-Projects
|
aefc6cf49c1f4f2cc9beba8dbe80cfa826ba75c4
|
[
"MIT"
] | 2
|
2021-06-08T21:41:55.000Z
|
2021-09-08T02:06:41.000Z
|
Data-Analysis/venv_macos/lib/python3.8/site-packages/IPython/core/interactiveshell.py
|
Qiaozhi94/Python-Projects
|
aefc6cf49c1f4f2cc9beba8dbe80cfa826ba75c4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Main IPython class."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import abc
import ast
import atexit
import builtins as builtin_mod
import functools
import inspect
import os
import re
import runpy
import sys
import tempfile
import traceback
import types
import subprocess
import warnings
from io import open as io_open
from pickleshare import PickleShareDB
from traitlets.config.configurable import SingletonConfigurable
from traitlets.utils.importstring import import_item
from IPython.core import oinspect
from IPython.core import magic
from IPython.core import page
from IPython.core import prefilter
from IPython.core import ultratb
from IPython.core.alias import Alias, AliasManager
from IPython.core.autocall import ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap
from IPython.core.events import EventManager, available_events
from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
from IPython.core.debugger import Pdb
from IPython.core.display_trap import DisplayTrap
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import InputRejected, UsageError
from IPython.core.extensions import ExtensionManager
from IPython.core.formatters import DisplayFormatter
from IPython.core.history import HistoryManager
from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
from IPython.core.logger import Logger
from IPython.core.macro import Macro
from IPython.core.payload import PayloadManager
from IPython.core.prefilter import PrefilterManager
from IPython.core.profiledir import ProfileDir
from IPython.core.usage import default_banner
from IPython.display import display
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize
from IPython.utils import io
from IPython.utils import py3compat
from IPython.utils import openpy
from IPython.utils.decorators import undoc
from IPython.utils.io import ask_yes_no
from IPython.utils.ipstruct import Struct
from IPython.paths import get_ipython_dir
from IPython.utils.path import get_home_dir, get_py_filename, ensure_dir_exists
from IPython.utils.process import system, getoutput
from IPython.utils.strdispatch import StrDispatch
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import format_screen, LSString, SList, DollarFormatter
from IPython.utils.tempdir import TemporaryDirectory
from traitlets import (
Integer, Bool, CaselessStrEnum, Enum, List, Dict, Unicode, Instance, Type,
observe, default, validate, Any
)
from warnings import warn
from logging import error
import IPython.core.hooks
from typing import List as ListType, Tuple
from ast import AST
# NoOpContext is deprecated, but ipykernel imports it from here.
# See https://github.com/ipython/ipykernel/issues/157
# (2016, let's try to remove that in IPython 8.0)
from IPython.utils.contexts import NoOpContext
try:
import docrepr.sphinxify as sphx
def sphinxify(doc):
with TemporaryDirectory() as dirname:
return {
'text/html': sphx.sphinxify(doc, dirname),
'text/plain': doc
}
except ImportError:
sphinxify = None
class ProvisionalWarning(DeprecationWarning):
"""
Warning class for unstable features
"""
pass
if sys.version_info > (3,8):
from ast import Module
else:
# mock the new API, ignore second argument
# see https://github.com/ipython/ipython/issues/11590
from ast import Module as OriginalModule
Module = lambda nodelist, type_ignores: OriginalModule(nodelist)
if sys.version_info > (3,6):
_assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
_single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
else:
_assign_nodes = (ast.AugAssign, ast.Assign )
_single_targets_nodes = (ast.AugAssign, )
#-----------------------------------------------------------------------------
# Await Helpers
#-----------------------------------------------------------------------------
def removed_co_newlocals(function:types.FunctionType) -> types.FunctionType:
"""Return a function that do not create a new local scope.
Given a function, create a clone of this function where the co_newlocal flag
has been removed, making this function code actually run in the sourounding
scope.
We need this in order to run asynchronous code in user level namespace.
"""
from types import CodeType, FunctionType
CO_NEWLOCALS = 0x0002
code = function.__code__
new_co_flags = code.co_flags & ~CO_NEWLOCALS
if sys.version_info > (3, 8, 0, 'alpha', 3):
new_code = code.replace(co_flags=new_co_flags)
else:
new_code = CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
new_co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars
)
return FunctionType(new_code, globals(), function.__name__, function.__defaults__)
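# Hypothetical, self-contained illustration of the flag manipulation above
# (requires Python 3.8+ for ``code.replace``): a normally defined function has
# CO_NEWLOCALS set; clearing the bit yields a code object that will reuse the
# surrounding scope instead of creating fresh locals.
def _co_newlocals_demo():
    return 1
_CO_NEWLOCALS_DEMO_FLAG = 0x0002
assert _co_newlocals_demo.__code__.co_flags & _CO_NEWLOCALS_DEMO_FLAG
_stripped_code = _co_newlocals_demo.__code__.replace(
    co_flags=_co_newlocals_demo.__code__.co_flags & ~_CO_NEWLOCALS_DEMO_FLAG)
assert not (_stripped_code.co_flags & _CO_NEWLOCALS_DEMO_FLAG)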
# we still need to run things using the asyncio eventloop, but there is no
# async integration
from .async_helpers import (_asyncio_runner, _asyncify, _pseudo_sync_runner)
from .async_helpers import _curio_runner, _trio_runner, _should_be_async
def _ast_asyncify(cell:str, wrapper_name:str) -> ast.Module:
"""
Parse a cell with top-level await and modify the AST to be able to run it later.
    Parameters
    ----------
    cell: str
        The code cell to asynchronify
wrapper_name: str
The name of the function to be used to wrap the passed `cell`. It is
        advised to **not** use a valid Python identifier, in order not to pollute the
        global namespace in which the function will be run.
    Returns
    -------
A module object AST containing **one** function named `wrapper_name`.
    The given code is wrapped in an async-def function, parsed into an AST, and
the resulting function definition AST is modified to return the last
expression.
The last expression or await node is moved into a return statement at the
end of the function, and removed from its original location. If the last
node is not Expr or Await nothing is done.
The function `__code__` will need to be later modified (by
``removed_co_newlocals``) in a subsequent step to not create new `locals()`
meaning that the local and global scope are the same, ie as if the body of
the function was at module level.
    Lastly, a call to `locals()` is made just before the last expression of the
    function, or just after the last assignment or statement, to make sure the
    global dict is updated, as Python functions work with a local fast cache which
    is updated only on `locals()` calls.
"""
from ast import Expr, Await, Return
if sys.version_info >= (3,8):
return ast.parse(cell)
tree = ast.parse(_asyncify(cell))
function_def = tree.body[0]
function_def.name = wrapper_name
try_block = function_def.body[0]
lastexpr = try_block.body[-1]
if isinstance(lastexpr, (Expr, Await)):
try_block.body[-1] = Return(lastexpr.value)
ast.fix_missing_locations(tree)
return tree
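# Hypothetical illustration (not IPython API) of the wrapping idea behind
# _ast_asyncify: a cell containing a top-level ``await`` does not parse on its
# own, but parses fine once indented into an async-def body.
import ast as _ast_demo
_demo_cell = "await asyncio.sleep(0)\nresult = 42"
_demo_wrapped = "async def __demo_wrapper__():\n" + "".join(
    "    " + line + "\n" for line in _demo_cell.splitlines())
_demo_tree = _ast_demo.parse(_demo_wrapped)
# The module body holds a single node: the async wrapper function definition.
assert isinstance(_demo_tree.body[0], _ast_demo.AsyncFunctionDef)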
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# compiled regexps for autoindent management
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
@undoc
def softspace(file, newvalue):
"""Copied from code.py, to remove the dependency"""
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
@undoc
def no_op(*a, **kw):
pass
class SpaceInInput(Exception): pass
def get_default_colors():
"DEPRECATED"
    warn('get_default_colors is deprecated since IPython 5.0, and returns `Neutral` on all platforms.',
DeprecationWarning, stacklevel=2)
return 'Neutral'
class SeparateUnicode(Unicode):
r"""A Unicode subclass to validate separate_in, separate_out, etc.
    This is a Unicode-based trait that converts '0'->'' and ``'\\n'->'\n'``.
"""
def validate(self, obj, value):
if value == '0': value = ''
value = value.replace('\\n','\n')
return super(SeparateUnicode, self).validate(obj, value)
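# Standalone sketch of the conversion rule implemented above (the real trait
# goes through traitlets validation; this just shows the two rewrites).
def _demo_separate(value):
    if value == '0':
        value = ''
    return value.replace('\\n', '\n')
assert _demo_separate('0') == ''
assert _demo_separate('\\n') == '\n'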
@undoc
class DummyMod(object):
"""A dummy module used for IPython's interactive module when
a namespace must be assigned to the module's __dict__."""
__spec__ = None
class ExecutionInfo(object):
"""The arguments used for a call to :meth:`InteractiveShell.run_cell`
Stores information about what is going to happen.
"""
raw_cell = None
store_history = False
silent = False
shell_futures = True
def __init__(self, raw_cell, store_history, silent, shell_futures):
self.raw_cell = raw_cell
self.store_history = store_history
self.silent = silent
self.shell_futures = shell_futures
def __repr__(self):
name = self.__class__.__qualname__
raw_cell = ((self.raw_cell[:50] + '..')
if len(self.raw_cell) > 50 else self.raw_cell)
return '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s>' %\
(name, id(self), raw_cell, self.store_history, self.silent, self.shell_futures)
class ExecutionResult(object):
"""The result of a call to :meth:`InteractiveShell.run_cell`
Stores information about what took place.
"""
execution_count = None
error_before_exec = None
error_in_exec = None
info = None
result = None
def __init__(self, info):
self.info = info
@property
def success(self):
return (self.error_before_exec is None) and (self.error_in_exec is None)
def raise_error(self):
"""Reraises error if `success` is `False`, otherwise does nothing"""
if self.error_before_exec is not None:
raise self.error_before_exec
if self.error_in_exec is not None:
raise self.error_in_exec
def __repr__(self):
name = self.__class__.__qualname__
return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
(name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
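# Hypothetical usage sketch of the two containers above (values invented for
# illustration; in practice InteractiveShell.run_cell builds and fills them).
_demo_info = ExecutionInfo("print('hi')", store_history=True, silent=False,
                           shell_futures=True)
_demo_result = ExecutionResult(_demo_info)
assert _demo_result.success                     # no errors recorded yet
_demo_result.error_in_exec = ZeroDivisionError("division by zero")
assert not _demo_result.success                 # an in-execution error was set
# _demo_result.raise_error() would now re-raise the stored ZeroDivisionError.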
class InteractiveShell(SingletonConfigurable):
"""An enhanced, interactive shell for Python."""
_instance = None
ast_transformers = List([], help=
"""
A list of ast.NodeTransformer subclass instances, which will be applied
to user input before code is run.
"""
).tag(config=True)
autocall = Enum((0,1,2), default_value=0, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
).tag(config=True)
autoindent = Bool(True, help=
"""
Autoindent IPython code entered interactively.
"""
).tag(config=True)
autoawait = Bool(True, help=
"""
Automatically run await statement in the top level repl.
"""
).tag(config=True)
loop_runner_map ={
'asyncio':(_asyncio_runner, True),
'curio':(_curio_runner, True),
'trio':(_trio_runner, True),
'sync': (_pseudo_sync_runner, False)
}
loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
allow_none=True,
help="""Select the loop runner that will be used to execute top-level asynchronous code"""
).tag(config=True)
@default('loop_runner')
def _default_loop_runner(self):
return import_item("IPython.core.interactiveshell._asyncio_runner")
@validate('loop_runner')
def _import_runner(self, proposal):
if isinstance(proposal.value, str):
if proposal.value in self.loop_runner_map:
runner, autoawait = self.loop_runner_map[proposal.value]
self.autoawait = autoawait
return runner
runner = import_item(proposal.value)
if not callable(runner):
raise ValueError('loop_runner must be callable')
return runner
if not callable(proposal.value):
raise ValueError('loop_runner must be callable')
return proposal.value
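    # Hedged configuration sketch: the runner can be chosen by one of the names
    # in loop_runner_map above, or by a dotted path to any importable callable
    # (resolved through import_item). For example, in ipython_config.py:
    #
    #   c.InteractiveShell.loop_runner = 'trio'
    #   c.InteractiveShell.loop_runner = 'mypackage.my_runner'  # hypothetical path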
automagic = Bool(True, help=
"""
Enable magic commands to be called without the leading %.
"""
).tag(config=True)
banner1 = Unicode(default_banner,
help="""The part of the banner to be printed before the profile"""
).tag(config=True)
banner2 = Unicode('',
help="""The part of the banner to be printed after the profile"""
).tag(config=True)
cache_size = Integer(1000, help=
"""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 3 (if
you provide a value less than 3, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend more
time re-flushing a too small cache than working
"""
).tag(config=True)
color_info = Bool(True, help=
"""
Use colors for displaying information about objects. Because this
information is passed through a pager (like 'less'), and some pagers
get confused with color codes, this capability can be turned off.
"""
).tag(config=True)
colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
default_value='Neutral',
help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
).tag(config=True)
debug = Bool(False).tag(config=True)
disable_failing_post_execute = Bool(False,
help="Don't call post-execute functions that have failed in the past."
).tag(config=True)
display_formatter = Instance(DisplayFormatter, allow_none=True)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
sphinxify_docstring = Bool(False, help=
"""
Enables rich html representation of docstrings. (This requires the
docrepr module).
""").tag(config=True)
@observe("sphinxify_docstring")
def _sphinxify_docstring_changed(self, change):
if change['new']:
warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
enable_html_pager = Bool(False, help=
"""
(Provisional API) enables html representation in mime bundles sent
to pagers.
""").tag(config=True)
@observe("enable_html_pager")
def _enable_html_pager_changed(self, change):
if change['new']:
warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
data_pub_class = None
exit_now = Bool(False)
exiter = Instance(ExitAutocall)
@default('exiter')
def _exiter_default(self):
return ExitAutocall(self)
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
# Used to transform cells before running them, and check whether code is complete
input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
())
@property
def input_transformers_cleanup(self):
return self.input_transformer_manager.cleanup_transforms
input_transformers_post = List([],
help="A list of string input transformers, to be applied after IPython's "
"own input transformations."
)
@property
def input_splitter(self):
"""Make this available for backward compatibility (pre-7.0 release) with existing code.
        For example, ipykernel currently uses
`shell.input_splitter.check_complete`
"""
from warnings import warn
warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
DeprecationWarning, stacklevel=2
)
return self.input_transformer_manager
logstart = Bool(False, help=
"""
Start logging to the default log file in overwrite mode.
Use `logappend` to specify a log file to **append** logs to.
"""
).tag(config=True)
logfile = Unicode('', help=
"""
The name of the logfile to use.
"""
).tag(config=True)
logappend = Unicode('', help=
"""
Start logging to the given file in append mode.
Use `logfile` to specify a log file to **overwrite** logs to.
"""
).tag(config=True)
object_info_string_level = Enum((0,1,2), default_value=0,
).tag(config=True)
pdb = Bool(False, help=
"""
Automatically call the pdb debugger after every exception.
"""
).tag(config=True)
display_page = Bool(False,
help="""If True, anything that would be passed to the pager
will be displayed as regular output instead."""
).tag(config=True)
# deprecated prompt traits:
prompt_in1 = Unicode('In [\\#]: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompt_in2 = Unicode(' .\\D.: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompt_out = Unicode('Out[\\#]: ',
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
prompts_pad_left = Bool(True,
help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
).tag(config=True)
    @observe('prompt_in1', 'prompt_in2', 'prompt_out', 'prompts_pad_left')
def _prompt_trait_changed(self, change):
name = change['name']
warn("InteractiveShell.{name} is deprecated since IPython 4.0"
" and ignored since 5.0, set TerminalInteractiveShell.prompts"
" object directly.".format(name=name))
# protect against weird cases where self.config may not exist:
show_rewritten_input = Bool(True,
help="Show rewritten input, e.g. for autocall."
).tag(config=True)
quiet = Bool(False).tag(config=True)
history_length = Integer(10000,
help='Total length of command history'
).tag(config=True)
history_load_length = Integer(1000, help=
"""
The number of saved history entries to be loaded
into the history buffer at startup.
"""
).tag(config=True)
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
default_value='last_expr',
help="""
'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying
which nodes should be run interactively (displaying output from expressions).
"""
).tag(config=True)
# TODO: this part of prompt management should be moved to the frontends.
# Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n').tag(config=True)
separate_out = SeparateUnicode('').tag(config=True)
separate_out2 = SeparateUnicode('').tag(config=True)
wildcards_case_sensitive = Bool(True).tag(config=True)
xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
default_value='Context',
help="Switch modes for the IPython exception handlers."
).tag(config=True)
# Subcomponents of InteractiveShell
alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
@property
def profile(self):
if self.profile_dir is not None:
name = os.path.basename(self.profile_dir.location)
return name.replace('profile_','')
# Private interface
_post_execute = Dict()
# Tracks any GUI loop loaded for pylab
pylab_gui_select = None
    last_execution_succeeded = Bool(True, help='Did the last executed command succeed')
last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
def __init__(self, ipython_dir=None, profile_dir=None,
user_module=None, user_ns=None,
custom_exceptions=((), None), **kwargs):
# This is where traits with a config_key argument are updated
# from the values on config.
super(InteractiveShell, self).__init__(**kwargs)
if 'PromptManager' in self.config:
warn('As of IPython 5.0 `PromptManager` config will have no effect'
' and has been replaced by TerminalInteractiveShell.prompts_class')
self.configurables = [self]
# These are relatively independent and stateless
self.init_ipython_dir(ipython_dir)
self.init_profile_dir(profile_dir)
self.init_instance_attrs()
self.init_environment()
# Check if we're in a virtualenv, and set up sys.path.
self.init_virtualenv()
# Create namespaces (user_ns, user_global_ns, etc.)
self.init_create_namespaces(user_module, user_ns)
# This has to be done after init_create_namespaces because it uses
# something in self.user_ns, but before init_sys_modules, which
# is the first thing to modify sys.
# TODO: When we override sys.stdout and sys.stderr before this class
# is created, we are saving the overridden ones here. Not sure if this
# is what we want to do.
self.save_sys_module_state()
self.init_sys_modules()
# While we're trying to have each part of the code directly access what
# it needs without keeping redundant references to objects, we have too
# much legacy code that expects ip.db to exist.
self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
self.init_history()
self.init_encoding()
self.init_prefilter()
self.init_syntax_highlighting()
self.init_hooks()
self.init_events()
self.init_pushd_popd_magic()
self.init_user_ns()
self.init_logger()
self.init_builtins()
# The following was in post_config_initialization
self.init_inspector()
self.raw_input_original = input
self.init_completer()
# TODO: init_io() needs to happen before init_traceback handlers
# because the traceback handlers hardcode the stdout/stderr streams.
        # This logic is in debugger.Pdb and should eventually be changed.
self.init_io()
self.init_traceback_handlers(custom_exceptions)
self.init_prompts()
self.init_display_formatter()
self.init_display_pub()
self.init_data_pub()
self.init_displayhook()
self.init_magics()
self.init_alias()
self.init_logstart()
self.init_pdb()
self.init_extension_manager()
self.init_payload()
self.init_deprecation_warnings()
self.hooks.late_startup_hook()
self.events.trigger('shell_initialized', self)
atexit.register(self.atexit_operations)
# The trio runner is used for running Trio in the foreground thread. It
# is different from `_trio_runner(async_fn)` in `async_helpers.py`
# which calls `trio.run()` for every cell. This runner runs all cells
# inside a single Trio event loop. If used, it is set from
# `ipykernel.kernelapp`.
self.trio_runner = None
def get_ipython(self):
"""Return the currently running IPython instance."""
return self
#-------------------------------------------------------------------------
# Trait changed handlers
#-------------------------------------------------------------------------
@observe('ipython_dir')
def _ipython_dir_changed(self, change):
ensure_dir_exists(change['new'])
def set_autoindent(self,value=None):
"""Set the autoindent flag.
If called with no arguments, it acts as a toggle."""
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
def set_trio_runner(self, tr):
self.trio_runner = tr
#-------------------------------------------------------------------------
# init_* methods called by __init__
#-------------------------------------------------------------------------
def init_ipython_dir(self, ipython_dir):
if ipython_dir is not None:
self.ipython_dir = ipython_dir
return
self.ipython_dir = get_ipython_dir()
def init_profile_dir(self, profile_dir):
if profile_dir is not None:
self.profile_dir = profile_dir
return
self.profile_dir =\
ProfileDir.create_profile_dir_by_name(self.ipython_dir, 'default')
def init_instance_attrs(self):
self.more = False
# command compiler
self.compile = CachingCompiler()
# Make an empty namespace, which extension writers can rely on both
# existing and NEVER being used by ipython itself. This gives them a
# convenient location for storing additional information and state
# their extensions may require, without fear of collisions with other
# ipython names that may develop later.
self.meta = Struct()
# Temporary files used for various purposes. Deleted at exit.
self.tempfiles = []
self.tempdirs = []
# keep track of where we started running (mainly for crash post-mortem)
# This is not being used anywhere currently.
self.starting_dir = os.getcwd()
# Indentation management
self.indent_current_nsp = 0
# Dict to track post-execution functions that have been registered
self._post_execute = {}
def init_environment(self):
"""Any changes we need to make to the user's environment."""
pass
def init_encoding(self):
        # Get system encoding at startup time. Certain terminals (like Emacs
        # under Win32) have it set to None, and we need to have a known valid
        # encoding to use in the raw_input() method.
try:
self.stdin_encoding = sys.stdin.encoding or 'ascii'
except AttributeError:
self.stdin_encoding = 'ascii'
@observe('colors')
def init_syntax_highlighting(self, changes=None):
# Python source parser/formatter for syntax highlighting
pyformat = PyColorize.Parser(style=self.colors, parent=self).format
self.pycolorize = lambda src: pyformat(src,'str')
def refresh_style(self):
# No-op here, used in subclass
pass
def init_pushd_popd_magic(self):
# for pushd/popd management
self.home_dir = get_home_dir()
self.dir_stack = []
def init_logger(self):
self.logger = Logger(self.home_dir, logfname='ipython_log.py',
logmode='rotate')
def init_logstart(self):
"""Initialize logging in case it was requested at the command line.
"""
if self.logappend:
self.magic('logstart %s append' % self.logappend)
elif self.logfile:
self.magic('logstart %s' % self.logfile)
elif self.logstart:
self.magic('logstart')
def init_deprecation_warnings(self):
"""
register default filter for deprecation warning.
This will allow deprecation warning of function used interactively to show
warning to users, and still hide deprecation warning from libraries import.
"""
if sys.version_info < (3,7):
warnings.filterwarnings("default", category=DeprecationWarning, module=self.user_ns.get("__name__"))
def init_builtins(self):
# A single, static flag that we set to True. Its presence indicates
# that an IPython shell has been created, and we make no attempts at
# removing on exit or representing the existence of more than one
# IPython at a time.
builtin_mod.__dict__['__IPYTHON__'] = True
builtin_mod.__dict__['display'] = display
self.builtin_trap = BuiltinTrap(shell=self)
@observe('colors')
def init_inspector(self, changes=None):
# Object inspector
self.inspector = oinspect.Inspector(oinspect.InspectColors,
PyColorize.ANSICodeColors,
self.colors,
self.object_info_string_level)
def init_io(self):
# This will just use sys.stdout and sys.stderr. If you want to
# override sys.stdout and sys.stderr themselves, you need to do that
# *before* instantiating this class, because io holds onto
# references to the underlying streams.
# io.std* are deprecated, but don't show our own deprecation warnings
# during initialization of the deprecated API.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
io.stdout = io.IOStream(sys.stdout)
io.stderr = io.IOStream(sys.stderr)
def init_prompts(self):
# Set system prompts, so that scripts can decide if they are running
# interactively.
sys.ps1 = 'In : '
sys.ps2 = '...: '
sys.ps3 = 'Out: '
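    # Small illustration of why these are set (generic Python, not IPython API):
    # user scripts can probe sys.ps1 to detect that they are running under an
    # interactive shell.
    #
    #   import sys
    #   if hasattr(sys, 'ps1'):
    #       print("running interactively")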
def init_display_formatter(self):
self.display_formatter = DisplayFormatter(parent=self)
self.configurables.append(self.display_formatter)
def init_display_pub(self):
self.display_pub = self.display_pub_class(parent=self, shell=self)
self.configurables.append(self.display_pub)
def init_data_pub(self):
if not self.data_pub_class:
self.data_pub = None
return
self.data_pub = self.data_pub_class(parent=self)
self.configurables.append(self.data_pub)
def init_displayhook(self):
# Initialize displayhook, set in/out prompts and printing system
self.displayhook = self.displayhook_class(
parent=self,
shell=self,
cache_size=self.cache_size,
)
self.configurables.append(self.displayhook)
        # This is a context manager that installs/removes the displayhook at
# the appropriate time.
self.display_trap = DisplayTrap(hook=self.displayhook)
def init_virtualenv(self):
"""Add a virtualenv to sys.path so the user can import modules from it.
This isn't perfect: it doesn't use the Python interpreter with which the
virtualenv was built, and it ignores the --no-site-packages option. A
        warning will appear suggesting that the user install IPython inside the
virtualenv, but for many cases, it probably works well enough.
Adapted from code snippets online.
http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
"""
if 'VIRTUAL_ENV' not in os.environ:
# Not in a virtualenv
return
p = os.path.normcase(sys.executable)
p_venv = os.path.normcase(os.environ['VIRTUAL_ENV'])
# executable path should end like /bin/python or \\scripts\\python.exe
p_exe_up2 = os.path.dirname(os.path.dirname(p))
if p_exe_up2 and os.path.exists(p_venv) and os.path.samefile(p_exe_up2, p_venv):
# Our exe is inside the virtualenv, don't need to do anything.
return
# fallback venv detection:
# stdlib venv may symlink sys.executable, so we can't use realpath.
# but others can symlink *to* the venv Python, so we can't just use sys.executable.
# So we just check every item in the symlink tree (generally <= 3)
paths = [p]
while os.path.islink(p):
p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
paths.append(p)
# In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
if p_venv.startswith('\\cygdrive'):
p_venv = p_venv[11:]
elif len(p_venv) >= 2 and p_venv[1] == ':':
p_venv = p_venv[2:]
if any(p_venv in p for p in paths):
# Running properly in the virtualenv, don't need to do anything
return
warn("Attempting to work in a virtualenv. If you encounter problems, please "
"install IPython inside the virtualenv.")
if sys.platform == "win32":
virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'Lib', 'site-packages')
else:
virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'lib',
'python%d.%d' % sys.version_info[:2], 'site-packages')
import site
sys.path.insert(0, virtual_env)
site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
def save_sys_module_state(self):
"""Save the state of hooks in the sys module.
This has to be called after self.user_module is created.
"""
self._orig_sys_module_state = {'stdin': sys.stdin,
'stdout': sys.stdout,
'stderr': sys.stderr,
'excepthook': sys.excepthook}
self._orig_sys_modules_main_name = self.user_module.__name__
self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
def restore_sys_module_state(self):
"""Restore the state of the sys module."""
try:
for k, v in self._orig_sys_module_state.items():
setattr(sys, k, v)
except AttributeError:
pass
        # Reset what was done in self.init_sys_modules
if self._orig_sys_modules_main_mod is not None:
sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to the banner
#-------------------------------------------------------------------------
@property
def banner(self):
banner = self.banner1
if self.profile and self.profile != 'default':
banner += '\nIPython profile: %s\n' % self.profile
if self.banner2:
banner += '\n' + self.banner2
return banner
def show_banner(self, banner=None):
if banner is None:
banner = self.banner
sys.stdout.write(banner)
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def init_hooks(self):
# hooks holds pointers used for user-side customizations
self.hooks = Struct()
self.strdispatchers = {}
# Set all default hooks, defined in the IPython.hooks module.
hooks = IPython.core.hooks
for hook_name in hooks.__all__:
# default hooks have priority 100, i.e. low; user hooks should have
# 0-100 priority
self.set_hook(hook_name,getattr(hooks,hook_name), 100, _warn_deprecated=False)
if self.display_page:
self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
def set_hook(self,name,hook, priority=50, str_key=None, re_key=None,
_warn_deprecated=True):
"""set_hook(name,hook) -> sets an internal IPython hook.
IPython exposes some of its internal API as user-modifiable hooks. By
adding your function to one of these hooks, you can modify IPython's
        behavior to call your own routines at runtime."""
# At some point in the future, this should validate the hook before it
# accepts it. Probably at least check that the hook takes the number
# of args it's supposed to.
f = types.MethodType(hook,self)
# check if the hook is for strdispatcher first
if str_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_s(str_key, f, priority )
self.strdispatchers[name] = sdp
return
if re_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_re(re.compile(re_key), f, priority )
self.strdispatchers[name] = sdp
return
dp = getattr(self.hooks, name, None)
if name not in IPython.core.hooks.__all__:
print("Warning! Hook '%s' is not one of %s" % \
(name, IPython.core.hooks.__all__ ))
if _warn_deprecated and (name in IPython.core.hooks.deprecated):
alternative = IPython.core.hooks.deprecated[name]
warn("Hook {} is deprecated. Use {} instead.".format(name, alternative), stacklevel=2)
if not dp:
dp = IPython.core.hooks.CommandChainDispatcher()
try:
dp.add(f,priority)
except AttributeError:
# it was not commandchain, plain old func - replace
dp = f
setattr(self.hooks,name, dp)
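    # Hedged usage sketch for set_hook (inside a running IPython session, with
    # ip = get_ipython()); 'editor' is one of the hook names in
    # IPython.core.hooks.__all__ and 'myeditor' below is a placeholder command:
    #
    #   def my_editor(self, filename, linenum=None, wait=True):
    #       import subprocess
    #       subprocess.call(['myeditor', filename])
    #   ip.set_hook('editor', my_editor)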
#-------------------------------------------------------------------------
# Things related to events
#-------------------------------------------------------------------------
def init_events(self):
self.events = EventManager(self, available_events)
self.events.register("pre_execute", self._clear_warning_registry)
def register_post_execute(self, func):
"""DEPRECATED: Use ip.events.register('post_run_cell', func)
Register a function for calling after code execution.
"""
warn("ip.register_post_execute is deprecated, use "
"ip.events.register('post_run_cell', func) instead.", stacklevel=2)
self.events.register('post_run_cell', func)
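    # Hedged sketch of the replacement API recommended above (inside an IPython
    # session); in this IPython version the callback receives the ExecutionResult:
    #
    #   def post_run_cell(result):
    #       print("cell finished, success =", result.success)
    #   get_ipython().events.register('post_run_cell', post_run_cell)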
def _clear_warning_registry(self):
# clear the warning registry, so that different code blocks with
# overlapping line number ranges don't cause spurious suppression of
# warnings (see gh-6611 for details)
if "__warningregistry__" in self.user_global_ns:
del self.user_global_ns["__warningregistry__"]
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self, filename, modname):
"""Return a new 'main' module object for user code execution.
``filename`` should be the path of the script which will be run in the
module. Requests with the same filename will get the same module, with
its namespace cleared.
``modname`` should be the module name - normally either '__main__' or
the basename of the file without the extension.
When scripts are executed via %run, we must keep a reference to their
__main__ module around so that Python doesn't
clear it, rendering references to module globals useless.
This method keeps said reference in a private dict, keyed by the
absolute path of the script. This way, for multiple executions of the
same script we only keep one copy of the namespace (the last one),
thus preventing memory leaks from old references while allowing the
objects from the last execution to be accessible.
"""
filename = os.path.abspath(filename)
try:
main_mod = self._main_mod_cache[filename]
except KeyError:
main_mod = self._main_mod_cache[filename] = types.ModuleType(
modname,
doc="Module created for script run in IPython")
else:
main_mod.__dict__.clear()
main_mod.__name__ = modname
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda : True
return main_mod
def clear_main_mod_cache(self):
"""Clear the cache of main modules.
Mainly for use by utilities like %reset.
Examples
--------
In [15]: import IPython
In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
In [17]: len(_ip._main_mod_cache) > 0
Out[17]: True
In [18]: _ip.clear_main_mod_cache()
In [19]: len(_ip._main_mod_cache) == 0
Out[19]: True
"""
self._main_mod_cache.clear()
#-------------------------------------------------------------------------
# Things related to debugging
#-------------------------------------------------------------------------
def init_pdb(self):
# Set calling of pdb on exceptions
# self.call_pdb is a property
self.call_pdb = self.pdb
def _get_call_pdb(self):
return self._call_pdb
def _set_call_pdb(self,val):
if val not in (0,1,False,True):
raise ValueError('new call_pdb value must be boolean')
# store value in instance
self._call_pdb = val
# notify the actual exception handlers
self.InteractiveTB.call_pdb = val
call_pdb = property(_get_call_pdb,_set_call_pdb,None,
'Control auto-activation of pdb at exceptions')
def debugger(self,force=False):
"""Call the pdb debugger.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
"""
if not (force or self.call_pdb):
return
if not hasattr(sys,'last_traceback'):
error('No traceback has been produced, nothing to debug.')
return
self.InteractiveTB.debugger(force=True)
#-------------------------------------------------------------------------
# Things related to IPython's various namespaces
#-------------------------------------------------------------------------
default_user_namespaces = True
def init_create_namespaces(self, user_module=None, user_ns=None):
# Create the namespace where the user will operate. user_ns is
# normally the only one used, and it is passed to the exec calls as
# the locals argument. But we do carry a user_global_ns namespace
        # given as the exec 'globals' argument. This is useful in embedding
# situations where the ipython shell opens in a context where the
# distinction between locals and globals is meaningful. For
# non-embedded contexts, it is just the same object as the user_ns dict.
# FIXME. For some strange reason, __builtins__ is showing up at user
# level as a dict instead of a module. This is a manual fix, but I
# should really track down where the problem is coming from. Alex
# Schmolck reported this problem first.
# A useful post by Alex Martelli on this topic:
# Re: inconsistent value from __builtins__
# Von: Alex Martelli <aleaxit@yahoo.com>
# Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
# Gruppen: comp.lang.python
# Michael Hohn <hohn@hooknose.lbl.gov> wrote:
# > >>> print type(builtin_check.get_global_binding('__builtins__'))
# > <type 'dict'>
# > >>> print type(__builtins__)
# > <type 'module'>
# > Is this difference in return value intentional?
# Well, it's documented that '__builtins__' can be either a dictionary
# or a module, and it's been that way for a long time. Whether it's
# intentional (or sensible), I don't know. In any case, the idea is
# that if you need to access the built-in namespace directly, you
# should start with "import __builtin__" (note, no 's') which will
# definitely give you a module. Yeah, it's somewhat confusing:-(.
# These routines return a properly built module and dict as needed by
# the rest of the code, and can also be used by extension writers to
# generate properly initialized namespaces.
if (user_ns is not None) or (user_module is not None):
self.default_user_namespaces = False
self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
# A record of hidden variables we have added to the user namespace, so
# we can list later only variables defined in actual interactive use.
self.user_ns_hidden = {}
# Now that FakeModule produces a real module, we've run into a nasty
# problem: after script execution (via %run), the module where the user
# code ran is deleted. Now that this object is a true module (needed
# so doctest and other tools work correctly), the Python module
# teardown mechanism runs over it, and sets to None every variable
# present in that module. Top-level references to objects from the
# script survive, because the user_ns is updated with them. However,
# calling functions defined in the script that use other things from
# the script will fail, because the function's closure had references
# to the original objects, which are now all None. So we must protect
# these modules from deletion by keeping a cache.
#
# To avoid keeping stale modules around (we only need the one from the
# last run), we use a dict keyed with the full path to the script, so
# only the last version of the module is held in the cache. Note,
# however, that we must cache the module *namespace contents* (their
# __dict__). Because if we try to cache the actual modules, old ones
# (uncached) could be destroyed while still holding references (such as
        # those held by GUI objects that tend to be long-lived).
#
# The %reset command will flush this cache. See the cache_main_mod()
# and clear_main_mod_cache() methods for details on use.
# This is the cache used for 'main' namespaces
self._main_mod_cache = {}
# A table holding all the namespaces IPython deals with, so that
# introspection facilities can search easily.
self.ns_table = {'user_global':self.user_module.__dict__,
'user_local':self.user_ns,
'builtin':builtin_mod.__dict__
}
@property
def user_global_ns(self):
return self.user_module.__dict__
def prepare_user_module(self, user_module=None, user_ns=None):
"""Prepare the module and namespace in which user code will be run.
When IPython is started normally, both parameters are None: a new module
is created automatically, and its __dict__ used as the namespace.
If only user_module is provided, its __dict__ is used as the namespace.
If only user_ns is provided, a dummy module is created, and user_ns
becomes the global namespace. If both are provided (as they may be
when embedding), user_ns is the local namespace, and user_module
provides the global namespace.
Parameters
----------
user_module : module, optional
The current user module in which IPython is being run. If None,
a clean module will be created.
user_ns : dict, optional
A namespace in which to run interactive commands.
Returns
-------
A tuple of user_module and user_ns, each properly initialised.
"""
if user_module is None and user_ns is not None:
user_ns.setdefault("__name__", "__main__")
user_module = DummyMod()
user_module.__dict__ = user_ns
if user_module is None:
user_module = types.ModuleType("__main__",
doc="Automatically created module for IPython interactive environment")
# We must ensure that __builtin__ (without the final 's') is always
# available and pointing to the __builtin__ *module*. For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
user_module.__dict__.setdefault('__builtin__', builtin_mod)
user_module.__dict__.setdefault('__builtins__', builtin_mod)
if user_ns is None:
user_ns = user_module.__dict__
return user_module, user_ns
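    # Commented sketch of the three calling modes described in the docstring
    # (assumes `shell` is an InteractiveShell instance and `mymod` a module;
    # both names are hypothetical):
    #
    #   shell.prepare_user_module()                    # fresh __main__-style module
    #   shell.prepare_user_module(user_ns={'x': 1})    # dict wrapped in a DummyMod
    #   shell.prepare_user_module(user_module=mymod)   # mymod.__dict__ is the namespace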
def init_sys_modules(self):
# We need to insert into sys.modules something that looks like a
# module but which accesses the IPython namespace, for shelve and
# pickle to work interactively. Normally they rely on getting
# everything out of __main__, but for embedding purposes each IPython
# instance has its own private namespace, so we can't go shoving
# everything into __main__.
# note, however, that we should only do this for non-embedded
# ipythons, which really mimic the __main__.__dict__ with their own
# namespace. Embedded instances, on the other hand, should not do
# this because they need to manage the user local/global namespaces
# only, but they live within a 'normal' __main__ (meaning, they
# shouldn't overtake the execution environment of the script they're
# embedded in).
# This is overridden in the InteractiveShellEmbed subclass to a no-op.
main_name = self.user_module.__name__
sys.modules[main_name] = self.user_module
def init_user_ns(self):
"""Initialize all user-visible namespaces to their minimum defaults.
Certain history lists are also initialized here, as they effectively
act as user namespaces.
Notes
-----
All data structures here are only filled in, they are NOT reset by this
method. If they were not empty before, data will simply be added to
them.
"""
# This function works in two parts: first we put a few things in
# user_ns, and we sync that contents into user_ns_hidden so that these
# initial variables aren't shown by %who. After the sync, we add the
# rest of what we *do* want the user to see with %who even on a new
# session (probably nothing, so they really only see their own stuff)
# The user dict must *always* have a __builtin__ reference to the
# Python standard __builtin__ namespace, which must be imported.
# This is so that certain operations in prompt evaluation can be
# reliably executed with builtins. Note that we can NOT use
# __builtins__ (note the 's'), because that can either be a dict or a
# module, and can even mutate at runtime, depending on the context
# (Python makes no guarantees on it). In contrast, __builtin__ is
# always a module object, though it must be explicitly imported.
# For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
ns = {}
# make global variables for user access to the histories
ns['_ih'] = self.history_manager.input_hist_parsed
ns['_oh'] = self.history_manager.output_hist
ns['_dh'] = self.history_manager.dir_hist
# user aliases to input and output histories. These shouldn't show up
# in %who, as they can have very large reprs.
ns['In'] = self.history_manager.input_hist_parsed
ns['Out'] = self.history_manager.output_hist
# Store myself as the public api!!!
ns['get_ipython'] = self.get_ipython
ns['exit'] = self.exiter
ns['quit'] = self.exiter
# Sync what we've added so far to user_ns_hidden so these aren't seen
# by %who
self.user_ns_hidden.update(ns)
# Anything put into ns now would show up in %who. Think twice before
# putting anything here, as we really want %who to show the user their
# stuff, not our variables.
# Finally, update the real user's namespace
self.user_ns.update(ns)
@property
def all_ns_refs(self):
"""Get a list of references to all the namespace dictionaries in which
IPython might store a user-created object.
Note that this does not include the displayhook, which also caches
objects from the output."""
return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
[m.__dict__ for m in self._main_mod_cache.values()]
def reset(self, new_session=True):
"""Clear all internal namespaces, and attempt to release references to
user objects.
If new_session is True, a new history session will be opened.
"""
# Clear histories
self.history_manager.reset(new_session)
# Reset counter used to index all histories
if new_session:
self.execution_count = 1
# Reset last execution result
self.last_execution_succeeded = True
self.last_execution_result = None
# Flush cached output items
if self.displayhook.do_full_cache:
self.displayhook.flush()
# The main execution namespaces must be cleared very carefully,
# skipping the deletion of the builtin-related keys, because doing so
# would cause errors in many object's __del__ methods.
if self.user_ns is not self.user_global_ns:
self.user_ns.clear()
ns = self.user_global_ns
drop_keys = set(ns.keys())
drop_keys.discard('__builtin__')
drop_keys.discard('__builtins__')
drop_keys.discard('__name__')
for k in drop_keys:
del ns[k]
self.user_ns_hidden.clear()
# Restore the user namespaces to minimal usability
self.init_user_ns()
# Restore the default and user aliases
self.alias_manager.clear_aliases()
self.alias_manager.init_aliases()
# Now define aliases that only make sense on the terminal, because they
# need direct access to the console in a way that we can't emulate in
# GUI or web frontend
if os.name == 'posix':
for cmd in ('clear', 'more', 'less', 'man'):
if cmd not in self.magics_manager.magics['line']:
self.alias_manager.soft_define_alias(cmd, cmd)
# Flush the private list of module references kept for script
# execution protection
self.clear_main_mod_cache()
def del_var(self, varname, by_name=False):
"""Delete a variable from the various namespaces, so that, as
far as possible, we're not keeping any hidden references to it.
Parameters
----------
varname : str
The name of the variable to delete.
by_name : bool
If True, delete variables with the given name in each
namespace. If False (default), find the variable in the user
namespace, and delete references to it.
"""
if varname in ('__builtin__', '__builtins__'):
raise ValueError("Refusing to delete %s" % varname)
ns_refs = self.all_ns_refs
if by_name: # Delete by name
for ns in ns_refs:
try:
del ns[varname]
except KeyError:
pass
else: # Delete by object
try:
obj = self.user_ns[varname]
except KeyError:
raise NameError("name '%s' is not defined" % varname)
# Also check in output history
ns_refs.append(self.history_manager.output_hist)
for ns in ns_refs:
to_delete = [n for n, o in ns.items() if o is obj]
for name in to_delete:
del ns[name]
# Ensure it is removed from the last execution result
if self.last_execution_result.result is obj:
self.last_execution_result = None
# displayhook keeps extra references, but not in a dictionary
for name in ('_', '__', '___'):
if getattr(self.displayhook, name) is obj:
setattr(self.displayhook, name, None)
def reset_selective(self, regex=None):
"""Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
variable names in the users namespaces.
"""
if regex is not None:
try:
m = re.compile(regex)
except TypeError:
raise TypeError('regex must be a string or compiled pattern')
# Search for keys in each namespace that match the given regex
# If a match is found, delete the key/value pair.
for ns in self.all_ns_refs:
for var in ns:
if m.search(var):
del ns[var]
def push(self, variables, interactive=True):
"""Inject a group of variables into the IPython user namespace.
Parameters
----------
variables : dict, str or list/tuple of str
The variables to inject into the user's namespace. If a dict, a
simple update is done. If a str, the string is assumed to have
variable names separated by spaces. A list/tuple of str can also
be used to give the variable names. If just the variable names are
            given (list/tuple/str), then the variable values are looked up in the
            caller's frame.
interactive : bool
If True (default), the variables will be listed with the ``who``
magic.
"""
vdict = None
# We need a dict of name/value pairs to do namespace updates.
if isinstance(variables, dict):
vdict = variables
elif isinstance(variables, (str, list, tuple)):
if isinstance(variables, str):
vlist = variables.split()
else:
vlist = variables
vdict = {}
cf = sys._getframe(1)
for name in vlist:
try:
vdict[name] = eval(name, cf.f_globals, cf.f_locals)
except:
print('Could not get variable %s from %s' %
(name,cf.f_code.co_name))
else:
raise ValueError('variables must be a dict/str/list/tuple')
# Propagate variables to user namespace
self.user_ns.update(vdict)
# And configure interactive visibility
user_ns_hidden = self.user_ns_hidden
if interactive:
for name in vdict:
user_ns_hidden.pop(name, None)
else:
user_ns_hidden.update(vdict)
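    # Hedged usage sketch (inside an IPython session, with ip = get_ipython()):
    #
    #   a, b = 1, 2
    #   ip.push('a b')                        # values looked up in the caller's frame
    #   ip.push({'c': 3}, interactive=False)  # injected but hidden from %who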
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in variables.items():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
#-------------------------------------------------------------------------
# Things related to object introspection
#-------------------------------------------------------------------------
def _ofind(self, oname, namespaces=None):
"""Find an object in the available namespaces.
self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic
Has special code to detect magic functions.
"""
oname = oname.strip()
if not oname.startswith(ESC_MAGIC) and \
not oname.startswith(ESC_MAGIC2) and \
not all(a.isidentifier() for a in oname.split(".")):
return {'found': False}
if namespaces is None:
# Namespaces to search in:
# Put them in a list. The order is important so that we
# find things in the same order that Python finds them.
namespaces = [ ('Interactive', self.user_ns),
('Interactive (global)', self.user_global_ns),
('Python builtin', builtin_mod.__dict__),
]
ismagic = False
isalias = False
found = False
ospace = None
parent = None
obj = None
# Look for the given name by splitting it in parts. If the head is
# found, then we look for all the remaining parts as members, and only
# declare success if we can find them all.
oname_parts = oname.split('.')
oname_head, oname_rest = oname_parts[0],oname_parts[1:]
for nsname,ns in namespaces:
try:
obj = ns[oname_head]
except KeyError:
continue
else:
for idx, part in enumerate(oname_rest):
try:
parent = obj
# The last part is looked up in a special way to avoid
# descriptor invocation as it may raise or have side
# effects.
if idx == len(oname_rest) - 1:
obj = self._getattr_property(obj, part)
else:
obj = getattr(obj, part)
except:
# Blanket except b/c some badly implemented objects
# allow __getattr__ to raise exceptions other than
# AttributeError, which then crashes IPython.
break
else:
# If we finish the for loop (no break), we got all members
found = True
ospace = nsname
break # namespace loop
# Try to see if it's magic
if not found:
obj = None
if oname.startswith(ESC_MAGIC2):
oname = oname.lstrip(ESC_MAGIC2)
obj = self.find_cell_magic(oname)
elif oname.startswith(ESC_MAGIC):
oname = oname.lstrip(ESC_MAGIC)
obj = self.find_line_magic(oname)
else:
# search without prefix, so run? will find %run?
obj = self.find_line_magic(oname)
if obj is None:
obj = self.find_cell_magic(oname)
if obj is not None:
found = True
ospace = 'IPython internal'
ismagic = True
isalias = isinstance(obj, Alias)
# Last try: special-case some literals like '', [], {}, etc:
if not found and oname_head in ["''",'""','[]','{}','()']:
obj = eval(oname_head)
found = True
ospace = 'Interactive'
return {
'obj':obj,
'found':found,
'parent':parent,
'ismagic':ismagic,
'isalias':isalias,
'namespace':ospace
}
@staticmethod
def _getattr_property(obj, attrname):
"""Property-aware getattr to use in object finding.
If attrname represents a property, return it unevaluated (in case it has
        side effects or raises an error).
"""
if not isinstance(obj, type):
try:
# `getattr(type(obj), attrname)` is not guaranteed to return
# `obj`, but does so for property:
#
# property.__get__(self, None, cls) -> self
#
# The universal alternative is to traverse the mro manually
# searching for attrname in class dicts.
attr = getattr(type(obj), attrname)
except AttributeError:
pass
else:
# This relies on the fact that data descriptors (with both
# __get__ & __set__ magic methods) take precedence over
# instance-level attributes:
#
# class A(object):
# @property
# def foobar(self): return 123
# a = A()
# a.__dict__['foobar'] = 345
# a.foobar # == 123
#
# So, a property may be returned right away.
if isinstance(attr, property):
return attr
# Nothing helped, fall back.
return getattr(obj, attrname)
def _object_find(self, oname, namespaces=None):
"""Find an object and return a struct with info about it."""
return Struct(self._ofind(oname, namespaces))
def _inspect(self, meth, oname, namespaces=None, **kw):
"""Generic interface to the inspector system.
This function is meant to be called by pdef, pdoc & friends.
"""
info = self._object_find(oname, namespaces)
docformat = sphinxify if self.sphinxify_docstring else None
if info.found:
pmethod = getattr(self.inspector, meth)
# TODO: only apply format_screen to the plain/text repr of the mime
# bundle.
formatter = format_screen if info.ismagic else docformat
if meth == 'pdoc':
pmethod(info.obj, oname, formatter)
elif meth == 'pinfo':
pmethod(info.obj, oname, formatter, info,
enable_html_pager=self.enable_html_pager, **kw)
else:
pmethod(info.obj, oname)
else:
print('Object `%s` not found.' % oname)
return 'not found' # so callers can take other action
def object_inspect(self, oname, detail_level=0):
"""Get object info about oname"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector.info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
return oinspect.object_info(name=oname, found=False)
def object_inspect_text(self, oname, detail_level=0):
"""Get object info as formatted text"""
return self.object_inspect_mime(oname, detail_level)['text/plain']
def object_inspect_mime(self, oname, detail_level=0):
"""Get object info as a mimebundle of formatted representations.
A mimebundle is a dictionary, keyed by mime-type.
It must always have the key `'text/plain'`.
"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector._get_info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
raise KeyError(oname)
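    # Illustrative usage sketch (comments only, not part of the implementation).
    # Assumes `ip = get_ipython()` in user code:
    #
    #     info = ip.object_inspect('len')        # dict of fields describing the object
    #     text = ip.object_inspect_text('len')   # the same info rendered as plain text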
#-------------------------------------------------------------------------
# Things related to history management
#-------------------------------------------------------------------------
def init_history(self):
"""Sets up the command history, and starts regular autosaves."""
self.history_manager = HistoryManager(shell=self, parent=self)
self.configurables.append(self.history_manager)
#-------------------------------------------------------------------------
# Things related to exception handling and tracebacks (not debugging)
#-------------------------------------------------------------------------
debugger_cls = Pdb
def init_traceback_handlers(self, custom_exceptions):
# Syntax error handler.
self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
# The interactive one is initialized with an offset, meaning we always
# want to remove the topmost item in the traceback, which is our own
# internal code. Valid modes: ['Plain','Context','Verbose','Minimal']
self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
color_scheme='NoColor',
tb_offset = 1,
check_cache=check_linecache_ipython,
debugger_cls=self.debugger_cls, parent=self)
# The instance will store a pointer to the system-wide exception hook,
# so that runtime code (such as magics) can access it. This is because
# during the read-eval loop, it may get temporarily overwritten.
self.sys_excepthook = sys.excepthook
# and add any custom exception handlers the user may have specified
self.set_custom_exc(*custom_exceptions)
# Set the exception mode
self.InteractiveTB.set_mode(mode=self.xmode)
def set_custom_exc(self, exc_tuple, handler):
"""set_custom_exc(exc_tuple, handler)
Set a custom exception handler, which will be called if any of the
exceptions in exc_tuple occur in the mainloop (specifically, in the
run_code() method).
Parameters
----------
exc_tuple : tuple of exception classes
A *tuple* of exception classes, for which to call the defined
handler. It is very important that you use a tuple, and NOT A
LIST here, because of the way Python's except statement works. If
you only want to trap a single exception, use a singleton tuple::
exc_tuple == (MyCustomException,)
handler : callable
handler must have the following signature::
def my_handler(self, etype, value, tb, tb_offset=None):
...
return structured_traceback
Your handler must return a structured traceback (a list of strings),
or None.
This will be made into an instance method (via types.MethodType)
of IPython itself, and it will be called if any of the exceptions
listed in the exc_tuple are caught. If the handler is None, an
internal basic one is used, which just prints basic info.
To protect IPython from crashes, if your handler ever raises an
exception or returns an invalid result, it will be immediately
disabled.
        WARNING: by putting your own exception handler into IPython's main
execution loop, you run a very good chance of nasty crashes. This
facility should only be used if you really know what you are doing."""
if not isinstance(exc_tuple, tuple):
raise TypeError("The custom exceptions must be given as a tuple.")
def dummy_handler(self, etype, value, tb, tb_offset=None):
print('*** Simple custom exception handler ***')
print('Exception type :', etype)
print('Exception value:', value)
print('Traceback :', tb)
def validate_stb(stb):
"""validate structured traceback return type
return type of CustomTB *should* be a list of strings, but allow
single strings or None, which are harmless.
This function will *always* return a list of strings,
and will raise a TypeError if stb is inappropriate.
"""
msg = "CustomTB must return list of strings, not %r" % stb
if stb is None:
return []
elif isinstance(stb, str):
return [stb]
elif not isinstance(stb, list):
raise TypeError(msg)
# it's a list
for line in stb:
# check every element
if not isinstance(line, str):
raise TypeError(msg)
return stb
if handler is None:
wrapped = dummy_handler
else:
def wrapped(self,etype,value,tb,tb_offset=None):
"""wrap CustomTB handler, to protect IPython from user code
This makes it harder (but not impossible) for custom exception
handlers to crash IPython.
"""
try:
stb = handler(self,etype,value,tb,tb_offset=tb_offset)
return validate_stb(stb)
except:
# clear custom handler immediately
self.set_custom_exc((), None)
print("Custom TB Handler failed, unregistering", file=sys.stderr)
# show the exception in handler first
stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
print(self.InteractiveTB.stb2text(stb))
print("The original exception:")
stb = self.InteractiveTB.structured_traceback(
(etype,value,tb), tb_offset=tb_offset
)
return stb
self.CustomTB = types.MethodType(wrapped,self)
self.custom_exceptions = exc_tuple
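    # Illustrative sketch of registering a custom handler (comments only, not
    # part of the implementation; the handler name and exception type are
    # arbitrary examples, and `ip = get_ipython()` is assumed):
    #
    #     def my_handler(self, etype, value, tb, tb_offset=None):
    #         print("Caught:", etype.__name__, value)
    #         return self.InteractiveTB.structured_traceback(
    #             etype, value, tb, tb_offset=tb_offset)
    #
    #     ip.set_custom_exc((ZeroDivisionError,), my_handler)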
def excepthook(self, etype, value, tb):
"""One more defense for GUI apps that call sys.excepthook.
GUI frameworks like wxPython trap exceptions and call
sys.excepthook themselves. I guess this is a feature that
enables them to keep running after exceptions that would
otherwise kill their mainloop. This is a bother for IPython
        which expects to catch all of the program exceptions with a try:
except: statement.
Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
any app directly invokes sys.excepthook, it will look to the user like
IPython crashed. In order to work around this, we can disable the
CrashHandler and replace it with this excepthook instead, which prints a
regular traceback using our InteractiveTB. In this fashion, apps which
call sys.excepthook will generate a regular-looking exception from
IPython, and the CrashHandler will only be triggered by real IPython
crashes.
This hook should be used sparingly, only in places which are not likely
to be true IPython errors.
"""
self.showtraceback((etype, value, tb), tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
"""get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
Ensures sys.last_type,value,traceback hold the exc_info we found,
from whichever source.
raises ValueError if none of these contain any information
"""
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb
def show_usage_error(self, exc):
"""Show a short message for UsageErrors
These are special exceptions that shouldn't show a traceback.
"""
print("UsageError: %s" % exc, file=sys.stderr)
def get_exception_only(self, exc_tuple=None):
"""
Return as a string (ending with a newline) the exception that
just occurred, without any traceback.
"""
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
exception_only=False, running_compiled_code=False):
"""Display the exception that just occurred.
If nothing is known about the exception, this is the method which
should be used throughout the code for presenting user tracebacks,
rather than directly invoking the InteractiveTB object.
A specific showsyntaxerror() also exists, but this method can take
care of calling it if needed, so unless you are explicitly catching a
SyntaxError exception, don't try to analyze the stack manually and
simply call this method."""
try:
try:
etype, value, tb = self._get_exc_info(exc_tuple)
except ValueError:
print('No traceback available to show.', file=sys.stderr)
return
if issubclass(etype, SyntaxError):
# Though this won't be called by syntax errors in the input
# line, there may be SyntaxError cases with imported code.
self.showsyntaxerror(filename, running_compiled_code)
elif etype is UsageError:
self.show_usage_error(value)
else:
if exception_only:
stb = ['An exception has occurred, use %tb to see '
'the full traceback.\n']
stb.extend(self.InteractiveTB.get_exception_only(etype,
value))
else:
try:
# Exception classes can customise their traceback - we
# use this in IPython.parallel for exceptions occurring
# in the engines. This should return a list of strings.
stb = value._render_traceback_()
except Exception:
stb = self.InteractiveTB.structured_traceback(etype,
value, tb, tb_offset=tb_offset)
self._showtraceback(etype, value, stb)
if self.call_pdb:
# drop into debugger
self.debugger(force=True)
return
# Actually show the traceback
self._showtraceback(etype, value, stb)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
def _showtraceback(self, etype, evalue, stb):
"""Actually show a traceback.
Subclasses may override this method to put the traceback on a different
place, like a side channel.
"""
print(self.InteractiveTB.stb2text(stb))
def showsyntaxerror(self, filename=None, running_compiled_code=False):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
        If the syntax error occurred when running compiled code (i.e.
        running_compiled_code=True), a longer stack trace will be displayed.
"""
etype, value, last_traceback = self._get_exc_info()
if filename and issubclass(etype, SyntaxError):
try:
value.filename = filename
except:
# Not the format we expect; leave it alone
pass
# If the error occurred when executing compiled code, we should provide full stacktrace.
elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
stb = self.SyntaxTB.structured_traceback(etype, value, elist)
self._showtraceback(etype, value, stb)
# This is overridden in TerminalInteractiveShell to show a message about
# the %paste magic.
def showindentationerror(self):
"""Called by _run_cell when there's an IndentationError in code entered
at the prompt.
This is overridden in TerminalInteractiveShell to show a message about
the %paste magic."""
self.showsyntaxerror()
#-------------------------------------------------------------------------
# Things related to readline
#-------------------------------------------------------------------------
def init_readline(self):
"""DEPRECATED
Moved to terminal subclass, here only to simplify the init logic."""
# Set a number of methods that depend on readline to be no-op
warnings.warn('`init_readline` is no-op since IPython 5.0 and is Deprecated',
DeprecationWarning, stacklevel=2)
self.set_custom_completer = no_op
@skip_doctest
def set_next_input(self, s, replace=False):
""" Sets the 'default' input string for the next command line.
Example::
            In [1]: _ip.set_next_input("Hello World")
            In [2]: Hello World_  # cursor is here
"""
self.rl_next_input = s
def _indent_current_str(self):
"""return the current level of indentation as a string"""
return self.input_splitter.get_indent_spaces() * ' '
#-------------------------------------------------------------------------
# Things related to text completion
#-------------------------------------------------------------------------
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programmatically (such as in test suites) or out-of-process
(typically over the network by remote frontends).
"""
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (module_completer,
magic_run_completer, cd_completer, reset_completer)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
parent=self,
)
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', module_completer, str_key = '%aimport')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
@skip_doctest
def complete(self, text, line=None, cursor_pos=None):
"""Return the completed text and a list of completions.
Parameters
----------
text : string
A string of text to be completed on. It can be given as empty and
instead a line/position pair are given. In this case, the
completer itself will split the line like readline does.
line : string, optional
The complete line that text is part of.
cursor_pos : int, optional
The position of the cursor on the input line.
Returns
-------
text : string
The actual text that was completed.
matches : list
A sorted list with all possible completions.
The optional arguments allow the completion to take more context into
account, and are part of the low-level completion API.
This is a wrapper around the completion mechanism, similar to what
readline does at the command line when the TAB key is hit. By
exposing it as a method, it can be used by other non-readline
environments (such as GUIs) for text completion.
Simple usage example:
In [1]: x = 'hello'
In [2]: _ip.complete('x.l')
Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
"""
# Inject names into __builtin__ so we can complete on the added names.
with self.builtin_trap:
return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0):
"""Adds a new custom completer function.
The position argument (defaults to 0) is the index in the completers
list where you want the completer to be inserted."""
newcomp = types.MethodType(completer, self.Completer)
self.Completer.custom_matchers.insert(pos,newcomp)
def set_completer_frame(self, frame=None):
"""Set the frame of the completer."""
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
from IPython.core import magics as m
self.magics_manager = magic.MagicsManager(shell=self,
parent=self,
user_magics=m.UserMagics(self))
self.configurables.append(self.magics_manager)
# Expose as public API from the magics manager
self.register_magics = self.magics_manager.register
self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
m.PylabMagics, m.ScriptMagics,
)
self.register_magics(m.AsyncMagics)
# Register Magic Aliases
mman = self.magics_manager
# FIXME: magic aliases should be defined by the Magics classes
# or in MagicsManager, not here
mman.register_alias('ed', 'edit')
mman.register_alias('hist', 'history')
mman.register_alias('rep', 'recall')
mman.register_alias('SVG', 'svg', 'cell')
mman.register_alias('HTML', 'html', 'cell')
mman.register_alias('file', 'writefile', 'cell')
# FIXME: Move the color initialization to the DisplayHook, which
# should be split into a prompt manager and displayhook. We probably
# even need a centralize colors management object.
self.run_line_magic('colors', self.colors)
# Defined here so that it's included in the documentation
@functools.wraps(magic.MagicsManager.register_function)
def register_magic_function(self, func, magic_kind='line', magic_name=None):
self.magics_manager.register_function(func,
magic_kind=magic_kind, magic_name=magic_name)
def run_line_magic(self, magic_name, line, _stack_depth=1):
"""Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string.
_stack_depth : int
If run_line_magic() is called from magic() then _stack_depth=2.
This is added to ensure backward compatibility for use of 'get_ipython().magic()'
"""
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
etpl = "Line magic function `%%%s` not found%s."
extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
'did you mean that instead?)' % magic_name )
raise UsageError(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
# Determine stack_depth depending on where run_line_magic() has been called
stack_depth = _stack_depth
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
# Put magic args in a list so we can call with f(*a) syntax
args = [magic_arg_s]
kwargs = {}
# Grab local namespace if we need it:
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
with self.builtin_trap:
result = fn(*args, **kwargs)
return result
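    # Illustrative usage sketch (comments only; `ip = get_ipython()` assumed):
    #
    #     ip.run_line_magic('timeit', 'sum(range(100))')   # same as %timeit sum(range(100))
    #     ip.run_line_magic('cd', '/tmp')                   # same as %cd /tmp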
def run_cell_magic(self, magic_name, line, cell):
"""Execute the given cell magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the first input line as a single string.
cell : str
The body of the cell as a (possibly multiline) string.
"""
fn = self.find_cell_magic(magic_name)
if fn is None:
lm = self.find_line_magic(magic_name)
etpl = "Cell magic `%%{0}` not found{1}."
extra = '' if lm is None else (' (But line magic `%{0}` exists, '
'did you mean that instead?)'.format(magic_name))
raise UsageError(etpl.format(magic_name, extra))
elif cell == '':
message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
if self.find_line_magic(magic_name) is not None:
message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
raise UsageError(message)
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
stack_depth = 2
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
kwargs = {}
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = self.user_ns
with self.builtin_trap:
args = (magic_arg_s, cell)
result = fn(*args, **kwargs)
return result
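    # Illustrative usage sketch (comments only; `ip = get_ipython()` assumed):
    #
    #     ip.run_cell_magic('timeit', '-n 10',
    #                       'total = sum(range(1000))')     # same as a %%timeit cell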
def find_line_magic(self, magic_name):
"""Find and return a line magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
"""Find and return a cell magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name)
def magic(self, arg_s):
"""DEPRECATED. Use run_line_magic() instead.
Call a magic function by name.
Input: a string containing the name of the magic function to call and
any additional arguments to be passed to the magic.
magic('name -opt foo bar') is equivalent to typing at the ipython
prompt:
In[1]: %name -opt foo bar
To call a magic without arguments, simply use magic('name').
This provides a proper Python function to call IPython's magics in any
valid Python code you can type at the interpreter, including loops and
compound statements.
"""
# TODO: should we issue a loud deprecation warning here?
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
def define_macro(self, name, themacro):
"""Define a new macro
Parameters
----------
name : str
The name of the macro.
themacro : str or Macro
The action to do upon invoking the macro. If a string, a new
Macro object is created by passing the string to it.
"""
from IPython.core import macro
if isinstance(themacro, str):
themacro = macro.Macro(themacro)
if not isinstance(themacro, macro.Macro):
raise ValueError('A macro must be a string or a Macro instance.')
self.user_ns[name] = themacro
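    # Illustrative usage sketch (comments only; `ip = get_ipython()` assumed and
    # the macro name is arbitrary). Typing the macro name at the prompt replays
    # the stored source:
    #
    #     ip.define_macro('setup', 'import os\nprint(os.getcwd())')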
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
          Command to execute (cannot end in '&', as background processes are
          not supported). Should not be a command that expects input
other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
def system_raw(self, cmd):
"""Call the given cmd in a subprocess using os.system on Windows or
subprocess.call using the system shell on other platforms.
Parameters
----------
cmd : str
Command to execute.
"""
cmd = self.var_expand(cmd, depth=1)
# protect os.system from UNC paths on Windows, which it can't handle:
if sys.platform == 'win32':
from IPython.utils._process_win32 import AvoidUNCPath
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
try:
ec = os.system(cmd)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = -2
else:
# For posix the result of the subprocess.call() below is an exit
# code, which by convention is zero for success, positive for
# program failure. Exit codes above 128 are reserved for signals,
# and the formula for converting a signal to an exit code is usually
# signal_number+128. To more easily differentiate between exit
# codes and signals, ipython uses negative numbers. For instance
# since control-c is signal 2 but exit code 130, ipython's
# _exit_code variable will read -2. Note that some shells like
# csh and fish don't follow sh/bash conventions for exit codes.
executable = os.environ.get('SHELL', None)
try:
# Use env shell instead of default /bin/sh
ec = subprocess.call(cmd, shell=True, executable=executable)
except KeyboardInterrupt:
# intercept control-C; a long traceback is not useful here
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = 130
if ec > 128:
ec = -(ec - 128)
# We explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns. Note the semantics
        # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
# but raising SystemExit(_exit_code) will give status 254!
self.user_ns['_exit_code'] = ec
# use piped system by default, because it is better behaved
system = system_piped
def getoutput(self, cmd, split=True, depth=0):
"""Get output (possibly including stderr) from a subprocess.
Parameters
----------
cmd : str
Command to execute (can not end in '&', as background processes are
          not supported).
split : bool, optional
If True, split the output into an IPython SList. Otherwise, an
IPython LSString is returned. These are objects similar to normal
lists and strings, with a few convenience attributes for easier
manipulation of line-based output. You can use '?' on them for
details.
depth : int, optional
How many frames above the caller are the local variables which should
be expanded in the command string? The default (0) assumes that the
expansion variables are in the stack frame calling this function.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
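    # Illustrative usage sketch (comments only; `ip = get_ipython()` assumed,
    # and 'ls' presumes a POSIX-like shell is available):
    #
    #     files = ip.getoutput('ls')    # SList of output lines
    #     files.grep('py')              # SList convenience filtering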
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
self.alias_manager = AliasManager(shell=self, parent=self)
self.configurables.append(self.alias_manager)
#-------------------------------------------------------------------------
# Things related to extensions
#-------------------------------------------------------------------------
def init_extension_manager(self):
self.extension_manager = ExtensionManager(shell=self, parent=self)
self.configurables.append(self.extension_manager)
#-------------------------------------------------------------------------
# Things related to payloads
#-------------------------------------------------------------------------
def init_payload(self):
self.payload_manager = PayloadManager(parent=self)
self.configurables.append(self.payload_manager)
#-------------------------------------------------------------------------
# Things related to the prefilter
#-------------------------------------------------------------------------
def init_prefilter(self):
self.prefilter_manager = PrefilterManager(shell=self, parent=self)
self.configurables.append(self.prefilter_manager)
# Ultimately this will be refactored in the new interpreter code, but
# for now, we should expose the main prefilter method (there's legacy
# code out there that may rely on this).
self.prefilter = self.prefilter_manager.prefilter_lines
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
# This is overridden in TerminalInteractiveShell to use fancy prompts
print("------> " + cmd)
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _user_obj_error(self):
"""return simple exception dict
for use in user_expressions
"""
etype, evalue, tb = self._get_exc_info()
stb = self.InteractiveTB.get_exception_only(etype, evalue)
exc_info = {
u'status' : 'error',
u'traceback' : stb,
u'ename' : etype.__name__,
u'evalue' : py3compat.safe_unicode(evalue),
}
return exc_info
def _format_user_obj(self, obj):
"""format a user object to display dict
for use in user_expressions
"""
data, md = self.display_formatter.format(obj)
value = {
'status' : 'ok',
'data' : data,
'metadata' : md,
}
return value
def user_expressions(self, expressions):
"""Evaluate a dict of expressions in the user's namespace.
Parameters
----------
expressions : dict
A dict with string keys and string values. The expression values
should be valid Python expressions, each of which will be evaluated
in the user namespace.
Returns
-------
A dict, keyed like the input expressions dict, with the rich mime-typed
display_data of each value.
"""
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in expressions.items():
try:
value = self._format_user_obj(eval(expr, global_ns, user_ns))
except:
value = self._user_obj_error()
out[key] = value
return out
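    # Illustrative usage sketch (comments only; `ip = get_ipython()` assumed):
    #
    #     out = ip.user_expressions({'answer': '6 * 7', 'oops': 'undefined_name'})
    #     out['answer']['status']   # 'ok', with rich display data for 42
    #     out['oops']['status']     # 'error', with formatted exception info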
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
"""Execute a normal python statement in user namespace."""
with self.builtin_trap:
exec(cmd, self.user_global_ns, self.user_ns)
def ev(self, expr):
"""Evaluate python expression expr in user namespace.
Returns the result of evaluation
"""
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
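    # Illustrative usage sketch (comments only; `ip = get_ipython()` assumed):
    #
    #     ip.ex('counter = 0')       # statement: defines counter in the user namespace
    #     ip.ev('counter + 1')       # expression: returns 1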
def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
"""A safe version of the builtin execfile().
This version will never throw an exception, but instead print
helpful error messages to the screen. This only works on pure
Python files with the .py extension.
Parameters
----------
fname : string
The name of the file to be executed.
where : tuple
One or two namespaces, passed to execfile() as (globals,locals).
If only one is given, it is passed as both.
exit_ignore : bool (False)
If True, then silence SystemExit for non-zero status (it is always
silenced for zero status, as it is so common).
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
"""
fname = os.path.abspath(os.path.expanduser(fname))
# Make sure we can open the file
try:
with open(fname):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = os.path.dirname(fname)
with prepended_to_syspath(dname), self.builtin_trap:
try:
glob, loc = (where + (None, ))[:2]
py3compat.execfile(
fname, glob, loc,
self.compile if shell_futures else None)
except SystemExit as status:
# If the call was made with 0 or None exit status (sys.exit(0)
# or sys.exit() ), don't bother showing a traceback, as both of
# these are considered normal by the OS:
# > python -c'import sys;sys.exit(0)'; echo $?
# 0
# > python -c'import sys;sys.exit()'; echo $?
# 0
# For other exit status, we show the exception unless
# explicitly silenced, but only in short form.
if status.code:
if raise_exceptions:
raise
if not exit_ignore:
self.showtraceback(exception_only=True)
except:
if raise_exceptions:
raise
# tb offset is 2 because we wrap execfile
self.showtraceback(tb_offset=2)
def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
"""Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
Parameters
----------
fname : str
The name of the file to execute. The filename must have a
.ipy or .ipynb extension.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
"""
fname = os.path.abspath(os.path.expanduser(fname))
# Make sure we can open the file
try:
with open(fname):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = os.path.dirname(fname)
def get_cells():
"""generator for sequence of code blocks to run"""
if fname.endswith('.ipynb'):
from nbformat import read
nb = read(fname, as_version=4)
if not nb.cells:
return
for cell in nb.cells:
if cell.cell_type == 'code':
yield cell.source
else:
with open(fname) as f:
yield f.read()
with prepended_to_syspath(dname):
try:
for cell in get_cells():
result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
if raise_exceptions:
result.raise_error()
elif not result.success:
break
except:
if raise_exceptions:
raise
self.showtraceback()
warn('Unknown failure executing file: <%s>' % fname)
def safe_run_module(self, mod_name, where):
"""A safe version of runpy.run_module().
This version will never throw an exception, but instead print
helpful error messages to the screen.
`SystemExit` exceptions with status code 0 or None are ignored.
Parameters
----------
mod_name : string
The name of the module to be executed.
where : dict
The globals namespace.
"""
try:
try:
where.update(
runpy.run_module(str(mod_name), run_name="__main__",
alter_sys=True)
)
except SystemExit as status:
if status.code:
raise
except:
self.showtraceback()
warn('Unknown failure executing module: <%s>' % mod_name)
def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
          If True, avoid side-effects, such as implicit displayhooks and
          logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
Returns
-------
result : :class:`ExecutionResult`
"""
result = None
try:
result = self._run_cell(
raw_cell, store_history, silent, shell_futures)
finally:
self.events.trigger('post_execute')
if not silent:
self.events.trigger('post_run_cell', result)
return result
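    # Illustrative usage sketch (comments only; `ip = get_ipython()` assumed):
    #
    #     result = ip.run_cell('x = 1 + 1')
    #     result.success          # True if nothing raised
    #     result.error_in_exec    # the exception raised during execution, if any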
def _run_cell(self, raw_cell:str, store_history:bool, silent:bool, shell_futures:bool):
"""Internal method to run a complete IPython cell."""
coro = self.run_cell_async(
raw_cell,
store_history=store_history,
silent=silent,
shell_futures=shell_futures,
)
# run_cell_async is async, but may not actually need an eventloop.
# when this is the case, we want to run it using the pseudo_sync_runner
        # so that code can invoke eventloops (for example via the %run and
        # `%paste` magics).
if self.trio_runner:
runner = self.trio_runner
elif self.should_run_async(raw_cell):
runner = self.loop_runner
else:
runner = _pseudo_sync_runner
try:
return runner(coro)
except BaseException as e:
info = ExecutionInfo(raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
result.error_in_exec = e
self.showtraceback(running_compiled_code=True)
return result
return
def should_run_async(self, raw_cell: str) -> bool:
"""Return whether a cell should be run asynchronously via a coroutine runner
Parameters
----------
raw_cell: str
The code to be executed
Returns
-------
result: bool
Whether the code needs to be run with a coroutine runner or not
        .. versionadded:: 7.0
"""
if not self.autoawait:
return False
try:
cell = self.transform_cell(raw_cell)
except Exception:
# any exception during transform will be raised
# prior to execution
return False
return _should_be_async(cell)
async def run_cell_async(self, raw_cell: str, store_history=False, silent=False, shell_futures=True) -> ExecutionResult:
"""Run a complete IPython cell asynchronously.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
          If True, avoid side-effects, such as implicit displayhooks and
          logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
Returns
-------
result : :class:`ExecutionResult`
        .. versionadded:: 7.0
"""
info = ExecutionInfo(
raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
if (not raw_cell) or raw_cell.isspace():
self.last_execution_succeeded = True
self.last_execution_result = result
return result
if silent:
store_history = False
if store_history:
result.execution_count = self.execution_count
def error_before_exec(value):
if store_history:
self.execution_count += 1
result.error_before_exec = value
self.last_execution_succeeded = False
self.last_execution_result = result
return result
self.events.trigger('pre_execute')
if not silent:
self.events.trigger('pre_run_cell', info)
# If any of our input transformation (input_transformer_manager or
# prefilter_manager) raises an exception, we store it in this variable
# so that we can display the error after logging the input and storing
# it in the history.
try:
cell = self.transform_cell(raw_cell)
except Exception:
preprocessing_exc_tuple = sys.exc_info()
cell = raw_cell # cell has to exist so it can be stored/logged
else:
preprocessing_exc_tuple = None
# Store raw and processed history
if store_history:
self.history_manager.store_inputs(self.execution_count,
cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
# Display the exception if input processing failed.
if preprocessing_exc_tuple is not None:
self.showtraceback(preprocessing_exc_tuple)
if store_history:
self.execution_count += 1
return error_before_exec(preprocessing_exc_tuple[1])
# Our own compiler remembers the __future__ environment. If we want to
# run code with a separate __future__ environment, use the default
# compiler
compiler = self.compile if shell_futures else CachingCompiler()
_run_async = False
with self.builtin_trap:
cell_name = self.compile.cache(cell, self.execution_count)
with self.display_trap:
# Compile to bytecode
try:
if sys.version_info < (3,8) and self.autoawait:
if _should_be_async(cell):
# the code AST below will not be user code: we wrap it
# in an `async def`. This will likely make some AST
# transformer below miss some transform opportunity and
# introduce a small coupling to run_code (in which we
                            # bake some assumptions of what _ast_asyncify returns).
                            # There are ways around this (like grafting part of the ast
# later:
# - Here, return code_ast.body[0].body[1:-1], as well
# as last expression in return statement which is
# the user code part.
# - Let it go through the AST transformers, and graft
# - it back after the AST transform
                            # But that seems unreasonable, at least while we
# do not need it.
code_ast = _ast_asyncify(cell, 'async-def-wrapper')
_run_async = True
else:
code_ast = compiler.ast_parse(cell, filename=cell_name)
else:
code_ast = compiler.ast_parse(cell, filename=cell_name)
except self.custom_exceptions as e:
etype, value, tb = sys.exc_info()
self.CustomTB(etype, value, tb)
return error_before_exec(e)
except IndentationError as e:
self.showindentationerror()
return error_before_exec(e)
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError) as e:
self.showsyntaxerror()
return error_before_exec(e)
# Apply AST transformations
try:
code_ast = self.transform_ast(code_ast)
except InputRejected as e:
self.showtraceback()
return error_before_exec(e)
# Give the displayhook a reference to our ExecutionResult so it
# can fill in the output value.
self.displayhook.exec_result = result
# Execute the user code
interactivity = "none" if silent else self.ast_node_interactivity
if _run_async:
interactivity = 'async'
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity, compiler=compiler, result=result)
self.last_execution_succeeded = not has_raised
self.last_execution_result = result
# Reset this so later displayed values do not modify the
# ExecutionResult
self.displayhook.exec_result = None
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1
return result
def transform_cell(self, raw_cell):
"""Transform an input cell before parsing it.
Static transformations, implemented in IPython.core.inputtransformer2,
deal with things like ``%magic`` and ``!system`` commands.
These run on all input.
Dynamic transformations, for things like unescaped magics and the exit
autocall, depend on the state of the interpreter.
These only apply to single line inputs.
These string-based transformations are followed by AST transformations;
see :meth:`transform_ast`.
"""
# Static input transformations
cell = self.input_transformer_manager.transform_cell(raw_cell)
if len(cell.splitlines()) == 1:
# Dynamic transformations - only applied for single line commands
with self.builtin_trap:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
lines = cell.splitlines(keepends=True)
for transform in self.input_transformers_post:
lines = transform(lines)
cell = ''.join(lines)
return cell
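    # Illustrative sketch of the transformation (comments only; the exact output
    # strings below are approximate and `ip = get_ipython()` is assumed):
    #
    #     ip.transform_cell('!ls -l')
    #     # -> "get_ipython().system('ls -l')\n"
    #     ip.transform_cell('%time x = 1')
    #     # -> "get_ipython().run_line_magic('time', 'x = 1')\n"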
def transform_ast(self, node):
"""Apply the AST transformations from self.ast_transformers
Parameters
----------
node : ast.Node
The root node to be transformed. Typically called with the ast.Module
produced by parsing user input.
Returns
-------
An ast.Node corresponding to the node it was called with. Note that it
may also modify the passed object, so don't rely on references to the
original AST.
"""
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
# User-supplied AST transformers can reject an input by raising
# an InputRejected. Short-circuit in this case so that we
# don't unregister the transform.
raise
except Exception:
warn("AST transformer %r threw an error. It will be unregistered." % transformer)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
async def run_ast_nodes(self, nodelist:ListType[AST], cell_name:str, interactivity='last_expr',
compiler=compile, result=None):
"""Run a sequence of AST nodes. The execution mode depends on the
interactivity parameter.
Parameters
----------
nodelist : list
A sequence of AST nodes to run.
cell_name : str
Will be passed to the compiler as the filename of the cell. Typically
the value returned by ip.compile.cache(cell).
interactivity : str
          'all', 'last', 'last_expr', 'last_expr_or_assign' or 'none',
          specifying which nodes should be run interactively (displaying output
          from expressions). 'last_expr' will run the last node interactively
          only if it is an expression (i.e. expressions in loops or other blocks
          are not displayed). 'last_expr_or_assign' will run the last expression
or the last assignment. Other values for this parameter will raise a
ValueError.
Experimental value: 'async' Will try to run top level interactive
async/await code in default runner, this will not respect the
interactivity setting and will only run the last node if it is an
expression.
compiler : callable
A function with the same interface as the built-in compile(), to turn
the AST nodes into code objects. Default is the built-in compile().
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
Returns
-------
True if an exception occurred while running code, False if it finished
running.
"""
if not nodelist:
return
if interactivity == 'last_expr_or_assign':
if isinstance(nodelist[-1], _assign_nodes):
asg = nodelist[-1]
if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
target = asg.targets[0]
elif isinstance(asg, _single_targets_nodes):
target = asg.target
else:
target = None
if isinstance(target, ast.Name):
nnode = ast.Expr(ast.Name(target.id, ast.Load()))
ast.fix_missing_locations(nnode)
nodelist.append(nnode)
interactivity = 'last_expr'
_async = False
if interactivity == 'last_expr':
if isinstance(nodelist[-1], ast.Expr):
interactivity = "last"
else:
interactivity = "none"
if interactivity == 'none':
to_run_exec, to_run_interactive = nodelist, []
elif interactivity == 'last':
to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
elif interactivity == 'all':
to_run_exec, to_run_interactive = [], nodelist
elif interactivity == 'async':
to_run_exec, to_run_interactive = [], nodelist
_async = True
else:
raise ValueError("Interactivity was %r" % interactivity)
try:
if _async and sys.version_info > (3,8):
raise ValueError("This branch should never happen on Python 3.8 and above, "
"please try to upgrade IPython and open a bug report with your case.")
if _async:
                # If interactivity is async the semantics of run_code are
                # completely different; skip the usual machinery.
mod = Module(nodelist, [])
async_wrapper_code = compiler(mod, cell_name, 'exec')
exec(async_wrapper_code, self.user_global_ns, self.user_ns)
async_code = removed_co_newlocals(self.user_ns.pop('async-def-wrapper')).__code__
if (await self.run_code(async_code, result, async_=True)):
return True
else:
if sys.version_info > (3, 8):
def compare(code):
is_async = (inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE)
return is_async
else:
def compare(code):
return _async
# refactor that to just change the mod constructor.
to_run = []
for node in to_run_exec:
to_run.append((node, 'exec'))
for node in to_run_interactive:
to_run.append((node, 'single'))
for node,mode in to_run:
if mode == 'exec':
mod = Module([node], [])
elif mode == 'single':
mod = ast.Interactive([node])
with compiler.extra_flags(getattr(ast, 'PyCF_ALLOW_TOP_LEVEL_AWAIT', 0x0) if self.autoawait else 0x0):
code = compiler(mod, cell_name, mode)
asy = compare(code)
if (await self.run_code(code, result, async_=asy)):
return True
# Flush softspace
if softspace(sys.stdout, 0):
print()
except:
# It's possible to have exceptions raised here, typically by
# compilation of odd code (such as a naked 'return' outside a
# function) that did parse but isn't valid. Typically the exception
# is a SyntaxError, but it's safest just to catch anything and show
# the user a traceback.
# We do only one try/except outside the loop to minimize the impact
# on runtime, and also because if any node in the node list is
# broken, we should stop execution completely.
if result:
result.error_before_exec = sys.exc_info()[1]
self.showtraceback()
return True
return False
def _async_exec(self, code_obj: types.CodeType, user_ns: dict):
"""
Evaluate an asynchronous code object using a code runner
Fake asynchronous execution of code_object in a namespace via a proxy namespace.
Returns coroutine object, which can be executed via async loop runner
WARNING: The semantics of `async_exec` are quite different from `exec`,
        in particular you can only pass a single namespace. It also returns a
        handle to the value of the last thing returned by code_object.
"""
return eval(code_obj, user_ns)
async def run_code(self, code_obj, result=None, *, async_=False):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to display a
traceback.
Parameters
----------
code_obj : code object
A compiled code object, to be executed
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
async_ : Bool (Experimental)
Attempt to run top-level asynchronous code in a default loop.
Returns
-------
False : successful execution.
True : an error occurred.
"""
# Set our own excepthook in case the user code tries to call it
# directly, so that the IPython crash handler doesn't get triggered
old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
# we save the original sys.excepthook in the instance, in case config
# code (such as magics) needs access to it.
self.sys_excepthook = old_excepthook
outflag = True # happens in more places, so it's easier as default
try:
try:
self.hooks.pre_run_code_hook()
if async_ and sys.version_info < (3,8):
last_expr = (await self._async_exec(code_obj, self.user_ns))
code = compile('last_expr', 'fake', "single")
exec(code, {'last_expr': last_expr})
elif async_ :
await eval(code_obj, self.user_global_ns, self.user_ns)
else:
exec(code_obj, self.user_global_ns, self.user_ns)
finally:
# Reset our crash handler in place
sys.excepthook = old_excepthook
except SystemExit as e:
if result is not None:
result.error_in_exec = e
self.showtraceback(exception_only=True)
warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
except self.custom_exceptions:
etype, value, tb = sys.exc_info()
if result is not None:
result.error_in_exec = value
self.CustomTB(etype, value, tb)
except:
if result is not None:
result.error_in_exec = sys.exc_info()[1]
self.showtraceback(running_compiled_code=True)
else:
outflag = False
return outflag
# For backwards compatibility
runcode = run_code
def check_complete(self, code: str) -> Tuple[str, str]:
"""Return whether a block of code is ready to execute, or should be continued
Parameters
----------
        code : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent : str
When status is 'incomplete', this is some whitespace to insert on
the next line of the prompt.
"""
status, nspaces = self.input_transformer_manager.check_complete(code)
return status, ' ' * (nspaces or 0)
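    # Illustrative usage sketch (comments only; `ip = get_ipython()` assumed):
    #
    #     ip.check_complete('x = 1')                  # ('complete', '')
    #     ip.check_complete('for i in range(3):')     # ('incomplete', '    ')
    #     ip.check_complete('x = )')                  # ('invalid', '')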
#-------------------------------------------------------------------------
# Things related to GUI support and pylab
#-------------------------------------------------------------------------
active_eventloop = None
def enable_gui(self, gui=None):
raise NotImplementedError('Implement enable_gui in a subclass')
def enable_matplotlib(self, gui=None):
"""Enable interactive matplotlib and inline figure support.
This takes the following steps:
1. select the appropriate eventloop and matplotlib backend
2. set up matplotlib for interactive use with that backend
3. configure formatters for inline figure display
4. enable the selected gui eventloop
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
"""
from IPython.core import pylabtools as pt
gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
if gui != 'inline':
# If we have our first gui selection, store it
if self.pylab_gui_select is None:
self.pylab_gui_select = gui
# Otherwise if they are different
elif gui != self.pylab_gui_select:
print('Warning: Cannot change to a different GUI toolkit: %s.'
' Using %s instead.' % (gui, self.pylab_gui_select))
gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
pt.activate_matplotlib(backend)
pt.configure_inline_support(self, backend)
# Now we must activate the gui pylab wants to use, and fix %run to take
# plot updates into account
self.enable_gui(gui)
self.magics_manager.registry['ExecutionMagics'].default_runner = \
pt.mpl_runner(self.safe_execfile)
return gui, backend
def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
"""Activate pylab support at runtime.
This turns on support for matplotlib, preloads into the interactive
namespace all of numpy and pylab, and configures IPython to correctly
interact with the GUI event loop. The GUI backend to be used can be
optionally selected with the optional ``gui`` argument.
        This method only adds namespace preloading on top of
        InteractiveShell.enable_matplotlib.
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
import_all : optional, bool, default: True
Whether to do `from numpy import *` and `from pylab import *`
in addition to module imports.
welcome_message : deprecated
This argument is ignored, no welcome message will be displayed.
"""
from IPython.core.pylabtools import import_pylab
gui, backend = self.enable_matplotlib(gui)
# We want to prevent the loading of pylab to pollute the user's
# namespace as shown by the %who* magics, so we execute the activation
# code in an empty namespace, and we update *both* user_ns and
# user_ns_hidden with this information.
ns = {}
import_pylab(ns, import_all)
# warn about clobbered names
ignored = {"__builtins__"}
both = set(ns).intersection(self.user_ns).difference(ignored)
clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
self.user_ns.update(ns)
self.user_ns_hidden.update(ns)
return gui, backend, clobbered
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
"""
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth+1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if a script called run_line_magic() directly.
pass
else:
ns.update(frame.f_locals)
try:
# We have to use .vformat() here, because 'self' is a valid and common
# name, and expanding **ns for .format() would make it collide with
# the 'self' argument of the method.
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd
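    # Editor's sketch (illustrative, assuming `name` exists in the user
    # namespace); this is the same expansion used for shell escapes such as
    # `!echo $name`:
    #
    #     ip = get_ipython()
    #     ip.user_ns['name'] = 'world'
    #     ip.var_expand('echo $name')    # -> 'echo world'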
def mktempfile(self, data=None, prefix='ipython_edit_'):
"""Make a new tempfile and return its filename.
This makes a call to tempfile.mkstemp (created in a tempfile.mkdtemp),
but it registers the created filename internally so ipython cleans it up
at exit time.
Optional inputs:
- data(None): if data is given, it gets written out to the temp file
immediately, and the file is closed again."""
dirname = tempfile.mkdtemp(prefix=prefix)
self.tempdirs.append(dirname)
handle, filename = tempfile.mkstemp('.py', prefix, dir=dirname)
os.close(handle) # On Windows, there can only be one open handle on a file
self.tempfiles.append(filename)
if data:
with open(filename, 'w') as tmp_file:
tmp_file.write(data)
return filename
@undoc
def write(self,data):
"""DEPRECATED: Write a string to the default output"""
warn('InteractiveShell.write() is deprecated, use sys.stdout instead',
DeprecationWarning, stacklevel=2)
sys.stdout.write(data)
@undoc
def write_err(self,data):
"""DEPRECATED: Write a string to the default error output"""
warn('InteractiveShell.write_err() is deprecated, use sys.stderr instead',
DeprecationWarning, stacklevel=2)
sys.stderr.write(data)
def ask_yes_no(self, prompt, default=None, interrupt=None):
if self.quiet:
return True
return ask_yes_no(prompt,default,interrupt)
def show_usage(self):
"""Show a usage message"""
page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : string
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
raw : bool, optional
By default, the processed input is used. If this is true, the raw
input history is used instead.
Notes
-----
Slices can be described with two notations:
* ``N:M`` -> standard python form, means including items N...(M-1).
* ``N-M`` -> include items N..M (closed endpoint).
"""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
return "\n".join(x for _, _, x in lines)
def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
"""Get a code string from history, file, url, or a string or macro.
This is mainly used by magic functions.
Parameters
----------
target : str
A string specifying code to retrieve. This will be tried respectively
as: ranges of input history (see %history for syntax), url,
corresponding .py file, filename, or an expression evaluating to a
string or Macro in the user namespace.
raw : bool
If true (default), retrieve raw history. Has no effect on the other
retrieval mechanisms.
py_only : bool (default False)
Only try to fetch python code, do not try alternative methods to decode file
if unicode fails.
Returns
-------
A string of code.
ValueError is raised if nothing is found, and TypeError if it evaluates
to an object of another type. In each case, .args[0] is a printable
message.
"""
code = self.extract_input_lines(target, raw=raw) # Grab history
if code:
return code
try:
if target.startswith(('http://', 'https://')):
return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError:
if not py_only :
# Deferred import
from urllib.request import urlopen
response = urlopen(target)
return response.read().decode('latin1')
raise ValueError(("'%s' seem to be unreadable.") % target)
potential_target = [target]
try :
potential_target.insert(0,get_py_filename(target))
except IOError:
pass
for tgt in potential_target :
if os.path.isfile(tgt): # Read file
try :
return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError :
if not py_only :
with io_open(tgt,'r', encoding='latin1') as f :
return f.read()
raise ValueError(("'%s' seem to be unreadable.") % target)
elif os.path.isdir(os.path.expanduser(tgt)):
raise ValueError("'%s' is a directory, not a regular file." % target)
if search_ns:
# Inspect namespace to load object source
object_info = self.object_inspect(target, detail_level=1)
if object_info['found'] and object_info['source']:
return object_info['source']
try: # User namespace
codeobj = eval(target, self.user_ns)
except Exception:
raise ValueError(("'%s' was not found in history, as a file, url, "
"nor in the user namespace.") % target)
if isinstance(codeobj, str):
return codeobj
elif isinstance(codeobj, Macro):
return codeobj.value
raise TypeError("%s is neither a string nor a macro." % target,
codeobj)
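    # Editor's sketch of typical targets (the file name is hypothetical):
    #
    #     ip.find_user_code('1-5')           # input history range
    #     ip.find_user_code('my_script.py')  # a python file on disk
    #     ip.find_user_code('my_macro')      # a Macro or string in user_ns
    #
    # A URL starting with http:// or https:// would be fetched and returned.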
#-------------------------------------------------------------------------
# Things related to IPython exiting
#-------------------------------------------------------------------------
def atexit_operations(self):
"""This will be executed at the time of exit.
Cleanup operations and saving of persistent data that is done
unconditionally by IPython should be performed here.
For things that may depend on startup flags or platform specifics (such
as having readline or not), register a separate atexit function in the
        code that has the appropriate information, rather than trying to
        clutter this general-purpose cleanup method.
"""
# Close the history session (this stores the end time and line count)
# this must be *before* the tempfile cleanup, in case of temporary
# history db
self.history_manager.end_session()
# Cleanup all tempfiles and folders left around
for tfile in self.tempfiles:
try:
os.unlink(tfile)
except OSError:
pass
for tdir in self.tempdirs:
try:
os.rmdir(tdir)
except OSError:
pass
# Clear all user namespaces to release all references cleanly.
self.reset(new_session=False)
# Run user hooks
self.hooks.shutdown_hook()
def cleanup(self):
self.restore_sys_module_state()
# Overridden in terminal subclass to change prompts
def switch_doctest_mode(self, mode):
pass
class InteractiveShellABC(metaclass=abc.ABCMeta):
"""An abstract base class for InteractiveShell."""
InteractiveShellABC.register(InteractiveShell)
| 40.461228
| 147
| 0.591444
|
556c646f2a72145f40d1b425db76dc9bef63864e
| 39,343
|
py
|
Python
|
tensorflow/python/ops/losses/losses_impl.py
|
joshz123/tensorflow
|
7841ca029060ab78e221e757d4b1ee6e3e0ffaa4
|
[
"Apache-2.0"
] | 57
|
2017-09-03T07:08:31.000Z
|
2022-02-28T04:33:42.000Z
|
tensorflow/python/ops/losses/losses_impl.py
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 58
|
2021-11-22T05:41:28.000Z
|
2022-01-19T01:33:40.000Z
|
tensorflow/python/ops/losses/losses_impl.py
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 66
|
2020-05-15T10:05:12.000Z
|
2022-02-14T07:28:18.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Loss operations for use in neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import util
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["losses.Reduction"])
class Reduction(object):
"""Types of loss reduction.
Contains the following values:
* `NONE`: Un-reduced weighted losses with the same shape as input.
* `SUM`: Scalar sum of weighted losses.
* `MEAN`: Scalar `SUM` divided by sum of weights. DEPRECATED.
* `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses.
* `SUM_OVER_NONZERO_WEIGHTS`: Scalar `SUM` divided by number of non-zero
weights. DEPRECATED.
* `SUM_BY_NONZERO_WEIGHTS`: Same as `SUM_OVER_NONZERO_WEIGHTS`. DEPRECATED.
"""
NONE = "none"
SUM = "weighted_sum"
SUM_OVER_BATCH_SIZE = "weighted_sum_over_batch_size"
MEAN = "weighted_mean"
SUM_BY_NONZERO_WEIGHTS = "weighted_sum_by_nonzero_weights"
SUM_OVER_NONZERO_WEIGHTS = SUM_BY_NONZERO_WEIGHTS
@classmethod
def all(cls):
return (
cls.NONE,
cls.SUM,
cls.MEAN,
cls.SUM_OVER_BATCH_SIZE,
cls.SUM_OVER_NONZERO_WEIGHTS,
cls.SUM_BY_NONZERO_WEIGHTS)
@classmethod
def validate(cls, key):
if key not in cls.all():
raise ValueError("Invalid Reduction Key %s." % key)
def _safe_mean(losses, num_present):
"""Computes a safe mean of the losses.
Args:
losses: `Tensor` whose elements contain individual loss measurements.
num_present: The number of measurable elements in `losses`.
Returns:
A scalar representing the mean of `losses`. If `num_present` is zero,
then zero is returned.
"""
total_loss = math_ops.reduce_sum(losses)
return math_ops.div_no_nan(total_loss, num_present, name="value")
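# Editor's sketch of the intended behaviour (illustrative values): with
# losses = [1.0, 3.0] and num_present = 2.0 the result evaluates to 2.0; with
# num_present = 0.0, div_no_nan returns 0.0 instead of NaN/Inf.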
def _num_present(losses, weights, per_batch=False):
"""Computes the number of elements in the loss function induced by `weights`.
A given weights tensor induces different numbers of usable elements in the
`losses` tensor. The `weights` tensor is broadcast across `losses` for all
possible dimensions. For example, if `losses` is a tensor of dimension
`[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,
in effect, tiled to match the shape of `losses`. Following this effective
tile, the total number of present elements is the number of non-zero weights.
Args:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
weights: `Tensor` of shape `[]`, `[batch_size]` or
`[batch_size, d1, ... dK]`, where K < N.
per_batch: Whether to return the number of elements per batch or as a sum
total.
Returns:
The number of present (non-zero) elements in the losses tensor. If
`per_batch` is `True`, the value is returned as a tensor of size
`[batch_size]`. Otherwise, a single scalar tensor is returned.
"""
if ((isinstance(weights, float) and weights != 0.0) or
(context.executing_eagerly() and weights._rank() == 0 # pylint: disable=protected-access
and not math_ops.equal(weights, 0.0))):
return _num_elements(losses)
with ops.name_scope(None, "num_present", (losses, weights)) as scope:
weights = math_ops.cast(weights, dtype=dtypes.float32)
present = array_ops.where(
math_ops.equal(weights, 0.0),
array_ops.zeros_like(weights),
array_ops.ones_like(weights))
present = weights_broadcast_ops.broadcast_weights(present, losses)
if per_batch:
return math_ops.reduce_sum(
present,
axis=math_ops.range(1, array_ops.rank(present)),
keepdims=True,
name=scope)
return math_ops.reduce_sum(present, name=scope)
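# Editor's worked example (illustrative): with losses of shape [2, 3] and
# weights = [[1.0], [0.0]], the weights broadcast across the last dimension,
# so _num_present(losses, weights) evaluates to 3.0, and with per_batch=True
# it evaluates to [[3.0], [0.0]] (dimensions kept).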
def _num_elements(losses):
"""Computes the number of elements in `losses` tensor."""
with ops.name_scope(None, "num_elements", values=[losses]) as scope:
return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype)
@tf_export(v1=["losses.compute_weighted_loss"])
def compute_weighted_loss(
losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Computes the weighted loss.
Args:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`losses`, and must be broadcastable to `losses` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: the scope for the operations performed in computing the loss.
loss_collection: the loss will be added to these collections.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
`NONE`, this has the same shape as `losses`; otherwise, it is scalar.
Raises:
ValueError: If `weights` is `None` or the shape is not compatible with
`losses`, or if the number of dimensions (rank) of either `losses` or
`weights` is missing.
Note:
When calculating the gradient of a weighted loss contributions from
both `losses` and `weights` are considered. If your `weights` depend
on some model parameters but you do not want this to affect the loss
gradient, you need to apply `tf.stop_gradient` to `weights` before
passing them to `compute_weighted_loss`.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
Reduction.validate(reduction)
with ops.name_scope(scope, "weighted_loss", (losses, weights)):
# Save the `reduction` argument for loss normalization when distributing
# to multiple replicas. Used only for estimator + v1 optimizer flow.
ops.get_default_graph()._last_loss_reduction = reduction # pylint: disable=protected-access
with ops.control_dependencies((
weights_broadcast_ops.assert_broadcastable(weights, losses),)):
losses = ops.convert_to_tensor(losses)
input_dtype = losses.dtype
losses = math_ops.cast(losses, dtype=dtypes.float32)
weights = math_ops.cast(weights, dtype=dtypes.float32)
weighted_losses = math_ops.multiply(losses, weights)
if reduction == Reduction.NONE:
loss = weighted_losses
else:
loss = math_ops.reduce_sum(weighted_losses)
if reduction == Reduction.MEAN:
loss = _safe_mean(
loss, math_ops.reduce_sum(array_ops.ones_like(losses) * weights))
elif (reduction == Reduction.SUM_BY_NONZERO_WEIGHTS or
reduction == Reduction.SUM_OVER_NONZERO_WEIGHTS):
loss = _safe_mean(loss, _num_present(losses, weights))
elif reduction == Reduction.SUM_OVER_BATCH_SIZE:
loss = _safe_mean(loss, _num_elements(losses))
# Convert the result back to the input type.
loss = math_ops.cast(loss, input_dtype)
util.add_loss(loss, loss_collection)
return loss
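# Editor's usage sketch (TF1-style API, illustrative values), assuming the
# public tf.compat.v1.losses namespace re-exports this function:
#
#   losses = [1.0, 2.0, 3.0, 4.0]
#   weights = [1.0, 1.0, 0.0, 0.0]
#   tf.compat.v1.losses.compute_weighted_loss(losses, weights)
#   # default SUM_BY_NONZERO_WEIGHTS: (1 + 2) / 2 = 1.5
#   tf.compat.v1.losses.compute_weighted_loss(
#       losses, weights, reduction=Reduction.SUM_OVER_BATCH_SIZE)
#   # (1 + 2 + 0 + 0) / 4 = 0.75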
@tf_export(v1=["losses.absolute_difference"])
def absolute_difference(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds an Absolute Difference loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a `Tensor` of
shape `[batch_size]`, then the total loss for each sample of the batch is
rescaled by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of
`labels` or if the shape of `weights` is invalid or if `labels`
or `predictions` is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if labels is None:
raise ValueError("labels must not be None.")
if predictions is None:
raise ValueError("predictions must not be None.")
with ops.name_scope(scope, "absolute_difference",
(predictions, labels, weights)) as scope:
predictions = math_ops.cast(predictions, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = math_ops.abs(math_ops.subtract(predictions, labels))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.cosine_distance"])
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def cosine_distance(
labels, predictions, axis=None, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS,
dim=None):
"""Adds a cosine-distance loss to the training procedure.
Note that the function assumes that `predictions` and `labels` are already
unit-normalized.
Args:
labels: `Tensor` whose shape matches 'predictions'
predictions: An arbitrary matrix.
axis: The dimension along which the cosine distance is computed.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: Type of reduction to apply to loss.
dim: The old (deprecated) name for `axis`.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If `predictions` shape doesn't match `labels` shape, or
`axis`, `labels`, `predictions` or `weights` is `None`.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
axis = deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
raise ValueError("You must specify 'axis'.")
if labels is None:
raise ValueError("labels must not be None.")
if predictions is None:
raise ValueError("predictions must not be None.")
with ops.name_scope(scope, "cosine_distance_loss",
(predictions, labels, weights)) as scope:
predictions = math_ops.cast(predictions, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keepdims=True)
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.hinge_loss"])
def hinge_loss(labels, logits, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a hinge loss to the training procedure.
Args:
labels: The ground truth output tensor. Its shape should match the shape of
logits. The values of the tensor are expected to be 0.0 or 1.0. Internally
the {0,1} labels are converted to {-1,1} when calculating the hinge loss.
logits: The logits, a float tensor. Note that logits are assumed to be
unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive
(resp. negative) binary prediction.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shapes of `logits` and `labels` don't match or
if `labels` or `logits` is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if labels is None:
raise ValueError("labels must not be None.")
if logits is None:
raise ValueError("logits must not be None.")
with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
logits = math_ops.cast(logits, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
all_ones = array_ops.ones_like(labels)
labels = math_ops.subtract(2 * labels, all_ones)
losses = nn_ops.relu(
math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
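# Editor's worked example for the conversion above (illustrative values):
# labels {0, 1} become {-1, +1} and each element is max(0, 1 - label * logit),
# so labels = [0., 1.] with logits = [0.5, 0.3] give losses
# [max(0, 1 + 0.5), max(0, 1 - 0.3)] = [1.5, 0.7] before reduction.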
@tf_export(v1=["losses.huber_loss"])
def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a [Huber Loss](https://en.wikipedia.org/wiki/Huber_loss) term to the training procedure.
For each value x in `error=labels-predictions`, the following is calculated:
```
0.5 * x^2 if |x| <= d
0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
delta: `float`, the point where the huber loss function changes from a
quadratic to linear.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid. Also if `labels` or
`predictions` is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if labels is None:
raise ValueError("labels must not be None.")
if predictions is None:
raise ValueError("predictions must not be None.")
with ops.name_scope(scope, "huber_loss",
(predictions, labels, weights)) as scope:
predictions = math_ops.cast(predictions, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
error = math_ops.subtract(predictions, labels)
abs_error = math_ops.abs(error)
quadratic = math_ops.minimum(abs_error, delta)
# The following expression is the same in value as
# tf.maximum(abs_error - delta, 0), but importantly the gradient for the
# expression when abs_error == delta is 0 (for tf.maximum it would be 1).
# This is necessary to avoid doubling the gradient, since there is already a
# nonzero contribution to the gradient from the quadratic term.
linear = math_ops.subtract(abs_error, quadratic)
losses = math_ops.add(
math_ops.multiply(
ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
math_ops.multiply(quadratic, quadratic)),
math_ops.multiply(delta, linear))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
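# Editor's worked example for the piecewise definition above: with delta = 1.0,
# an error of 0.5 falls on the quadratic branch, 0.5 * 0.5^2 = 0.125, while an
# error of 3.0 falls on the linear branch, 0.5 * 1.0^2 + 1.0 * (3.0 - 1.0) = 2.5.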
@tf_export(v1=["losses.log_loss"])
def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a Log Loss term to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
epsilon: A small increment to add to avoid taking a log of zero.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid. Also if `labels` or `predictions`
is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if labels is None:
raise ValueError("labels must not be None.")
if predictions is None:
raise ValueError("predictions must not be None.")
with ops.name_scope(scope, "log_loss",
(predictions, labels, weights)) as scope:
predictions = math_ops.cast(predictions, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = -math_ops.multiply(
labels,
math_ops.log(predictions + epsilon)) - math_ops.multiply(
(1 - labels), math_ops.log(1 - predictions + epsilon))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
# TODO(b/37208492): Add reduction arg.
@tf_export(v1=["losses.mean_pairwise_squared_error"])
def mean_pairwise_squared_error(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES):
"""Adds a pairwise-errors-squared loss to the training procedure.
Unlike `mean_squared_error`, which is a measure of the differences between
corresponding elements of `predictions` and `labels`,
`mean_pairwise_squared_error` is a measure of the differences between pairs of
corresponding elements of `predictions` and `labels`.
  For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are
  three pairs of differences, which are summed to compute the loss:
loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3
Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the
corresponding pairs are computed within each batch sample but not across
samples within a batch. For example, if `predictions` represents a batch of
16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs
is drawn from each image, but not across images.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector.
Args:
labels: The ground truth output tensor, whose shape must match the shape of
`predictions`.
predictions: The predicted outputs, a tensor of size
`[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in
`predictions`.
weights: Coefficients for the loss a scalar, a tensor of shape
`[batch_size]` or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
Returns:
A scalar `Tensor` that returns the weighted loss.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid. Also if `labels` or `predictions`
is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if labels is None:
raise ValueError("labels must not be None.")
if predictions is None:
raise ValueError("predictions must not be None.")
with ops.name_scope(scope, "mean_pairwise_squared_error",
(predictions, labels, weights)) as scope:
weights = math_ops.cast(weights, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
with ops.control_dependencies((
weights_broadcast_ops.assert_broadcastable(weights, labels),)):
predictions = math_ops.cast(predictions, dtype=dtypes.float32)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
diffs = math_ops.subtract(predictions, labels)
axis = math_ops.range(1, array_ops.rank(diffs))
sum_squares_diff_per_batch = math_ops.reduce_sum(
math_ops.square(diffs), axis=axis, keepdims=True)
num_present_per_batch = _num_present(diffs, weights, per_batch=True)
term1 = 2.0 * math_ops.div_no_nan(
sum_squares_diff_per_batch,
math_ops.maximum(num_present_per_batch - 1, 0),
name="value")
sum_diff = math_ops.reduce_sum(diffs, axis=axis, keepdims=True)
term2 = 2.0 * math_ops.div_no_nan(
math_ops.square(sum_diff),
math_ops.maximum(
math_ops.multiply(num_present_per_batch,
num_present_per_batch - 1), 0),
name="value")
weighted_losses = math_ops.multiply(term1 - term2, weights)
loss = math_ops.reduce_sum(weighted_losses)
mean_loss = array_ops.where(
math_ops.reduce_sum(num_present_per_batch) > 0,
loss,
array_ops.zeros_like(loss),
name="value")
util.add_loss(mean_loss, loss_collection)
return mean_loss
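# Editor's worked example for the docstring formula above (single sample,
# unit weights): labels = [1, 2, 4], predictions = [1, 3, 6] give the pairwise
# terms ((1-2)-(1-3))^2 = 1, ((1-4)-(1-6))^2 = 4 and ((2-4)-(3-6))^2 = 1, so
# the loss is (1 + 4 + 1) / 3 = 2.0; the term1/term2 computation above yields
# the same value (term1 = 5.0, term2 = 3.0).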
@tf_export(v1=["losses.mean_squared_error"])
def mean_squared_error(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a Sum-of-Squares loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid. Also if `labels` or `predictions`
is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if labels is None:
raise ValueError("labels must not be None.")
if predictions is None:
raise ValueError("predictions must not be None.")
with ops.name_scope(scope, "mean_squared_error",
(predictions, labels, weights)) as scope:
predictions = math_ops.cast(predictions, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = math_ops.squared_difference(predictions, labels)
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.sigmoid_cross_entropy"])
def sigmoid_cross_entropy(
multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/2:
new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
+ 0.5 * label_smoothing
Args:
multi_class_labels: `[batch_size, num_classes]` target integer labels in
`{0, 1}`.
logits: Float `[batch_size, num_classes]` logits outputs of the network.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
label_smoothing: If greater than `0` then smooth the labels.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
`NONE`, this has the same shape as `logits`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `logits` doesn't match that of
`multi_class_labels` or if the shape of `weights` is invalid, or if
`weights` is None. Also if `multi_class_labels` or `logits` is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if multi_class_labels is None:
raise ValueError("multi_class_labels must not be None.")
if logits is None:
raise ValueError("logits must not be None.")
with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
(logits, multi_class_labels, weights)) as scope:
logits = ops.convert_to_tensor(logits)
multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
if label_smoothing > 0:
multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
0.5 * label_smoothing)
losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.softmax_cross_entropy"])
def softmax_cross_entropy(
onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits_v2.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
new_onehot_labels = onehot_labels * (1 - label_smoothing)
+ label_smoothing / num_classes
Note that `onehot_labels` and `logits` must have the same shape,
e.g. `[batch_size, num_classes]`. The shape of `weights` must be
broadcastable to loss, whose shape is decided by the shape of `logits`.
In case the shape of `logits` is `[batch_size, num_classes]`, loss is
a `Tensor` of shape `[batch_size]`.
Args:
onehot_labels: One-hot-encoded labels.
logits: Logits outputs of the network.
weights: Optional `Tensor` that is broadcastable to loss.
label_smoothing: If greater than 0 then smooth the labels.
scope: the scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
`NONE`, this has shape `[batch_size]`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
or if the shape of `weights` is invalid or if `weights` is None. Also if
`onehot_labels` or `logits` is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if onehot_labels is None:
raise ValueError("onehot_labels must not be None.")
if logits is None:
raise ValueError("logits must not be None.")
with ops.name_scope(scope, "softmax_cross_entropy_loss",
(logits, onehot_labels, weights)) as scope:
logits = ops.convert_to_tensor(logits)
onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
if label_smoothing > 0:
num_classes = math_ops.cast(
array_ops.shape(onehot_labels)[-1], logits.dtype)
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
onehot_labels = onehot_labels * smooth_positives + smooth_negatives
onehot_labels = array_ops.stop_gradient(
onehot_labels, name="labels_stop_gradient")
losses = nn.softmax_cross_entropy_with_logits_v2(
labels=onehot_labels, logits=logits, name="xentropy")
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
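# Editor's sketch of the label smoothing above: with 4 classes and
# label_smoothing = 0.1, the one-hot row [0, 1, 0, 0] becomes
# 0.9 * [0, 1, 0, 0] + 0.1 / 4 = [0.025, 0.925, 0.025, 0.025].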
# TODO(ptucker): Merge this with similar method in metrics_impl.
def _remove_squeezable_dimensions(
labels, predictions, weights=None, expected_rank_diff=0):
"""Internal version of _remove_squeezable_dimensions which handles weights.
Squeezes `predictions` and `labels` if their ranks differ from expected by
exactly 1.
  Squeezes `weights` if its rank is 1 more than the new rank of `predictions`.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
labels: Label values, a `Tensor` whose dimensions match `predictions`.
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
weights: Optional weight `Tensor`. It will be squeezed if it's not scalar,
and its rank is 1 more than the new rank of `labels`.
expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
Returns:
Tuple of `predictions`, `labels` and `weights`, possibly with the last
dimension squeezed.
"""
labels, predictions = confusion_matrix.remove_squeezable_dimensions(
labels, predictions, expected_rank_diff=expected_rank_diff)
if weights is not None:
weights = ops.convert_to_tensor(weights)
labels_rank = labels.get_shape().ndims
weights_shape = weights.get_shape()
weights_rank = weights_shape.ndims
if (labels_rank is not None) and (weights_rank is not None):
# Use static rank.
rank_diff = weights_rank - labels_rank
if rank_diff == 1:
weights = array_ops.squeeze(weights, [-1])
return labels, predictions, weights
# Use dynamic rank.
rank_diff = array_ops.rank(weights) - array_ops.rank(labels)
if (weights_rank is None) or (
weights_rank > 0 and weights_shape.dims[-1].is_compatible_with(1)):
weights = control_flow_ops.cond(
math_ops.equal(1, rank_diff),
lambda: array_ops.squeeze(weights, [-1]),
lambda: weights)
return labels, predictions, weights
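# Editor's note on the rank handling above (illustrative shapes): for
# sparse_softmax_cross_entropy below, labels of shape [batch] and logits of
# shape [batch, num_classes] already satisfy expected_rank_diff=1, so nothing
# is squeezed; labels of shape [batch, 1] would have their trailing singleton
# dimension squeezed away, and weights of shape [batch, 1] likewise.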
@tf_export(v1=["losses.sparse_softmax_cross_entropy"])
def sparse_softmax_cross_entropy(
labels, logits, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
Args:
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Unscaled log probabilities of shape
`[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32` or
`float64`.
weights: Coefficients for the loss. This must be scalar or broadcastable to
`labels` (i.e. same rank and each dimension is either 1 or the same).
scope: the scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
`NONE`, this has the same shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shapes of `logits`, `labels`, and `weights` are
incompatible, or if any of them are None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if labels is None:
raise ValueError("labels must not be None.")
if logits is None:
raise ValueError("logits must not be None.")
with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
(logits, labels, weights)) as scope:
# As documented above in Args, labels contain class IDs and logits contains
# 1 probability per class ID, so we expect rank(logits) - rank(labels) == 1;
# therefore, expected_rank_diff=1.
labels, logits, weights = _remove_squeezable_dimensions(
labels, logits, weights, expected_rank_diff=1)
losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
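# Editor's usage sketch (TF1-style API, illustrative values):
#
#   labels = [2, 0]                     # class ids, shape [2]
#   logits = [[0.1, 0.2, 0.7],
#             [0.8, 0.1, 0.1]]          # shape [2, 3]
#   loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits)
#   # a scalar with the default SUM_BY_NONZERO_WEIGHTS reduction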
| 44.255343
| 98
| 0.712325
|
57fe35a82dd0e3fdee0077b8e1d67041f9368543
| 95,002
|
py
|
Python
|
unzipping_simulation/unzipping_simulation.py
|
tobiasjj/unzipping_simulation
|
929cd61ba62698f7d1bc994d72130b4ed4ccbd5a
|
[
"Apache-2.0"
] | null | null | null |
unzipping_simulation/unzipping_simulation.py
|
tobiasjj/unzipping_simulation
|
929cd61ba62698f7d1bc994d72130b4ed4ccbd5a
|
[
"Apache-2.0"
] | null | null | null |
unzipping_simulation/unzipping_simulation.py
|
tobiasjj/unzipping_simulation
|
929cd61ba62698f7d1bc994d72130b4ed4ccbd5a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Unzipping Simulation, simulate the unzipping of DNA double strands
# Copyright 2018-2020 Tobias Jachowski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Distributed calculation over the network, see
# https://eli.thegreenplace.net/2012/01/24/distributed-computing-in-python-with-multiprocessing/
"""
# Structure of a simulation
# The unzipping construct parameters
bases = simulation['settings']['bases']
nbs = simulation['settings']['nbs']
nbp = simulation['settings']['nbp']
nbs_loop = simulation['settings']['nbs_loop']
S = simulation['settings']['S']
L_p_ssDNA = simulation['settings']['L_p_ssDNA']
z = simulation['settings']['z']
pitch = simulation['settings']['pitch']
L_p_dsDNA = simulation['settings']['L_p_dsDNA']
# Other experimental parameters
radius = simulation['settings']['radius']
kappa = simulation['settings']['kappa']
c = simulation['settings']['c']
T = simulation['settings']['T']
# Parameters of the simulation
NNBP = simulation['settings']['NNBP']
x0_min = simulation['settings']['x0_min']
x0_max = simulation['settings']['x0_max']
h0 = simulation['settings']['h0']
y0 = simulation['settings']['y0']
resolution = simulation['settings']['resolution']
boltzmann_factor = simulation['settings']['boltzmann_factor']
# Variables of simulated data
XFE, XFE0 = simulation['XFE'], simulation['XFE0']
# extension, number of unzipped basepairs, force extension of the construct
X = XFE['EX_avg']
# 3D position of the stage
X0 = XFE['X0']
# average number of unzipped basepairs
NUZ0_avg = XFE['NUZ0_avg']
# most probable number of unzipped basepairs
NUZ0_max = XFE['NUZ0_max_W0']
# Average force acting on the construct
F0_avg = XFE['F0_avg']
# Most probable force acting on the construct
F0_max = XFE['F0_max_W0']
The unzipping simulation package was written with several speed improvements
for the calculation. An old version of the simulation was carefully tested
with different settings and algorithms to find the fastest strategy for
getting the simulation done. The results are documented here to give an
insight into the different approaches: parallelization, the boltzmann_factor
cutoff, a binary search for the estimated number of unzipped basepairs,
iteratively reusing the number of unzipped basepairs simulated for one stage
displacement as the estimate for the subsequent displacement, and
precalculation and buffering of the WLC/FJC extension/force/energy models:
# timeit results:
#x0 = 800e-9
#xfe0 = xfe0_nuz(A0, bases=bases, nbs=nbs, nbp=nbp, nbs_loop=nbs_loop,
# radius=radius, kappa=kappa,
# S=S, L_p_ssDNA=L_p_ssDNA, z=z,
# pitch=pitch, L_p_dsDNA=L_p_dsDNA,
# NNBP=NNBP, c=c, T=T,
# boltzmann_factor=1e-5)
# Time needed with current settings:
# RS: 140 s
# R0: 884 s
# RI: 180 s
# RP: 12368 s
# Calculation of E_ext_ssDNA buffer on multiple cores
# 121.9 s
# Calculation times for the force-extension curve with differing boltzmann_factor on multiple CPU cores
# boltzmann_factor
# 1e-2 35 s, STD = 55.043 fN (compared to gold standard), deviation seen of up to ~ 500 fN and partly distorted force/extension curve
# 1e-3 40 s, STD = 4.525 fN (compared to gold standard), deviation seen of up to 30 fN
# 1e-4 45 s, STD = 2.519 fN (compared to gold standard), deviation seen of only up to sub fN
# 1e-5 50 s, STD = 2.513 fN (compared to gold standard)
# 1e-6 54 s, STD = 1.363 fN (compared to gold standard)
# 1e-7 57 s, STD = 64.751 aN (compared to gold standard)
# 1e-8 62 s, STD = 3.281 aN (compared to gold standard)
# 1e-9 64 s, STD = 0.170 aN (compared to gold standard)
# 0e-9 806 s, gold standard
# Calculation times for the force-extension curve with boltzmann_factor = 1e-5 and iterative nuz_est on one CPU
# F_ssDNA buffer calculation would need roughly the same amount of time as the E_ext_ssDNA calculation
# F_ssDNA would only be called once for every call of E_ext_ssDNA -> not reasonable to buffer F_ssDNA
# nuz_est iterative
# E_ext_ssDNA buffered
# ext_dsDNA_wlc buffered
# F_ssDNA buffered
# without_b buffer_calc with_b total_one_run
# +  +  +  -  elapsed time: 138.7 s  -> 34.675 s + 121.9 s -> 156.575 s  371 %  -> only feasible, if calculated ~ 12 x
# +  -  +  -  elapsed time: 168.6 s  -> 42.15 s  100 %  -> only feasible, if 10 s per calculation important
# + + - - elapsed time: 2853.1 s -> 713.275 s + 121.9 s -> 835.175 s 1981 %
# + - - - elapsed time: 2872.2 s -> 718.05 s 1704 %
# -  +  +  -  elapsed time: 173.3 s  -> 43.325 s + 121.9 s -> 165.225 s  392 %  -> only feasible, if calculated ~ 12 x
# -  -  +  -  elapsed time: 215.1 s  -> 53.775 s  128 %  -> most feasible settings
# - + - - elapsed time: 3XXX.X s -> not measured, only estimated
# - - - - elapsed time: 3641.0 s -> 910.25 s 2160 %
"""
import cloudpickle
import hashlib
import math
import mpmath
import numpy as np
import os
import pickle
import time
import warnings
from multiprocessing import Pool
from scipy import constants
from scipy.integrate import quad
from scipy.optimize import fminbound, minimize_scalar
# from scipy.optimize import fmin_l_bfgs_b, minimize, brent
# minimize_scalar alone causes imprecise results
from scipy.optimize._minimize import _minimize_lbfgsb
from matplotlib import pyplot as plt
# import cn_plot_style as cnps
# Boltzmann constant
kB = constants.value('Boltzmann constant')
cal = constants.calorie
Na = constants.Avogadro
kcal = 1e+3*cal
# Set digits of precision
mpmath.mp.dps = 30
_ssDNA_DEFAULT = {
'S': 800e-12,
'L_p': 7.97e-10,
'z': 0.537e-9
}
_dsDNA_DEFAULT = {
'pitch': 0.338e-9,
'L_p': 50e-9
}
_E_pair = {
# Huguet et. al. 2010, table 1, 1M NaCl
# Energies from Huguet 2010 are given for 298 K
# one Purine and one Pyrimidine or
    # two successive Purines/Pyrimidines with same bases
'AA': 1.23*kcal/Na,
'TT': 1.23*kcal/Na, #
'AC': 1.49*kcal/Na,
'TG': 1.49*kcal/Na, #
'AG': 1.36*kcal/Na,
'TC': 1.36*kcal/Na, #
'CA': 1.66*kcal/Na,
'GT': 1.66*kcal/Na, #
'CC': 1.93*kcal/Na,
'GG': 1.93*kcal/Na, #
'GA': 1.47*kcal/Na,
'CT': 1.47*kcal/Na, #
    # two successive Purines/Pyrimidines with different bases
'AT': 1.17*kcal/Na,
'CG': 2.37*kcal/Na,
'GC': 2.36*kcal/Na,
'TA': 0.84*kcal/Na,
# TODO: include proper energy term for the first and last bp
# Ander PhD thesis 2011
# kB*T = 4.1 pN*nm -> T ~ 298 K
'A': 1.2*kB*298,
'T': 1.2*kB*298,
'G': 3.4*kB*298,
'C': 3.4*kB*298
# energies Bockelmann et. al. 1997
# for S=800pN, L_p_ssDNA=0.75nm, z=0.61nm/bp
# 'AT': 1.3*kB*298
# 'GC': 2.9*kB*298
# for S=800pN, L_p_ssDNA=1.35nm, z=0.56nm/bp
# 'AT': 1.6*kB*298
# 'GC': 3.2*kB*298
}
_M_pair = {
# Huguet et. al. 2010, table 1, NaCl concentration correction factor
# Energies from Huguet et. al. 2010 are given for 298 K
'AA': 0.145*kcal/Na,
'TT': 0.145*kcal/Na, #
'AC': 0.099*kcal/Na,
'TG': 0.099*kcal/Na, #
'AG': 0.070*kcal/Na,
'TC': 0.070*kcal/Na, #
'CA': 0.091*kcal/Na,
'GT': 0.091*kcal/Na, #
'CC': 0.063*kcal/Na,
'GG': 0.063*kcal/Na, #
'GA': 0.155*kcal/Na,
'CT': 0.155*kcal/Na, #
'AT': 0.117*kcal/Na,
'CG': 0.132*kcal/Na,
'GC': 0.079*kcal/Na,
'TA': 0.091*kcal/Na,
}
_DH_pair = {
# Huguet et. al. 2010, table 2, enthalpy (kcal/mol)
'AA': 7.28*kcal/Na,
'TT': 7.28*kcal/Na, #
'AC': 5.80*kcal/Na,
'TG': 5.80*kcal/Na, #
'AG': 5.21*kcal/Na,
'TC': 5.21*kcal/Na, #
'CA': 8.96*kcal/Na,
'GT': 8.96*kcal/Na, #
'CC': 8.57*kcal/Na,
'GG': 8.57*kcal/Na, #
'GA': 8.16*kcal/Na,
'CT': 8.16*kcal/Na, #
'AT': 4.63*kcal/Na,
'CG': 9.66*kcal/Na,
'GC': 10.10*kcal/Na,
'TA': 8.31*kcal/Na
}
_DS_pair = {
# Huguet et. al. 2010, table 2, entropy (cal/mol)
'AA': 20.28*cal/Na,
'TT': 20.28*cal/Na, #
'AC': 14.46*cal/Na,
'TG': 14.46*cal/Na, #
'AG': 12.89*cal/Na,
'TC': 12.89*cal/Na, #
'CA': 24.48*cal/Na,
'GT': 24.48*cal/Na, #
'CC': 22.30*cal/Na,
'GG': 22.30*cal/Na, #
'GA': 22.46*cal/Na,
'CT': 22.46*cal/Na, #
'AT': 11.62*cal/Na,
'CG': 24.43*cal/Na,
'GC': 25.96*cal/Na,
'TA': 25.06*cal/Na
}
def get_unzipping_simulation(simulation_settings_file, simulations_dir=None,
simulation_file=None, read=True, save=True,
**kwargs):
"""
    Parameters
    ----------
    simulation_settings_file : str
Filepath to a simulation file with the settings for the simulation.
simulations_dir : str
Directory from where to read and to where to save the simulation.
Defaults to '.'.
simulation_file : str
        Name of the simulation file to load. If no name is supplied, it
        defaults to the hash key of the settings of the
        `simulation_settings_file`, as returned by the function `get_key()`,
        plus the file extension '.p'.
read : bool
Try to read a preexisting simulation_file. If the file does not exist
or `read` is False, do the simulation with the function
`simulate_unzipping()`.
save : bool
Save the simulation result, if the simulation could not be read.
**kwargs
Keyword arguments with settings overwriting the default settings of the
`simulation_settings_file`.
"""
# Get simulation settings and settings encoded as hash key
simulation = simulation_settings(simulation_settings_file, **kwargs)
hash_key = get_key(**simulation['settings'])
# Get path of simulation
simulations_dir = '.' if simulations_dir is None else simulations_dir
simulation_file = simulation_file or ''.join((hash_key, '.p'))
simulation_file = os.path.join(simulations_dir, simulation_file)
# Load or do the simulation
if read and os.path.isfile(simulation_file):
with open(simulation_file, 'rb') as f:
simulation = pickle.load(f)
return simulation
else:
# Do the simulation
simulation = simulate_unzipping(simulation)
# Save the simulation
if save:
directory = os.path.dirname(simulation_file)
os.makedirs(directory, exist_ok=True)
with open(simulation_file, 'wb') as f:
pickle.dump(simulation, f)
return simulation
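# Editor's usage sketch (file and directory names are hypothetical; keyword
# arguments overwrite the settings stored in the settings file):
#
#   simulation = get_unzipping_simulation('unzip_settings.p',
#                                         simulations_dir='simulations',
#                                         T=298.2)
#   XFE = simulation['XFE']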
def simulation_settings(simulation_file, **kwargs):
# Get simulation settings
with open(simulation_file, 'rb') as f:
simulation = pickle.load(f)
simulation['settings'].update(kwargs)
return {'settings': simulation['settings']}
def get_key(x0_min, x0_max, y0, h0, resolution,
bases, nbs, nbp, nbs_loop,
radius, kappa,
S, L_p_ssDNA, z,
pitch, L_p_dsDNA,
NNBP, c, e_loop, T, boltzmann_factor,
**kwargs):
hasher = hashlib.md5()
    # Use a loop variable distinct from the parameter `c` (salt concentration)
    # to avoid shadowing it while it is hashed as part of the settings.
    for value in [x0_min, x0_max, y0, h0, resolution,
                  bases.capitalize(), nbs, nbp, nbs_loop,
                  radius,
                  S, L_p_ssDNA, z,
                  pitch, L_p_dsDNA,
                  NNBP, c, e_loop, T, boltzmann_factor]:
        hasher.update(bytes(str(value), 'ASCII'))
hasher.update(kappa)
key = hasher.hexdigest()
return key
def simulate_unzipping(simulation_settings, processes=8):
simulation = simulation_settings
# Set the unzipping construct parameters
bases = simulation['settings']['bases']
nbs = simulation['settings']['nbs']
nbp = simulation['settings']['nbp']
nbs_loop = simulation['settings']['nbs_loop']
S = simulation['settings']['S']
L_p_ssDNA = simulation['settings']['L_p_ssDNA']
z = simulation['settings']['z']
pitch = simulation['settings']['pitch']
L_p_dsDNA = simulation['settings']['L_p_dsDNA']
# Set other experimental parameters
radius = simulation['settings']['radius']
kappa = simulation['settings']['kappa']
c = simulation['settings']['c']
e_loop = simulation['settings']['e_loop']
T = simulation['settings']['T']
# Set parameters for the simulation
NNBP = simulation['settings']['NNBP']
x0_min = simulation['settings']['x0_min']
x0_max = simulation['settings']['x0_max']
h0 = simulation['settings']['h0']
y0 = simulation['settings']['y0']
resolution = simulation['settings']['resolution']
boltzmann_factor = simulation['settings']['boltzmann_factor']
XFE, XFE0 = unzipping_force_energy(x0_min, x0_max, y0=y0, h0=h0,
resolution=resolution,
processes=processes,
bases=bases, nbs=nbs, nbp=nbp,
nbs_loop=nbs_loop,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T,
boltzmann_factor=boltzmann_factor,
individual_points=True)
simulation['XFE'] = XFE
simulation['XFE0'] = XFE0
return simulation
def unzipping_force_energy(x0_min, x0_max, y0=0.0, h0=0.0, resolution=1e-9,
processes=8,
bases='', nbs=0, nbp=0, nbs_loop=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=0, e_loop=0.0, T=298.2,
spacing=5, min_stepsize=10,
boltzmann_factor=1e-9,
individual_points=False, verbose=False,
F_ssDNA_mod=None, E_ext_ssDNA_mod=None,
ext_dsDNA_wlc_mod=None):
# Assign DNA model functions to the variables of the global (module) scope,
# such that `multiprocessing.Pool` will see these variables.
global F_ssDNA
global E_ext_ssDNA
global ext_dsDNA_wlc
# Set DNA model functions to the unbuffered default functions
F_ssDNA = F_ssDNA_mod or _F_ssDNA
E_ext_ssDNA = E_ext_ssDNA_mod or _E_ext_ssDNA
# Initialize the approximations of the ssDNA/dsDNA model functions with
# fixed model function parameters and substitute the original DNA model
# functions
# F_ssDNA is implicitly buffered with `ext_dsDNA_wlc`.
# Buffered `E_ext_ssDNA` does not speed up calculation.
# E_ext_ssDNA = \
# init_buf_E_ext_ssDNA(read=False, write=False, filename='E_ext_ssDNA',
# processes=processes,
# bases=bases, nbs=nbs, nbs_loop=nbs_loop,
# S=S, L_p=L_p_ssDNA, z=z, T=T)
ext_dsDNA_wlc = ext_dsDNA_wlc_mod or \
init_buf_ext_dsDNA_wlc(nbp=nbp, pitch=pitch, L_p=L_p_dsDNA, T=T)
resolution = int(np.round((x0_max - x0_min) / resolution + 1))
X0 = np.linspace(x0_min, x0_max, resolution)
# Speed up calculation with the multiprocessing package by taking the
# nuz_est from previous calculation for each subsequent calculation
xfe0_nuz = _xfe0_nuz_chained()
# Define a closure to be executed by the pool
def f(x0):
print('\rCalculating equilibrium for stage displacement x0 = {:.3e}'
'...'.format(x0), end='', flush=True)
A0 = attachment_point(x0, y0=y0, h0=h0, radius=radius)
return xfe0_nuz(A0, bases=bases, nbs=nbs, nbp=nbp, nbs_loop=nbs_loop,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T,
spacing=spacing, min_stepsize=min_stepsize,
boltzmann_factor=boltzmann_factor)
f = unboundfunction(f)
# Process function in pool with 8 parallelly executed processes
with Pool(processes=processes) as pool:
start = time.time()
XFE0 = pool.map(f, X0)
stop = time.time()
print('\nDone, elapsed time: {:.1f} s'.format(stop - start))
# combine all individually simulated points into one array
XFE = {
'X0': np.array([xfe0['settings']['A0'][0] for xfe0 in XFE0]),
'EX_avg': np.array([xfe0['EX_avg'] for xfe0 in XFE0]),
'NUZ0_avg': np.array([xfe0['NUZ0_avg'] for xfe0 in XFE0]),
'NUZ0_max_W0': np.array([xfe0['NUZ0_max_W0'] for xfe0 in XFE0]),
'D0_avg': np.array([xfe0['D0_avg'] for xfe0 in XFE0]),
'F0_avg': np.array([xfe0['F0_avg'] for xfe0 in XFE0]),
'F0_max_W0': np.array([xfe0['F0_max_W0'] for xfe0 in XFE0]),
'settings': {
'x0_min': x0_min,
'x0_max': x0_max,
'y0': y0,
'h0': h0,
'resolution': resolution,
'bases': bases,
'nbs': nbs,
'nbp': nbp,
'nbs_loop': nbs_loop,
'radius': radius,
'kappa': kappa,
'S': S,
'L_p_ssDNA': L_p_ssDNA,
'z': z,
'pitch': pitch,
'L_p_dsDNA': L_p_dsDNA,
'NNBP': NNBP,
'c': c,
'e_loop': e_loop,
'T': T,
'spacing': spacing,
'min_stepsize': min_stepsize,
'boltzmann_factor': boltzmann_factor
}
}
if individual_points:
return XFE, XFE0
return XFE
def attachment_point(x0, y0=0.0, h0=0.0, radius=0.0):
"""
x0 : float
Position of the stage x (m) relative to the trap center.
y0 : float
Position of the stage y (m) relative to the trap center.
h0 : float
Distance (m) of the bead surface to the glass surface, if
the bead is in its resting position, i.e. no force in the
vertical direction is applied.
radius : float
Radius of the bead (m).
"""
return np.array([x0, y0, - (h0 + radius)])
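# Worked example (illustrative, the numbers are assumptions): for a stage
# displacement of 500 nm, a resting bead-surface distance of 100 nm and a bead
# radius of 500 nm the attachment point lies 600 nm below the trap center:
# attachment_point(500e-9, y0=0.0, h0=100e-9, radius=500e-9)
# -> array([ 5.0e-07,  0.0e+00, -6.0e-07])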
def xfe0_nuz(A0, bases='', nuz_est=-1, nbs=0, nbp=0, nbs_loop=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=0, e_loop=0.0, T=298.2,
spacing=5, min_stepsize=10,
boltzmann_factor=1e-9, verbose=False):
"""
    Calculate the equilibrium extensions, forces and energies for a given
    attachment point `A0` for the most probable numbers of unzipped basepairs
    and find the number of unzipped bases, at which the unzipping fork will
    most likely fluctuate. Fluctuations of the extensions of the DNA and the
    bead in the trap are ignored.
Speed up calculation, i.e. only select the number of unzipped bases, at
which the unzipping fork will fluctuate. Calculate only around the most
likely nuz and, if no nuz_est is given, perform binary search to find most
likely nuz, first.
Parameters
----------
A0 : np.ndarray of type float
Position (m) of the DNA attachment point on the glass surface relative
to the trap center: [x, y, z].
bases : str
Sequence of sense strand of dsDNA which is (will be) unzipped
nuz_est : int
Estimate number of unzipped basepairs. 0 <= `nuz_est` <= `nuz_max`.
If `nuz_est` < 0, the number is approximated automatically with a
binary search using the function `approx_eq_nuz`.
nbs : int
Number of extra ssDNA bases in the construct
nbp : int
Number of basepairs of dsDNA spacer
nbs_loop : int
Number of extra ssDNA bases in the hairpin
radius : float
Radius of the bead (m).
kappa : float or np.ndarray of type float
Stiffness for x, [x, z], or [x, y, z] of lever (handle) attached to DNA
in N/m.
boltzmann_factor : float
The minimum probability each number of unzipped basepairs (nuz) state
has to have relative to the most probable one to be considered in the
calculation. The smaller the boltzmann_factor, the more exact the
result is. The larger the boltzmann factor is, the faster the
calculation. The default of 1e-9 corresponds to more than 20 kT
difference (mpmath.exp(-20) > 1e-9).
"""
    # Maximum number of unzippable bps
nuz_max = len(bases)
# If hairpin exists, add one possible unzipping event representative for
# opening the hairpin
if nbs_loop > 0:
nuz_max += 1
    # Either select only the numbers of unzipped basepairs with a low energy
    # state, or select all possible numbers of unzipped basepairs.
if boltzmann_factor <= 0:
# All nuz will be calculated, start in the middle
nuz_est = int(round(nuz_max / 2))
elif nuz_est < 0:
# Autodetermine the approximate nuz which will have the lowest energy
# state, i.e. will be in equilibrium between open and closed state.
nuz_est = approx_eq_nuz(A0, bases=bases, nbs=nbs, nbp=nbp,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, T=T,
spacing=spacing, min_stepsize=min_stepsize,
verbose=verbose)
else:
# Set nuz_est to valid value
nuz_est = max(0, nuz_est)
nuz_est = min(nuz_est, nuz_max)
# Find and calculate energy for the most likely nuzes, i.e. nuzes with a
# low energy state. First, define a function to calculate force, extension,
# energy, and weight for a given number of unzipped basepairs and define
# variables to be filled upon calculation.
NUZ0 = []
EX_ss = []
EX_ds = []
D0 = []
F0 = []
E0 = []
W0 = []
def eq_few0(nuz, w0_likely):
ex_ss, ex_ds, d0, f0, e0 = \
equilibrium_xfe0(A0, bases=bases, nuz=nuz, nbs=nbs, nbp=nbp,
nbs_loop=nbs_loop,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T,
verbose=verbose)
NUZ0.append(nuz)
EX_ss.append(ex_ss)
EX_ds.append(ex_ds)
D0.append(d0)
F0.append(f0)
E0.append(e0)
w0 = mpmath.exp(- e0 / (kB*T))
W0.append(w0)
        # Select a new most likely weight, if the weight is larger than all
        # previously calculated weights and if it was not calculated for the
        # maximum nuz. Opening the end loop can cause a large drop of the
        # energy, which would otherwise lead to too many falsely neglected
        # energies during the boltzmann_factor selection.
if w0 > w0_likely and nuz < nuz_max:
w0_likely = w0
stop = False
# Stop if energy difference > - mpmath.log(boltzmann_factor).
if w0_likely != 0 and w0 / w0_likely < boltzmann_factor:
stop = True
return w0_likely, stop
    # Then, calculate the energy for the estimated nuz.
w0_likely, _ = eq_few0(nuz_est, 0)
# Iteratively find and calculate energy for the most likely nuzes:
# Calculate energies of neighbouring nuzes (nuz_left / nuz_right) starting
# from the estimated nuz. The most likely nuz is constantly tracked. The
# calculation is stopped, if either the weight of the nuzes do differ more
# than the weight of the most likely nuz times the boltzmann_factor or if
# there are no more nuzes left to calculate energies from.
nuz_left = nuz_est - 1
nuz_right = nuz_est + 1
stop_left = nuz_left < 0
stop_right = nuz_right > nuz_max
while not (stop_left and stop_right):
if not stop_left:
w0_likely, stop_left = eq_few0(nuz_left, w0_likely)
nuz_left -= 1
# stop, if nuz_left is negative
stop_left = stop_left or nuz_left < 0
if not stop_right:
w0_likely, stop_right = eq_few0(nuz_right, w0_likely)
nuz_right += 1
# stop, if nuz_right is larger than number of unzippable basepairs
stop_right = stop_right or nuz_right > nuz_max
# Finally, select all nuzes that are at least equally likely as the most
# likely nuz times the boltzmann_factor.
W0 = np.array(W0)
idx_vld = W0 / w0_likely >= boltzmann_factor
# Sort nuz and other values in ascending order
NUZ0 = np.array(NUZ0)[idx_vld]
idx_srt = np.argsort(NUZ0)
NUZ0 = NUZ0[idx_srt]
EX_ss = np.array(EX_ss)[idx_vld][idx_srt]
EX_ds = np.array(EX_ds)[idx_vld][idx_srt]
D0 = np.array(D0)[idx_vld][idx_srt]
F0 = np.array(F0)[idx_vld][idx_srt]
E0 = np.array(E0)[idx_vld][idx_srt]
W0 = W0[idx_vld][idx_srt]
# Calculate weighted averages of unzipped basepairs, bead displacements,
# force, and extension of the construct
W0_sum = W0.sum()
P0 = W0 / W0_sum
NUZ0_avg = (NUZ0 * W0).sum() / W0_sum
D0_avg = (D0 * W0[np.newaxis].T).sum(axis=0) / W0_sum
F0_avg = (F0 * W0).sum() / W0_sum
EX_avg = ((EX_ss + EX_ds) * W0).sum() / W0_sum
# Select values of most likely state
idx_max = W0.argmax()
NUZ0_max_W0 = NUZ0[idx_max]
F0_max_W0 = F0[idx_max]
W0_max = W0[idx_max]
r = {
'EX_avg': EX_avg,
'NUZ0': NUZ0,
'NUZ0_avg': NUZ0_avg,
'NUZ0_max_W0': NUZ0_max_W0,
'D0': D0,
'D0_avg': D0_avg,
'F0': F0,
'F0_avg': F0_avg,
'F0_max_W0': F0_max_W0,
'E0': E0,
'W0': W0,
'W0_max': W0_max,
'P0': P0,
'settings': {
'A0': A0,
'bases': bases,
'nbs': nbs,
'nbp': nbp,
'nbs_loop': nbs_loop,
'radius': radius,
'kappa': kappa,
'S': S,
'L_p_ssDNA': L_p_ssDNA,
'z': z,
'pitch': pitch,
'L_p_dsDNA': L_p_dsDNA,
'NNBP': NNBP,
'c': c,
'e_loop': e_loop,
'T': T,
'spacing': spacing,
'min_stepsize': min_stepsize,
'boltzmann_factor': boltzmann_factor
}
}
return r
class _xfe0_nuz_chained(object):
"""Speed up calculation of xfe0_nuz by taking the nuz_est from previous
calculation for next calculation
The object of this class is a drop in replacement for the original
`xfe0_nuz` function, if using the the multiprocessing package.
Each process gets its own copy of the a _xfe0_nuz_chained object, which is
initialized with nuz_est = -1. Upon each call nuz_est is set to the
previous outcome of the calculated NUZ0_avg.
"""
def __init__(self):
self.nuz_est = -1
def __call__(self, A0, bases='', nuz_est=-1, nbs=0, nbp=0, nbs_loop=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=0, e_loop=0.0, T=298.2,
spacing=5, min_stepsize=10,
boltzmann_factor=1e-9, verbose=False):
if nuz_est == -1:
nuz_est = self.nuz_est
r = xfe0_nuz(A0, bases=bases, nuz_est=nuz_est, nbs=nbs, nbp=nbp,
nbs_loop=nbs_loop,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T,
spacing=spacing, min_stepsize=min_stepsize,
boltzmann_factor=boltzmann_factor, verbose=verbose)
self.nuz_est = int(round(r['NUZ0_avg']))
return r
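# Illustrative sketch (assumption, mirroring the use in
# `unzipping_force_energy`): one chained object per worker reuses the NUZ0_avg
# of the previous attachment point as the start estimate for the next one.
# xfe0 = _xfe0_nuz_chained()
# r1 = xfe0(A0_first, bases=bases, nbs=nbs, nbp=nbp, kappa=kappa)
# r2 = xfe0(A0_next, bases=bases, nbs=nbs, nbp=nbp, kappa=kappa)  # starts at r1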
def approx_eq_nuz(A0, bases='', nbs=0, nbp=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=None, T=298.2,
spacing=5, min_stepsize=10, verbose=False):
"""
Find the approximate number of unzipped basepairs the unzipping construct
automatically adjust itself when in equilibrium.
    The search is performed as a binary search, i.e. the number of
    calculations needed to find the number of unzipped basepairs is O(log(n)),
    where n is the number of basepairs in the unzipping segment.
"""
# maximal number of unzipped basepairs
nuz_max = len(bases)
    # verify spacing and set limits for nuz
spacing = min(spacing, nuz_max)
minimum = 0
maximum = nuz_max - spacing
# initialize step size and starting nuz
step = int(round((maximum - minimum) / 2))
nuz = int(round((maximum - minimum) / 2))
def unzip_for_eq(nuz=0):
"""
Calculate the gradient of the energy.
        Return True, if the unzipping construct has to be further unzipped to
        reach equilibrium. Return False, if the unzipping construct has to be
        further annealed to reach equilibrium. Ignore the opening of the
        end loop (nbs_loop=0, e_loop=0.0) for finding the minimum of the total
        energy, to avoid falsely high numbers of unzipped basepairs due to the
        energy jump upon opening of the end loop.
"""
nuzl = nuz
nuzr = nuz + spacing
_, _, _, f0l, e0l = \
equilibrium_xfe0(A0, bases=bases, nuz=nuzl, nbs=nbs, nbp=nbp,
nbs_loop=0, radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=0.0, T=T,
verbose=verbose)
_, _, _, f0r, e0r = \
equilibrium_xfe0(A0, bases=bases, nuz=nuzr, nbs=nbs, nbp=nbp,
nbs_loop=0, radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=0.0, T=T,
verbose=verbose)
return e0l > e0r
# Search for the approximate number of unzipped basepairs, to be in
# equilibrium
i = 0
while step > min_stepsize:
i += 1
if unzip_for_eq(nuz=nuz):
if verbose:
print('nuz + step -> new: {} + {}'.format(nuz, step),
end=' -> ')
nuz += step
else:
if verbose:
print('nuz - step -> new: {} - {}'.format(nuz, step),
end=' -> ')
nuz -= step
if verbose:
print(nuz)
if nuz < minimum or nuz > maximum:
# unzipping construct has to be either fully closed or fully opened
# to be in equilibrium -> stop the loop and return either 0 or
# nuz_max
step = 0
nuz = max(0, nuz)
nuz = min(nuz, nuz_max)
# half the stepsize
step = int(round(step / 2))
if verbose:
print('Number of iterations to find approximation of eq nuz: {}'
''.format(i))
return nuz
def equilibrium_xfe0(A0, bases='', nuz=0, nbs=0, nbp=0, nbs_loop=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=None, e_loop=0.0, T=298.2,
verbose=False):
"""
    Calculate the equilibrium extension, force, and energy for a given
    attachment point `A0` and a fixed set of the following parameters.
Parameters
----------
A0 : np.ndarray of type float
Position (m) of the DNA attachment point on the glass surface relative
to the trap center: [x, y, z].
nuz : int
Number of unzipped basepairs
nbs : int
Number of extra ssDNA bases in the construct
nbp : int
Number of basepairs of dsDNA spacer
nbs_loop : int
Number of extra ssDNA bases in the hairpin
radius : float
Radius of the bead (m).
kappa : np.ndarray of type float
Stiffness for [x, y, z] of lever (handle) attached to DNA in N/m.
"""
# One unzipped basepair leads to 2 free ssDNA bases
nbs = 2*nuz + nbs
# If unzipping fork has reached the last basepair and the end loop of the
# unzipping construct should be unzipped, elongate the ssDNA by nbs_loop
# bases
if nbs_loop > 0 and nuz >= len(bases) + 1:
nbs += nbs_loop
# Calculate most probable force for
# number of unzipped bases nbs and
# number of basepairs nbp and
# stage displacement x0 = A0[0]
f0, d0 = F_0(A0, nbs=nbs, S=S, L_p_ssDNA=L_p_ssDNA, z=z, T=T,
nbp=nbp, pitch=pitch, L_p_dsDNA=L_p_dsDNA,
radius=radius, kappa=kappa,
verbose=verbose)
# Calculate most probable extension for most probable force for
# both of the two ssDNA strands and
# one dsDNA strand for
# number of unzipped base pairs nuz
ex_ss = ext_ssDNA(f0, nbs=nbs, S=S, L_p=L_p_ssDNA, z=z, T=T)
ex_ds = ext_dsDNA_wlc(f0, nbp=nbp, pitch=pitch, L_p=L_p_dsDNA, T=T)
e0 = E_tot(bases=bases, nuz=nuz, nbs=nbs, ex_ss=ex_ss,
nbp=nbp, ex_ds=ex_ds,
displacement=d0, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T, verbose=verbose)
if verbose:
template = "nuz: {:03d}, f0: {:.3e}, e0: {:.3e}"
print(template.format(nuz, f0, e0))
return ex_ss, ex_ds, d0, f0, e0
def F_0(A0, nbs=0, S=None, L_p_ssDNA=None, z=None, T=298.2,
nbp=0, pitch=None, L_p_dsDNA=None,
f_min=0e-12, f_max=200e-12, xtol=1e-18,
radius=0.0, kappa=None,
verbose=False):
"""
    Find the most probable force for a given attachment point `A0`, a number
    of ssDNA bases `nbs`, and a number of basepairs `nbp` of dsDNA spacers.
Extended 3D version of the equation (10) in Bockelmann 1998.
Parameters
----------
A0 : np.ndarray of type float
Position (m) of the DNA attachment point on the glass surface relative
to the trap center: [x, y, z].
nbs : int
Number of bases of ssDNA strand
nbp : int
Number of basepairs of dsDNA spacers
radius : float
Radius of the bead (m).
kappa : float or np.ndarray of type float
Stiffness for x, [x, z], or [x, y, z] of lever (handle) attached to DNA
in N/m.
"""
# Find the equilibrium force f at given nuz and A0
def f_lev_cost(f, return_d=False):
ex_ss = ext_ssDNA(f, nbs=nbs, S=S, L_p=L_p_ssDNA, z=z, T=T)
ex_ds = ext_dsDNA_wlc(f, nbp=nbp, pitch=pitch, L_p=L_p_dsDNA, T=T)
if A0[1] == 0.0 and len(kappa) == 2:
# Simulate in 2D only (3x as fast as 3D)
x0 = A0[0]
z0 = - A0[2] - radius
f_construct, d = F_construct_2D(x0, z0=z0,
ex_ss=ex_ss, ex_ds=ex_ds,
radius=radius, kappa=kappa)
else:
# Simulate in 3D
f_construct, d = F_construct_3D(A0, ex_ss=ex_ss, ex_ds=ex_ds, f_dna=f,
radius=radius, kappa=kappa,
verbose=verbose)
if return_d:
return d
return (f - f_construct)**2
# Find the force, which will result in input extension
f0 = fminbound(f_lev_cost,
f_min,
f_max,
xtol=xtol)
# calculate bead displacements in the trap for found force
d = f_lev_cost(f0, return_d=True)
return f0, d
def ext_ssDNA(F, nbs=0, S=None, L_p=None, z=None, T=298.2, avoid_neg_ext=True):
"""
Freely jointed chain (FJC) model, relating the total polymer length
ext_ssDNA to an applied force F.
Bockelmann, U., Essevaz-Roulet, B., Heslot, F. (1998). "DNA strand
separation studied by single molecule force measurements". Physical Review
E, 58(2), 2386-94.
Steven B. Smith, Yujia Cui, Carlos Bustamante (1996). "Overstretching
B-DNA: The Elastic Response of Individual Double-Stranded and
    Single-Stranded DNA Molecules". Science, 271, 795-799
Contour length of ssDNA: L_0 = nbs*z
Kuhn length b (in FJC b = 2 * persistence length), in paper: b = 1.5 nm
Parameters
----------
nbs : int
Number of bases of ssDNA
F : float
Force in N
S : float
        Stretch modulus in N. Defaults to 800e-12 N.
L_p : float
Persistence length in m. Defaults to 0.797e-9 m.
z : float
Length of a single base in m. Defaults to 0.537e-9 m.
T : float
Temperature in K.
avoid_neg_ext : bool
Avoid negative extension due to rounding errors close to zero.
Returns
-------
float
Extension in m
"""
S = S or _ssDNA_DEFAULT['S']
L_p = L_p or _ssDNA_DEFAULT['L_p']
z = z or _ssDNA_DEFAULT['z']
if F == 0:
return 0
if nbs <= 0:
return 0
sign = 1
if F < 0:
F = -F
sign = -1
# Prevent float overflow in downstream calculation leading to
# too high x value. The exact value seems to depend on the system.
# On a laptop with Intel(R) Core(TM) i7-7500U CPU @ 2.70GHz a
# value of F < 1.28323360182078e-26 was sufficient. However, on
    # another Intel system it was not. Therefore, to be on the safe
    # side, choose F < 1e-17 N as the break criterion
# (i.e. 10 aN), which is more than sufficiently small to not
# affect a precise determination of x and still should work on
# most systems.
# Alternatively, one could use mpmath.coth, but would have to live
# with a 10 fold increase in execution time.
if F < 1e-17:
return 0
b = 2*L_p
# modified FJC model incorporating Kuhn segments that can stretch
# entropic (coth term) and stretching (F/S term) contribution
x = nbs*z * (coth(F*b / (kB*T)) - kB*T / (F*b)) * (1 + F/S)
if avoid_neg_ext:
x = max(x, 0)
return sign * x
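# Illustrative sketch (parameter values are assumptions): FJC extension of a
# 20-base ssDNA stretch held at 10 pN, using the module defaults for S, L_p
# and z. The result is the most probable ssDNA extension in metres.
def _example_ext_ssDNA():
    return ext_ssDNA(10e-12, nbs=20)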
def coth(x):
"""
Cotangens hyperbolicus
"""
typ = type(x)
if typ is mpmath.mpf or typ is mpmath.mpc:
return mpmath.coth(x)
else:
# return np.cosh(x) / np.sinh(x) # can cause overflows
return (1 + np.exp(-2*x)) / (1 - np.exp(-2*x))
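# Note on the rewritten expression above (added for clarity): for large
# positive x, exp(-2*x) tends to 0 and the quotient tends to 1 without ever
# evaluating cosh(x) or sinh(x), which would overflow in double precision for
# x larger than roughly 710; e.g. coth(1000.0) returns 1.0 instead of
# triggering an overflow warning.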
def _ext_dsDNA_wlc(F, nbp=0, pitch=None, L_p=None, T=298.2, x_min=0e-12,
xtol=1e-15):
    # See also the default value in the function `F_dsDNA_wlc`
pitch = pitch or _dsDNA_DEFAULT['pitch']
if F == 0:
return 0
if nbp <= 0:
return 0
sign = 1
if F < 0:
F = -F
sign = -1
# WLC only valid in the interval x = (-L_0, L_0)
# - 1e-25 # corresponds to force of inf # >> 2e+9 N
x_max = nbp * pitch
# Numerical invert ext_dsDNA function
def ext_dsDNA_cost(x):
return (F_dsDNA_wlc(x, nbp=nbp, pitch=pitch, L_p=L_p, T=T) - F)**2
    # Find the extension, which will result in the input force
    # To speed up the minimization, one could first find an imprecise
    # answer with a quick algorithm and then make it precise
# x_fit = minimize_scalar(ext_dsDNA_cost,
# bounds=(x_min, x_max),
# options={'xtol': xtol}).x
# x_min = x_fit - 1e-10
# x_max = x_fit + 1e-10
x_fit = fminbound(ext_dsDNA_cost,
x_min,
x_max,
xtol=xtol)
return sign * x_fit
def _F_ssDNA(x, nbs=0, S=None, L_p=None, z=None, T=298.2, f_min=None,
f_max=None, xtol=None, avoid_neg_F=True):
"""
Freely jointed chain (FJC) model, relating the applied force F to a
total polymer length ext_ssDNA.
    Contour length of ssDNA: L_0 = nbs*z
Kuhn length b (in FJC b = 2 * persistence length), in paper: b=1.5e-9
Parameters
----------
nbs : int
Number of bases of ssDNA
x : float
Extension in m
S : float
Stretch modulus in N
L_p : float
Persistence length in m
z : float
Length of a single base in m
T : float
Temperature in K
"""
f_min = f_min or 0e-12
f_max = f_max or 200e-12
xtol = xtol or 1e-18
if x == 0:
return 0
if nbs <= 0:
return 0
sign = 1
if x < 0:
x = -x
sign = -1
# Numerical invert ext_ssDNA function
def f_ssDNA_cost(f):
return (ext_ssDNA(f, nbs=nbs, S=S, L_p=L_p, z=z, T=T,
avoid_neg_ext=False) - x)**2
# Find the force, which will result in input extension
    # To speed up the minimization, first find an imprecise
    # answer with a quick algorithm and then make it precise
# f_fit = minimize(f_ssDNA_cost,
# x0=12e-9,
# bounds=((f_min, f_max), ),
# tol=xtol).x
# f_fit = brent(f_ssDNA_cost,
# brack=(f_min, f_max),
# tol=xtol)
f_fit = minimize_scalar(f_ssDNA_cost,
bounds=(f_min, f_max),
options={'xtol': xtol}).x
f_min = f_fit - 1e-10
f_max = f_fit + 1e-10
f_fit = fminbound(f_ssDNA_cost,
f_min,
f_max,
xtol=xtol)
if avoid_neg_F:
f_fit = max(f_fit, 0)
return sign * f_fit
def F_dsDNA_wlc(x, nbp=0, pitch=None, L_p=None, T=298.2):
"""
A worm-like chain model.
Parameters
----------
x : float
Extension (m)
pitch : float
Contour length (m). Also denoted as 'L_0'.
L_p : float
Persistence length (m)
T : float
Temperature (K)
Returns
-------
1D numpy.ndarray of type float
Force (N).
"""
pitch = pitch or _dsDNA_DEFAULT['pitch']
L_p = L_p or _dsDNA_DEFAULT['L_p']
if x == 0:
return 0
if nbp <= 0:
return 0
sign = 1
if x < 0:
x = -x
sign = -1
# Contour length
L_0 = nbp*pitch
if x >= L_0:
return float('inf') * sign
# Marko, J.F.; Eric D. Siggia. "Stretching DNA". Macromolecules. 1995. 28:
# 8759–8770. doi:10.1021/ma00130a008
# F = kB * T / L_p * (1 / (4 * (1 - x / L_0)**2) - 1/4 + x / L_0)
# Relative extension
x = x / L_0
# Petrosyan, R. "Improved approximations for some polymer extension
# models". Rehol Acta. 2016. doi:10.1007/s00397-016-0977-9
F = kB * T / L_p * (1 / (4 * (1 - x)**2) - 1/4 + x - 0.8 * x**2.15)
return F * sign
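# Worked example (illustrative): at half the contour length, x/L_0 = 0.5, the
# Marko-Siggia part of the interpolation evaluates to
#   kB*T/L_p * (1/(4*(1 - 0.5)**2) - 1/4 + 0.5) = 1.25 * kB*T/L_p
# before the Petrosyan correction -0.8*0.5**2.15 is applied, i.e. a force on
# the order of 0.1 pN for an assumed L_p of 50 nm at room temperature.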
def F_construct_2D(x0, z0=0.0, ex_ss=0.0, ex_ds=0.0, radius=0.0, kappa=0.0,
xtol=1e-18):
"""
Parameters
----------
x0 : float
Total displacement (m)
z0 : float
Distance of the bead surface to the glass surface, if
the bead is in its resting position, i.e. no force in
the vertical direction is applied (m).
ex_ss : float
Extension of ssDNA (m)
ex_ds : float
Extension of dsDNA (m)
radius : float
Radius of the bead/handle (m)
kappa : float or np.ndarray of type float
Spring constant (N/m). If `kappa` is of type float, only one axis
(i.e. X or Y) is considered. If `kappa` is of type np.ndarray, the
first number is X (or Y) axis and the second number is Z.
"""
# Pythagoras:
# a is horizontal distance of attachment point to the center of the bead
# b is vertical distance of the surface to the center of the bead
# c is extension of the construct (ex_ss + ex_ds) plus the bead radius (r)
# dx is the horizontal displacement of the bead (x or y)
# dz is the vertical displacement of the bead (z)
r = radius
z0_r = z0 + r
# a = x0 - dx
# b = z0_r - dz
c = ex_ss + ex_ds + r
# a**2 + b**2 = c**2
# ->
# (x0 - dx)**2 + (z0_r - dz)**2 = c**2
# dz = z0_r - math.sqrt(c**2 - (x0 - dx)**2)
# dx = x0 - math.sqrt(c**2 - (z0_r - dz)**2)
# construct is longer than possible stretching with dx/dz >= 0.
# -> bead will equilibrate in the middle of the trap with zero force
if c**2 >= x0**2 + (z0_r)**2:
fxz = 0.0
dxz = np.array([0.0, 0.0])
return fxz, dxz
# If z0 is 0 or the stiffness of z is 0 bead will always
# touch the surface and dx only depends on x0, ex_ss, ex_ds, and r.
if z0 == 0 or isinstance(kappa, float):
if not isinstance(kappa, float):
kappa = kappa[0]
dx = x0 - math.sqrt(c**2 - r**2)
dz = z0
dxz = np.array([dx, dz])
# force that need to be acting on the construct to
# result in a corresponding horizontal force (in x/y)
fx = dx * kappa # * (x0 - dx) / c
cos_alpha = (x0 - dx) / c
fxz = fx / cos_alpha
return fxz, dxz
# displacement dz dependent upon dx
def _dz(dx):
# print('z0 {:.1e}, c {:.1e}, x0 {:.1e}, dx {:.1e}'
# ''.format(z0, c, x0, dx))
return z0_r - math.sqrt(c**2 - (x0 - dx)**2)
# displacement dx dependent upon dz
def _dx(dz):
# x0 8.0e-07, c 1.3e-07, z0 2.0e-07, r 0.0e+00, dz 0.0e+00
# print('x0 {:.1e}, c {:.1e}, z0 {:.1e}, r {:.1e}, dz {:.1e}'
# ''.format(x0, c, z0, r, dz))
return x0 - math.sqrt(c**2 - (z0_r - dz)**2)
# difference of the ratio of the force in x/z to the ratio of a/b
# the construct with the handle equilibrates where diff == 0
def diff_tan_fxz_ab(dx):
a = x0 - dx
b = z0_r - _dz(dx)
fx = dx * kappa[0]
fz = _dz(dx) * kappa[1]
diff = b/a - fz/fx
# diff = math.sqrt(c**2 - (x0 - dx)**2)
# / (x0 - dx)
# - (_dz(dx) * kappa[1])
# / (dx * kappa[0])
return diff**2
# if construct is shorter than z0_r, dz has to be at least the difference
dz_min = max(0, z0_r - c)
# dz can be at max as large as z0, then the bead touches the surface
dz_max = z0
# dx has to be at least x0 - c
dx_min = max(0, x0 - c, _dx(dz_max))
dx_max = max(0, _dx(dz_min))
# print('dx_min {:.1e}, dx_max {:.1e}'.format(dx_min, dx_max))
# Calculate the displacement of x (and z), where the angle between the
# force vector of the construct and the force vector of the bead
# displacement is 0° (180°)
# Unfortunately, there is no analytical solution to this ...
dx = fminbound(diff_tan_fxz_ab, dx_min, dx_max, xtol=xtol)
# the force needed to be acting on the construct to result in a
# corresponding force acting on the handle
# the resulting force is the combination of the horizontal force acting on
# the handle and the normal force of the bead touching the surface and/or
# the vertical trapping force acting on the handle
fx = dx * kappa[0]
cos_alpha = (x0 - dx) / c
# print(fx / f(dx) - cos_alpha)
fxz = fx / cos_alpha
# print(dx, dz_min, dz_max, dx_min, dx_max)
# dz = z0_r - math.sqrt(c**2 - (x0 - dx)**2)
# print('dx {:.1e}, dz {:.1e}'
# ''.format(dx, z0_r - math.sqrt(c**2 - (x0 - dx)**2)))
# a = x0 - dx
# b = z0_r - _dz(dx)
# print('x0 {:.3e}, a {:.3e}, b {:.3e}, c {:.3e}'.format(x0, a, b, c))
# #print('dx {:.3e}, dz {:.3e}, fx{:.1e}, fz {:.1e}'
# # ''.format(dx, _dz(dx), dx*kappa[0], _dz(dx)*kappa[1]))
# #print('dzmin {:.1e}, dzmax {:.1e}, dxmin {:.1e}, dxmax {:.1e}, f {:.1e}'
# # ''.format(dz_min, dz_max, dx_min, dx_max, f(dx)))
dxz = np.array([dx, _dz(dx)])
return fxz, dxz
def F_construct_3D(A0, ex_ss=0.0, ex_ds=0.0, f_dna=0.0, radius=0.0, kappa=None,
factr=1e5, gtol=1e-5, eps_angle=1e-8,
verbose=False, deep_verbose=False, print_result=False,
return_plus=False):
"""
Origin of the coordinate system is the center of the trap [0, 0, 0].
The coordinates are given for a right handed cartesian coordinate system.
Parameters
----------
A0 : np.ndarray of type float
Position (m) of the DNA attachment point on the glass surface relative
to the trap center: [x, y, z].
ex_ss : float
Extension of ssDNA (m).
ex_ds : float
Extension of dsDNA (m).
f_dna : float
Force (N) acting on the DNA construct that corresponds to the
extensions `ex_ss` and `ex_ds`.
radius : float
Radius of the bead/handle (m)
kappa : np.ndarray of type float
Stiffness for [x, z] or [x, y, z] of lever (handle) attached to DNA in
N/m.
factr : float, optional
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
where ``eps`` is the machine precision, which is automatically
generated by the code. Typical values for `factr` are: 1e12 for
low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
high accuracy. See Notes for relationship to `ftol`, which is exposed
(instead of `factr`) by the `scipy.optimize.minimize` interface to
L-BFGS-B.
gtol : float
The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
<= gtol`` where ``pg_i`` is the i-th component of the
projected gradient.
eps_angle : float
Step size used for numerical approximation of the jacobian for the
fitting of the angle of the DNA/bead construct.
Notes
-----
The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
I.e., `factr` multiplies the default machine floating-point precision to
arrive at `ftol`
"""
if kappa is None:
kappa = np.array([0, 0, 0])
# Length of the DNA construct
l_dna = ex_ss + ex_ds
# Initial distance of DNA attachment point on the glass surface to the bead
# center, i.e. the length of the DNA/bead construct
l_c = l_dna + radius
# Distance of the DNA glass attachment point to the trap center
l_A0 = np.linalg.norm(A0)
# Check, if the DNA/bead construct is longer than the distance of the
# attachment point to the trap center. If the DNA/bead construct is longer
# than possible stretching with displacement ||d|| >= 0, the bead will
# equilibrate in the middle of the trap with zero force.
if l_c >= l_A0:
angles_c = np.array(cart2sph(*-A0))[1:]
success_c = True
d = np.zeros(3)
f_bead = np.zeros(3)
if verbose:
            print('DNA/bead construct extension too long, assume construct '
                  'pointing through the trap center')
            print('Return zero force.')
else:
# Fit the angles of the DNA/bead construct
angles_c, success_c \
= minimize_angle_cost(A0, l_c, radius, kappa, init_c=None,
factr=factr, gtol=gtol,
eps=eps_angle, verbose=deep_verbose,
print_result=print_result)
# Calculate optimized displacement and corresponding forces
d, f_bead = cost_angle_opp_force_bead_attachment(angles_c, l_c, A0,
kappa, cost=False)
f_mag = np.linalg.norm(f_bead)
if verbose:
print('a_c: {}°'.format(
angles_c*180/math.pi))
print('f_bead: {:.3f} pN, f_dna: {:.3f} pN'.format(
np.linalg.norm(f_bead)*1e12,
np.linalg.norm(-f_bead)*1e12))
print()
if return_plus:
f_dna = - f_bead
return f_dna, f_bead, d, angles_c
return f_mag, d
def cart2sph(x, y, z, offset_phi=0, positive_phi=False):
"""
offset_phi : float
        Angle in the Euclidean plane that should point in the direction of
        positive x
"""
# cart2sph -- Transform Cartesian to spherical coordinates
# Spherical coordinates (r, θ, φ) as commonly used in physics (ISO
# convention): radial distance r, inclination θ (theta), and azimuth φ
# (phi).
hxy = math.hypot(x, y)
r = math.hypot(hxy, z)
theta = math.atan2(hxy, z)
phi = math.atan2(y, x) - offset_phi
if positive_phi and phi < 0:
phi += 2 * math.pi
return r, theta, phi
def sph2cart(r, theta, phi, offset_phi=0):
"""
offset_phi : float
        Angle in the Euclidean plane that points in the direction of positive x
"""
# sph2cart -- Transform spherical to Cartesian coordinates
# Spherical coordinates (r, θ, φ) as commonly used in physics (ISO
# convention): radial distance r, inclination θ (theta), and azimuth φ
# (phi).
phi += offset_phi
rsin_theta = r * math.sin(theta)
x = rsin_theta * math.cos(phi)
y = rsin_theta * math.sin(phi)
z = r * math.cos(theta)
return x, y, z
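# Illustrative round-trip check (not part of the original code): cart2sph and
# sph2cart are mutual inverses for the same offset_phi.
def _example_sph_roundtrip():
    r, theta, phi = cart2sph(1.0, 2.0, 3.0)
    return np.allclose(sph2cart(r, theta, phi), (1.0, 2.0, 3.0))  # True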
def coord_sph2cart(theta, phi, v, offset_phi=0):
# v is vector with components pointing in the direction of the
# v[0] radius vector
# v[1] circle formed by changing theta (inclination)
    # v[2] circle formed by changing phi (azimuth)
# returns a vector rotated according to the local orthogonal unit vectors
# of the spherical coordinate system
phi += offset_phi
sint = math.sin(theta)
cost = math.cos(theta)
sinp = math.sin(phi)
cosp = math.cos(phi)
return np.array([
[sint*cosp, cost*cosp, -sinp],
[sint*sinp, cost*sinp, cosp],
[cost, -sint, 0]
]).dot(v)
def angle(v1, v2):
# angle between two vectors
# return math.atan2(np.linalg.norm(np.cross(v1,v2)), np.dot(v1,v2))
# does not work as well for small angles, but is faster:
cos_theta = v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
cos_theta = max(-1, cos_theta)
cos_theta = min(1, cos_theta)
return math.acos(cos_theta)
def parallel_cmp(v1, v2):
# project v1 onto v2
# component of v1 parallel to v2
amp_v2 = np.linalg.norm(v2)
if amp_v2 == 0:
        return v2
return (v1.dot(v2) / amp_v2**2) * v2
def orthogonal_cmp(v1, v2):
# component of v1 orthogonal to v2
return v1 - parallel_cmp(v1, v2)
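# Illustrative check (not part of the original code): a vector always splits
# into its components parallel and orthogonal to a second vector.
def _example_vector_decomposition():
    v1 = np.array([3.0, 4.0, 0.0])
    v2 = np.array([1.0, 0.0, 0.0])
    # parallel_cmp(v1, v2) -> [3, 0, 0]; orthogonal_cmp(v1, v2) -> [0, 4, 0]
    return np.allclose(parallel_cmp(v1, v2) + orthogonal_cmp(v1, v2), v1)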
def minimize_angle_cost(A0, l_c, radius, kappa,
init_c=None, copy_init=False, factr=1e5, gtol=1e-5,
eps=1e-8, verbose=False, print_result=False):
ftol = factr * np.finfo(float).eps
# Set offset_phi_c to the direction of attachment point -> trap center
_, theta_c, offset_phi_c = cart2sph(*-A0)
# Boundaries of theta and phi for the DNA attachment point -> bead center
# vector.
# The construct can point straight upwards to sidewards, where the bead
# would touch the glass surface
theta_c_min = 0
sin_theta = min(1, radius / l_c)
theta_c_max = math.pi / 2 - math.asin(sin_theta)
# The construct can point towards the hemisphere of the trap center
# (i.e. +/- 90°)
phi_c_min = - math.pi / 2
phi_c_max = math.pi / 2
bounds = ((theta_c_min, theta_c_max), (phi_c_min, phi_c_max))
if init_c is None:
# Find proper start values for theta_c and phi_c
# Geometrically assume the DNA attachment point -> bead center vector
# pointing towards the center of the trap.
init_c = np.array([theta_c, 0])
else:
# correct phi of init_c by the offset of phi
if copy_init:
init_c = init_c.copy()
init_c[1] -= offset_phi_c
init_c = normalize_phi(init_c)
if verbose:
print('## ANGLE CONSTRUCT MINIMIZATION ##')
print('bounds theta: {:.2f}° -> {:.2f}°, phi: {:.2f}° -> {:.2f}°'
''.format(theta_c_min*180/math.pi,
theta_c_max*180/math.pi,
normalize(phi_c_min + offset_phi_c)*180/math.pi,
normalize(phi_c_max + offset_phi_c)*180/math.pi))
print('offset phi: {:.2f}°'.format(offset_phi_c*180/math.pi))
# Iteratively change theta and phi of the DNA attachment point -> bead
# center vector such a way that the angle of the attachment point -> bead
# center vector c and the force vector f_bead are pointing in the exact
# opposite direction (method 'L-BFGS-B').
res = _minimize_lbfgsb(cost_angle_opp_force_bead_attachment,
x0=init_c,
bounds=bounds,
args=(l_c, A0, kappa, offset_phi_c, True, verbose),
ftol=ftol,
gtol=gtol,
eps=eps)
angles_c = res.x
success_c = res.success
# correct for the offset phi
angles_c[1] += offset_phi_c
angles_c = normalize_phi(angles_c)
init_c[1] += offset_phi_c
if verbose:
d, f_bead = cost_angle_opp_force_bead_attachment(angles_c, l_c, A0,
kappa, cost=False,
verbose=False)
print('----------')
print('## ANGLE CONSTRUCT RESULT ##')
if print_result:
print(res)
print('----------')
print('ANGLE: θ (deg) φ (deg)')
print('f_bead: {:8.3f} {:8.3f}'.format(
*np.array(cart2sph(*f_bead))[1:]*180/math.pi))
print('construct: {:8.3f} {:8.3f}'.format(*angles_c*180/math.pi))
print('-> force: {:.3f} pN'.format(np.sqrt(np.sum(f_bead**2))*1e12))
print()
return angles_c, success_c
def normalize_phi(angles):
angles[1] = normalize(angles[1])
return angles
def normalize(phi):
if phi > math.pi:
phi -= 2*math.pi
if phi <= -math.pi:
phi += 2*math.pi
return phi
def cost_angle_opp_force_bead_attachment(angles_c, l_c, A0, kappa,
offset_phi_c=0, cost=True,
verbose=False):
"""
l_c : float
length of attachment point to bead center
"""
if verbose:
print(' # CALCULATE ANGLE CONSTRUCT COST ...')
print(' theta_c: {:.6f}, delta_phi_c: {:.6f}'.format(
*angles_c*180/math.pi))
# 1. calculate attachment point -> bead center vector of the construct for
# given theta and phi
c = np.array(sph2cart(l_c, *angles_c, offset_phi=offset_phi_c))
# 2. calculate position of the center of the bead (i.e. displacement
# vector) for a given attachment point -> bead center vector c
d = A0 + c
# 3. calculate force vector of bead due to displacement
f_bead = - d * kappa
if cost:
# 4. calculate the angle between f_bead and the vector opposing the r
# vector, which is connected to the attachment point -> bead center
# vector c (i.e. the vector opposing the force vector along the bead
# center / DNA attachment point axis). If they are pointing in the same
# direction, angle_opp is 0.
angle_opp = angle(f_bead, c)
# print(angle_opp*180/math.pi)
if verbose:
print(' f_bead_theta_phi: {}°'.format(
np.array(cart2sph(*f_bead))[1:]*180/math.pi))
print(' c_theta_phi: {}°'.format(angles_c*180/math.pi))
print(' angle_opp: {:.3f}°'.format(angle_opp*180/math.pi))
if cost:
return angle_opp**2
else:
return d, f_bead
def E_pair(bases, NNBP=False, c=None, T=None):
"""
Work necessary to separate base pairs A-T and G-C of a DNA double helix.
Includes the contributions of unpairing, unstacking, and rearrangement of
bases.
    Parameters
    ----------
bases : str
Sequence of bases 'A', 'T', 'C', and 'G'.
NNBP : bool
Nearest-neighbour base-pair determination of the base-pair energies
c : float
Concentration of monovalent cations in mol, defaults to 1 M.
T : float
T is not considered
"""
c = 1 if c is None else c
bases = bases.upper()
if NNBP:
# TODO: include proper energy term for the first and last bp
e_pair = [_E_pair[''.join((a, b))]
for a, b
in zip(bases[:-1], bases[1:])]
m_pair = [_M_pair[''.join((a, b))]
for a, b
in zip(bases[:-1], bases[1:])]
e_pair = np.array(e_pair)
m_pair = np.array(m_pair)
e = e_pair + m_pair * np.log(c)
else:
e = np.array([_E_pair[base] for base in bases])
return e
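# Illustrative sketch (the sequence and salt concentration are assumptions):
# with NNBP=True the energy is looked up per nearest-neighbour pair, so a
# sequence of n bases yields n-1 values; with NNBP=False one value per base is
# returned.
# E_pair('GCTA', NNBP=True, c=0.05)  # -> array of length 3 (GC, CT, TA)
# E_pair('GCTA', NNBP=False)         # -> array of length 4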
def _E_pair_T(bases, NNBP=False, c=None, T=298.2):
"""
Work necessary to separate base pairs A-T and G-C of a DNA double helix.
Includes the contributions of unpairing, unstacking, and rearrangement of
bases.
    Parameters
    ----------
bases : str
Sequence of bases 'A', 'T', 'C', and 'G'.
NNBP : bool
Nearest-neighbour base-pair determination of the base-pair energies
c : float
Concentration of monovalent cations in mol
T : float
Temperature in K
"""
c = 1 if c is None else c
bases = bases.upper()
if NNBP:
dh_pair = [_DH_pair[''.join((a, b))]
for a, b
in zip(bases[:-1], bases[1:])]
ds_pair = [_DS_pair[''.join((a, b))]
for a, b
in zip(bases[:-1], bases[1:])]
m_pair = [_M_pair[''.join((a, b))]
for a, b
in zip(bases[:-1], bases[1:])]
dh_pair = np.array(dh_pair)
ds_pair = np.array(ds_pair)
m_pair = np.array(m_pair)
# salt dependent entropy
# only entropy depends on salt concentration
ds_pair_salt = ds_pair - m_pair/298 * np.log(c)
# temperature dependent energy
e_pair = dh_pair - T*ds_pair_salt
e = e_pair # + m_pair * np.log(c)
else:
e = np.array([_E_pair[base] for base in bases])
return e
def E_unzip_DNA(bases, nuz=0, NNBP=False, c=None, T=298.2):
"""
Work necessary to separate two single strands of DNA double helix of `nuz`
base pairs.
Includes the contributions of unpairing, unstacking, and rearrangement of
bases.
Parameters
----------
bases : str
Sequence of bases 'A', 'T', 'C', and 'G'.
nuz : int or float
Number of base(pair)s up to where the unpairing energy should be
calculated ([1,`nuz`]). If `nuz` is 1, calculate energy for first
basepair.
T : float
Temperature in K
"""
if nuz <= 0:
return 0
# if NNBP:
# TODO: include proper energy term for the first and last bp
ni = int(nuz)
nr = nuz % 1
E = np.sum(E_pair(bases[:ni], NNBP=NNBP, c=c, T=T))
E += np.sum(E_pair(bases[ni-1:ni+1], NNBP=NNBP, c=c, T=T)) * nr
return E
def _E_ext_ssDNA(x, nbs=0, S=None, L_p=None, z=None, T=298.2):
"""
Elastic energy stored in a single strand of j bases
extended by force F to length x.
Parameters
----------
    nbs : int
Number of bases of ssDNA
x : float
Extension of ssDNA in m
z : float
Length of a single base in m
"""
if nbs <= 0:
return 0
if x < 0:
x = -x
# Slow variant of numerical integration
    # E_fjc = quad(F_ssDNA, 0, x, (nbs, S, L_p, z, T))[0]
f = F_ssDNA(x, nbs=nbs, S=S, L_p=L_p, z=z, T=T)
# integral_ext_dF = ext_ssDNA_int(f, j, S=S, L_p=L_p, z=z, T=T)
# The ext_ssDNA_int seems to be not correct -> numerical integration
integral_ext_dF = quad(ext_ssDNA, 0, f, (nbs, S, L_p, z, T))[0]
E_fjc = f * x - integral_ext_dF
# There is no negative energy.
E_fjc = max(E_fjc, 0)
return E_fjc
def E_ext_dsDNA_wlc(x, nbp=0, pitch=None, L_p=None, T=298.2):
"""
Elastic energy stored in a double strand of nbp basepairs
extended by force F to length x.
Integral of the worm-like chain model [1].
[1] Marko, J.F.; Eric D. Siggia. "Stretching DNA". Macromolecules. 1995.
28: 8759–8770. doi:10.1021/ma00130a008
Parameters
----------
x : float
Extension (m)
L_0 : float
Contour length (m)
L_p : float
Persistence length (m)
T : float
Temperature (K)
"""
pitch = pitch or _dsDNA_DEFAULT['pitch']
L_p = L_p or _dsDNA_DEFAULT['L_p']
if nbp <= 0:
return 0
if x < 0:
x = -x
L_0 = nbp*pitch
# WLC only valid in the interval x = (-L_0, L_0)
# Higher x would lead to wrongly calculated energies.
# if x > L_0, even negative energies are possible, which
    # would lead to exceptionally high values in the partition
# function.
if x >= L_0:
return float('inf')
def integral(x):
# from wolfram alpha
# return (kB * T * (L_0**2 / (L_0 - x) + (2 * x**2) / L_0 - x)) / (4 * L_p)
# (k T (L^2/(L - x) + (2 x^2)/L - x))/(4 P)
# Petrosyan, R. "Improved approximations for some polymer extension
# models". Rehol Acta. 2016. doi:10.1007/s00397-016-0977-9
return (kB * T * (L_0**2 / (L_0 - x) + (2 * x**2) / L_0 - 1.01587 * x *
(x/L_0)**2.15 - x)) / (4 * L_p)
# (k T (L^2/(L - x) + (2 x^2)/L - 1.01587 x^1 (x/L)^2.15 - x))/(4 P)
return integral(x) - integral(0)
def E_lev(displacement, kappa):
"""
The elastic energy of the lever/handle.
Parameters
----------
kappa : float
Stiffness of lever in N/m
displacement : float
Displacement of lever in m
"""
return 1/2 * kappa * displacement**2
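# Worked example (illustrative, the numbers are assumptions): for a trap
# stiffness of 0.2 pN/nm (2e-4 N/m) and a bead displacement of 10 nm,
# E_lev(10e-9, 2e-4) = 0.5 * 2e-4 * (10e-9)**2 = 1e-20 J, i.e. roughly
# 2.4 kB*T at room temperature.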
def E_tot(bases='', nuz=0, nbs=0, ex_ss=0.0, nbp=0, ex_ds=0.0,
displacement=0.0, kappa=0.0,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=None, e_loop=0.0, T=298.2, verbose=False):
"""
Parameters
----------
bases : str
Sequence of sense strand of dsDNA which is (will be) unzipped
nuz : int
Number of unzipped basepairs to calculate the unzip energy.
nbs : int
Number of ssDNA bases
ex_ss : float
Extension of an ssDNA strand
nbp : int
Number of basepairs of the spacer dsDNA
ex_ds : float
Extension of the spacer dsDNA
kappa : float
Stiffness of lever (handle) attached to DNA in N/m
e_loop : float
Free energy for opening the last bp and terminal hairpin (kcal/mol).
"""
e_ext_ssDNA = E_ext_ssDNA(ex_ss, nbs=nbs, S=S, L_p=L_p_ssDNA, z=z, T=T)
e_ext_dsDNA = E_ext_dsDNA_wlc(ex_ds, nbp=nbp, pitch=pitch, L_p=L_p_dsDNA,
T=T)
e_unzip_DNA = E_unzip_DNA(bases, nuz=nuz, NNBP=NNBP, c=c, T=T)
e_lev = np.sum(E_lev(displacement, kappa))
# Include proper energy term for opening the terminal hairpin, only if all
# bps are already unzipped and hairpin is to be opened
if nuz >= len(bases) + 1:
e_loop = e_loop*kcal/Na
else:
e_loop = 0.0
e_total = e_ext_ssDNA + e_ext_dsDNA + e_unzip_DNA + e_lev + e_loop
if verbose:
print('E_ext_ssDNA: ' + str(e_ext_ssDNA/(kB*T)))
print('E_ext_dsDNA: ' + str(e_ext_dsDNA/(kB*T)))
print('E_unzip_DNA: ' + str(e_unzip_DNA/(kB*T)))
print('E_lev: ' + str(e_lev/(kB*T)))
return e_total
def plot_simulated_force_extension(simulation, x=None, y=None, yXYZ=None,
axes=None, ylim=None, theta=False):
# Get data to be plotted
sim_values = get_simulation_values(simulation, df_xyz=True)
e = sim_values['extension']
f = sim_values['force']
forceXYZ = sim_values['forceXYZ']
nuz = sim_values['nuz']
theta = theta if 'theta' in sim_values else False
if theta:
th = sim_values['theta']
if axes is None:
fig, axes = plt.subplots(2, 1)
else:
fig = axes[0].get_figure()
ax = axes[0]
ax2 = ax.twinx()
ax2._get_lines.prop_cycler = ax._get_lines.prop_cycler
# Plot simulated unzipping curve
ax.plot(e * 1e9, f * 1e12, label='Force microsphere')
# Plot measured unzipping curve
if x is not None and y is not None:
ax.plot(x * 1e9, y * 1e12)
# Plot number of simulated unzipped basepairs
ax2.plot(e * 1e9, nuz, color='cyan')
ax.grid(True)
ax.set_xlabel('(Apparent) ext of construct (nm)')
ax.set_ylabel('Force (pN)')
ax2.set_ylabel('# unzip bps')
ylim = ylim or (-1, 18)
ax.set_ylim(ylim)
# Plot individual forces
ax = axes[1]
if theta:
ax2 = plt.twinx(ax=ax)
ax2.xaxis.set_visible(False)
# Plot simulated unzipping curves
ax.plot(e * 1e9, forceXYZ * 1e12)
# Plot measured unzipping curves
if x is not None and yXYZ is not None:
ax.plot(x * 1e9, np.abs(yXYZ) * 1e12)
if theta:
        # Plot difference of angle r0 and r
ax2.plot(e * 1e9, th * 180 / math.pi, color='cyan')
ax2.set_ylabel(r'$\theta$ diff (°)')
ax.grid(True)
ax.set_xlabel('(Apparent) ext of construct (nm)')
ax.set_ylabel('Force (pN)')
ax.set_ylim(ylim)
return fig, axes
def plot_unzip_energy(x0, y0=0.0, h0=0.0, bases='', nuz_est=-1, nbs=0, nbp=0,
nbs_loop=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=0, e_loop=0.0, T=298.2,
spacing=5, min_stepsize=10,
boltzmann_factor=1e-9,
verbose=False, axes=None):
A0 = attachment_point(x0, y0=y0, h0=h0, radius=radius)
xfe0 = xfe0_nuz(A0, bases=bases, nuz_est=nuz_est, nbs=nbs, nbp=nbp,
nbs_loop=nbs_loop,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T,
spacing=spacing, min_stepsize=min_stepsize,
boltzmann_factor=boltzmann_factor,
verbose=verbose)
# with cnps.cn_plot('notebook') as cnp:
if axes is None:
fig, ax = plt.subplots()
ax2 = ax.twinx()
ax2._get_lines.prop_cycler = ax._get_lines.prop_cycler
# ax2 = cnps.second_ax(link_ax=ax)
# ax2.xaxis.set_visible(False)
else:
ax, ax2 = axes
fig = ax.get_figure()
nuz = xfe0['NUZ0']
energy = xfe0['E0']
min_e = energy.min()
    # normalize energy relative to min_e in units of kT
energy -= min_e
energy /= kB*T
# displacement = xfe0['D0']
boltzmann_factor = xfe0['W0'] / np.sum(xfe0['W0'])
cumsum = np.cumsum(xfe0['W0']) / np.sum(xfe0['W0'])
# if cnp is not None:
# ax.plot(nuz, energy, c=cnp.color)
# # ax.axhline(xfe0['D0_avg'], c=cnp.color)
# ax2.plot(nuz, boltzmann_factor, c=cnp.color)
# ax2.plot(nuz, cumsum, c=cnp.color)
# else:
ax.plot(nuz, energy)
# ax.axhline(xfe0['D0_avg'])
ax2.plot(nuz, boltzmann_factor)
ax2.plot(nuz, cumsum)
ax.axvline(xfe0['NUZ0_avg'], c='magenta')
ax.set_xlabel('Number of unzipped basepairs')
ax.set_ylabel('Energy difference ($k_{B}*T$)')
ax2.set_ylabel('Boltzmann factor')
return fig, ax, ax2
def get_simulation_values(simulation, extension=True, force=True, nuz=True,
df_xyz=False, weighted_energies=False,
energy_keys=None):
"""
Get extension, force, and number of unzipped basepairs of a simulation.
    Parameters
    ----------
extension : bool
Return the extension.
force : bool
Return the force.
nuz : bool
Return the number of unzipped basepairs.
df_xyz : bool
Return the individual xyz components of displacement and force.
weighted_energies : bool
        Calculate and return weighted and averaged energies as returned by
        the function `get_weighted_energies()`.
energy_keys : list of str
        Energies to be calculated. Possible options, which are also the
        default:
[ 'e_ext_ssDNA', 'e_ext_dsDNA', 'e_unzip_DNA', 'e_lev' ].
"""
# Set variables of simulated data
XFE, XFE0 = simulation['XFE'], simulation['XFE0']
# Get extension, force, and number of unzipped basepairs ...
# Extension of the construct
try:
EX_avg = XFE['EX_avg']
except KeyError:
# Old simulation object with different key
EX_avg = XFE['X']
# Select data which was properly fitted
idx_valid = (EX_avg != 0)
return_value = {}
if extension:
return_value['extension'] = EX_avg[idx_valid]
if force:
return_value['force'] = XFE['F0_avg'][idx_valid]
if nuz:
return_value['nuz'] = XFE['NUZ0_avg'][idx_valid]
if df_xyz:
try:
D0XYZ_avg = XFE['D0_avg'][idx_valid]
except KeyError:
# Old simulation object
D0XYZ_avg = np.array([xfe0['D0_avg'] for xfe0 in XFE0])
return_value['displacementXYZ'] = D0XYZ_avg
kappa = XFE['settings']['kappa']
F0XYZ_avg = kappa * D0XYZ_avg
return_value['forceXYZ'] = F0XYZ_avg
if weighted_energies:
E0s_avg = get_weighted_energies(simulation, keys=energy_keys)
for key, E0_avg in E0s_avg.items():
return_value[key] = E0_avg[idx_valid]
return return_value
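# Illustrative usage sketch (assumption, `simulation` being the result of
# `simulate_unzipping`): the returned dictionary holds equally long 1D arrays
# for the valid simulation points, ready for plotting.
def _example_force_extension_arrays(simulation):
    values = get_simulation_values(simulation)
    return values['extension'] * 1e9, values['force'] * 1e12  # nm, pN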
def get_weighted_energies(simulation, keys=None, processes=8):
"""
Get weighted and averaged energies from simulation
Parameters
----------
simulation : dict
The simulation to get the weighted averaged energies from.
keys : str or list of str
        Energies to be calculated. Possible values, which are also the
        default:
[ 'e_ext_ssDNA', 'e_ext_dsDNA', 'e_unzip_DNA', 'e_lev' ].
Returns
-------
dict
Weighted and averaged energies for ssDNA and dsDNA extension,
lever/handle displacement, and basepair unzipping.
"""
    # Speed up the energy calculation with multiprocessing and by
    # pre-buffering the dsDNA wlc model for all worker processes
nbp = simulation['settings']['nbp']
pitch = simulation['settings']['pitch']
L_p_dsDNA = simulation['settings']['L_p_dsDNA']
T = simulation['settings']['T']
global ext_dsDNA_wlc
ext_dsDNA_wlc = init_buf_ext_dsDNA_wlc(nbp=nbp, pitch=pitch, L_p=L_p_dsDNA,
T=T)
# Define the keys of the energies to be calculated
keys = get_energy_keys(keys=keys)
    # Define a closure to be used with multiprocessing
def f(i):
# Get all unweighted energies for simulation point `i` and calculate
# the averaged weighted energies
XFE0 = simulation['XFE0'][i]
D0 = XFE0['D0']
F0 = XFE0['F0']
NUZ0 = XFE0['NUZ0']
W0 = XFE0['W0']
W0_sum = W0.sum()
E0s = get_energies(simulation, D0, F0, NUZ0, keys=keys)
E0s_avg = []
for E0 in E0s.values():
E0s_avg.append(np.sum(E0 * W0) / W0_sum)
return E0s_avg
f = unboundfunction(f)
# Get all averaged and weighted energies for all simulation points
with Pool(processes=processes) as pool:
E0_avg_lists = pool.map(f, range(len(simulation['XFE0'])))
# Get and combine all calculated energy points according to their key and
# finally convert lists to arrays
E0s_avg = {}
for key in keys:
E0s_avg[key] = []
for E0_avg_list in E0_avg_lists:
for key, E0_avg in zip(keys, E0_avg_list):
E0s_avg[key].append(E0_avg)
for key, E0_avg in E0s_avg.items():
E0s_avg[key] = np.array(E0_avg)
return E0s_avg
def get_energies(simulation, displacement=None, force=None, nuz=None,
keys=None, F_ssDNA_mod=None, E_ext_ssDNA_mod=None,
ext_dsDNA_wlc_mod=None):
"""
Get energies from simulation
Parameters
----------
simulation : dict
Simulation with the settings used for the calculation of the energies
force : Iterable
        1D array of force acting on the unzipping construct
    nuz : Iterable
        1D array of the number of unzipped basepairs
displacement : Iterable
2D array with displacement in X or Y and Z
Returns
-------
dict
Energies for ssDNA and dsDNA extension, lever/handle displacement,
and basepair unzipping.
"""
bases = simulation['settings']['bases']
S = simulation['settings']['S']
L_p_ssDNA = simulation['settings']['L_p_ssDNA']
z = simulation['settings']['z']
nbp = simulation['settings']['nbp']
pitch = simulation['settings']['pitch']
L_p_dsDNA = simulation['settings']['L_p_dsDNA']
kappa = simulation['settings']['kappa']
bases = simulation['settings']['bases']
NNBP = simulation['settings']['NNBP']
c = simulation['settings']['c']
T = simulation['settings']['T']
# Calculate all NUZs and NBSs
if displacement is None or force is None or nuz is None:
sim_values = get_simulation_values(simulation, df_xyz=True)
D = sim_values['displacementXYZ'] if displacement is None else displacement
F = sim_values['force'].astype(float) if force is None else force
NUZ = sim_values['nuz'].astype(float) if nuz is None else nuz
NBS = simulation['settings']['nbs'] + NUZ * 2
keys = get_energy_keys(keys=keys)
e_ext_ssDNA = 'e_ext_ssDNA' in keys
e_ext_dsDNA = 'e_ext_dsDNA' in keys
e_unzip_DNA = 'e_unzip_DNA' in keys
e_lev = 'e_lev' in keys
Es = {}
for key in keys:
Es[key] = []
# Set DNA model functions to the unbuffered default functions
# Initialize the approximations of the dsDNA extension model function with
# fixed model function parameters and substitute the original DNA model
# function on the module level
global F_ssDNA
global E_ext_ssDNA
global ext_dsDNA_wlc
F_ssDNA = F_ssDNA_mod or _F_ssDNA
E_ext_ssDNA = E_ext_ssDNA_mod or _E_ext_ssDNA
if e_ext_dsDNA:
undefined = ext_dsDNA_wlc_mod is None
unbuffered = not isinstance(ext_dsDNA_wlc, EXT_DSDNA)
changed = (isinstance(ext_dsDNA_wlc, EXT_DSDNA)
and (ext_dsDNA_wlc._kwargs['pitch'] != pitch
or ext_dsDNA_wlc._kwargs['L_p'] != L_p_dsDNA
or ext_dsDNA_wlc._kwargs['T'] != T))
if undefined:
if unbuffered or changed:
ext_dsDNA_wlc_mod = init_buf_ext_dsDNA_wlc(nbp=nbp, pitch=pitch,
L_p=L_p_dsDNA, T=T)
else:
ext_dsDNA_wlc_mod = ext_dsDNA_wlc
ext_dsDNA_wlc = ext_dsDNA_wlc_mod
for d, f, nuz, nbs in zip(D, F, NUZ, NBS):
if e_ext_ssDNA:
ex_ss = ext_ssDNA(f, nbs=nbs, S=S, L_p=L_p_ssDNA, z=z, T=T)
e = E_ext_ssDNA(ex_ss, nbs=nbs, S=S, L_p=L_p_ssDNA, z=z, T=T)
Es['e_ext_ssDNA'].append(e)
if e_ext_dsDNA:
ex_ds = ext_dsDNA_wlc(f, nbp=nbp, pitch=pitch, L_p=L_p_dsDNA, T=T)
e = E_ext_dsDNA_wlc(ex_ds, nbp=nbp, pitch=pitch, L_p=L_p_dsDNA,
T=T)
Es['e_ext_dsDNA'].append(e)
if e_unzip_DNA:
e = E_unzip_DNA(bases, nuz=nuz, NNBP=NNBP, c=c, T=T)
Es['e_unzip_DNA'].append(e)
if e_lev:
e = np.sum(E_lev(d, kappa))
Es['e_lev'].append(e)
for key, e in Es.items():
Es[key] = np.array(e)
return Es
def get_energy_keys(keys=None):
keys = [keys] if isinstance(keys, str) else keys
if keys is None:
keys = [
'e_ext_ssDNA',
'e_ext_dsDNA',
'e_unzip_DNA',
'e_lev' ]
return keys
class unboundfunction(object):
"""
Class to hold references to functions and still be able to pickle them.
To reference the function you want to bind:
    function_reference = unboundfunction(function)
"""
def __init__(self, ft, **kwargs):
self.ft = ft
def __getstate__(self):
return cloudpickle.dumps(self.ft)
def __setstate__(self, cloudepickled_ft):
self.ft = cloudpickle.loads(cloudepickled_ft)
def __call__(self, *a, **kw):
return self.ft(*a, **kw)
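# Illustrative usage sketch (assumption, mirroring the pattern used in
# `unzipping_force_energy` and `get_weighted_energies`): wrapping a locally
# defined closure in `unboundfunction` makes it picklable via cloudpickle, so
# it can be handed to `multiprocessing.Pool.map`.
# def f(x):
#     return x**2
# f = unboundfunction(f)
# with Pool(processes=2) as pool:
#     squares = pool.map(f, range(10))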
class DNA_MODEL_APPROX(object):
def __init__(self, f, yx_f, key,
x_min=0e-12, x_mid=25e-12, x_max=200e-12,
num_f_low=501, num_f_high=501, p_deg_low=10, p_deg_high=10,
**kwargs):
self._f = f
self._yx_f = unboundfunction(yx_f)
self._p_min = x_min
self._p_mid = x_mid
self._p_max = x_max
self._num_f_low = num_f_low
self._p_deg_low = p_deg_low
self._num_f_high = num_f_high
self._p_deg_high = p_deg_high
self._kwargs = {}
self._key = key
# Set the default values for the model
self._kwargs = {}
self._kwargs.update(kwargs)
# Set the approximation
self.reset()
def reset(self):
self.Fp_low = {}
self.Fp_high = {}
self.x_min = {}
self.x_mid = {}
self.x_max = {}
self.keys = set([])
def calculate_approximation(self, key, verbose=False):
# if verbose:
# print('Calculating approximation model for {:04d} ...\r'
# ''.format(key), end='', flush=True)
# Set default values for model
kwargs = self._kwargs
# Get intervals for low/high approximation
x_min = self.yx_f(self._p_min, key, **kwargs)
x_mid = self.yx_f(self._p_mid, key, **kwargs)
x_max = self.yx_f(self._p_max, key, **kwargs)
# Calculate factors for low approximation
num_f = self.num_f_low
p_deg = self.p_deg_low
cp_low = polynomial(self.f, x_min, x_mid, num_f, p_deg,
verbose=verbose, factors_only=True, key=key,
**kwargs)
# Calculate high approximation
num_f = self.num_f_high
p_deg = self.p_deg_high
cp_high = polynomial(self.f, x_mid, x_max, num_f, p_deg,
verbose=verbose, factors_only=True, key=key,
**kwargs)
return key, cp_low, cp_high, x_min, x_mid, x_max
def set_approximation(self, ap, key=None):
key = key or ap[0]
fp_l = lambda x, c=ap[1]: polyn(c, x)
fp_h = lambda x, c=ap[2]: polyn(c, x)
# def fp_l(x, c=ap[1]):
# return polyn(c, x)
# def fp_h(x, c=ap[2]):
# return polyn(c,x)
x_min = ap[3]
x_mid = ap[4]
x_max = ap[5]
self.Fp_low[key] = unboundfunction(fp_l)
self.Fp_high[key] = unboundfunction(fp_h)
self.x_min[key] = x_min
self.x_mid[key] = x_mid
self.x_max[key] = x_max
self.keys.add(key)
def __call__(self, x=0.0, key=0, avoid_neg_Y=True, verbose=False):
# Automatically create Fp(x) polynomial and cache it for future calls
if key not in self.keys:
ap = self.calculate_approximation(key, verbose=verbose)
self.set_approximation(ap, key)
# Fallback, if x out of fitted range from polynomial
if x < self.x_min[key] or self.x_max[key] < x:
if verbose:
                print('Calculation in {} for {:.3e} !< {:.3e} !< {:.3e} is out '
                      'of range!'.format(
self.__class__.__name__,
self.x_min[key], x, self.x_max[key]),
'Adjust x_min or x_max for polynomial approximation.')
return self.f(x, **self._kwargs)
# Return approximation according to the low/high range
if self.x_min[key] <= x and x <= self.x_mid[key]:
y = self.Fp_low[key](x)
elif self.x_mid[key] < x and x <= self.x_max[key]:
y = self.Fp_high[key](x)
if avoid_neg_Y:
y = max(y, 0)
return y
@property
def f(self):
return self._f
@property
def yx_f(self):
return self._yx_f
@property
def key(self):
return self._key
@property
def num_f_low(self):
return self._num_f_low
@property
def p_deg_low(self):
return self._p_deg_low
@property
def num_f_high(self):
return self._num_f_high
@property
def p_deg_high(self):
return self._p_deg_high
# does not work well with multiprocessing
# def __getattr__(self, name):
# """
# Allow attributes to be used as kwargs keys
# """
# if name in self._kwargs:
# return self._kwargs[name]
# else:
# raise AttributeError(name)
def polynomial(f, x_min, x_max, num_x, p_deg, verbose=False,
factors_only=False, key=0, **kwargs):
    # Create test data to be fitted, according to the actual
    # model function f, for the X range which is expected to
    # occur during unzipping
X = np.linspace(x_min, x_max, num_x)
Y = np.array([f(x, key, **kwargs) for x in X])
# Create polynomial for the f(x) curve
c = np.polyfit(X, Y, p_deg)
fp = lambda x: polyn(c, x)
if verbose:
# Calculate STD (STE) of polynomial
X = np.linspace(x_min, x_max, num_x)
Y = np.array([f(x, key, **kwargs) for x in X])
Yp = np.array([fp(x) for x in X])
std = np.sqrt(((Y - Yp)**2).sum() / (len(Y) - 1))
# ste = std / np.sqrt(len(Y))
print('The STE of the polynomial is: ', std)
if factors_only:
return c
return fp
def polynom(c, x, n):
if n == 10:
return poly10(c, x)
if n == 11:
return poly11(c, x)
return polyn(c, x)
def poly10(c, x):
return (c[0]*x**10
+ c[1]*x**9
+ c[2]*x**8
+ c[3]*x**7
+ c[4]*x**6
+ c[5]*x**5
+ c[6]*x**4
+ c[7]*x**3
+ c[8]*x**2
+ c[9]*x
+ c[10])
def poly11(c, x):
return (c[0]*x**11
+ c[1]*x**10
+ c[2]*x**9
+ c[3]*x**8
+ c[4]*x**7
+ c[5]*x**6
+ c[6]*x**5
+ c[7]*x**4
+ c[8]*x**3
+ c[9]*x**2
+ c[10]*x
+ c[11])
power = np.arange(20)[::-1]
def polyn(c, x):
return np.dot(c, x ** power[-c.size:])
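# Illustrative sketch (not part of the original module): polynomial() wraps
# np.polyfit over num_x samples of f, and polyn() evaluates the coefficients,
# highest degree first, e.g.
#   X = np.linspace(0.0, 1.0, 501)
#   c = np.polyfit(X, np.sin(X), 10)
#   y = polyn(c, 0.5)   # close to np.sin(0.5) inside the fitted range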
def init_buf_ext_dsDNA_wlc(nbp=0, pitch=None, L_p=None, T=298.2,
f_min=0e-12, f_mid=4e-12, f_max=200e-12,
num_f_low=501, num_f_high=501,
p_deg_low=14, p_deg_high=16):
# Assign DNA model function to the variable of the global (module) scope,
# such that `multiprocessing.Pool` will see these variables.
global ext_dsDNA_wlc
# Initialize the approximations of the dsDNA model function
# with fixed model function parameters and substitute the original
# DNA model functons
ext_dsDNA_wlc = EXT_DSDNA(pitch=pitch, L_p=L_p, T=T,
f_min=f_min, f_mid=f_mid, f_max=f_max,
num_f_low=num_f_low, num_f_high=num_f_high,
p_deg_low=p_deg_low, p_deg_high=p_deg_high)
# Initialize the ext_dsDNA_wlc object approximation with the needed nbp
if nbp > 0:
ap_ext_dsDNA = ext_dsDNA_wlc.calculate_approximation(nbp)
ext_dsDNA_wlc.set_approximation(ap_ext_dsDNA, nbp)
return ext_dsDNA_wlc
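# Illustrative usage (parameter values are assumptions, not from the source):
#   ext_dsDNA_wlc = init_buf_ext_dsDNA_wlc(nbp=1000, pitch=0.338e-9, L_p=50e-9)
#   x = ext_dsDNA_wlc(F=10e-12, nbp=1000)   # extension at 10 pN for 1000 bp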
def init_buf_E_ext_ssDNA(read=True, write=False, filename='E_ext_ssDNA',
processes=8, bases='', nbs=0, nbs_loop=0,
S=None, L_p=None, z=None,
T=298.2, f_min=0e-12, f_mid=25e-12, f_max=200e-12,
num_f_low=501, num_f_high=501,
p_deg_low=11, p_deg_high=8):
# Assign DNA model function to the variable of the global (module) scope,
# such that `multiprocessing.Pool` will see these variables.
global E_ext_ssDNA
# Initialize the E_ext_ssDNA objects approximation with all needed nbs,
# either read from file or calculate
if read:
# read aps_E_ext_ssDNA from file
with open(filename, 'rb') as f:
E_ext_ssDNA = pickle.load(f)
model_kwargs = {
'S': S,
'L_p': L_p,
'z': z,
'T': T
}
for k in model_kwargs.keys():
if model_kwargs[k] != E_ext_ssDNA._kwargs[k]:
warnings.warn('{} in E_ext_ssDNA was set from read model to:'
'{}'.format(k, E_ext_ssDNA._kwargs[k]))
else:
# Initialize the approximations of the ssDNA model function
# with fixed model function parameters and substitute the original
# DNA model functons
E_ext_ssDNA = E_EXT_SSDNA(S=S, L_p=L_p, z=z, T=T,
f_min=f_min, f_mid=f_mid, f_max=f_max,
num_f_low=num_f_low, num_f_high=num_f_high,
p_deg_low=p_deg_low, p_deg_high=p_deg_high)
# Define a closure to be executed by the pool
def f(nbs):
# E_ext_ssDNA, nbs = args
print('Calculating approximation model for nbs = {:04d} ...\r'
''.format(nbs), end='', flush=True)
return E_ext_ssDNA.calculate_approximation(nbs)
f = unboundfunction(f)
# Use all available CPUs for the calculation to speed up calculations
with Pool(processes=processes) as pool:
start = time.time()
# Calculate all possible bases of ssDNA, i.e. 10 bases of 5*pT
# spacer, + all possibilities of unzipped basepairs (1 to 811
# basepairs * 2), + 10 bases hairpin, if last basepair is unzipped
nuz_max = len(bases)
nob = list(range(nbs, nuz_max*2+nbs+1, 2))
nob.append(nob[-1] + nbs_loop)
# args = [(E_ext_ssDNA, nbs) for nbs in nob]
aps_E_ext_ssDNA = pool.map(f, nob)
stop = time.time()
print('Done, elapsed time: {:.1f} s'.format(stop - start))
for ap in aps_E_ext_ssDNA:
E_ext_ssDNA.set_approximation(ap)
if write:
# save E_ext_ssDNA to file
with open(filename, 'wb') as f:
pickle.dump(E_ext_ssDNA, f)
return E_ext_ssDNA
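# Illustrative usage (all parameter values are assumptions): precompute the
# ssDNA energy approximations once, optionally persisting them to disk, and
# reuse the buffered module-level object afterwards:
#   E_ext_ssDNA = init_buf_E_ext_ssDNA(read=False, write=True, bases='T' * 400,
#                                      nbs=10, nbs_loop=10, S=800e-12,
#                                      L_p=0.75e-9, z=0.58e-9)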
class EXT_DSDNA(DNA_MODEL_APPROX):
def __init__(self, pitch=None, L_p=None, T=298.2,
f_min=0e-12, f_mid=4e-12, f_max=200e-12,
num_f_low=501, num_f_high=501, p_deg_low=14, p_deg_high=16):
f = _ext_dsDNA_wlc
def yx_f(x, *args, **kwargs):
return x
super().__init__(f, yx_f, key='nbp', pitch=pitch, L_p=L_p, T=T,
x_min=f_min, x_mid=f_mid, x_max=f_max,
num_f_low=num_f_low, num_f_high=num_f_high,
p_deg_low=p_deg_low, p_deg_high=p_deg_high)
def __call__(self, F=0.0, nbp=0, avoid_neg_e=True, verbose=False,
**ignored):
if nbp <= 0:
return 0
if F == 0.0:
return 0.0
sign = 1
if F < 0:
F = -F
sign = -1
if F > self._p_max:
            return self._kwargs['pitch'] * nbp
# return float('inf')
x = super().__call__(x=F, key=nbp, avoid_neg_Y=avoid_neg_e,
verbose=verbose)
return sign * x
class F_SSDNA(DNA_MODEL_APPROX):
def __init__(self, S=None, L_p=None, z=None, T=298.2,
f_min=0e-12, f_mid=25e-12, f_max=200e-12,
num_f_low=501, num_f_high=501, p_deg_low=10, p_deg_high=8):
f = _F_ssDNA
yx_f = ext_ssDNA
super().__init__(f, yx_f, key='nbs', S=S, L_p=L_p, z=z, T=T,
x_min=f_min, x_mid=f_mid, x_max=f_max,
num_f_low=num_f_low, num_f_high=num_f_high,
p_deg_low=p_deg_low, p_deg_high=p_deg_high)
def __call__(self, x=0.0, nbs=0, avoid_neg_F=True, verbose=False,
**ignored):
if nbs <= 0:
return 0
if x == 0.0:
return 0.0
sign = 1
if x < 0:
x = -x
sign = -1
f = super().__call__(x=x, key=nbs, avoid_neg_Y=avoid_neg_F,
verbose=verbose)
return sign * f
class E_EXT_SSDNA(DNA_MODEL_APPROX):
def __init__(self, S=None, L_p=None, z=None, T=298.2,
f_min=0e-12, f_mid=25e-12, f_max=200e-12,
num_f_low=501, num_f_high=501,
p_deg_low=11, p_deg_high=8):
f = _E_ext_ssDNA
yx_f = ext_ssDNA
super().__init__(f, yx_f, key='nbs', S=S, L_p=L_p, z=z, T=T,
x_min=f_min, x_mid=f_mid, x_max=f_max,
num_f_low=num_f_low, num_f_high=num_f_high,
p_deg_low=p_deg_low, p_deg_high=p_deg_high)
def __call__(self, x=0.0, nbs=0, avoid_neg_E=True, verbose=False,
**ignored):
if nbs <= 0:
return 0.0
if x == 0.0:
return 0.0
# There are no negative energies, even for negative extensions
if x < 0:
x = -x
return super().__call__(x=x, key=nbs, avoid_neg_Y=avoid_neg_E,
verbose=verbose)
# Set DNA model functions to unbuffered versions per default
F_ssDNA = _F_ssDNA
E_ext_ssDNA = _E_ext_ssDNA
ext_dsDNA_wlc = _ext_dsDNA_wlc
| 34.799267
| 135
| 0.579725
|
ece9f7584ea22f4306d68fbfa2b8ef9b040fa85f
| 792
|
py
|
Python
|
AddExonSN/genExonRefBed/sortBed.py
|
seahurt/tcga_tools
|
97ff6954df676e73b7c70f0515aec1ab6e026151
|
[
"MIT"
] | 1
|
2020-06-08T06:35:45.000Z
|
2020-06-08T06:35:45.000Z
|
AddExonSN/genExonRefBed/sortBed.py
|
seahurt/tcga-utils
|
97ff6954df676e73b7c70f0515aec1ab6e026151
|
[
"MIT"
] | null | null | null |
AddExonSN/genExonRefBed/sortBed.py
|
seahurt/tcga-utils
|
97ff6954df676e73b7c70f0515aec1ab6e026151
|
[
"MIT"
] | null | null | null |
#!python
import os
import sys
input = sys.argv[1]
output = sys.argv[2]
f = open(input)
exon_pool=dict()
o = open(output,'w')
for line in f.readlines():
if('#' in line):
continue
if(line==""):
break
(chr,start,end,gene)=line.split()
#print(gene)
if (gene in exon_pool):
exon_pool[gene].append((start,end,chr))
else:
exon_pool[gene]=[(start,end,chr)]
for gene in exon_pool.keys():
count=1
#print(gene)
#print(exon_pool[gene])
#exon_pool[gene].sort(key=lambda x:x[0])
for exon in sorted(exon_pool[gene],key=lambda x: int(x[0])):
o.write("{chr}\t{start}\t{end}\t{gene}\t{count}\n".format(chr=exon[2],start=exon[0],end=exon[1],gene=gene,count=count))
o.flush()
count=count+1
f.close()
o.close()
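# Illustrative input/output sketch (assumed 4-column exon BED input):
#   chr1    100    200    GENE1
#   chr1     10     50    GENE1
# is rewritten, per gene and sorted by start coordinate, as:
#   chr1    10     50     GENE1    1
#   chr1    100    200    GENE1    2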
| 24
| 127
| 0.60101
|
c268f81c536f50746e8b9c223ed48f76781fb78f
| 1,731
|
py
|
Python
|
tests/test_thriftstruct.py
|
WKPlus/jsonthrift
|
975cc097afc92c53fa8b46f5f6c5b6da807a2acb
|
[
"MIT"
] | 4
|
2017-04-05T07:01:27.000Z
|
2017-04-10T04:15:37.000Z
|
tests/test_thriftstruct.py
|
WKPlus/jsonthrift
|
975cc097afc92c53fa8b46f5f6c5b6da807a2acb
|
[
"MIT"
] | null | null | null |
tests/test_thriftstruct.py
|
WKPlus/jsonthrift
|
975cc097afc92c53fa8b46f5f6c5b6da807a2acb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
import json
import unittest
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../")
))
from jsonthrift.thriftstruct import ThriftStruct
protocol = 'TBinaryProtocol'
class ThriftStructTest(unittest.TestCase):
def test_pack_struct(self):
thrift_file = 'data/complex.thrift'
ts = ThriftStruct(thrift_file)
data = {
'index': 2,
'istrue': 'true'
}
binary = ts.pack_struct('complex.returnType', data)
expect1 = '\x08\x00\x01\x00\x00\x00\x02\x0b\x00\x02\x00\x00\x00\x04true\x00'
expect2 = '\x0b\x00\x02\x00\x00\x00\x04true\x08\x00\x01\x00\x00\x00\x02\x00'
equal = expect1 == binary or expect2 == binary
self.assertTrue(equal)
def test_unpack_struct(self):
thrift_file = 'data/complex.thrift'
ts = ThriftStruct(thrift_file)
binary = '\x08\x00\x01\x00\x00\x00\x02\x0b\x00\x02\x00\x00\x00\x04true\x00'
data = ts.unpack_struct('complex.returnType', binary)
expect = {
'index': 2,
'istrue': 'true'
}
self.assertEqual(expect, data)
def test_pack_intkey(self):
thrift_file = 'data/intkey.thrift'
ts = ThriftStruct(thrift_file)
data = {
'byte_key': {
'1': '1',
},
'i16_key': {
'2': '2',
},
'i32_key': {
'3': '3',
},
'i64_key': {
'4': '4',
},
}
binary = ts.pack_struct('intkey.IntKey', data)
if __name__ == '__main__':
unittest.main()
| 27.046875
| 84
| 0.544772
|
e48208d587d4f40385302196a132d2236c757b76
| 813
|
py
|
Python
|
app/fedgraphnn/ego_networks_node_clf/model/gat.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
app/fedgraphnn/ego_networks_node_clf/model/gat.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
app/fedgraphnn/ego_networks_node_clf/model/gat.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv
class GATNodeCLF(torch.nn.Module):
def __init__(self, in_channels, out_channels, dropout, heads=2):
super(GATNodeCLF, self).__init__()
self.dropout = dropout
self.conv1 = GATConv(in_channels, heads, heads=heads)
# On the Pubmed dataset, use heads=8 in conv2.
self.conv2 = GATConv(heads * heads, out_channels, heads=1, concat=False, dropout=0.6)
self.nclass = out_channels
def forward(self, inp):
x = F.elu(self.conv1(inp.x, inp.edge_index))
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.conv2(x, inp.edge_index)
return F.log_softmax(x, dim=-1)
def loss(self, pred, label):
return F.nll_loss(pred, label)
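# Minimal usage sketch (assumption, not part of the original file); `data` is a
# torch_geometric.data.Data object exposing `x`, `edge_index`, `y` and masks:
#   model = GATNodeCLF(in_channels=data.num_features, out_channels=num_classes,
#                      dropout=0.5)
#   out = model(data)                     # per-node log-probabilities
#   loss = model.loss(out[data.train_mask], data.y[data.train_mask])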
| 33.875
| 93
| 0.661747
|
fa69897a9fb3059f78660df2151df89a3dc3c33a
| 4,032
|
py
|
Python
|
flask_web/werkzeug-master/scripts/make-release.py
|
bopopescu/local_scda
|
40fa4a586f140dc00b8d3f53c732e22e022be338
|
[
"MIT"
] | 1
|
2015-11-06T02:41:24.000Z
|
2015-11-06T02:41:24.000Z
|
scripts/make-release.py
|
Chitrank-Dixit/werkzeug
|
60670cb98db1934b7aa7df454cbe4508c467f403
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/make-release.py
|
Chitrank-Dixit/werkzeug
|
60670cb98db1934b7aa7df454cbe4508c467f403
|
[
"BSD-3-Clause"
] | 2
|
2020-07-23T21:55:21.000Z
|
2021-01-14T12:27:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
make-release
~~~~~~~~~~~~
Helper script that performs a release. Does pretty much everything
automatically for us.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import re
from datetime import datetime, date
from subprocess import Popen, PIPE
_date_clean_re = re.compile(r'(\d+)(st|nd|rd|th)')
def parse_changelog():
with open('CHANGES') as f:
lineiter = iter(f)
for line in lineiter:
match = re.search('^Version\s+(.*)', line.strip())
if match is None:
continue
length = len(match.group(1))
version = match.group(1).strip()
if lineiter.next().count('-') != len(match.group(0)):
continue
while 1:
change_info = lineiter.next().strip()
if change_info:
break
match = re.search(r'released on (\w+\s+\d+\w+\s+\d+)'
r'(?:, codename (.*))?(?i)', change_info)
if match is None:
continue
datestr, codename = match.groups()
return version, parse_date(datestr), codename
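# parse_changelog() expects CHANGES entries shaped like this (illustrative):
#
#   Version 1.0.1
#   -------------
#
#   released on March 31st 2020, codename Example
#
# i.e. an underlined "Version x.y.z" heading followed (after any blank lines)
# by a "released on <date>[, codename <name>]" line.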
def bump_version(version):
try:
parts = map(int, version.split('.'))
except ValueError:
fail('Current version is not numeric')
parts[-1] += 1
return '.'.join(map(str, parts))
def parse_date(string):
string = _date_clean_re.sub(r'\1', string)
return datetime.strptime(string, '%B %d %Y')
def set_filename_version(filename, version_number, pattern):
changed = []
def inject_version(match):
before, old, after = match.groups()
changed.append(True)
return before + version_number + after
with open(filename) as f:
contents = re.sub(r"^(\s*%s\s*=\s*')(.+?)(')(?sm)" % pattern,
inject_version, f.read())
if not changed:
fail('Could not find %s in %s', pattern, filename)
with open(filename, 'w') as f:
f.write(contents)
def set_init_version(version):
info('Setting __init__.py version to %s', version)
set_filename_version('werkzeug/__init__.py', version, '__version__')
def set_setup_version(version):
info('Setting setup.py version to %s', version)
set_filename_version('setup.py', version, 'version')
def build_and_upload():
Popen([sys.executable, 'setup.py', 'release', 'sdist', 'upload']).wait()
def fail(message, *args):
print >> sys.stderr, 'Error:', message % args
sys.exit(1)
def info(message, *args):
print >> sys.stderr, message % args
def get_git_tags():
return set(Popen(['git', 'tag'], stdout=PIPE).communicate()[0].splitlines())
def git_is_clean():
return Popen(['git', 'diff', '--quiet']).wait() == 0
def make_git_commit(message, *args):
message = message % args
Popen(['git', 'commit', '-am', message]).wait()
def make_git_tag(tag):
info('Tagging "%s"', tag)
Popen(['git', 'tag', tag]).wait()
def main():
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
rv = parse_changelog()
if rv is None:
fail('Could not parse changelog')
version, release_date, codename = rv
dev_version = bump_version(version) + '-dev'
info('Releasing %s (codename %s, release date %s)',
version, codename, release_date.strftime('%d/%m/%Y'))
tags = get_git_tags()
if version in tags:
fail('Version "%s" is already tagged', version)
if release_date.date() != date.today():
        fail('Release date is not today (%s != %s)',
             release_date.date(), date.today())
if not git_is_clean():
fail('You have uncommitted changes in git')
set_init_version(version)
set_setup_version(version)
make_git_commit('Bump version number to %s', version)
make_git_tag(version)
build_and_upload()
set_init_version(dev_version)
set_setup_version(dev_version)
if __name__ == '__main__':
main()
| 26.526316
| 80
| 0.59747
|
9e77334169ede1b9a29930a97e93e5d3014fef0a
| 1,759
|
py
|
Python
|
paper/F5a.py
|
mlares/hearsay
|
27e833f49eaea1c3d185c27036f149bc6f834edc
|
[
"MIT"
] | null | null | null |
paper/F5a.py
|
mlares/hearsay
|
27e833f49eaea1c3d185c27036f149bc6f834edc
|
[
"MIT"
] | null | null | null |
paper/F5a.py
|
mlares/hearsay
|
27e833f49eaea1c3d185c27036f149bc6f834edc
|
[
"MIT"
] | null | null | null |
from itertools import product as pp
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from hearsay import hearsay
# Figura 5
# (a) Variation of tau_survive for fixed tau_awakening
# -----------------------------------------------------
ta = [10000]
ts = [5000, 10000, 20000, 50000, 500000]
td = [32615]
z = pp(ta, ts, td)
tau_a = []
tau_s = []
d_max = []
fname = []
for k, i in enumerate(z):
tau_a.append(i[0])
tau_s.append(i[1])
d_max.append(i[2])
fname.append(f"../out/F5a/{str(k).zfill(5)}_001.pk")
df = pd.DataFrame(list(zip(tau_a, tau_s, d_max, fname)),
columns=['tau_awakening', 'tau_survive', 'D_max',
'filename'])
df.to_csv('F5a.csv')
df = pd.read_csv('F5a.csv')
conf = hearsay.Parser('F5a.ini')
conf.load_config()
G = hearsay.C3Net(conf)
G.set_parameters(df)
print('RUN simulation')
G.run()
print('REDUCE simulation')
R = hearsay.Results(G)
R.load()
res = R.redux()
FirstContactTimes = res['lF']
minval = 9999.
maxval = -9999.
for c1 in FirstContactTimes:
imax = max(c1)
imin = min(c1)
minval = min(minval, imin)
maxval = max(maxval, imax)
fig = plt.figure()
ax = fig.add_subplot()
for k, c1 in enumerate(FirstContactTimes):
if len(c1) == 0:
continue
imax = max(c1)
imin = min(c1)
if imax < imin+1.e-4:
continue
breaks = np.linspace(minval, maxval, 200)
hy, hx = np.histogram(c1, breaks, density=False)
lbl = (f"A={R.params.iloc[k]['tau_awakening']},"
f"S={R.params.iloc[k]['tau_survive']}")
hy = np.append(hy, hy[-1])
ax.step(breaks, hy, where='post', label=lbl)
ax.set_yscale('log')
ax.set_xlim(0, 6.e5)
ax.legend()
fig.savefig('F5a.png')
fig.savefig('F5a.pdf')
plt.close()
| 21.716049
| 67
| 0.608869
|
cfeda6cb146e07955dd21973e578939cab32d5c1
| 2,872
|
py
|
Python
|
iseq/profile.py
|
EBI-Metagenomics/iseq
|
3c28fc92e5af05c91c6669d7f1a28d1ce857f3f1
|
[
"MIT"
] | null | null | null |
iseq/profile.py
|
EBI-Metagenomics/iseq
|
3c28fc92e5af05c91c6669d7f1a28d1ce857f3f1
|
[
"MIT"
] | null | null | null |
iseq/profile.py
|
EBI-Metagenomics/iseq
|
3c28fc92e5af05c91c6669d7f1a28d1ce857f3f1
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from math import log
from typing import NamedTuple
from imm import Alphabet, Sequence, lprob_zero
from .model import AltModel, NullModel, SpecialTransitions
from .result import SearchResults
__all__ = ["Profile", "ProfileID"]
ProfileID = NamedTuple("ProfileID", [("name", str), ("acc", str)])
class Profile(ABC):
def __init__(
self,
profid: ProfileID,
alphabet: Alphabet,
null_model: NullModel,
alt_model: AltModel,
hmmer3_compat: bool,
):
self._profid = profid
self._alphabet = alphabet
self._null_model = null_model
self._alt_model = alt_model
self._multiple_hits: bool = True
self._special_trans = SpecialTransitions()
self._hmmer3_compat = hmmer3_compat
self._set_target_length_model(1)
self._window_length: int = 0
@property
def profid(self) -> ProfileID:
return self._profid
@property
def window_length(self) -> int:
return self._window_length
@window_length.setter
def window_length(self, length: int) -> None:
self._window_length = length
@property
def alphabet(self):
return self._alphabet
@abstractmethod
def create_sequence(self, sequence: bytes) -> Sequence:
del self
del sequence
raise NotImplementedError()
@property
def null_model(self) -> NullModel:
del self
raise NotImplementedError()
@property
def alt_model(self) -> AltModel:
del self
raise NotImplementedError()
@property
def multiple_hits(self) -> bool:
return self._multiple_hits
@multiple_hits.setter
def multiple_hits(self, multiple_hits: bool):
self._multiple_hits = multiple_hits
@abstractmethod
def search(self, sequence: Sequence) -> SearchResults:
del self
del sequence
raise NotImplementedError()
def _set_target_length_model(self, target_length: int):
t = self._get_target_length_model(target_length)
self._null_model.set_special_transitions(t)
self._alt_model.set_special_transitions(t, self._hmmer3_compat)
def _get_target_length_model(self, target_length: int) -> SpecialTransitions:
L = target_length
if L == 0:
raise ValueError("Target length cannot be zero.")
if self._multiple_hits:
q = 0.5
log_q = log(0.5)
else:
q = 0.0
log_q = lprob_zero()
lp = log(L) - log(L + 2 + q / (1 - q))
l1p = log(2 + q / (1 - q)) - log(L + 2 + q / (1 - q))
lr = log(L) - log(L + 1)
t = self._special_trans
t.NN = t.CC = t.JJ = lp
t.NB = t.CT = t.JB = l1p
t.RR = lr
t.EJ = log_q
t.EC = log(1 - q)
return t
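# Worked example of the target-length model above (comment only): for L = 100
# with multiple hits enabled, q = 0.5 and q / (1 - q) = 1, so
#   t.NN = t.CC = t.JJ = log(100 / 103)
#   t.NB = t.CT = t.JB = log(3 / 103)
#   t.RR = log(100 / 101),  t.EJ = log(0.5),  t.EC = log(0.5)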
| 26.109091
| 81
| 0.613162
|
26700d5e46c5c50620d587ffc22163c397dcf05a
| 193
|
py
|
Python
|
ex2/11.py
|
EAGLE12/studyPython
|
3ffffd89fd5cf8a2183bcc2fa3584e5ab2b64146
|
[
"MIT"
] | null | null | null |
ex2/11.py
|
EAGLE12/studyPython
|
3ffffd89fd5cf8a2183bcc2fa3584e5ab2b64146
|
[
"MIT"
] | null | null | null |
ex2/11.py
|
EAGLE12/studyPython
|
3ffffd89fd5cf8a2183bcc2fa3584e5ab2b64146
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
p0 = np.array((1, 1))
p1 = np.array((6, 4))
A = p1 - p0
print(A)
dist = math.sqrt(pow(A[0],2) + pow(A[1],2))
print(dist)
a_dist = np.linalg.norm(A)
print(a_dist)
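# Worked check (comment only): A = p1 - p0 = (5, 3), so both computations give
# sqrt(5**2 + 3**2) = sqrt(34), which is about 5.831.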
| 19.3
| 43
| 0.626943
|
ba3a11d862bc2d57fcf3312fb481cae4884047e0
| 7,561
|
py
|
Python
|
tests/components/sensor/test_jewish_calendar.py
|
raymondelooff/home-assistant
|
a9a8cbbd100b4ca5c7f90210fb37da37bc634923
|
[
"Apache-2.0"
] | 1
|
2019-07-24T09:26:57.000Z
|
2019-07-24T09:26:57.000Z
|
tests/components/sensor/test_jewish_calendar.py
|
raymondelooff/home-assistant
|
a9a8cbbd100b4ca5c7f90210fb37da37bc634923
|
[
"Apache-2.0"
] | 5
|
2021-02-08T20:32:11.000Z
|
2022-01-13T01:19:23.000Z
|
tests/components/sensor/test_jewish_calendar.py
|
raymondelooff/home-assistant
|
a9a8cbbd100b4ca5c7f90210fb37da37bc634923
|
[
"Apache-2.0"
] | null | null | null |
"""The tests for the Jewish calendar sensor platform."""
import unittest
from datetime import time
from datetime import datetime as dt
from unittest.mock import patch
from homeassistant.util.async_ import run_coroutine_threadsafe
from homeassistant.util.dt import get_time_zone
from homeassistant.setup import setup_component
from homeassistant.components.sensor.jewish_calendar import JewishCalSensor
from tests.common import get_test_home_assistant
class TestJewishCalenderSensor(unittest.TestCase):
"""Test the Jewish Calendar sensor."""
TEST_LATITUDE = 31.778
TEST_LONGITUDE = 35.235
def setUp(self):
"""Set up things to run when tests begin."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_jewish_calendar_min_config(self):
"""Test minimum jewish calendar configuration."""
config = {
'sensor': {
'platform': 'jewish_calendar'
}
}
assert setup_component(self.hass, 'sensor', config)
def test_jewish_calendar_hebrew(self):
"""Test jewish calendar sensor with language set to hebrew."""
config = {
'sensor': {
'platform': 'jewish_calendar',
'language': 'hebrew',
}
}
assert setup_component(self.hass, 'sensor', config)
def test_jewish_calendar_multiple_sensors(self):
"""Test jewish calendar sensor with multiple sensors setup."""
config = {
'sensor': {
'platform': 'jewish_calendar',
'sensors': [
'date', 'weekly_portion', 'holiday_name',
'holyness', 'first_light', 'gra_end_shma',
'mga_end_shma', 'plag_mincha', 'first_stars'
]
}
}
assert setup_component(self.hass, 'sensor', config)
def test_jewish_calendar_sensor_date_output(self):
"""Test Jewish calendar sensor date output."""
test_time = dt(2018, 9, 3)
sensor = JewishCalSensor(
name='test', language='english', sensor_type='date',
latitude=self.TEST_LATITUDE, longitude=self.TEST_LONGITUDE,
timezone="UTC", diaspora=False)
with patch('homeassistant.util.dt.now', return_value=test_time):
run_coroutine_threadsafe(
sensor.async_update(),
self.hass.loop).result()
self.assertEqual(sensor.state, '23 Elul 5778')
def test_jewish_calendar_sensor_date_output_hebrew(self):
"""Test Jewish calendar sensor date output in hebrew."""
test_time = dt(2018, 9, 3)
sensor = JewishCalSensor(
name='test', language='hebrew', sensor_type='date',
latitude=self.TEST_LATITUDE, longitude=self.TEST_LONGITUDE,
timezone="UTC", diaspora=False)
with patch('homeassistant.util.dt.now', return_value=test_time):
run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop).result()
self.assertEqual(sensor.state, "כ\"ג באלול ה\' תשע\"ח")
def test_jewish_calendar_sensor_holiday_name(self):
"""Test Jewish calendar sensor holiday name output in hebrew."""
test_time = dt(2018, 9, 10)
sensor = JewishCalSensor(
name='test', language='hebrew', sensor_type='holiday_name',
latitude=self.TEST_LATITUDE, longitude=self.TEST_LONGITUDE,
timezone="UTC", diaspora=False)
with patch('homeassistant.util.dt.now', return_value=test_time):
run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop).result()
self.assertEqual(sensor.state, "א\' ראש השנה")
def test_jewish_calendar_sensor_holiday_name_english(self):
"""Test Jewish calendar sensor holiday name output in english."""
test_time = dt(2018, 9, 10)
sensor = JewishCalSensor(
name='test', language='english', sensor_type='holiday_name',
latitude=self.TEST_LATITUDE, longitude=self.TEST_LONGITUDE,
timezone="UTC", diaspora=False)
with patch('homeassistant.util.dt.now', return_value=test_time):
run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop).result()
self.assertEqual(sensor.state, "Rosh Hashana I")
def test_jewish_calendar_sensor_holyness(self):
"""Test Jewish calendar sensor holyness value."""
test_time = dt(2018, 9, 10)
sensor = JewishCalSensor(
name='test', language='hebrew', sensor_type='holyness',
latitude=self.TEST_LATITUDE, longitude=self.TEST_LONGITUDE,
timezone="UTC", diaspora=False)
with patch('homeassistant.util.dt.now', return_value=test_time):
run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop).result()
self.assertEqual(sensor.state, 1)
def test_jewish_calendar_sensor_torah_reading(self):
"""Test Jewish calendar sensor torah reading in hebrew."""
test_time = dt(2018, 9, 8)
sensor = JewishCalSensor(
name='test', language='hebrew', sensor_type='weekly_portion',
latitude=self.TEST_LATITUDE, longitude=self.TEST_LONGITUDE,
timezone="UTC", diaspora=False)
with patch('homeassistant.util.dt.now', return_value=test_time):
run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop).result()
self.assertEqual(sensor.state, "פרשת נצבים")
def test_jewish_calendar_sensor_first_stars_ny(self):
"""Test Jewish calendar sensor first stars time in NY, US."""
test_time = dt(2018, 9, 8)
sensor = JewishCalSensor(
name='test', language='hebrew', sensor_type='first_stars',
latitude=40.7128, longitude=-74.0060,
timezone=get_time_zone("America/New_York"), diaspora=False)
with patch('homeassistant.util.dt.now', return_value=test_time):
run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop).result()
self.assertEqual(sensor.state, time(19, 48))
def test_jewish_calendar_sensor_first_stars_jerusalem(self):
"""Test Jewish calendar sensor first stars time in Jerusalem, IL."""
test_time = dt(2018, 9, 8)
sensor = JewishCalSensor(
name='test', language='hebrew', sensor_type='first_stars',
latitude=self.TEST_LATITUDE, longitude=self.TEST_LONGITUDE,
timezone="Asia/Jerusalem", diaspora=False)
with patch('homeassistant.util.dt.now', return_value=test_time):
run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop).result()
self.assertEqual(sensor.state, time(19, 21))
def test_jewish_calendar_sensor_torah_reading_weekday(self):
"""Test the sensor showing torah reading also on weekdays."""
test_time = dt(2018, 10, 14)
sensor = JewishCalSensor(
name='test', language='hebrew', sensor_type='weekly_portion',
latitude=self.TEST_LATITUDE, longitude=self.TEST_LONGITUDE,
timezone="Asia/Jerusalem", diaspora=False)
with patch('homeassistant.util.dt.now', return_value=test_time):
run_coroutine_threadsafe(
sensor.async_update(), self.hass.loop).result()
self.assertEqual(sensor.state, "פרשת לך לך")
| 44.216374
| 76
| 0.638804
|
47f5df51db4c94ecef9d5b389fe266d707826d12
| 897
|
py
|
Python
|
core/shell.py
|
swagkarna/arissploit
|
b0a58f61afc12ac78c65e0275dfa5e4d1e44989e
|
[
"MIT"
] | 3
|
2019-12-09T10:07:10.000Z
|
2021-09-18T18:20:09.000Z
|
core/shell.py
|
swagkarna/arissploit
|
b0a58f61afc12ac78c65e0275dfa5e4d1e44989e
|
[
"MIT"
] | null | null | null |
core/shell.py
|
swagkarna/arissploit
|
b0a58f61afc12ac78c65e0275dfa5e4d1e44989e
|
[
"MIT"
] | 1
|
2021-11-04T11:18:09.000Z
|
2021-11-04T11:18:09.000Z
|
# Import python modules
import sys
# Import core modules
from core.module_manager import ModuleManager
from core import colors
from core import command_handler
shellface = "["+colors.bold+"arissploit"+colors.end+"]:"
mm = ModuleManager
def run():
global shellface
global mm
ch = command_handler.Commandhandler(mm, False)
while True:
try:
setFace()
command = input(shellface+" ")
ch.handle(command)
except KeyboardInterrupt:
if mm.moduleLoaded == 0:
print()
sys.exit(0)
else:
print()
mm.moduleLoaded = 0
mm.moduleName = ""
print(colors.bold + colors.red + "Ctrl + C detected, going back..." + colors.end)
def setFace():
global shellface
global mm
if mm.moduleLoaded == 0:
shellface = "["+colors.bold+"arissploit"+colors.end+"]:"
else:
shellface = "["+colors.bold+"arissploit"+colors.end+"]"+"("+colors.red+mm.moduleName+colors.end+"):"
| 21.878049
| 102
| 0.684504
|
fa5edfd07282bc524bb36b6e8128503dcde00958
| 2,036
|
py
|
Python
|
tests/e2e/binance_blvt.py
|
PetrZufan/cryptoxlib-aio
|
8fbb817ee7a7a88693804e24877863370d1d53c7
|
[
"MIT"
] | 90
|
2020-04-09T18:34:49.000Z
|
2022-03-09T14:29:32.000Z
|
tests/e2e/binance_blvt.py
|
PetrZufan/cryptoxlib-aio
|
8fbb817ee7a7a88693804e24877863370d1d53c7
|
[
"MIT"
] | 44
|
2020-04-03T17:02:20.000Z
|
2022-01-29T14:51:51.000Z
|
tests/e2e/binance_blvt.py
|
PetrZufan/cryptoxlib-aio
|
8fbb817ee7a7a88693804e24877863370d1d53c7
|
[
"MIT"
] | 28
|
2020-04-25T21:34:53.000Z
|
2022-03-31T07:20:07.000Z
|
import unittest
import os
import logging
from cryptoxlib.CryptoXLib import CryptoXLib
from cryptoxlib.clients.binance.exceptions import BinanceRestException
from CryptoXLibTest import CryptoXLibTest
api_key = os.environ['BINANCEAPIKEY']
sec_key = os.environ['BINANCESECKEY']
class BinanceBLVTRestApi(CryptoXLibTest):
@classmethod
def initialize(cls) -> None:
cls.print_logs = True
cls.log_level = logging.DEBUG
async def init_test(self):
self.client = CryptoXLib.create_binance_client(api_key, sec_key)
async def clean_test(self):
await self.client.close()
def check_positive_response(self, response):
return str(response['status_code'])[0] == '2'
async def test_get_blvt_info(self):
response = await self.client.get_blvt_info()
self.assertTrue(self.check_positive_response(response))
async def test_get_blvt_subscribtion_record(self):
response = await self.client.get_blvt_subscribtion_record()
self.assertTrue(self.check_positive_response(response))
async def test_get_blvt_redemption_record(self):
response = await self.client.get_blvt_redemption_record()
self.assertTrue(self.check_positive_response(response))
async def test_get_blvt_user_info(self):
response = await self.client.get_blvt_user_info()
self.assertTrue(self.check_positive_response(response))
async def test_blvt_subscribe(self):
with self.assertRaises(BinanceRestException) as cm:
await self.client.blvt_subscribe("BTCUP", "50000000000")
e = cm.exception
self.assertEqual(e.status_code, 400)
self.assertEqual(e.body['code'], -5002)
async def test_blvt_redeem(self):
with self.assertRaises(BinanceRestException) as cm:
await self.client.blvt_redeem("BTCUP", "50000000000")
e = cm.exception
self.assertEqual(e.status_code, 400)
self.assertEqual(e.body['code'], -5002)
if __name__ == '__main__':
unittest.main()
| 32.31746
| 72
| 0.717092
|
7e89f5b1930fd07585b2e216d2e67e5f2686d333
| 10,663
|
py
|
Python
|
sdk/python/pulumi_aws/get_availability_zone.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/get_availability_zone.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/get_availability_zone.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetAvailabilityZoneResult',
'AwaitableGetAvailabilityZoneResult',
'get_availability_zone',
]
@pulumi.output_type
class GetAvailabilityZoneResult:
"""
A collection of values returned by getAvailabilityZone.
"""
def __init__(__self__, all_availability_zones=None, filters=None, group_name=None, id=None, name=None, name_suffix=None, network_border_group=None, opt_in_status=None, parent_zone_id=None, parent_zone_name=None, region=None, state=None, zone_id=None, zone_type=None):
if all_availability_zones and not isinstance(all_availability_zones, bool):
raise TypeError("Expected argument 'all_availability_zones' to be a bool")
pulumi.set(__self__, "all_availability_zones", all_availability_zones)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if group_name and not isinstance(group_name, str):
raise TypeError("Expected argument 'group_name' to be a str")
pulumi.set(__self__, "group_name", group_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if name_suffix and not isinstance(name_suffix, str):
raise TypeError("Expected argument 'name_suffix' to be a str")
pulumi.set(__self__, "name_suffix", name_suffix)
if network_border_group and not isinstance(network_border_group, str):
raise TypeError("Expected argument 'network_border_group' to be a str")
pulumi.set(__self__, "network_border_group", network_border_group)
if opt_in_status and not isinstance(opt_in_status, str):
raise TypeError("Expected argument 'opt_in_status' to be a str")
pulumi.set(__self__, "opt_in_status", opt_in_status)
if parent_zone_id and not isinstance(parent_zone_id, str):
raise TypeError("Expected argument 'parent_zone_id' to be a str")
pulumi.set(__self__, "parent_zone_id", parent_zone_id)
if parent_zone_name and not isinstance(parent_zone_name, str):
raise TypeError("Expected argument 'parent_zone_name' to be a str")
pulumi.set(__self__, "parent_zone_name", parent_zone_name)
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
pulumi.set(__self__, "region", region)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if zone_id and not isinstance(zone_id, str):
raise TypeError("Expected argument 'zone_id' to be a str")
pulumi.set(__self__, "zone_id", zone_id)
if zone_type and not isinstance(zone_type, str):
raise TypeError("Expected argument 'zone_type' to be a str")
pulumi.set(__self__, "zone_type", zone_type)
@property
@pulumi.getter(name="allAvailabilityZones")
def all_availability_zones(self) -> Optional[bool]:
return pulumi.get(self, "all_availability_zones")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetAvailabilityZoneFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter(name="groupName")
def group_name(self) -> str:
"""
For Availability Zones, this is the same value as the Region name. For Local Zones, the name of the associated group, for example `us-west-2-lax-1`.
"""
return pulumi.get(self, "group_name")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nameSuffix")
def name_suffix(self) -> str:
"""
The part of the AZ name that appears after the region name, uniquely identifying the AZ within its region.
For Availability Zones this is usually a single letter, for example `a` for the `us-west-2a` zone.
For Local and Wavelength Zones this is a longer string, for example `wl1-sfo-wlz-1` for the `us-west-2-wl1-sfo-wlz-1` zone.
"""
return pulumi.get(self, "name_suffix")
@property
@pulumi.getter(name="networkBorderGroup")
def network_border_group(self) -> str:
"""
The name of the location from which the address is advertised.
"""
return pulumi.get(self, "network_border_group")
@property
@pulumi.getter(name="optInStatus")
def opt_in_status(self) -> str:
"""
For Availability Zones, this always has the value of `opt-in-not-required`. For Local Zones, this is the opt in status. The possible values are `opted-in` and `not-opted-in`.
"""
return pulumi.get(self, "opt_in_status")
@property
@pulumi.getter(name="parentZoneId")
def parent_zone_id(self) -> str:
"""
The ID of the zone that handles some of the Local Zone or Wavelength Zone control plane operations, such as API calls.
"""
return pulumi.get(self, "parent_zone_id")
@property
@pulumi.getter(name="parentZoneName")
def parent_zone_name(self) -> str:
"""
The name of the zone that handles some of the Local Zone or Wavelength Zone control plane operations, such as API calls.
"""
return pulumi.get(self, "parent_zone_name")
@property
@pulumi.getter
def region(self) -> str:
"""
The region where the selected availability zone resides. This is always the region selected on the provider, since this data source searches only within that region.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter
def state(self) -> str:
return pulumi.get(self, "state")
@property
@pulumi.getter(name="zoneId")
def zone_id(self) -> str:
return pulumi.get(self, "zone_id")
@property
@pulumi.getter(name="zoneType")
def zone_type(self) -> str:
"""
The type of zone. Values are `availability-zone`, `local-zone`, and `wavelength-zone`.
"""
return pulumi.get(self, "zone_type")
class AwaitableGetAvailabilityZoneResult(GetAvailabilityZoneResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAvailabilityZoneResult(
all_availability_zones=self.all_availability_zones,
filters=self.filters,
group_name=self.group_name,
id=self.id,
name=self.name,
name_suffix=self.name_suffix,
network_border_group=self.network_border_group,
opt_in_status=self.opt_in_status,
parent_zone_id=self.parent_zone_id,
parent_zone_name=self.parent_zone_name,
region=self.region,
state=self.state,
zone_id=self.zone_id,
zone_type=self.zone_type)
def get_availability_zone(all_availability_zones: Optional[bool] = None,
filters: Optional[Sequence[pulumi.InputType['GetAvailabilityZoneFilterArgs']]] = None,
name: Optional[str] = None,
state: Optional[str] = None,
zone_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAvailabilityZoneResult:
"""
`getAvailabilityZone` provides details about a specific availability zone (AZ)
in the current region.
This can be used both to validate an availability zone given in a variable
and to split the AZ name into its component parts of an AWS region and an
AZ identifier letter. The latter may be useful e.g. for implementing a
consistent subnet numbering scheme across several regions by mapping both
the region and the subnet letter to network numbers.
This is different from the `getAvailabilityZones` (plural) data source,
which provides a list of the available zones.
:param bool all_availability_zones: Set to `true` to include all Availability Zones and Local Zones regardless of your opt in status.
:param Sequence[pulumi.InputType['GetAvailabilityZoneFilterArgs']] filters: Configuration block(s) for filtering. Detailed below.
:param str name: The name of the filter field. Valid values can be found in the [EC2 DescribeAvailabilityZones API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
:param str state: A specific availability zone state to require. May be any of `"available"`, `"information"` or `"impaired"`.
:param str zone_id: The zone ID of the availability zone to select.
"""
__args__ = dict()
__args__['allAvailabilityZones'] = all_availability_zones
__args__['filters'] = filters
__args__['name'] = name
__args__['state'] = state
__args__['zoneId'] = zone_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:index/getAvailabilityZone:getAvailabilityZone', __args__, opts=opts, typ=GetAvailabilityZoneResult).value
return AwaitableGetAvailabilityZoneResult(
all_availability_zones=__ret__.all_availability_zones,
filters=__ret__.filters,
group_name=__ret__.group_name,
id=__ret__.id,
name=__ret__.name,
name_suffix=__ret__.name_suffix,
network_border_group=__ret__.network_border_group,
opt_in_status=__ret__.opt_in_status,
parent_zone_id=__ret__.parent_zone_id,
parent_zone_name=__ret__.parent_zone_name,
region=__ret__.region,
state=__ret__.state,
zone_id=__ret__.zone_id,
zone_type=__ret__.zone_type)
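# Illustrative usage sketch (returned values are assumptions, not from this file):
#   zone = get_availability_zone(name="us-west-2a")
#   # zone.region       -> "us-west-2"
#   # zone.name_suffix  -> "a"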
| 43.880658
| 271
| 0.67542
|
f6588627082e0a9c95c4156478e7d75abaef3ede
| 1,311
|
py
|
Python
|
console_basic/exception.py
|
QthCN/console-basic
|
619fe47ac28dbc669bf84064ff4a729923aa978d
|
[
"Apache-2.0"
] | null | null | null |
console_basic/exception.py
|
QthCN/console-basic
|
619fe47ac28dbc669bf84064ff4a729923aa978d
|
[
"Apache-2.0"
] | null | null | null |
console_basic/exception.py
|
QthCN/console-basic
|
619fe47ac28dbc669bf84064ff4a729923aa978d
|
[
"Apache-2.0"
] | null | null | null |
from oslo_config import cfg
from oslo_log import log
from oslo_utils import encodeutils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class Error(Exception):
message_format = None
def __init__(self, message=None, **kwargs):
try:
message = self._build_message(message, **kwargs)
except KeyError:
LOG.warning('missing exception kwargs (programmer error)')
message = self.message_format
super(Error, self).__init__(message)
def _build_message(self, message, **kwargs):
"""Builds and returns an exception message.
:raises: KeyError given insufficient kwargs
"""
if not message:
try:
message = self.message_format % kwargs
except UnicodeDecodeError:
try:
kwargs = {k: encodeutils.safe_decode(v)
for k, v in kwargs.items()}
except UnicodeDecodeError:
message = self.message_format
else:
message = self.message_format % kwargs
return message
class HTTPCodeError(Error):
message_format = "HTTP Code is %(code)d."
class HTTPContentError(Error):
message_format = "There is error in HTTP content, %(error)s."
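# Illustrative usage (not part of the original module):
#   raise HTTPCodeError(code=404)             # -> "HTTP Code is 404."
#   raise HTTPContentError(error='bad json')  # -> "There is error in HTTP content, bad json."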
| 26.22
| 70
| 0.595728
|
282610eab654ffcd0140a3aff241a85554b29381
| 3,770
|
py
|
Python
|
tests/web/unauthenticated.py
|
eBrandValue/instagram_private_api
|
2cd84362caccc83e2c87cfd359f714d4b43a5b3a
|
[
"MIT"
] | 2,564
|
2017-01-17T07:48:00.000Z
|
2022-03-31T18:52:55.000Z
|
tests/web/unauthenticated.py
|
NTgitdude23/instagram_private_api
|
1d70e99bc11591161b0cd71cff3f0c08cd04b34f
|
[
"MIT"
] | 335
|
2017-01-19T13:44:47.000Z
|
2022-03-15T09:51:43.000Z
|
tests/web/unauthenticated.py
|
NTgitdude23/instagram_private_api
|
1d70e99bc11591161b0cd71cff3f0c08cd04b34f
|
[
"MIT"
] | 729
|
2017-01-27T14:40:56.000Z
|
2022-03-31T12:30:59.000Z
|
from ..common import WebApiTestBase
class UnauthenticatedTests(WebApiTestBase):
    """Tests for endpoints that do not require authentication."""
@staticmethod
def init_all(api):
return [
{
'name': 'test_unauthenticated_tag_feed',
'test': UnauthenticatedTests('test_unauthenticated_tag_feed', api),
},
{
'name': 'test_unauthenticated_user_feed',
'test': UnauthenticatedTests('test_unauthenticated_user_feed', api),
},
{
'name': 'test_unauthenticated_location_feed',
'test': UnauthenticatedTests('test_unauthenticated_location_feed', api),
},
{
'name': 'test_unauthenticated_media_comments',
'test': UnauthenticatedTests('test_unauthenticated_media_comments', api),
},
{
'name': 'test_unauthenticated_media_comments_noextract',
'test': UnauthenticatedTests('test_unauthenticated_media_comments_noextract', api),
},
{
'name': 'test_unauthenticated_user_info2',
'test': UnauthenticatedTests('test_unauthenticated_user_info2', api),
},
{
'name': 'test_unauthenticated_tag_story_feed',
'test': UnauthenticatedTests('test_unauthenticated_tag_story_feed', api),
},
{
'name': 'test_unauthenticated_location_story_feed',
'test': UnauthenticatedTests('test_unauthenticated_location_story_feed', api),
},
]
def test_unauthenticated_tag_feed(self):
results = self.api.tag_feed('catsofinstagram').get('data', {})
self.assertIsNotNone(results.get('hashtag', {}).get('name'))
self.assertGreater(
len(results.get('hashtag', {}).get('edge_hashtag_to_media', {}).get('edges', [])), 0)
self.assertGreater(
len(results.get('hashtag', {}).get('edge_hashtag_to_top_posts', {}).get('edges', [])), 0)
def test_unauthenticated_user_feed(self):
results = self.api.user_feed(self.test_user_id)
self.assertGreater(len(results), 0)
self.assertIsInstance(results, list)
self.assertIsInstance(results[0], dict)
def test_unauthenticated_location_feed(self):
results = self.api.location_feed('212988663').get('data', {})
self.assertIsNotNone(results.get('location', {}).get('name'))
self.assertGreater(
len(results.get('location', {}).get('edge_location_to_media', {}).get('edges', [])), 0)
self.assertGreater(
len(results.get('location', {}).get('edge_location_to_top_posts', {}).get('edges', [])), 0)
def test_unauthenticated_media_comments(self):
results = self.api.media_comments(self.test_media_shortcode, count=20)
self.assertGreaterEqual(len(results), 0)
self.assertIsInstance(results, list)
self.assertIsInstance(results[0], dict)
def test_unauthenticated_media_comments_noextract(self):
results = self.api.media_comments(self.test_media_shortcode, count=20, extract=False)
self.assertIsInstance(results, dict)
def test_unauthenticated_user_info2(self):
results = self.api.user_info2('instagram')
self.assertIsNotNone(results.get('id'))
def test_unauthenticated_tag_story_feed(self):
results = self.api.tag_story_feed('catsofinstagram').get('data', {})
self.assertTrue('reels_media' in results)
def test_unauthenticated_location_story_feed(self):
results = self.api.location_story_feed('7226110').get('data', {})
self.assertTrue('reels_media' in results)
| 42.840909
| 103
| 0.625729
|
68f39a8375980b7220a1bbd78bfe0b258881d830
| 3,871
|
py
|
Python
|
log_it/extensions/marshmallow/log.py
|
tanj/log-it
|
d7223af1d0216d3febe4ebc39e06e24dceb3115f
|
[
"BSD-3-Clause"
] | null | null | null |
log_it/extensions/marshmallow/log.py
|
tanj/log-it
|
d7223af1d0216d3febe4ebc39e06e24dceb3115f
|
[
"BSD-3-Clause"
] | null | null | null |
log_it/extensions/marshmallow/log.py
|
tanj/log-it
|
d7223af1d0216d3febe4ebc39e06e24dceb3115f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=R0903, C0115
"""
log_it.extensions.marshmallow.log
---------------------------------
Marshmallow Log Models
:copyright: (c) 2021 by John te Bokkel
:license: BSD, see LICENSE for more details
"""
from datetime import datetime
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema, auto_field
from marshmallow_sqlalchemy.fields import Nested
from log_it.log.model import (
TLog,
TField,
TLogField,
TMessage,
TMessageType,
TTag,
TTagMessage,
TUserPermission,
TRolePermission,
)
from . import FixtureSchema
from .user import UserFixture, RoleFixture, ActionFixture
class LogSchema(SQLAlchemyAutoSchema):
class Meta:
model = TLog
class FieldSchema(SQLAlchemyAutoSchema):
class Meta:
model = TField
class LogFieldSchema(SQLAlchemyAutoSchema):
class Meta:
model = TLogField
class MessageSchema(SQLAlchemyAutoSchema):
class Meta:
model = TMessage
class MessageTypeSchema(SQLAlchemyAutoSchema):
class Meta:
model = TMessageType
class TagSchema(SQLAlchemyAutoSchema):
class Meta:
model = TTag
class TagMessageSchema(SQLAlchemyAutoSchema):
class Meta:
model = TTagMessage
class UserPermissionSchema(SQLAlchemyAutoSchema):
class Meta:
model = TUserPermission
class RolePermissionSchema(SQLAlchemyAutoSchema):
class Meta:
model = TRolePermission
# FixtureSchema
class LogFixture(FixtureSchema):
"""Barebones Log Fixture for stubs"""
class Meta(FixtureSchema.Meta):
model = TLog
filter_attrs = ["sLog"]
sLog = auto_field()
class FieldFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TField
filter_attrs = ["sField"]
sField = auto_field()
class LogFieldFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TLogField
filter_attrs = [
"log.ixLog",
"field.ixField",
]
log = Nested(LogFixture, many=False)
field = Nested(FieldFixture, many=False)
sValue = auto_field()
iOrder = auto_field(missing=None)
class MessageTypeFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TMessageType
filter_attrs = ["sMessageType"]
class TagFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TTag
filter_attrs = ["sTag"]
sTag = auto_field()
class TagMessageFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TTagMessage
class MessageFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TMessage
# message fixtures are always inserted, never looked up
filter_attrs = None
log = Nested(LogFixture, many=False)
message_type = Nested(MessageTypeFixture, many=False)
user = Nested(UserFixture, many=False)
utcMessage = auto_field(missing=datetime.utcnow)
sMessage = auto_field()
tags = Nested(TagFixture, many=True)
class UserPermissionFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TUserPermission
log = Nested(LogFixture, many=False)
user = Nested(UserFixture, many=False)
action = Nested(ActionFixture, many=False)
class RolePermissionFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TRolePermission
log = Nested(LogFixture, many=False)
role = Nested(RoleFixture, many=False)
action = Nested(ActionFixture, many=False)
class LogFullFixture(FixtureSchema):
class Meta(FixtureSchema.Meta):
model = TLog
filter_attrs = ["sLog"]
sLog = auto_field()
user = Nested(UserFixture, many=False)
fields = Nested(FieldFixture, many=True)
user_permissions = Nested(UserPermissionFixture)
role_permissions = Nested(RolePermissionFixture)
| 22.375723
| 67
| 0.686386
|
363de2138d30267f26bed5b17cf288ffd906442b
| 28,224
|
py
|
Python
|
pysnmp/HM2-LOGGING-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/HM2-LOGGING-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/HM2-LOGGING-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module HM2-LOGGING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HM2-LOGGING-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:18:56 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
Hm2TlsCipherSuites, Hm2TlsVersions = mibBuilder.importSymbols("HM2-MGMTACCESS-MIB", "Hm2TlsCipherSuites", "Hm2TlsVersions")
HmTimeSeconds1970, HmEnabledStatus, hm2ConfigurationMibs = mibBuilder.importSymbols("HM2-TC-MIB", "HmTimeSeconds1970", "HmEnabledStatus", "hm2ConfigurationMibs")
InetPortNumber, InetAddressType, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetPortNumber", "InetAddressType", "InetAddress")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, MibIdentifier, Counter64, iso, TimeTicks, Integer32, Counter32, ObjectIdentity, NotificationType, Unsigned32, IpAddress, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "MibIdentifier", "Counter64", "iso", "TimeTicks", "Integer32", "Counter32", "ObjectIdentity", "NotificationType", "Unsigned32", "IpAddress", "Bits")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
hm2LoggingMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 248, 11, 23))
hm2LoggingMib.setRevisions(('2012-08-08 00:00', '2011-03-16 00:00',))
if mibBuilder.loadTexts: hm2LoggingMib.setLastUpdated('201208080000Z')
if mibBuilder.loadTexts: hm2LoggingMib.setOrganization('Hirschmann Automation and Control GmbH')
class HmAgentLogSeverity(TextualConvention, Integer32):
reference = 'RFC3164 - 4.1.1: Table 2'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("emergency", 0), ("alert", 1), ("critical", 2), ("error", 3), ("warning", 4), ("notice", 5), ("informational", 6), ("debug", 7))
hm2LoggingMibNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 0))
hm2LoggingMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 1))
hm2LogSnmpLoggingGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 1))
hm2LogCliCommandsLoggingGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 2))
hm2LogConsoleLoggingGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 3))
hm2LogBufferedLoggingGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 4))
hm2LogSyslogGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5))
hm2LogPersistentGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 6))
hm2LogCounterGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 7))
hm2LogTemperatureGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 8))
hm2LogAuditGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 9))
hm2LogEmailAlertGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10))
hm2LogSnmpLogGetRequest = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 1, 1), HmEnabledStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogSnmpLogGetRequest.setStatus('current')
hm2LogSnmpLogSetRequest = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 1, 2), HmEnabledStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogSnmpLogSetRequest.setStatus('current')
hm2LogSnmpLogGetSeverity = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 1, 3), HmAgentLogSeverity().clone('notice')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogSnmpLogGetSeverity.setStatus('current')
hm2LogSnmpLogSetSeverity = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 1, 4), HmAgentLogSeverity().clone('notice')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogSnmpLogSetSeverity.setStatus('current')
hm2LogCliCommandsAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 2, 1), HmEnabledStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogCliCommandsAdminStatus.setStatus('current')
hm2LogConsoleAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 3, 1), HmEnabledStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogConsoleAdminStatus.setStatus('current')
hm2LogConsoleSeverityFilter = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 3, 2), HmAgentLogSeverity().clone('warning')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogConsoleSeverityFilter.setStatus('current')
hm2LogBufferdLogLevelThreshold = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 4, 1), HmAgentLogSeverity().clone('warning')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogBufferdLogLevelThreshold.setStatus('current')
hm2LogSyslogAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 1), HmEnabledStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogSyslogAdminStatus.setStatus('current')
hm2LogSyslogClientTlsVersions = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 2), Hm2TlsVersions().clone(namedValues=NamedValues(("tlsv1-2", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogSyslogClientTlsVersions.setStatus('current')
hm2LogSyslogClientTlsCipherSuites = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 3), Hm2TlsCipherSuites().clone(namedValues=NamedValues(("tls-rsa-with-aes-128-cbc-sha", 1), ("tls-dhe-rsa-with-aes-128-cbc-sha", 2), ("tls-ecdhe-rsa-with-aes-128-cbc-sha", 4), ("tls-ecdhe-rsa-with-aes-128-gcm-sha256", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogSyslogClientTlsCipherSuites.setStatus('current')
hm2LogSyslogServerTable = MibTable((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 10), )
if mibBuilder.loadTexts: hm2LogSyslogServerTable.setStatus('current')
hm2LogSyslogServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 10, 1), ).setIndexNames((0, "HM2-LOGGING-MIB", "hm2LogSyslogServerIndex"))
if mibBuilder.loadTexts: hm2LogSyslogServerEntry.setStatus('current')
hm2LogSyslogServerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogSyslogServerIndex.setStatus('current')
hm2LogSyslogServerIPAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 10, 1, 2), InetAddressType().clone('ipv4')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogSyslogServerIPAddrType.setStatus('current')
hm2LogSyslogServerIPAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 10, 1, 3), InetAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogSyslogServerIPAddr.setStatus('current')
hm2LogSyslogServerUdpPort = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 10, 1, 4), InetPortNumber().clone(514)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogSyslogServerUdpPort.setStatus('current')
hm2LogSyslogServerLevelUpto = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 10, 1, 5), HmAgentLogSeverity().clone('warning')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogSyslogServerLevelUpto.setStatus('current')
hm2LogSyslogServerLogType = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 10, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("systemlog", 1), ("audittrail", 2))).clone('systemlog')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogSyslogServerLogType.setStatus('current')
hm2LogSyslogServerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 10, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogSyslogServerRowStatus.setStatus('current')
hm2LogSyslogServerTransportType = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 5, 10, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("udp", 1), ("tls", 2))).clone('udp')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogSyslogServerTransportType.setStatus('current')
hm2LogPersistAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 6, 1), HmEnabledStatus().clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogPersistAdminStatus.setStatus('current')
hm2LogPersistMaxFileSize = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 6, 2), Integer32().clone(1024)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogPersistMaxFileSize.setStatus('current')
hm2LogPersistFilesMax = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 6, 3), Integer32().clone(4)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogPersistFilesMax.setStatus('current')
hm2LogPersistLevelUpto = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 6, 4), HmAgentLogSeverity().clone('warning')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogPersistLevelUpto.setStatus('current')
hm2LogPersistentFileTable = MibTable((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 6, 5), )
if mibBuilder.loadTexts: hm2LogPersistentFileTable.setStatus('current')
hm2LogPersistentFileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 6, 5, 1), ).setIndexNames((0, "HM2-LOGGING-MIB", "hm2LogPersistentFileIndex"))
if mibBuilder.loadTexts: hm2LogPersistentFileEntry.setStatus('current')
hm2LogPersistentFileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 6, 5, 1, 1), Integer32())
if mibBuilder.loadTexts: hm2LogPersistentFileIndex.setStatus('current')
hm2LogPersistentFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 6, 5, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogPersistentFileName.setStatus('current')
hm2LogPersistentFileSize = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 6, 5, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogPersistentFileSize.setStatus('current')
hm2LogCounterOperatingHours = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 7, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogCounterOperatingHours.setStatus('current')
hm2LogCounterFlashTable = MibTable((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 7, 10), )
if mibBuilder.loadTexts: hm2LogCounterFlashTable.setStatus('current')
hm2LogCounterFlashEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 7, 10, 1), ).setIndexNames((0, "HM2-LOGGING-MIB", "hm2LogCounterFlashBlock"))
if mibBuilder.loadTexts: hm2LogCounterFlashEntry.setStatus('current')
hm2LogCounterFlashBlock = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 7, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("bootBlock", 1), ("fileSystem", 2), ("imageStorage", 3), ("parameters", 4), ("formatFs", 5), ("userFormatFs", 6), ("dhcpBindings", 7), ("persistentLog", 8))))
if mibBuilder.loadTexts: hm2LogCounterFlashBlock.setStatus('current')
hm2LogCounterFlashDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 7, 10, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogCounterFlashDescription.setStatus('current')
hm2LogCounterFlashCount = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 7, 10, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogCounterFlashCount.setStatus('current')
hm2LogCounterFlashValue = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 7, 10, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogCounterFlashValue.setStatus('current')
hm2LogTempMinimum = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 8, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogTempMinimum.setStatus('current')
hm2LogTempMaximum = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 8, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogTempMaximum.setStatus('current')
hm2LogTempVariationCount = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 8, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogTempVariationCount.setStatus('current')
hm2LogTempHistTable = MibTable((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 8, 10), )
if mibBuilder.loadTexts: hm2LogTempHistTable.setStatus('current')
hm2LogTempHistEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 8, 10, 1), ).setIndexNames((0, "HM2-LOGGING-MIB", "hm2LogTempHistIndex"))
if mibBuilder.loadTexts: hm2LogTempHistEntry.setStatus('current')
hm2LogTempHistIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 8, 10, 1, 1), Integer32())
if mibBuilder.loadTexts: hm2LogTempHistIndex.setStatus('current')
hm2LogTempHistRangeMin = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 8, 10, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogTempHistRangeMin.setStatus('current')
hm2LogTempHistRangeMax = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 8, 10, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogTempHistRangeMax.setStatus('current')
hm2LogTempHistTime = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 8, 10, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogTempHistTime.setStatus('current')
hm2LogAuditTrailComment = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 9, 1), DisplayString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(1, 80), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogAuditTrailComment.setStatus('current')
hm2LogEmailAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 1), HmEnabledStatus().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogEmailAdminStatus.setStatus('current')
hm2LogEmailFromAddress = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogEmailFromAddress.setStatus('current')
hm2LogEmailLogDuration = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 1440)).clone(30)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogEmailLogDuration.setStatus('current')
hm2LogEmailUrgentSeverity = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 4), HmAgentLogSeverity().clone('alert')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogEmailUrgentSeverity.setStatus('current')
hm2LogEmailNonUrgentSeverity = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 5), HmAgentLogSeverity().clone('warning')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogEmailNonUrgentSeverity.setStatus('current')
hm2LogEmailNumEmailsSent = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogEmailNumEmailsSent.setStatus('current')
hm2LogEmailNumEmailFailures = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogEmailNumEmailFailures.setStatus('current')
hm2LogEmailTimeOfLastMailSent = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 8), HmTimeSeconds1970()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hm2LogEmailTimeOfLastMailSent.setStatus('current')
hm2LogEmailAction = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("test", 2), ("non-urgent", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogEmailAction.setStatus('current')
hm2LogEmailTestMessageType = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("urgent", 1), ("non-urgent", 2))).clone('urgent')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogEmailTestMessageType.setStatus('current')
hm2LogEmailTestMessageBody = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 11), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogEmailTestMessageBody.setStatus('current')
hm2LogEmailToAddressTable = MibTable((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 15), )
if mibBuilder.loadTexts: hm2LogEmailToAddressTable.setStatus('current')
hm2LogEmailToAddressEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 15, 1), ).setIndexNames((0, "HM2-LOGGING-MIB", "hm2LogEmailToAddrMessageIndex"))
if mibBuilder.loadTexts: hm2LogEmailToAddressEntry.setStatus('current')
hm2LogEmailToAddrMessageIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 15, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)))
if mibBuilder.loadTexts: hm2LogEmailToAddrMessageIndex.setStatus('current')
hm2LogEmailToAddrMessageType = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 15, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("urgent", 1), ("non-urgent", 2))).clone('urgent')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailToAddrMessageType.setStatus('current')
hm2LogEmailToAddrAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 15, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailToAddrAddress.setStatus('current')
hm2LogEmailToAddrEntryStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 15, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailToAddrEntryStatus.setStatus('current')
hm2LogEmailSubjectTable = MibTable((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 16), )
if mibBuilder.loadTexts: hm2LogEmailSubjectTable.setStatus('current')
hm2LogEmailSubjectEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 16, 1), ).setIndexNames((0, "HM2-LOGGING-MIB", "hm2LogEmailSubjectMessageType"))
if mibBuilder.loadTexts: hm2LogEmailSubjectEntry.setStatus('current')
hm2LogEmailSubjectMessageType = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 16, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("urgent", 1), ("non-urgent", 2))))
if mibBuilder.loadTexts: hm2LogEmailSubjectMessageType.setStatus('current')
hm2LogEmailSubject = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 16, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailSubject.setStatus('current')
hm2LogEmailSubjectEntryStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 16, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailSubjectEntryStatus.setStatus('current')
hm2LogEmailMailServerTable = MibTable((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17), )
if mibBuilder.loadTexts: hm2LogEmailMailServerTable.setStatus('current')
hm2LogEmailMailServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17, 1), ).setIndexNames((0, "HM2-LOGGING-MIB", "hm2LogEmailSmtpAddrIndex"))
if mibBuilder.loadTexts: hm2LogEmailMailServerEntry.setStatus('current')
hm2LogEmailSmtpAddrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5)))
if mibBuilder.loadTexts: hm2LogEmailSmtpAddrIndex.setStatus('current')
hm2LogEmailSmtpAddrDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailSmtpAddrDescr.setStatus('current')
hm2LogEmailSmtpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17, 1, 3), InetAddressType().clone('ipv4')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailSmtpAddrType.setStatus('current')
hm2LogEmailSmtpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17, 1, 4), InetAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailSmtpAddr.setStatus('current')
hm2LogEmailSmtpPort = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17, 1, 5), InetPortNumber().clone(25)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailSmtpPort.setStatus('current')
hm2LogEmailSmtpSecurity = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("none", 1), ("tlsv1", 2))).clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailSmtpSecurity.setStatus('current')
hm2LogEmailSmtpLoginID = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17, 1, 7), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailSmtpLoginID.setStatus('current')
hm2LogEmailSmtpPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17, 1, 8), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailSmtpPassword.setStatus('current')
hm2LogEmailSmtpEntryStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17, 1, 9), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailSmtpEntryStatus.setStatus('current')
hm2LogEmailSmtpTimeout = MibTableColumn((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 17, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 15)).clone(3)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hm2LogEmailSmtpTimeout.setStatus('current')
hm2LogEmailClientTlsVersions = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 18), Hm2TlsVersions().clone(namedValues=NamedValues(("tlsv1-0", 0), ("tlsv1-2", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogEmailClientTlsVersions.setStatus('current')
hm2LogEmailClientTlsCipherSuites = MibScalar((1, 3, 6, 1, 4, 1, 248, 11, 23, 1, 10, 19), Hm2TlsCipherSuites().clone(namedValues=NamedValues(("tls-dhe-rsa-with-aes-128-cbc-sha", 2), ("tls-ecdhe-rsa-with-aes-128-cbc-sha", 4), ("tls-ecdhe-rsa-with-aes-128-gcm-sha256", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hm2LogEmailClientTlsCipherSuites.setStatus('current')
hm2LogAuditStartNextSector = NotificationType((1, 3, 6, 1, 4, 1, 248, 11, 23, 0, 1))
if mibBuilder.loadTexts: hm2LogAuditStartNextSector.setStatus('current')
hm2LogEmailSendFailed = NotificationType((1, 3, 6, 1, 4, 1, 248, 11, 23, 0, 2)).setObjects(("HM2-LOGGING-MIB", "hm2LogEmailNumEmailFailures"))
if mibBuilder.loadTexts: hm2LogEmailSendFailed.setStatus('current')
mibBuilder.exportSymbols("HM2-LOGGING-MIB", hm2LogSyslogClientTlsVersions=hm2LogSyslogClientTlsVersions, hm2LogEmailTestMessageType=hm2LogEmailTestMessageType, hm2LogEmailNonUrgentSeverity=hm2LogEmailNonUrgentSeverity, HmAgentLogSeverity=HmAgentLogSeverity, hm2LogEmailSmtpEntryStatus=hm2LogEmailSmtpEntryStatus, hm2LogEmailMailServerTable=hm2LogEmailMailServerTable, hm2LogEmailSmtpTimeout=hm2LogEmailSmtpTimeout, hm2LogCounterGroup=hm2LogCounterGroup, hm2LogEmailSmtpAddrDescr=hm2LogEmailSmtpAddrDescr, hm2LoggingMibNotifications=hm2LoggingMibNotifications, hm2LogSnmpLogGetRequest=hm2LogSnmpLogGetRequest, hm2LoggingMib=hm2LoggingMib, hm2LogSnmpLoggingGroup=hm2LogSnmpLoggingGroup, hm2LogPersistAdminStatus=hm2LogPersistAdminStatus, hm2LogSyslogAdminStatus=hm2LogSyslogAdminStatus, hm2LogEmailSmtpPassword=hm2LogEmailSmtpPassword, hm2LogEmailLogDuration=hm2LogEmailLogDuration, hm2LogCliCommandsLoggingGroup=hm2LogCliCommandsLoggingGroup, hm2LogSyslogServerIndex=hm2LogSyslogServerIndex, hm2LogPersistentFileEntry=hm2LogPersistentFileEntry, hm2LogTempHistRangeMin=hm2LogTempHistRangeMin, hm2LogSyslogGroup=hm2LogSyslogGroup, hm2LogEmailNumEmailFailures=hm2LogEmailNumEmailFailures, hm2LogPersistLevelUpto=hm2LogPersistLevelUpto, hm2LogTempVariationCount=hm2LogTempVariationCount, hm2LogEmailToAddressEntry=hm2LogEmailToAddressEntry, hm2LogTempMinimum=hm2LogTempMinimum, hm2LogEmailAdminStatus=hm2LogEmailAdminStatus, hm2LogEmailSmtpSecurity=hm2LogEmailSmtpSecurity, hm2LogSyslogServerIPAddrType=hm2LogSyslogServerIPAddrType, hm2LogEmailToAddrEntryStatus=hm2LogEmailToAddrEntryStatus, hm2LogEmailSmtpAddrIndex=hm2LogEmailSmtpAddrIndex, hm2LogEmailTestMessageBody=hm2LogEmailTestMessageBody, hm2LogTempHistTime=hm2LogTempHistTime, hm2LogEmailSubjectTable=hm2LogEmailSubjectTable, hm2LogEmailMailServerEntry=hm2LogEmailMailServerEntry, hm2LogEmailSubjectEntryStatus=hm2LogEmailSubjectEntryStatus, hm2LogEmailClientTlsVersions=hm2LogEmailClientTlsVersions, hm2LogCounterOperatingHours=hm2LogCounterOperatingHours, hm2LogConsoleAdminStatus=hm2LogConsoleAdminStatus, hm2LogSyslogServerLevelUpto=hm2LogSyslogServerLevelUpto, PYSNMP_MODULE_ID=hm2LoggingMib, hm2LogTempMaximum=hm2LogTempMaximum, hm2LogPersistentFileName=hm2LogPersistentFileName, hm2LogEmailAlertGroup=hm2LogEmailAlertGroup, hm2LogCounterFlashTable=hm2LogCounterFlashTable, hm2LogPersistFilesMax=hm2LogPersistFilesMax, hm2LogSyslogServerEntry=hm2LogSyslogServerEntry, hm2LogCounterFlashDescription=hm2LogCounterFlashDescription, hm2LogCliCommandsAdminStatus=hm2LogCliCommandsAdminStatus, hm2LogConsoleSeverityFilter=hm2LogConsoleSeverityFilter, hm2LogPersistentFileIndex=hm2LogPersistentFileIndex, hm2LogEmailSubject=hm2LogEmailSubject, hm2LogCounterFlashEntry=hm2LogCounterFlashEntry, hm2LogPersistentFileTable=hm2LogPersistentFileTable, hm2LogEmailUrgentSeverity=hm2LogEmailUrgentSeverity, hm2LogTempHistIndex=hm2LogTempHistIndex, hm2LogCounterFlashValue=hm2LogCounterFlashValue, hm2LogEmailSubjectMessageType=hm2LogEmailSubjectMessageType, hm2LogEmailSendFailed=hm2LogEmailSendFailed, hm2LogSyslogServerLogType=hm2LogSyslogServerLogType, hm2LogSyslogServerTransportType=hm2LogSyslogServerTransportType, hm2LogSnmpLogGetSeverity=hm2LogSnmpLogGetSeverity, hm2LogEmailSmtpLoginID=hm2LogEmailSmtpLoginID, hm2LogEmailAction=hm2LogEmailAction, hm2LoggingMibObjects=hm2LoggingMibObjects, hm2LogSyslogServerTable=hm2LogSyslogServerTable, hm2LogEmailSmtpAddr=hm2LogEmailSmtpAddr, hm2LogBufferdLogLevelThreshold=hm2LogBufferdLogLevelThreshold, hm2LogEmailSmtpPort=hm2LogEmailSmtpPort, 
hm2LogSnmpLogSetRequest=hm2LogSnmpLogSetRequest, hm2LogPersistentGroup=hm2LogPersistentGroup, hm2LogAuditStartNextSector=hm2LogAuditStartNextSector, hm2LogSyslogClientTlsCipherSuites=hm2LogSyslogClientTlsCipherSuites, hm2LogPersistentFileSize=hm2LogPersistentFileSize, hm2LogEmailToAddrMessageType=hm2LogEmailToAddrMessageType, hm2LogEmailToAddrAddress=hm2LogEmailToAddrAddress, hm2LogEmailToAddressTable=hm2LogEmailToAddressTable, hm2LogEmailSubjectEntry=hm2LogEmailSubjectEntry, hm2LogEmailSmtpAddrType=hm2LogEmailSmtpAddrType, hm2LogTempHistTable=hm2LogTempHistTable, hm2LogSyslogServerUdpPort=hm2LogSyslogServerUdpPort, hm2LogTempHistEntry=hm2LogTempHistEntry, hm2LogAuditGroup=hm2LogAuditGroup, hm2LogEmailClientTlsCipherSuites=hm2LogEmailClientTlsCipherSuites, hm2LogCounterFlashBlock=hm2LogCounterFlashBlock, hm2LogTemperatureGroup=hm2LogTemperatureGroup, hm2LogSnmpLogSetSeverity=hm2LogSnmpLogSetSeverity, hm2LogEmailNumEmailsSent=hm2LogEmailNumEmailsSent, hm2LogEmailToAddrMessageIndex=hm2LogEmailToAddrMessageIndex, hm2LogBufferedLoggingGroup=hm2LogBufferedLoggingGroup, hm2LogCounterFlashCount=hm2LogCounterFlashCount, hm2LogSyslogServerRowStatus=hm2LogSyslogServerRowStatus, hm2LogEmailFromAddress=hm2LogEmailFromAddress, hm2LogAuditTrailComment=hm2LogAuditTrailComment, hm2LogTempHistRangeMax=hm2LogTempHistRangeMax, hm2LogConsoleLoggingGroup=hm2LogConsoleLoggingGroup, hm2LogEmailTimeOfLastMailSent=hm2LogEmailTimeOfLastMailSent, hm2LogPersistMaxFileSize=hm2LogPersistMaxFileSize, hm2LogSyslogServerIPAddr=hm2LogSyslogServerIPAddr)
| 133.763033
| 5,084
| 0.777884
|
a382adbdfe03beb9a45528820dbc73d5f134ca4e
| 823
|
py
|
Python
|
String/python/leetcode383_Ransom_Note.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
String/python/leetcode383_Ransom_Note.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
String/python/leetcode383_Ransom_Note.py
|
wenxinjie/leetcode
|
c459a01040c8fe0783e15a16b8d7cca4baf4612a
|
[
"Apache-2.0"
] | null | null | null |
# Given an arbitrary ransom note string and another string containing letters from all the magazines, write a function that will return true if the ransom note can be constructed from the magazines; otherwise, it will return false.
# Each letter in the magazine string can only be used once in your ransom note.
# Note:
# You may assume that both strings contain only lowercase letters.
# canConstruct("a", "b") -> false
# canConstruct("aa", "ab") -> false
# canConstruct("aa", "aab") -> true
import collections
class Solution(object):
def canConstruct(self, ransomNote, magazine):
"""
:type ransomNote: str
:type magazine: str
:rtype: bool
"""
return not (collections.Counter(ransomNote) - collections.Counter(magazine))
# Time: O(n + m)
# Space: O(n + m)
# Difficulty: easy
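# Hedged usage sketch, added for illustration (not part of the original file):
# it exercises the Counter-subtraction idea on the examples quoted in the
# problem statement above.
if __name__ == "__main__":
    s = Solution()
    assert s.canConstruct("a", "b") is False    # no 'a' available in "b"
    assert s.canConstruct("aa", "ab") is False  # only one 'a' available
    assert s.canConstruct("aa", "aab") is True  # both 'a's are available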
| 34.291667
| 232
| 0.679222
|
52f5f9d4fdf2d4a2ce4dab9e84d93692e8770331
| 6,493
|
py
|
Python
|
src/openprocurement/tender/openeu/validation.py
|
pontostroy/api
|
5afdd3a62a8e562cf77e2d963d88f1a26613d16a
|
[
"Apache-2.0"
] | 3
|
2020-03-13T06:44:23.000Z
|
2020-11-05T18:25:29.000Z
|
src/openprocurement/tender/openeu/validation.py
|
pontostroy/api
|
5afdd3a62a8e562cf77e2d963d88f1a26613d16a
|
[
"Apache-2.0"
] | 2
|
2021-03-25T23:29:58.000Z
|
2022-03-21T22:18:37.000Z
|
src/openprocurement/tender/openeu/validation.py
|
pontostroy/api
|
5afdd3a62a8e562cf77e2d963d88f1a26613d16a
|
[
"Apache-2.0"
] | 3
|
2020-10-16T16:25:14.000Z
|
2021-05-22T12:26:20.000Z
|
# -*- coding: utf-8 -*-
from openprocurement.api.utils import get_now, raise_operation_error, get_first_revision_date
from openprocurement.api.validation import validate_data, OPERATIONS
from openprocurement.api.constants import RELEASE_2020_04_19
from openprocurement.tender.openeu.models import Qualification
def validate_qualification_update_with_cancellation_lot_pending(request):
tender = request.validated["tender"]
tender_created = get_first_revision_date(tender, default=get_now())
qualification = request.validated["qualification"]
lot_id = qualification.lotID
if tender_created < RELEASE_2020_04_19 or not lot_id:
return
accept_lot = all([
any([j.status == "resolved" for j in i.complaints])
for i in tender.cancellations
if i.status == "unsuccessful" and getattr(i, "complaints", None) and i.relatedLot == lot_id
])
if (
request.authenticated_role == "tender_owner"
and (
any([
i for i in tender.cancellations
if i.relatedLot and i.status == "pending" and i.relatedLot == lot_id])
or not accept_lot
)
):
raise_operation_error(
request,
"Can't update qualification with pending cancellation lot",
)
def validate_patch_qualification_data(request):
return validate_data(request, Qualification, True)
# bids
def validate_view_bids_in_active_tendering(request):
if request.validated["tender_status"] == "active.tendering":
raise_operation_error(
request,
"Can't view {} in current ({}) tender status".format(
"bid" if request.matchdict.get("bid_id") else "bids", request.validated["tender_status"]
),
)
# bid documents
def validate_bid_document_operation_in_bid_status(request):
bid = request.validated["bid"]
if bid.status in ("invalid", "unsuccessful", "deleted"):
raise_operation_error(
request,
"Can't {} document at '{}' bid status".format(
OPERATIONS.get(request.method),
bid.status
)
)
def validate_view_bid_documents_allowed_in_tender_status(request):
tender_status = request.validated["tender_status"]
if tender_status == "active.tendering" and request.authenticated_role != "bid_owner":
raise_operation_error(
request,
"Can't view bid documents in current ({}) tender status".format(tender_status),
)
def validate_view_financial_bid_documents_allowed_in_tender_status(request):
tender_status = request.validated["tender_status"]
view_forbidden_states = (
"active.tendering",
"active.pre-qualification",
"active.pre-qualification.stand-still",
"active.auction",
)
if tender_status in view_forbidden_states and request.authenticated_role != "bid_owner":
raise_operation_error(
request,
"Can't view bid documents in current ({}) tender status".format(tender_status),
)
def validate_view_bid_documents_allowed_in_bid_status(request):
bid_status = request.validated["bid"].status
if bid_status in ("invalid", "deleted") and request.authenticated_role != "bid_owner":
raise_operation_error(
request,
"Can't view bid documents in current ({}) bid status".format(bid_status)
)
def validate_view_financial_bid_documents_allowed_in_bid_status(request):
bid_status = request.validated["bid"].status
if bid_status in ("invalid", "deleted", "invalid.pre-qualification", "unsuccessful") \
and request.authenticated_role != "bid_owner":
raise_operation_error(
request,
"Can't view bid documents in current ({}) bid status".format(bid_status)
)
# qualification
def validate_qualification_document_operation_not_in_allowed_status(request):
if request.validated["tender_status"] != "active.pre-qualification":
raise_operation_error(
request,
"Can't {} document in current ({}) tender status".format(
OPERATIONS.get(request.method), request.validated["tender_status"]
),
)
def validate_qualification_document_operation_not_in_pending(request):
qualification = request.validated["qualification"]
if qualification.status != "pending":
raise_operation_error(
request, "Can't {} document in current qualification status".format(OPERATIONS.get(request.method))
)
# qualification complaint
def validate_qualification_update_not_in_pre_qualification(request):
tender = request.validated["tender"]
if tender.status not in ["active.pre-qualification"]:
raise_operation_error(request, "Can't update qualification in current ({}) tender status".format(tender.status))
def validate_cancelled_qualification_update(request):
if request.context.status == "cancelled":
raise_operation_error(request, "Can't update qualification in current cancelled qualification status")
def validate_add_complaint_not_in_pre_qualification(request):
tender = request.validated["tender"]
if tender.status not in ["active.pre-qualification.stand-still"]:
raise_operation_error(request, "Can't add complaint in current ({}) tender status".format(tender.status))
def validate_update_complaint_not_in_pre_qualification(request):
tender = request.validated["tender"]
if tender.status not in ["active.pre-qualification", "active.pre-qualification.stand-still"]:
raise_operation_error(request, "Can't update complaint in current ({}) tender status".format(tender.status))
def validate_update_qualification_complaint_only_for_active_lots(request):
tender = request.validated["tender"]
if any([i.status != "active" for i in tender.lots if i.id == request.validated["qualification"].lotID]):
raise_operation_error(request, "Can update complaint only in active lot status")
def validate_add_complaint_not_in_qualification_period(request):
tender = request.validated["tender"]
if tender.qualificationPeriod and (
tender.qualificationPeriod.startDate
and tender.qualificationPeriod.startDate > get_now()
or tender.qualificationPeriod.endDate
and tender.qualificationPeriod.endDate < get_now()
):
raise_operation_error(request, "Can add complaint only in qualificationPeriod")
| 38.88024
| 120
| 0.699677
|
f09480347b63010cd167a50e14a1de1bf78d4c7d
| 27,839
|
py
|
Python
|
sqliteanalyzer/storageanalyzer.py
|
santigl/sqliteanalyzer
|
16b269337d3a13b3b438775a60866a732e05beaf
|
[
"BSD-2-Clause"
] | null | null | null |
sqliteanalyzer/storageanalyzer.py
|
santigl/sqliteanalyzer
|
16b269337d3a13b3b438775a60866a732e05beaf
|
[
"BSD-2-Clause"
] | null | null | null |
sqliteanalyzer/storageanalyzer.py
|
santigl/sqliteanalyzer
|
16b269337d3a13b3b438775a60866a732e05beaf
|
[
"BSD-2-Clause"
] | 1
|
2021-08-02T02:43:02.000Z
|
2021-08-02T02:43:02.000Z
|
# Copyright 2018 Santiago Gil
# (github.com/santigl)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module to extract metrics about the storage use of an
SQLite3 database.
"""
from collections import namedtuple
from math import ceil
from os import stat
from .manager import SQLite3Manager
from .types import Index, IndexListEntry, Page
class StorageMetrics(dict):
"""Storage metrics for a given database object.
It contains the following keys:
* ``'nentry'``
* ``'payload'``
* ``'ovfl_payload'``
* ``'mx_payload'``
* ``'ovfl_cnt'``
* ``'leaf_pages'``
* ``'int_pages'``
* ``'ovfl_pages'``
* ``'leaf_unused'``
* ``'int_unused'``
* ``'ovfl_unused'``
* ``'gap_cnt'``
* ``'compressed_size'``
* ``'depth'``
* ``'cnt'``
* ``'total_pages'``
* ``'total_pages_percent'``
* ``'storage'``
* ``'is_compressed'``
* ``'compressed_overhead'``
* ``'payload_percent'``
* ``'total_unused'``
* ``'total_metadata'``
* ``'metadata_percent'``
* ``'average_payload'``
* ``'average_unused'``
* ``'average_metadata'``
* ``'ovfl_percent'``
* ``'fragmentation'``
* ``'int_unused_percent'``
* ``'ovfl_unused_percent'``
* ``'leaf_unused_percent'``
* ``'total_unused_percent``
"""
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
class StorageAnalyzer:
"""Extracts storage-space usage statistics from an SQLite3 database.
It uses as a starting point the metrics provided by the
``DBSTAT`` virtual table.
Arguments:
db_path: path to an SQLite3 database file
Note:
SQLite3 must have been compiled with the
``-DSQLITE_ENABLE_DBSTAT_VTAB`` flag enabled.
References:
https://www.sqlite.org/dbstat.html
"""
def __init__(self, db_path: str):
self._db_file = db_path
self._db = SQLite3Manager(self._db_file)
self._is_compressed = None
# Creating temporary DBSTAT table:
self._create_temp_stat_table()
# Creating in-memory db to store the stats:
self._stat_db = SQLite3Manager()
self._stat_db.execute_query(self._spaceused_table_create_query())
# Gathering the stats for all tables:
self._compute_stats()
def item_count(self) -> int:
"""Number of rows defined in table ``SQLITE_MASTER``.
Returns:
``SELECT COUNT(*) from SQLITE_MASTER``
"""
return self._db.fetch_single_field('''SELECT COUNT(*)
from SQLITE_MASTER''')
def file_size(self) -> int:
"""Physical size of the database in bytes, as reported by
:func:`os.stat()`.
Returns:
Size of the database [bytes]
"""
return stat(self._db_file).st_size
def logical_file_size(self) -> int:
"""Number of bytes that the database should take given the size
of a page and the number of pages it contains.
If there is no compression, then this value is equal to
the physical file size (:func:`file_size`).
Returns:
Logical size of the database [bytes]
"""
return self.page_count() * self.page_size()
def page_size(self) -> int:
"""Size in bytes of the database pages.
Returns:
``PRAGMA page_size`` [bytes]
"""
return self._db.fetch_single_field('PRAGMA page_size')
def page_count(self) -> int:
"""Number of reported pages in the database.
Returns:
``PRAGMA page_count``
"""
return self._db.fetch_single_field('PRAGMA page_count')
def calculated_free_pages(self) -> int:
"""Number of free pages.
Returns:
:func:`page_count()` - :func:`in_use_pages()`
- :func:`autovacuum_page_count()`
"""
return self.page_count()\
- self.in_use_pages()\
- self.autovacuum_page_count()
def calculated_page_count(self) -> int:
"""Number of calculated pages in the database.
        Returns:
            The sum of pages in use, pages in the freelist and pages
            in the autovacuum pointer map:
            :func:`in_use_pages()` + :func:`freelist_count()`
            + :func:`autovacuum_page_count()`
"""
return self.in_use_pages()\
+ self.freelist_count()\
+ self.autovacuum_page_count()
def freelist_count(self) -> int:
"""Number of pages in the freelist.
Those are unused pages in the database.
Returns:
``PRAGMA freelist_count``
"""
return self._db.fetch_single_field('PRAGMA freelist_count')
def pages(self) -> [Page]:
"""Returns the definition for all pages in the database.
It is a dump of the ``DBSTAT`` virtual table.
Reference:
https://www.sqlite.org/dbstat.html
Returns:
a list of :class:`Page` objects
"""
query = '''SELECT * FROM temp.stat'''
return [Page._make(row) for row in self._db.fetch_all_rows(query)]
def in_use_pages(self) -> int:
"""Number of pages currently in use.
Returns:
``leaf_pages`` + ``internal_pages`` + ``overflow_pages``
"""
query = '''SELECT sum(leaf_pages+int_pages+ovfl_pages)
FROM space_used'''
return self._stat_db.fetch_single_field(query)
def in_use_percent(self) -> float:
"""Percentage of pages from the total that are currently in use.
Returns:
% of pages of the DB that are currently in use
"""
return self._percentage(self.in_use_pages(), self.page_count())
def tables(self) -> [str]:
"""Names of the tables defined in the database.
Returns:
tables in the database
"""
tables = self._db.fetch_all_rows('''SELECT name
FROM sqlite_master
WHERE rootpage>0
AND type == "table"''')
return [t['name'] for t in tables] + ['sqlite_master']
def indices(self) -> [Index]:
"""Returns the indices defined in the database.
Returns:
a list of :class:`Index`
"""
indices = self._db.fetch_all_rows('''SELECT name, tbl_name
FROM sqlite_master
WHERE rootpage>0
AND type == "index"''')
return [{'name': i['name'], 'tbl_name': i['tbl_name']} \
for i in indices]
def index_list(self, table: str) -> [IndexListEntry]:
"""Given a table, returns its entries in ``PRAGMA index_list``.
Returns:
A list of :class:`IndexListEntry` namedtuples.
References:
https://sqlite.org/pragma.html#pragma_index_list
"""
query = 'PRAGMA index_list("{}")'.format(table)
indices = []
for row in self._db.fetch_all_rows(query):
index = IndexListEntry(row['seq'], row['name'], bool(row['unique']),
row['origin'], bool(row['partial']))
indices.append(index)
return indices
def ntable(self) -> int:
"""Number of tables in the database."""
return self._db.fetch_single_field('''SELECT count(*)+1
FROM sqlite_master
WHERE type="table"
''')
def nindex(self) -> int:
"""Number of indices in the database."""
return self._db.fetch_single_field('''SELECT count(*)
FROM sqlite_master
WHERE type="index"
''')
def nautoindex(self) -> int:
"""Number of automatically-created indices in the database."""
return self._db.fetch_single_field('''SELECT count(*)
FROM sqlite_master
WHERE name
LIKE "sqlite_autoindex%"
''')
    def nmanindex(self) -> int:
"""Number of manually-created indices in the database."""
return self.nindex() - self.nautoindex()
def payload_size(self)-> int:
"""Space in bytes used by the user's payload.
It does not include the space used by the ``sqlite_master``
table nor any indices.
"""
return self._stat_db.fetch_single_field('''SELECT sum(payload)
FROM space_used
WHERE NOT is_index
AND name
NOT LIKE "sqlite_master";
''')
def is_compressed(self) -> bool:
"""Returns whether the database file is compressed."""
        if self._is_compressed is None:
            table = self.tables().pop()
            self._is_compressed = self.table_stats(table)['is_compressed']
        return self._is_compressed
def autovacuum_page_count(self) -> int:
"""The number of pages used by the *auto-vacuum*
pointer map.
"""
auto_vacuum = self._db.fetch_single_field('PRAGMA auto_vacuum')
if auto_vacuum == 0 or self.page_count() == 1:
return 0
# The number of entries on each pointer map page.
#
# The layout of the database file is one pointer-map
# page, followed by ptrsPerPage other pages, followed
# by a pointer-map page, etc.
#
# The first pointer-map page is the second page
# of the file overall.
page_size = float(self.page_size())
pointers_per_page = page_size / 5
# Return the number of pointer map pages
# in the database.
return ceil((self.page_count() - 1) / (pointers_per_page + 1))
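    # Worked example, added for illustration (the numbers are assumptions, not
    # from the original source): with a 1024-byte page size, pointers_per_page
    # is 1024 / 5 = 204.8, so a 500-page auto-vacuum database yields
    # ceil((500 - 1) / (204.8 + 1)) = ceil(2.42...) = 3 pointer-map pages.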
def table_space_usage(self) -> dict():
"""Space used by each table in the database.
Returns:
A dictionary from table names to page counts.
"""
# if table is not None:
# return self._table_space_usage(table)
return self._all_tables_usage()
def table_page_count(self, name: str, exclude_indices=False) -> int:
"""Number of pages that the table is currently using.
If ``exclude_indices == True``, then it does not count those
pages taken by indices that might point to that table.
Args:
name: name of the table
exclude_indices: whether to avoid counting pages used
by indices on the table.
"""
if exclude_indices:
return self._item_page_count(name)
return self._table_space_usage(name)
def index_page_count(self, name: str) -> int:
"""Number of pages that the index is currently using.
Args:
name: name of the index
Returns:
number of pages
"""
return self._item_page_count(name)
def index_stats(self, name: str) -> StorageMetrics:
"""Returns statistics for the index.
Args:
name: name of the index
Returns:
a :class:`StorageMetrics` object
"""
condition = 'name = "{}"'.format(name)
return self._query_space_used_table(condition)
def table_stats(self, name: str, exclude_indices=False) -> StorageMetrics:
"""Returns statistics for a table.
The value of the optional parameter ``exclude_indices``,
determines whether indices are considered part of the actual
table or not.
Args:
name: name of the table
Returns:
a :class:`StorageMetrics` object
"""
if exclude_indices:
condition = 'name = "{}"'.format(name)
else:
condition = 'tblname = "{}"'.format(name)
return self._query_space_used_table(condition)
def global_stats(self, exclude_indices=False) -> StorageMetrics:
"""Storage metrics for all tables and/or indices in the database
The value of the optional parameter ``exclude_indices``
determines whether indices are considered.
Args:
exclude_indices: bool: if False, space used by indices is
not considered.
Returns:
a StorageMetrics object
"""
condition = 'NOT is_index' if exclude_indices else '1'
return self._query_space_used_table(condition)
def indices_stats(self) -> StorageMetrics:
"""Return metadata about the indices in the database.
Raises:
ValueError: If no indices exist
"""
if not self.nindex():
raise ValueError('There are no indices in the DB.')
return self._query_space_used_table('is_index')
def is_without_rowid(self, table: str) -> bool:
"""Returns whether the given table is a ``WITHOUT ROWID`` table.
Args:
table: name of the table
References:
https://sqlite.org/withoutrowid.html
"""
query = 'PRAGMA index_list("{}")'.format(table)
indices = self._db.fetch_all_rows(query)
for index in indices:
if index['origin'].upper() == 'PK':
query = '''SELECT count(*)
FROM sqlite_master
WHERE name="{}"'''.format(table)
pk_is_table = self._db.fetch_single_field(query)
if not pk_is_table:
return True
return False
def stat_db_dump(self) -> [str]:
"""Returns a dump of the DB containing the stats.
Returns:
list of lines containing an SQL dump of the stat database.
"""
return list(self._stat_db.iterdump())
#### HELPERS ####
def _query_space_used_table(self, where: str) -> StorageMetrics:
# total_pages: Database pages consumed.
# total_pages_percent: Pages consumed as a percentage of the file.
# storage: Bytes consumed.
# payload_percent: Payload bytes used as a percentage of $storage.
# total_unused: Unused bytes on pages.
# avg_payload: Average payload per btree entry.
# avg_fanout: Average fanout for internal pages.
# avg_unused: Average unused bytes per btree entry.
# avg_meta: Average metadata overhead per entry.
# ovfl_cnt_percent: Percentage of btree entries that use overflow pages.
query = '''SELECT
sum(
CASE WHEN (is_without_rowid OR is_index) THEN nentry
ELSE leaf_entries
END
) AS nentry,
sum(payload) AS payload,
sum(ovfl_payload) AS ovfl_payload,
max(mx_payload) AS mx_payload,
sum(ovfl_cnt) as ovfl_cnt,
sum(leaf_pages) AS leaf_pages,
sum(int_pages) AS int_pages,
sum(ovfl_pages) AS ovfl_pages,
sum(leaf_unused) AS leaf_unused,
sum(int_unused) AS int_unused,
sum(ovfl_unused) AS ovfl_unused,
sum(gap_cnt) AS gap_cnt,
sum(compressed_size) AS compressed_size,
max(depth) AS depth,
count(*) AS cnt
FROM space_used
WHERE {}
'''.format(where)
stats = self._stat_db.fetch_one_row(query)
s = self._extract_storage_metrics(stats)
# Adding calculated values:
s['total_pages'] = s['leaf_pages']\
+ s['int_pages']\
+ s['ovfl_pages']
s['total_pages_percent'] = self._percentage(s['total_pages'],
self.page_count())
s['storage'] = s['total_pages'] * self.page_size()
s['is_compressed'] = (s['storage'] > s['compressed_size'])
s['compressed_overhead'] = 14 if s['is_compressed'] \
else 0
s['payload_percent'] = self._percentage(s['payload'],
s['storage'])
s['total_unused'] = s['ovfl_unused']\
+ s['int_unused'] \
+ s['leaf_unused']
s['total_metadata'] = s['storage'] - s['payload']\
- s['total_unused']\
+ 4 * (s['ovfl_pages'] - s['ovfl_cnt'])
s['metadata_percent'] = self._percentage(s['total_metadata'],
s['storage'])
if s['nentry'] == 0:
s['average_payload'] = 0
s['average_unused'] = s['average_metadata'] = 0
else:
s['average_payload'] = s['payload'] / s['nentry']
s['average_unused'] = s['total_unused'] / s['nentry']
s['average_metadata'] = s['total_metadata'] / s['nentry']
s['ovfl_percent'] = self._percentage(s['ovfl_cnt'], s['nentry'])
s['fragmentation'] = self._percentage(s['gap_cnt'],
s['total_pages'] - 1)
s['int_unused_percent'] = self._percentage(s['int_unused'],
s['int_pages']\
* self.page_size())
s['ovfl_unused_percent'] = self._percentage(s['ovfl_unused'],
s['ovfl_pages']\
* self.page_size())
s['leaf_unused_percent'] = self._percentage(s['leaf_unused'],
s['leaf_pages']\
* self.page_size())
s['total_unused_percent'] = self._percentage(s['total_unused'],
s['storage'])
return s
def _item_page_count(self, name: str) -> int:
query = '''SELECT (int_pages + leaf_pages + ovfl_pages)
FROM space_used
WHERE name = "{}"
'''.format(name)
return self._stat_db.fetch_single_field(query)
def _table_space_usage(self, tbl_name: str) -> int:
query = '''SELECT
sum(int_pages + leaf_pages + ovfl_pages)
AS pages
FROM space_used
WHERE tblname = "{}"
GROUP BY tblname
'''.format(tbl_name)
return self._stat_db.fetch_single_field(query)
def _all_tables_usage(self) -> dict():
query = '''SELECT tblname as name,
sum(int_pages + leaf_pages + ovfl_pages)
AS pages
FROM space_used
GROUP BY tblname'''
return {row['name']: row['pages'] \
for row in self._stat_db.fetch_all_rows(query)}
def _compute_stats(self):
tables = [{'name': t, 'tbl_name': t} for t in self.tables()]
indices = self.indices()
for entry in tables + indices:
stats = self._extract_sqlite_stats(entry['name'])
is_index = (entry['name'] != entry['tbl_name'])
values = (entry['name'],
entry['tbl_name'],
is_index,
stats['is_without_rowid'],
stats['nentry'],
stats['leaf_entries'],
stats['depth'],
stats['payload'],
stats['ovfl_payload'],
stats['ovfl_cnt'],
stats['mx_payload'],
stats['int_pages'],
stats['leaf_pages'],
stats['ovfl_pages'],
stats['int_unused'],
stats['leaf_unused'],
stats['ovfl_unused'],
stats['gap_count'],
stats['compressed_size'])
placeholders = ','.join('?' * len(values))
insert_query = '''INSERT INTO space_used
VALUES ({})'''.format(placeholders)
self._stat_db.execute_query(insert_query, values)
### HELPERS ###
def _count_gaps(self, table_name: str):
# Column 'gap_cnt' is set to the number of non-contiguous entries in the
# list of pages visited if the b-tree structure is traversed in a top-
# down fashion (each node visited before its child-tree is passed). Any
# overflow chains present are traversed from start to finish before any
# child-tree is.
pages = self._db.fetch_all_rows('''SELECT pageno, pagetype
FROM temp.dbstat
WHERE name="{}"
ORDER BY pageno;
'''.format(table_name))
gap_count = 0
previous_page = 0
for page in pages:
if previous_page > 0 and (page['pagetype'] == 'leaf') \
and (page['pageno'] != previous_page+1):
gap_count += 1
previous_page = page['pageno']
return gap_count
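    # Illustrative example (an assumption, not from the original source): if a
    # table's leaf pages, ordered by page number, sit at 3, 4 and 7, the jump
    # from 4 to 7 is non-contiguous and this helper reports gap_count == 1.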
def _extract_sqlite_stats(self, table_name: str) -> dict:
query = '''SELECT
sum(ncell) AS nentry,
sum((pagetype == 'leaf') * ncell) AS leaf_entries,
sum(payload) AS payload,
sum((pagetype == 'overflow') * payload) AS ovfl_payload,
sum(path LIKE '%+000000') AS ovfl_cnt,
max(mx_payload) AS mx_payload,
sum(pagetype == 'internal') AS int_pages,
sum(pagetype == 'leaf') AS leaf_pages,
sum(pagetype == 'overflow') AS ovfl_pages,
sum((pagetype == 'internal') * unused) AS int_unused,
sum((pagetype == 'leaf') * unused) AS leaf_unused,
sum((pagetype == 'overflow') * unused) AS ovfl_unused,
sum(pgsize) AS compressed_size,
max((length(CASE WHEN path LIKE '%+%' THEN ''
ELSE path END)+3)/4) AS depth
FROM temp.dbstat
WHERE name = '{}';'''.format(table_name)
stats = self._row_to_dict(self._db.fetch_one_row(query))
stats['is_without_rowid'] = self.is_without_rowid(table_name)
stats['gap_count'] = self._count_gaps(table_name)
return stats
@staticmethod
def _row_to_dict(row) -> dict:
"""Convert an sqlite.row to a regular dictionary."""
res = {}
for column in row.keys():
res[column] = row[column]
return res
@staticmethod
def _extract_storage_metrics(row) -> StorageMetrics:
"""Convert an sqlite.row to a StorageMetrics object."""
res = StorageMetrics()
for column in row.keys():
res[column] = row[column]
return res
@staticmethod
def _percentage(value: float, total: float) -> float:
if total == 0:
return 0
return 100 * value / total
def _create_stat_virtual_table(self):
self._db.execute_query('''CREATE VIRTUAL TABLE temp.stat
USING dbstat''')
def _create_temp_stat_table(self):
self._create_stat_virtual_table()
self._db.execute_query('''CREATE TEMP TABLE dbstat
AS SELECT * FROM temp.stat
ORDER BY name, path''')
@staticmethod
def _stat_table_create_query():
return '''CREATE TABLE stats("
name STRING, /* Name of table or index */
path INTEGER, /* Path to page from root */
pageno INTEGER, /* Page number */
pagetype STRING, /* 'internal', 'leaf' or 'overflow' */
ncell INTEGER, /* Cells on page (0 for overflow) */
payload INTEGER, /* Bytes of payload on this page */
unused INTEGER, /* Bytes of unused space on this page */
mx_payload INTEGER, /* Largest payload size of all cells */
pgoffset INTEGER, /* Offset of page in file */
pgsize INTEGER /* Size of the page */
");'''
@staticmethod
def _spaceused_table_create_query():
return '''CREATE TABLE space_used(
name clob, -- Name of a table or index in the database file
tblname clob, -- Name of associated table
is_index boolean, -- TRUE if it is an index, false for a table
is_without_rowid boolean, -- TRUE if WITHOUT ROWID table
nentry int, -- Number of entries in the BTree
leaf_entries int, -- Number of leaf entries
depth int, -- Depth of the b-tree
payload int, -- Total amount of data stored in this table or index
ovfl_payload int, -- Total amount of data stored on overflow pages
ovfl_cnt int, -- Number of entries that use overflow
mx_payload int, -- Maximum payload size
int_pages int, -- Number of interior pages used
leaf_pages int, -- Number of leaf pages used
ovfl_pages int, -- Number of overflow pages used
int_unused int, -- Number of unused bytes on interior pages
leaf_unused int, -- Number of unused bytes on primary pages
ovfl_unused int, -- Number of unused bytes on overflow pages
gap_cnt int, -- Number of gaps in the page layout
compressed_size int -- Total bytes stored on disk
)'''
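# Minimal usage sketch, added for illustration (it assumes the hypothetical
# file 'example.db' exists and that SQLite was built with DBSTAT support, as
# the class docstring requires):
if __name__ == "__main__":
    analyzer = StorageAnalyzer("example.db")
    print("pages:", analyzer.page_count(), "x", analyzer.page_size(), "bytes")
    print("in use:", round(analyzer.in_use_percent(), 2), "%")
    for table, pages in analyzer.table_space_usage().items():
        print(" ", table, "->", pages, "page(s)")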
| 35.92129
| 89
| 0.534969
|
c336a0891f715c90f49832698a5dbe9209903b1e
| 4,840
|
py
|
Python
|
Application/appResanet.py
|
Aaldn/Resanet
|
23bc2d69e82b99c35d464d4a6e82cdd15c182952
|
[
"MIT"
] | null | null | null |
Application/appResanet.py
|
Aaldn/Resanet
|
23bc2d69e82b99c35d464d4a6e82cdd15c182952
|
[
"MIT"
] | null | null | null |
Application/appResanet.py
|
Aaldn/Resanet
|
23bc2d69e82b99c35d464d4a6e82cdd15c182952
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import *
from modeles import modeleResanet
from technique import datesResanet
app = Flask(__name__)
app.secret_key = 'resanet'
@app.route('/', methods=['GET'])
def index():
return render_template('vueAccueil.html')
@app.route('/usager/session/choisir', methods=['GET'])
def choisirSessionUsager():
return render_template('vueConnexionUsager.html', carteBloquee=False, echecConnexion=False, saisieIncomplete=False)
@app.route('/usager/seConnecter', methods=['POST'])
def seConnecterUsager():
numeroCarte = request.form['numeroCarte']
mdp = request.form['mdp']
if numeroCarte != '' and mdp != '':
usager = modeleResanet.seConnecterUsager(numeroCarte, mdp)
if len(usager) != 0:
if usager['activee'] == True:
session['numeroCarte'] = usager['numeroCarte']
session['nom'] = usager['nom']
session['prenom'] = usager['prenom']
session['mdp'] = mdp
return redirect('/usager/reservations/lister')
else:
return render_template('vueConnexionUsager.html', carteBloquee=True, echecConnexion=False, saisieIncomplete=False)
else:
return render_template('vueConnexionUsager.html', carteBloquee=False, echecConnexion=True, saisieIncomplete=False)
else:
return render_template('vueConnexionUsager.html', carteBloquee=False, echecConnexion=False, saisieIncomplete=True)
@app.route('/usager/seDeconnecter', methods=['GET'])
def seDeconnecterUsager():
session.pop('numeroCarte', None)
session.pop('nom', None)
session.pop('prenom', None)
return redirect('/')
@app.route('/usager/reservations/lister', methods=['GET'])
def listerReservations():
tarifRepas = modeleResanet.getTarifRepas(session['numeroCarte'])
soldeCarte = modeleResanet.getSolde(session['numeroCarte'])
solde = '%.2f' % (soldeCarte, )
aujourdhui = datesResanet.getDateAujourdhuiISO()
datesPeriodeISO = datesResanet.getDatesPeriodeCouranteISO()
datesResas = modeleResanet.getReservationsCarte(
session['numeroCarte'], datesPeriodeISO[0], datesPeriodeISO[-1])
dates = []
for uneDateISO in datesPeriodeISO:
uneDate = {}
uneDate['iso'] = uneDateISO
uneDate['fr'] = datesResanet.convertirDateISOversFR(uneDateISO)
if uneDateISO <= aujourdhui:
uneDate['verrouillee'] = True
else:
uneDate['verrouillee'] = False
if uneDateISO in datesResas:
uneDate['reservee'] = True
else:
uneDate['reservee'] = False
if soldeCarte < tarifRepas and uneDate['reservee'] == False:
uneDate['verrouillee'] = True
dates.append(uneDate)
if soldeCarte < tarifRepas:
soldeInsuffisant = True
else:
soldeInsuffisant = False
return render_template('vueListeReservations.html', laSession=session, leSolde=solde, lesDates=dates, soldeInsuffisant=soldeInsuffisant)
@app.route('/usager/reservations/annuler/<dateISO>', methods=['GET'])
def annulerReservation(dateISO):
modeleResanet.annulerReservation(session['numeroCarte'], dateISO)
modeleResanet.crediterSolde(session['numeroCarte'])
return redirect('/usager/reservations/lister')
@app.route('/usager/reservations/enregistrer/<dateISO>', methods=['GET'])
def enregistrerReservation(dateISO):
modeleResanet.enregistrerReservation(session['numeroCarte'], dateISO)
modeleResanet.debiterSolde(session['numeroCarte'])
return redirect('/usager/reservations/lister')
@app.route('/usager/mdp/modification/choisir', methods=['GET'])
def choisirModifierMdpUsager():
soldeCarte = modeleResanet.getSolde(session['numeroCarte'])
solde = '%.2f' % (soldeCarte, )
return render_template('vueModificationMdp.html', laSession=session, leSolde=solde, modifMdp='')
@app.route('/usager/mdp/modification/appliquer', methods=['POST'])
def modifierMdpUsager():
ancienMdp = request.form['ancienMDP']
nouveauMdp = request.form['nouveauMDP']
soldeCarte = modeleResanet.getSolde(session['numeroCarte'])
solde = '%.2f' % (soldeCarte, )
if ancienMdp != session['mdp'] or nouveauMdp == '':
return render_template('vueModificationMdp.html', laSession=session, leSolde=solde, modifMdp='Nok')
else:
modeleResanet.modifierMdpUsager(session['numeroCarte'], nouveauMdp)
session['mdp'] = nouveauMdp
return render_template('vueModificationMdp.html', laSession=session, leSolde=solde, modifMdp='Ok')
@app.route('/gestionnaire/session/choisir', methods=['GET'])
def choisirSessionGestionnaire():
return 'Formulaire authentification gestionnaire'
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000)
| 33.37931
| 140
| 0.688017
|
0118978e82d3c106136f1e216c1f31c6abf6e8f1
| 263
|
py
|
Python
|
day04/app04/models.py
|
940716tian/PythonStudy
|
05738c50c9d504d5f073ded9e70b29c2f76f5ea6
|
[
"Apache-2.0"
] | null | null | null |
day04/app04/models.py
|
940716tian/PythonStudy
|
05738c50c9d504d5f073ded9e70b29c2f76f5ea6
|
[
"Apache-2.0"
] | null | null | null |
day04/app04/models.py
|
940716tian/PythonStudy
|
05738c50c9d504d5f073ded9e70b29c2f76f5ea6
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
# Create your models here.
class Language(models.Model):
name = models.CharField(
max_length=20
)
desc = models.CharField(
max_length=30
)
def get_desc(self):
return "爱你的理由:%s"% self.desc
| 18.785714
| 36
| 0.627376
|
e5d113db595273bab9ce8e18cde9e73ee8ab2111
| 45,852
|
py
|
Python
|
metar/Metar.py
|
mkjunker/python-metar
|
ba599ad78f8e7d2f46849a5e98106a6221d1f787
|
[
"BSD-1-Clause"
] | 136
|
2018-07-25T06:44:30.000Z
|
2022-03-29T18:39:03.000Z
|
metar/Metar.py
|
mkjunker/python-metar
|
ba599ad78f8e7d2f46849a5e98106a6221d1f787
|
[
"BSD-1-Clause"
] | 125
|
2018-07-22T19:00:41.000Z
|
2022-03-30T13:28:44.000Z
|
metar/Metar.py
|
mkjunker/python-metar
|
ba599ad78f8e7d2f46849a5e98106a6221d1f787
|
[
"BSD-1-Clause"
] | 66
|
2018-07-22T18:57:23.000Z
|
2022-03-10T18:29:28.000Z
|
# Copyright (c) 2004,2018 Python-Metar Developers.
# Distributed under the terms of the BSD 2-Clause License.
# SPDX-License-Identifier: BSD-2-Clause
"""This module defines the Metar class.
A Metar object represents the weather report encoded by a single METAR code.
"""
import re
import datetime
import warnings
import logging
from metar import __version__, __author__, __email__, __LICENSE__
from metar.Datatypes import (
temperature,
pressure,
speed,
distance,
direction,
precipitation,
)
# logger
_logger = logging.getLogger(__name__)
# Exceptions
class ParserError(Exception):
"""Exception raised when an unparseable group is found in body of the report."""
pass
# regular expressions to decode various groups of the METAR code
MISSING_RE = re.compile(r"^[M/]+$")
TYPE_RE = re.compile(r"^(?P<type>METAR|SPECI)\s+")
COR_RE = re.compile(r"^(?P<cor>COR)\s+")
STATION_RE = re.compile(r"^(?P<station>[A-Z][A-Z0-9]{3})\s+")
TIME_RE = re.compile(
r"""^(?P<day>\d\d)
(?P<hour>\d\d)
(?P<min>\d\d)Z?\s+""",
re.VERBOSE,
)
MODIFIER_RE = re.compile(r"^(?P<mod>AUTO|COR AUTO|FINO|NIL|TEST|CORR?|RTD|CC[A-G])\s+")
WIND_RE = re.compile(
r"""^(?P<dir>[\dO]{3}|[0O]|///|MMM|VRB)
(?P<speed>P?[\dO]{2,3}|[/M]{2,3})
(G(?P<gust>P?(\d{1,3}|[/M]{1,3})))?
(?P<units>KTS?|LT|K|T|KMH|MPS)?
(\s+(?P<varfrom>\d\d\d)V
(?P<varto>\d\d\d))?\s+""",
re.VERBOSE,
)
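# Illustrative example, added for clarity (the sample string is an assumption,
# not from the original module): on the wind group "27015G25KT " (the pattern
# expects trailing whitespace), WIND_RE.match() captures dir='270', speed='15',
# gust='25' and units='KT'.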
VISIBILITY_RE = re.compile(
r"""^(?P<vis>(?P<dist>(M|P)?\d\d\d\d|////)
(?P<dir>[NSEW][EW]? | NDV)? |
(?P<distu>(M|P)?(\d+|\d\d?/\d\d?|\d+\s+\d/\d))
(?P<units>SM|KM|M|U) |
CAVOK )\s+""",
re.VERBOSE,
)
RUNWAY_RE = re.compile(
r"""^(RVRNO |
R(?P<name>\d\d(RR?|LL?|C)?)/
(?P<low>(M|P)?(\d\d\d\d|/{4}))
(V(?P<high>(M|P)?\d\d\d\d))?
(?P<unit>FT)?[/NDU]*)\s+""",
re.VERBOSE,
)
WEATHER_RE = re.compile(
r"""^(?P<int>(-|\+|VC)*)
(?P<desc>(MI|PR|BC|DR|BL|SH|TS|FZ)+)?
(?P<prec>(DZ|RA|SN|SG|IC|PL|GR|GS|UP|/)*)
(?P<obsc>BR|FG|FU|VA|DU|SA|HZ|PY)?
(?P<other>PO|SQ|FC|SS|DS|NSW|/+)?
(?P<int2>[-+])?\s+""",
re.VERBOSE,
)
SKY_RE = re.compile(
r"""^(?P<cover>VV|CLR|SKC|SCK|NSC|NCD|BKN|SCT|FEW|[O0]VC|///)
(?P<height>[\dO]{2,4}|///)?
(?P<cloud>([A-Z][A-Z]+|///))?\s+""",
re.VERBOSE,
)
TEMP_RE = re.compile(
r"""^(?P<temp>(M|-)?\d{1,2}|//|XX|MM)/
(?P<dewpt>(M|-)?\d{1,2}|//|XX|MM)?\s+""",
re.VERBOSE,
)
PRESS_RE = re.compile(
r"""^(?P<unit>A|Q|QNH)?
(?P<press>[\dO]{3,4}|////)
(?P<unit2>INS)?\s+""",
re.VERBOSE,
)
RECENT_RE = re.compile(
r"""^RE(?P<desc>MI|PR|BC|DR|BL|SH|TS|FZ)?
(?P<prec>(DZ|RA|SN|SG|IC|PL|GR|GS|UP)*)?
(?P<obsc>BR|FG|FU|VA|DU|SA|HZ|PY)?
(?P<other>PO|SQ|FC|SS|DS)?\s+""",
re.VERBOSE,
)
WINDSHEAR_RE = re.compile(r"^(WS\s+)?(ALL\s+RWY|R(WY)?(?P<name>\d\d(RR?|L?|C)?))\s+")
COLOR_RE = re.compile(
r"""^(BLACK)?(BLU|GRN|WHT|RED)\+?
(/?(BLACK)?(BLU|GRN|WHT|RED)\+?)*\s*""",
re.VERBOSE,
)
RUNWAYSTATE_RE = re.compile(
r"""((?P<snoclo>R/SNOCLO) |
((?P<name>\d\d) | R(?P<namenew>\d\d)(RR?|LL?|C)?/?)
((?P<special> SNOCLO|CLRD(\d\d|//)) |
(?P<deposit>(\d|/))
(?P<extent>(\d|/))
(?P<depth>(\d\d|//))
(?P<friction>(\d\d|//))))\s+""",
re.VERBOSE,
)
TREND_RE = re.compile(r"^(?P<trend>TEMPO|BECMG|FCST|NOSIG)\s+")
TRENDTIME_RE = re.compile(r"(?P<when>(FM|TL|AT))(?P<hour>\d\d)(?P<min>\d\d)\s+")
REMARK_RE = re.compile(r"^(RMKS?|NOSPECI|NOSIG)\s+")
# regular expressions for remark groups
AUTO_RE = re.compile(r"^AO(?P<type>\d)\s+")
SEALVL_PRESS_RE = re.compile(r"^SLP(?P<press>\d\d\d)\s+")
PEAK_WIND_RE = re.compile(
r"""^P[A-Z]\s+WND\s+
(?P<dir>\d\d\d)
(?P<speed>P?\d\d\d?)/
(?P<hour>\d\d)?
(?P<min>\d\d)\s+""",
re.VERBOSE,
)
WIND_SHIFT_RE = re.compile(
r"""^WSHFT\s+
(?P<hour>\d\d)?
(?P<min>\d\d)
(\s+(?P<front>FROPA))?\s+""",
re.VERBOSE,
)
PRECIP_1HR_RE = re.compile(r"^P(?P<precip>\d\d\d\d)\s+")
PRECIP_24HR_RE = re.compile(
r"""^(?P<type>6|7)
(?P<precip>\d\d\d\d)\s+""",
re.VERBOSE,
)
PRESS_3HR_RE = re.compile(
r"""^5(?P<tend>[0-8])
(?P<press>\d\d\d)\s+""",
re.VERBOSE,
)
TEMP_1HR_RE = re.compile(
r"""^T(?P<tsign>0|1)
(?P<temp>\d\d\d)
((?P<dsign>0|1)
(?P<dewpt>\d\d\d))?\s+""",
re.VERBOSE,
)
TEMP_6HR_RE = re.compile(
r"""^(?P<type>1|2)
(?P<sign>0|1)
(?P<temp>\d\d\d)\s+""",
re.VERBOSE,
)
TEMP_24HR_RE = re.compile(
r"""^4(?P<smaxt>0|1)
(?P<maxt>\d\d\d)
(?P<smint>0|1)
(?P<mint>\d\d\d)\s+""",
re.VERBOSE,
)
UNPARSED_RE = re.compile(r"(?P<group>\S+)\s+")
LIGHTNING_RE = re.compile(
r"""^((?P<freq>OCNL|FRQ|CONS)\s+)?
LTG(?P<type>(IC|CC|CG|CA)*)
( \s+(?P<loc>( OHD | VC | DSNT\s+ | \s+AND\s+ |
[NSEW][EW]? (-[NSEW][EW]?)* )+) )?\s+""",
re.VERBOSE,
)
TS_LOC_RE = re.compile(
r"""TS(\s+(?P<loc>( OHD | VC | DSNT\s+ | \s+AND\s+ |
[NSEW][EW]? (-[NSEW][EW]?)* )+))?
( \s+MOV\s+(?P<dir>[NSEW][EW]?) )?\s+""",
re.VERBOSE,
)
SNOWDEPTH_RE = re.compile(r"""^4/(?P<snowdepth>\d\d\d)\s+""")
ICE_ACCRETION_RE = re.compile(
r"^I(?P<ice_accretion_hours>[136])(?P<ice_accretion_depth>\d\d\d)\s+"
)
# translation of weather location codes
loc_terms = [("OHD", "overhead"), ("DSNT", "distant"), ("AND", "and"), ("VC", "nearby")]
def xlate_loc(loc):
"""Substitute English terms for the location codes in the given string."""
for code, english in loc_terms:
loc = loc.replace(code, english)
return loc
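# For example (illustrative only):
#     >>> xlate_loc("OHD AND VC")
#     'overhead and nearby'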
# translation of the sky-condition codes into English
SKY_COVER = {
"SKC": "clear",
"CLR": "clear",
"NSC": "clear",
"NCD": "clear",
"FEW": "a few ",
"SCT": "scattered ",
"BKN": "broken ",
"OVC": "overcast",
"///": "",
"VV": "indefinite ceiling",
}
CLOUD_TYPE = {
"AC": "altocumulus",
"ACC": "altocumulus castellanus",
"ACSL": "standing lenticular altocumulus",
"AS": "altostratus",
"CB": "cumulonimbus",
"CBMAM": "cumulonimbus mammatus",
"CCSL": "standing lenticular cirrocumulus",
"CC": "cirrocumulus",
"CI": "cirrus",
"CS": "cirrostratus",
"CU": "cumulus",
"NS": "nimbostratus",
"SC": "stratocumulus",
"ST": "stratus",
"SCSL": "standing lenticular stratocumulus",
"TCU": "towering cumulus",
}
# translation of the present-weather codes into English
WEATHER_INT = {
"-": "light",
"+": "heavy",
"-VC": "nearby light",
"+VC": "nearby heavy",
"VC": "nearby",
}
WEATHER_DESC = {
"MI": "shallow",
"PR": "partial",
"BC": "patches of",
"DR": "low drifting",
"BL": "blowing",
"SH": "showers",
"TS": "thunderstorm",
"FZ": "freezing",
}
WEATHER_PREC = {
"DZ": "drizzle",
"RA": "rain",
"SN": "snow",
"SG": "snow grains",
"IC": "ice crystals",
"PL": "ice pellets",
"GR": "hail",
"GS": "snow pellets",
"UP": "unknown precipitation",
"//": "",
}
WEATHER_OBSC = {
"BR": "mist",
"FG": "fog",
"FU": "smoke",
"VA": "volcanic ash",
"DU": "dust",
"SA": "sand",
"HZ": "haze",
"PY": "spray",
}
WEATHER_OTHER = {
"PO": "sand whirls",
"SQ": "squalls",
"FC": "funnel cloud",
"SS": "sandstorm",
"DS": "dust storm",
}
WEATHER_SPECIAL = {"+FC": "tornado"}
COLOR = {"BLU": "blue", "GRN": "green", "WHT": "white"}
# translation of various remark codes into English
PRESSURE_TENDENCY = {
"0": "increasing, then decreasing",
"1": "increasing more slowly",
"2": "increasing",
"3": "increasing more quickly",
"4": "steady",
"5": "decreasing, then increasing",
"6": "decreasing more slowly",
"7": "decreasing",
"8": "decreasing more quickly",
}
LIGHTNING_FREQUENCY = {"OCNL": "occasional", "FRQ": "frequent", "CONS": "constant"}
LIGHTNING_TYPE = {
"IC": "intracloud",
"CC": "cloud-to-cloud",
"CG": "cloud-to-ground",
"CA": "cloud-to-air",
}
REPORT_TYPE = {
"METAR": "routine report",
"SPECI": "special report",
"AUTO": "automatic report",
"COR": "manually corrected report",
}
# Helper functions
def _sanitize(code):
"""Some string prep to improve parsing fidelity."""
# Remove extraneous whitespace, any trailing =, then add trailing
# whitespace as regex matches need that.
return "%s " % (code.strip().rstrip("="),)
def _report_match(handler, match):
"""Report success or failure of the given handler function. (DEBUG)"""
if match:
_logger.debug("%s matched '%s'", handler.__name__, match)
else:
_logger.debug("%s didn't match...", handler.__name__)
def _unparsedGroup(self, d):
"""
Handle otherwise unparseable main-body groups.
"""
self._unparsed_groups.append(d["group"])
# METAR report objects
debug = False
class Metar(object):
"""METAR (aviation meteorology report)"""
def __init__(self, metarcode, month=None, year=None, utcdelta=None, strict=True):
"""
Parse raw METAR code.
Parameters
----------
metarcode : str
month, year : int, optional
Date values to be used when parsing a non-current METAR code. If not
provided, then the month and year are guessed from the current date.
utcdelta : int or datetime.timedelta, optional
An int of hours or a timedelta object used to specify the timezone.
strict : bool (default is True)
This option determines if a ``ParserError`` is raised when
unparsable groups are found or an unexpected exception is encountered.
Setting this to `False` will prevent exceptions from being raised and
only generate warning messages.
"""
self.code = metarcode # original METAR code
self.type = "METAR" # METAR (routine) or SPECI (special)
self.correction = None # COR (corrected - WMO spec)
self.mod = "AUTO" # AUTO (automatic) or COR (corrected - US spec)
self.station_id = None # 4-character ICAO station code
self.time = None # observation time [datetime]
self.cycle = None # observation cycle (0-23) [int]
self.wind_dir = None # wind direction [direction]
self.wind_speed = None # wind speed [speed]
self.wind_gust = None # wind gust speed [speed]
        self.wind_dir_from = None  # beginning of range for wind dir [direction]
self.wind_dir_to = None # end of range for wind dir [direction]
self.vis = None # visibility [distance]
self.vis_dir = None # visibility direction [direction]
self.max_vis = None # visibility [distance]
self.max_vis_dir = None # visibility direction [direction]
self.temp = None # temperature (C) [temperature]
self.dewpt = None # dew point (C) [temperature]
self.press = None # barometric pressure [pressure]
self.runway = [] # runway visibility (list of tuples)
self.weather = [] # present weather (list of tuples)
self.recent = [] # recent weather (list of tuples)
self.sky = [] # sky conditions (list of tuples)
self.windshear = [] # runways w/ wind shear (list of strings)
self.wind_speed_peak = None # peak wind speed in last hour
self.wind_dir_peak = None # direction of peak wind speed in last hour
self.peak_wind_time = None # time of peak wind observation [datetime]
self.wind_shift_time = None # time of wind shift [datetime]
self.max_temp_6hr = None # max temp in last 6 hours
self.min_temp_6hr = None # min temp in last 6 hours
self.max_temp_24hr = None # max temp in last 24 hours
self.min_temp_24hr = None # min temp in last 24 hours
self.press_sea_level = None # sea-level pressure
self.precip_1hr = None # precipitation over the last hour
self.precip_3hr = None # precipitation over the last 3 hours
self.precip_6hr = None # precipitation over the last 6 hours
self.precip_24hr = None # precipitation over the last 24 hours
self.snowdepth = None # snow depth (distance)
self.ice_accretion_1hr = None # ice accretion over the past hour
self.ice_accretion_3hr = None # ice accretion over the past 3 hours
self.ice_accretion_6hr = None # ice accretion over the past 6 hours
self._trend = False # trend groups present (bool)
self._trend_groups = [] # trend forecast groups
self._remarks = [] # remarks (list of strings)
self._unparsed_groups = []
self._unparsed_remarks = []
self._now = datetime.datetime.utcnow()
if utcdelta:
self._utcdelta = utcdelta
else:
self._utcdelta = datetime.datetime.now() - self._now
self._month = month
self._year = year
# Do some string prep before parsing
code = _sanitize(self.code)
try:
ngroup = len(self.handlers)
igroup = 0
ifailed = -1
while igroup < ngroup and code:
pattern, handler, repeatable = self.handlers[igroup]
if debug:
_logger.debug("%s: %s", handler.__name__, code)
m = pattern.match(code)
while m:
ifailed = -1
if debug:
_report_match(handler, m.group())
handler(self, m.groupdict())
code = code[m.end():]
if self._trend:
code = self._do_trend_handlers(code)
if not repeatable:
break
if debug:
_logger.debug("%s: %s", handler.__name__, code)
m = pattern.match(code)
if not m and ifailed < 0:
ifailed = igroup
igroup += 1
if igroup == ngroup and not m:
pattern, handler = (UNPARSED_RE, _unparsedGroup)
if debug:
_logger.debug("%s: %s", handler.__name__, code)
m = pattern.match(code)
if debug:
_report_match(handler, m.group())
handler(self, m.groupdict())
code = code[m.end():]
igroup = ifailed
ifailed = -2 # if it's still -2 when we run out of main-body
# groups, we'll try parsing this group as a remark
if pattern == REMARK_RE or self.press:
while code:
for pattern, handler in self.remark_handlers:
if debug:
_logger.debug("%s: %s", handler.__name__, code)
m = pattern.match(code)
if m:
if debug:
_report_match(handler, m.group())
handler(self, m.groupdict())
code = pattern.sub("", code, 1)
break
except Exception as err:
message = ("%s failed while processing '%s'\n\t%s") % (
handler.__name__,
code,
"\n\t".join(err.args),
)
if strict:
raise ParserError(message)
else:
warnings.warn(message, RuntimeWarning)
if self._unparsed_groups:
code = " ".join(self._unparsed_groups)
message = "Unparsed groups in body '%s' while processing '%s'" % (
code,
metarcode,
)
if strict:
raise ParserError(message)
else:
warnings.warn(message, RuntimeWarning)
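    # A sketch of the effect of ``strict`` (illustrative, made-up report):
    # with strict=False an unparsable group such as "?????" only emits a
    # RuntimeWarning and leaves ``decode_completed`` False, e.g.
    #     obs = Metar("METAR KXYZ 011651Z 21010KT ????? 28/17 A3002",
    #                 strict=False)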
@property
def decode_completed(self):
"""
Indicate whether the decoding was complete for non-remark elements.
"""
return not self._unparsed_groups
def _do_trend_handlers(self, code):
for pattern, handler, repeatable in self.trend_handlers:
if debug:
print(handler.__name__, ":", code)
m = pattern.match(code)
while m:
if debug:
_report_match(handler, m.group())
self._trend_groups.append(m.group().strip())
handler(self, m.groupdict())
code = code[m.end():]
if not repeatable:
break
m = pattern.match(code)
return code
def __str__(self):
return self.string()
def _handleType(self, d):
"""
Parse the report-type group.
The following attributes are set:
type [string]
"""
self.type = d["type"]
def _handleCorrection(self, d):
"""
Parse the correction group.
The following attributes are set:
correction [string]
"""
self.correction = d["cor"]
def _handleStation(self, d):
"""
Parse the station id group.
The following attributes are set:
station_id [string]
"""
self.station_id = d["station"]
def _handleModifier(self, d):
"""
Parse the report-modifier group.
The following attributes are set:
mod [string]
"""
mod = d["mod"]
if mod == "CORR":
mod = "COR"
if mod == "NIL" or mod == "FINO":
mod = "NO DATA"
self.mod = mod
def _handleTime(self, d):
"""
Parse the observation-time group.
The following attributes are set:
time [datetime]
cycle [int]
_day [int]
_hour [int]
_min [int]
"""
self._day = int(d["day"])
if not self._month:
self._month = self._now.month
if self._day > self._now.day:
if self._month == 1:
self._month = 12
else:
self._month = self._month - 1
if not self._year:
self._year = self._now.year
if self._month > self._now.month:
self._year = self._year - 1
elif self._month == self._now.month and self._day > self._now.day:
self._year = self._year - 1
self._hour = int(d["hour"])
self._min = int(d["min"])
self.time = datetime.datetime(
self._year, self._month, self._day, self._hour, self._min
)
if self._min < 45:
self.cycle = self._hour
else:
self.cycle = self._hour + 1
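    # For example, an observation at 17:20Z falls in cycle 17, while one at
    # 17:51Z rolls forward into cycle 18 (minutes >= 45 round up).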
def _handleWind(self, d):
"""
Parse the wind and variable-wind groups.
The following attributes are set:
wind_dir [direction]
wind_speed [speed]
wind_gust [speed]
wind_dir_from [int]
wind_dir_to [int]
"""
wind_dir = d["dir"].replace("O", "0")
if wind_dir != "VRB" and wind_dir != "///" and wind_dir != "MMM":
self.wind_dir = direction(wind_dir)
wind_speed = d["speed"].replace("O", "0")
units = d["units"]
# Ambiguous METAR when no wind speed units are provided
if units is None and self.station_id is not None:
# Assume US METAR sites are reporting in KT
if len(self.station_id) == 3 or self.station_id.startswith("K"):
units = "KT"
# If units are still None, default to MPS
if units is None:
units = "MPS"
if units == "KTS" or units == "K" or units == "T" or units == "LT":
units = "KT"
if wind_speed.startswith("P"):
self.wind_speed = speed(wind_speed[1:], units, ">")
elif not MISSING_RE.match(wind_speed):
self.wind_speed = speed(wind_speed, units)
if d["gust"]:
wind_gust = d["gust"]
if wind_gust.startswith("P"):
self.wind_gust = speed(wind_gust[1:], units, ">")
elif not MISSING_RE.match(wind_gust):
self.wind_gust = speed(wind_gust, units)
if d["varfrom"]:
self.wind_dir_from = direction(d["varfrom"])
self.wind_dir_to = direction(d["varto"])
def _handleVisibility(self, d):
"""
Parse the minimum and maximum visibility groups.
The following attributes are set:
vis [distance]
vis_dir [direction]
max_vis [distance]
max_vis_dir [direction]
"""
vis = d["vis"]
vis_less = None
vis_dir = None
vis_units = "M"
vis_dist = "10000"
if d["dist"] and d["dist"] != "////":
vis_dist = d["dist"]
if d["dir"] and d["dir"] != "NDV":
vis_dir = d["dir"]
elif d["distu"]:
vis_dist = d["distu"]
if d["units"] and d["units"] != "U":
vis_units = d["units"]
if vis_dist == "9999":
vis_dist = "10000"
vis_less = ">"
if self.vis:
if vis_dir:
self.max_vis_dir = direction(vis_dir)
self.max_vis = distance(vis_dist, vis_units, vis_less)
else:
if vis_dir:
self.vis_dir = direction(vis_dir)
self.vis = distance(vis_dist, vis_units, vis_less)
def _handleRunway(self, d):
"""
Parse a runway visual range group.
The following attributes are set:
range [list of tuples]
. name [string]
. low [distance]
. high [distance]
. unit [string]
"""
if d["name"] is None:
return
unit = d["unit"] if d["unit"] is not None else "M"
if d["low"] == "////":
return
else:
low = distance(d["low"], unit)
if d["high"] is None:
high = low
else:
high = distance(d["high"], unit)
self.runway.append([d["name"], low, high, unit])
def _handleWeather(self, d):
"""
Parse a present-weather group.
The following attributes are set:
weather [list of tuples]
. intensity [string]
. description [string]
. precipitation [string]
. obscuration [string]
. other [string]
"""
inteni = d["int"]
if not inteni and d["int2"]:
inteni = d["int2"]
desci = d["desc"]
preci = d["prec"]
obsci = d["obsc"]
otheri = d["other"]
self.weather.append((inteni, desci, preci, obsci, otheri))
def _handleSky(self, d):
"""
Parse a sky-conditions group.
The following attributes are set:
sky [list of tuples]
. cover [string]
. height [distance]
. cloud [string]
"""
height = d["height"]
if not height or height == "///":
height = None
else:
height = height.replace("O", "0")
height = distance(int(height) * 100, "FT")
cover = d["cover"]
if cover == "SCK" or cover == "SKC" or cover == "CL":
cover = "CLR"
if cover == "0VC":
cover = "OVC"
cloud = d["cloud"]
if cloud == "///":
cloud = ""
self.sky.append((cover, height, cloud))
def _handleTemp(self, d):
"""
Parse a temperature-dewpoint group.
The following attributes are set:
temp temperature (Celsius) [float]
dewpt dew point (Celsius) [float]
"""
temp = d["temp"]
dewpt = d["dewpt"]
if temp and temp != "//" and temp != "XX" and temp != "MM":
self.temp = temperature(temp)
if dewpt and dewpt != "//" and dewpt != "XX" and dewpt != "MM":
self.dewpt = temperature(dewpt)
def _handlePressure(self, d):
"""
Parse an altimeter-pressure group.
The following attributes are set:
press [int]
"""
press = d["press"]
if press != "////":
press = float(press.replace("O", "0"))
if d["unit"]:
if d["unit"] == "A" or (d["unit2"] and d["unit2"] == "INS"):
self.press = pressure(press / 100, "IN")
elif d["unit"] == "SLP":
if press < 500:
press = press / 10 + 1000
else:
press = press / 10 + 900
self.press = pressure(press, "MB")
self._remarks.append("sea-level pressure %.1fhPa" % press)
else:
self.press = pressure(press, "MB")
elif press > 2500:
self.press = pressure(press / 100, "IN")
else:
self.press = pressure(press, "MB")
def _handleRecent(self, d):
"""
Parse a recent-weather group.
The following attributes are set:
weather [list of tuples]
. intensity [string]
. description [string]
. precipitation [string]
. obscuration [string]
. other [string]
"""
desci = d["desc"]
preci = d["prec"]
obsci = d["obsc"]
otheri = d["other"]
self.recent.append(("", desci, preci, obsci, otheri))
def _handleWindShear(self, d):
"""
Parse wind-shear groups.
The following attributes are set:
windshear [list of strings]
"""
if d["name"]:
self.windshear.append(d["name"])
else:
self.windshear.append("ALL")
def _handleColor(self, d):
"""
Parse (and ignore) the color groups.
The following attributes are set:
trend [list of strings]
"""
pass
def _handleRunwayState(self, d):
"""
Parse (and ignore) the runway state.
The following attributes are set:
"""
pass
def _handleTrend(self, d):
"""
Parse (and ignore) the trend groups.
"""
if "trend" in d:
self._trend_groups.append(d["trend"])
self._trend = True
def _startRemarks(self, d):
"""
Found the start of the remarks section.
"""
self._remarks = []
def _handleSealvlPressRemark(self, d):
"""
Parse the sea-level pressure remark group.
"""
value = float(d["press"]) / 10.0
if value < 50:
value += 1000
else:
value += 900
self.press_sea_level = pressure(value, "MB")
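    # For example (illustrative): "SLP132" decodes to 1013.2 hPa and "SLP982"
    # to 998.2 hPa, following the <50 / >=50 split above.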
def _handlePrecip24hrRemark(self, d):
"""
        Parse a 3-, 6- or 24-hour cumulative precipitation remark group.
"""
value = float(d["precip"]) / 100.0
if d["type"] == "6":
if self.cycle in [3, 9, 15, 21]:
self.precip_3hr = precipitation(value, "IN")
else:
self.precip_6hr = precipitation(value, "IN")
else:
self.precip_24hr = precipitation(value, "IN")
def _handlePrecip1hrRemark(self, d):
"""Parse an hourly precipitation remark group."""
value = float(d["precip"]) / 100.0
self.precip_1hr = precipitation(value, "IN")
def _handleTemp1hrRemark(self, d):
"""
Parse a temperature & dewpoint remark group.
These values replace the temp and dewpt from the body of the report.
"""
value = float(d["temp"]) / 10.0
if d["tsign"] == "1":
value = -value
self.temp = temperature(value)
if d["dewpt"]:
value2 = float(d["dewpt"]) / 10.0
if d["dsign"] == "1":
value2 = -value2
self.dewpt = temperature(value2)
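    # For example (illustrative): the remark "T00261015" yields temp 2.6 C and
    # dewpt -1.5 C (a leading 1 in either half flags a negative value).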
def _handleTemp6hrRemark(self, d):
"""
Parse a 6-hour maximum or minimum temperature remark group.
"""
value = float(d["temp"]) / 10.0
if d["sign"] == "1":
value = -value
if d["type"] == "1":
self.max_temp_6hr = temperature(value, "C")
else:
self.min_temp_6hr = temperature(value, "C")
def _handleTemp24hrRemark(self, d):
"""
Parse a 24-hour maximum/minimum temperature remark group.
"""
value = float(d["maxt"]) / 10.0
if d["smaxt"] == "1":
value = -value
value2 = float(d["mint"]) / 10.0
if d["smint"] == "1":
value2 = -value2
self.max_temp_24hr = temperature(value, "C")
self.min_temp_24hr = temperature(value2, "C")
def _handlePress3hrRemark(self, d):
"""
Parse a pressure-tendency remark group.
"""
value = float(d["press"]) / 10.0
descrip = PRESSURE_TENDENCY[d["tend"]]
self._remarks.append("3-hr pressure change %.1fhPa, %s" % (value, descrip))
def _handlePeakWindRemark(self, d):
"""
Parse a peak wind remark group.
"""
peak_dir = int(d["dir"])
peak_speed = int(d["speed"])
self.wind_speed_peak = speed(peak_speed, "KT")
self.wind_dir_peak = direction(peak_dir)
peak_min = int(d["min"])
if d["hour"]:
peak_hour = int(d["hour"])
else:
peak_hour = self._hour
self.peak_wind_time = datetime.datetime(
self._year, self._month, self._day, peak_hour, peak_min
)
if self.peak_wind_time > self.time:
if peak_hour > self._hour:
self.peak_wind_time -= datetime.timedelta(hours=24)
else:
self.peak_wind_time -= datetime.timedelta(hours=1)
self._remarks.append(
"peak wind %dkt from %d degrees at %d:%02d"
% (peak_speed, peak_dir, peak_hour, peak_min)
)
def _handleWindShiftRemark(self, d):
"""
Parse a wind shift remark group.
"""
if d["hour"]:
wshft_hour = int(d["hour"])
else:
wshft_hour = self._hour
wshft_min = int(d["min"])
self.wind_shift_time = datetime.datetime(
self._year, self._month, self._day, wshft_hour, wshft_min
)
if self.wind_shift_time > self.time:
if wshft_hour > self._hour:
self.wind_shift_time -= datetime.timedelta(hours=24)
else:
self.wind_shift_time -= datetime.timedelta(hours=1)
text = "wind shift at %d:%02d" % (wshft_hour, wshft_min)
if d["front"]:
text += " (front)"
self._remarks.append(text)
def _handleLightningRemark(self, d):
"""
Parse a lightning observation remark group.
"""
parts = []
if d["freq"]:
parts.append(LIGHTNING_FREQUENCY[d["freq"]])
parts.append("lightning")
if d["type"]:
ltg_types = []
group = d["type"]
while group:
ltg_types.append(LIGHTNING_TYPE[group[:2]])
group = group[2:]
parts.append("(" + ",".join(ltg_types) + ")")
if d["loc"]:
parts.append(xlate_loc(d["loc"]))
self._remarks.append(" ".join(parts))
def _handleTSLocRemark(self, d):
"""
Parse a thunderstorm location remark group.
"""
text = "thunderstorm"
if d["loc"]:
text += " " + xlate_loc(d["loc"])
if d["dir"]:
text += " moving %s" % d["dir"]
self._remarks.append(text)
def _handleAutoRemark(self, d):
"""
Parse an automatic station remark group.
"""
if d["type"] == "1":
self._remarks.append("Automated station")
elif d["type"] == "2":
self._remarks.append("Automated station (type 2)")
def _handleSnowDepthRemark(self, d):
"""
        Parse the 4/ group snowdepth report.
"""
self.snowdepth = distance(float(d["snowdepth"]), "IN")
self._remarks.append(" snowdepth %s" % (self.snowdepth,))
def _handleIceAccretionRemark(self, d):
"""
Parse the I/ group ice accretion report.
"""
myattr = "ice_accretion_%shr" % (d["ice_accretion_hours"],)
value = precipitation(float(d["ice_accretion_depth"]) / 100.0, "IN")
setattr(self, myattr, value)
def _unparsedRemark(self, d):
"""
Handle otherwise unparseable remark groups.
"""
self._unparsed_remarks.append(d["group"])
# the list of handler functions to use (in order) to process a METAR report
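    # Each entry is (regex, handler, repeatable); 'repeatable' marks groups
    # that may legitimately occur more than once in a report (e.g. several
    # sky-condition groups), so the parse loop keeps re-applying the pattern.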
handlers = [
(TYPE_RE, _handleType, False),
(COR_RE, _handleCorrection, False),
(STATION_RE, _handleStation, False),
(TIME_RE, _handleTime, False),
(MODIFIER_RE, _handleModifier, False),
(WIND_RE, _handleWind, False),
(VISIBILITY_RE, _handleVisibility, True),
(RUNWAY_RE, _handleRunway, True),
(WEATHER_RE, _handleWeather, True),
(SKY_RE, _handleSky, True),
(WIND_RE, _handleWind, False),
(VISIBILITY_RE, _handleVisibility, True),
(TEMP_RE, _handleTemp, False),
(PRESS_RE, _handlePressure, True),
(SEALVL_PRESS_RE, _handleSealvlPressRemark, False),
(RECENT_RE, _handleRecent, True),
(WINDSHEAR_RE, _handleWindShear, True),
(COLOR_RE, _handleColor, True),
(RUNWAYSTATE_RE, _handleRunwayState, True),
(TREND_RE, _handleTrend, True),
(REMARK_RE, _startRemarks, False),
]
trend_handlers = [
(TRENDTIME_RE, _handleTrend, True),
(WIND_RE, _handleTrend, True),
(VISIBILITY_RE, _handleTrend, True),
(WEATHER_RE, _handleTrend, True),
(SKY_RE, _handleTrend, True),
(COLOR_RE, _handleTrend, True),
]
# the list of patterns for the various remark groups,
# paired with the handler functions to use to record the decoded remark.
remark_handlers = [
(AUTO_RE, _handleAutoRemark),
(SEALVL_PRESS_RE, _handleSealvlPressRemark),
(PEAK_WIND_RE, _handlePeakWindRemark),
(WIND_SHIFT_RE, _handleWindShiftRemark),
(LIGHTNING_RE, _handleLightningRemark),
(TS_LOC_RE, _handleTSLocRemark),
(TEMP_1HR_RE, _handleTemp1hrRemark),
(PRECIP_1HR_RE, _handlePrecip1hrRemark),
(PRECIP_24HR_RE, _handlePrecip24hrRemark),
(PRESS_3HR_RE, _handlePress3hrRemark),
(TEMP_6HR_RE, _handleTemp6hrRemark),
(TEMP_24HR_RE, _handleTemp24hrRemark),
(SNOWDEPTH_RE, _handleSnowDepthRemark),
(ICE_ACCRETION_RE, _handleIceAccretionRemark),
(UNPARSED_RE, _unparsedRemark),
]
# functions that return text representations of conditions for output
def string(self):
"""
Return a human-readable version of the decoded report.
"""
lines = []
lines.append("station: %s" % self.station_id)
if self.type:
lines.append("type: %s" % self.report_type())
if self.time:
lines.append("time: %s" % self.time.ctime())
if self.temp:
lines.append("temperature: %s" % self.temp.string("C"))
if self.dewpt:
lines.append("dew point: %s" % self.dewpt.string("C"))
if self.wind_speed:
lines.append("wind: %s" % self.wind())
if self.wind_speed_peak:
lines.append("peak wind: %s" % self.peak_wind())
if self.wind_shift_time:
lines.append("wind shift: %s" % self.wind_shift())
if self.vis:
lines.append("visibility: %s" % self.visibility())
if self.runway:
lines.append("visual range: %s" % self.runway_visual_range())
if self.press:
lines.append("pressure: %s" % self.press.string("mb"))
if self.weather:
lines.append("weather: %s" % self.present_weather())
if self.sky:
lines.append("sky: %s" % self.sky_conditions("\n "))
if self.press_sea_level:
lines.append("sea-level pressure: %s" % self.press_sea_level.string("mb"))
if self.max_temp_6hr:
lines.append("6-hour max temp: %s" % str(self.max_temp_6hr))
        if self.min_temp_6hr:
lines.append("6-hour min temp: %s" % str(self.min_temp_6hr))
if self.max_temp_24hr:
lines.append("24-hour max temp: %s" % str(self.max_temp_24hr))
        if self.min_temp_24hr:
lines.append("24-hour min temp: %s" % str(self.min_temp_24hr))
if self.precip_1hr:
lines.append("1-hour precipitation: %s" % str(self.precip_1hr))
if self.precip_3hr:
lines.append("3-hour precipitation: %s" % str(self.precip_3hr))
if self.precip_6hr:
lines.append("6-hour precipitation: %s" % str(self.precip_6hr))
if self.precip_24hr:
lines.append("24-hour precipitation: %s" % str(self.precip_24hr))
if self.ice_accretion_1hr:
lines.append("1-hour Ice Accretion: %s" % str(self.ice_accretion_1hr))
if self.ice_accretion_3hr:
lines.append("3-hour Ice Accretion: %s" % str(self.ice_accretion_3hr))
if self.ice_accretion_6hr:
lines.append("6-hour Ice Accretion: %s" % str(self.ice_accretion_6hr))
if self._remarks:
lines.append("remarks:")
lines.append("- " + self.remarks("\n- "))
if self._unparsed_remarks:
lines.append("- " + " ".join(self._unparsed_remarks))
lines.append("METAR: " + self.code)
return "\n".join(lines)
def report_type(self):
"""
Return a textual description of the report type.
"""
if self.type is None:
text = "unknown report type"
elif self.type in REPORT_TYPE:
text = REPORT_TYPE[self.type]
else:
text = self.type + " report"
if self.cycle:
text += ", cycle %d" % self.cycle
if self.mod:
if self.mod in REPORT_TYPE:
text += " (%s)" % REPORT_TYPE[self.mod]
else:
text += " (%s)" % self.mod
if self.correction:
text += " (%s)" % REPORT_TYPE[self.correction]
return text
def wind(self, units="KT"):
"""
Return a textual description of the wind conditions.
Units may be specified as "MPS", "KT", "KMH", or "MPH".
"""
if self.wind_speed is None:
return "missing"
elif self.wind_speed.value() == 0.0:
text = "calm"
else:
wind_speed = self.wind_speed.string(units)
if not self.wind_dir:
text = "variable at %s" % wind_speed
elif self.wind_dir_from:
text = "%s to %s at %s" % (
self.wind_dir_from.compass(),
self.wind_dir_to.compass(),
wind_speed,
)
else:
text = "%s at %s" % (self.wind_dir.compass(), wind_speed)
if self.wind_gust:
text += ", gusting to %s" % self.wind_gust.string(units)
return text
def peak_wind(self, units="KT"):
"""
Return a textual description of the peak wind conditions.
Units may be specified as "MPS", "KT", "KMH", or "MPH".
"""
if self.wind_speed_peak is None:
return "missing"
elif self.wind_speed_peak.value() == 0.0:
text = "calm"
else:
wind_speed = self.wind_speed_peak.string(units)
if not self.wind_dir_peak:
text = wind_speed
else:
text = "%s at %s" % (self.wind_dir_peak.compass(), wind_speed)
if self.peak_wind_time is not None:
text += " at %s" % self.peak_wind_time.strftime("%H:%M")
return text
def wind_shift(self, units="KT"):
"""
Return a textual description of the wind shift time
Units may be specified as "MPS", "KT", "KMH", or "MPH".
"""
if self.wind_shift_time is None:
return "missing"
else:
return self.wind_shift_time.strftime("%H:%M")
def visibility(self, units=None):
"""
Return a textual description of the visibility.
Units may be statute miles ("SM") or meters ("M").
"""
if self.vis is None:
return "missing"
if self.vis_dir:
text = "%s to %s" % (self.vis.string(units), self.vis_dir.compass())
else:
text = self.vis.string(units)
if self.max_vis:
if self.max_vis_dir:
text += "; %s to %s" % (
self.max_vis.string(units),
self.max_vis_dir.compass(),
)
else:
text += "; %s" % self.max_vis.string(units)
return text
def runway_visual_range(self, units=None):
"""
Return a textual description of the runway visual range.
"""
lines = []
for name, low, high, unit in self.runway:
reportunits = unit if units is None else units
if low != high:
lines.append(
("on runway %s, from %d to %s")
% (name, low.value(reportunits), high.string(reportunits))
)
else:
lines.append("on runway %s, %s" % (name, low.string(reportunits)))
return "; ".join(lines)
def present_weather(self):
"""
Return a textual description of the present weather.
"""
return self._weather(self.weather)
def recent_weather(self):
"""
Return a textual description of the recent weather.
"""
return self._weather(self.recent)
def _weather(self, weather):
"""
Return a textual description of weather.
"""
text_list = []
for weatheri in weather:
(inteni, desci, preci, obsci, otheri) = weatheri
text_parts = []
code_parts = []
if inteni:
code_parts.append(inteni)
text_parts.append(WEATHER_INT[inteni])
if desci:
code_parts.append(desci)
if desci != "SH" or not preci:
text_parts.append(WEATHER_DESC[desci[0:2]])
if len(desci) == 4:
text_parts.append(WEATHER_DESC[desci[2:]])
if preci:
code_parts.append(preci)
if len(preci) == 2:
precip_text = WEATHER_PREC[preci]
elif len(preci) == 4:
precip_text = WEATHER_PREC[preci[:2]] + " and "
precip_text += WEATHER_PREC[preci[2:]]
elif len(preci) == 6:
precip_text = WEATHER_PREC[preci[:2]] + ", "
precip_text += WEATHER_PREC[preci[2:4]] + " and "
precip_text += WEATHER_PREC[preci[4:]]
else:
precip_text = preci
if desci == "TS":
text_parts.append("with")
text_parts.append(precip_text)
if desci == "SH":
text_parts.append(WEATHER_DESC[desci])
if obsci:
code_parts.append(obsci)
text_parts.append(WEATHER_OBSC[obsci])
if otheri:
code_parts.append(otheri)
text_parts.append(WEATHER_OTHER[otheri])
code = " ".join(code_parts)
if code in WEATHER_SPECIAL:
text_list.append(WEATHER_SPECIAL[code])
else:
text_list.append(" ".join(text_parts))
return "; ".join(text_list)
def sky_conditions(self, sep="; "):
"""
Return a textual description of the sky conditions.
"""
text_list = []
for skyi in self.sky:
(cover, height, cloud) = skyi
if cover in ["SKC", "CLR", "NSC"]:
text_list.append(SKY_COVER[cover])
else:
if cloud:
what = CLOUD_TYPE.get(cloud, "unknown CLOUD_TYPE of %s" % (cloud,))
elif SKY_COVER[cover].endswith(" "):
what = "clouds"
else:
what = ""
label = "%s %s" % (SKY_COVER[cover], what)
# HACK here to account for 'empty' entries with above format
label = " ".join(label.strip().split())
if cover == "VV":
label += ", vertical visibility to %s" % (str(height),)
else:
label += " at %s" % (str(height),)
text_list.append(label)
return sep.join(text_list)
def trend(self):
"""
Return the trend forecast groups
"""
return " ".join(self._trend_groups)
def remarks(self, sep="; "):
"""
Return the decoded remarks.
"""
return sep.join(self._remarks)
| 33.322674
| 88
| 0.521264
|
edf40e09cc30a180fb7113f0b7e03df685123dfa
| 4,836
|
py
|
Python
|
tests/test_functiondef_handler.py
|
DrewTChrist/obscurepy
|
4c1f28bc2fef27519fa7e8eddfeefa8e766d117e
|
[
"MIT"
] | 11
|
2021-05-21T18:22:40.000Z
|
2021-08-23T23:56:38.000Z
|
tests/test_functiondef_handler.py
|
DrewTChrist/obscurepy
|
4c1f28bc2fef27519fa7e8eddfeefa8e766d117e
|
[
"MIT"
] | 7
|
2021-09-14T02:42:45.000Z
|
2021-09-18T14:40:47.000Z
|
tests/test_functiondef_handler.py
|
DrewTChrist/obscurepy
|
4c1f28bc2fef27519fa7e8eddfeefa8e766d117e
|
[
"MIT"
] | 2
|
2021-05-26T12:27:15.000Z
|
2021-07-18T02:47:31.000Z
|
import ast
import unittest
from obscurepy.handlers.classdef_handler import ClassDefHandler
from obscurepy.handlers.functiondef_handler import FunctionDefHandler
from obscurepy.utils.definition_tracker import DefinitionTracker
from obscurepy.handlers.functiondef_handler import get_args, get_variables, get_return
from obscurepy.handlers.functiondef_handler import handle_function_scope, handle_class_scope, handle_global_scope
from obscurepy.utils.tree import add_parents
class FunctionDefHandlerTest(unittest.TestCase):
def setUp(self):
self.fixture = FunctionDefHandler()
self.classdef_handler = ClassDefHandler()
self.tracker = DefinitionTracker.get_instance()
self.source = 'def FirstFunction(param_1, param_2):\n\t' \
'first_variable = 42\n\t' \
'second_variable = param_1 + param_2\n\t' \
'return second_variable\n' \
'class SomeClass():\n\t' \
'def some_method():\n\t\t' \
'pass'
self.tree = ast.parse(self.source)
add_parents(self.tree)
self.tree = self.classdef_handler.handle(self.tree)
def tearDown(self):
self.tracker.clear_definitions()
def test_visitFunctionDef(self):
self.tree = self.fixture.handle(self.tree)
self.assertEqual(len(self.tracker.definitions['functions']), 1)
function = self.tracker.definitions['functions']['FirstFunction']
self.assertEqual(function['new_name'], '_0x54e')
self.assertEqual(function['prev_name'], 'FirstFunction')
self.assertEqual(function['variables']
['first_variable']['new_name'], '_0x5cd')
self.assertEqual(function['args']['param_1']['new_name'], '_0x2a1')
self.assertEqual(function['return']['second_variable'], '_0x621')
def test_get_args(self):
args = get_args(self.tree.body[0])
self.assertEqual(args['param_1']['new_name'], '_0x2a1')
self.assertEqual(args['param_2']['new_name'], '_0x2a2')
def test_get_args_none(self):
tree = ast.parse('def FirstFunction():\n\tpass')
args = get_args(tree.body[0])
self.assertTrue(len(args) == 0)
def test_get_variables(self):
variables = get_variables(self.tree.body[0])
self.assertEqual(variables['first_variable']['new_name'], '_0x5cd')
self.assertEqual(variables['second_variable']['new_name'], '_0x621')
def test_get_variables_none(self):
tree = ast.parse('def FirstFunction():\n\tpass')
variables = get_variables(tree.body[0])
self.assertTrue(len(variables) == 0)
def test_get_return(self):
return_ = get_return(self.tree.body[0])
self.assertEqual(return_['second_variable'], '_0x621')
def test_get_return_none(self):
tree = ast.parse('def a_function():\n\tpass')
return_ = get_return(tree.body[0])
self.assertEqual(len(return_), 0)
def test_handle_global_scope(self):
tree = ast.parse('def global_function():\n\tpass')
add_parents(tree)
tracker = DefinitionTracker.get_instance()
handled_node = handle_global_scope(tree.body[0], tracker)
self.assertTrue('global_function' in tracker.definitions['functions'])
def test_handle_global_scope_outside(self):
tree = ast.parse(
'def global_function():\n\tdef function_function():\n\t\tpass')
add_parents(tree)
tracker = DefinitionTracker.get_instance()
handled_node = handle_global_scope(tree.body[0].body[0], tracker)
self.assertEqual(len(tracker.definitions['functions']), 0)
def test_handle_class_scope(self):
tree = ast.parse('class SomeClass:\n\tdef some_method():\n\t\tpass')
add_parents(tree)
tracker = DefinitionTracker.get_instance()
handled_node = handle_class_scope(tree.body[0].body[0], tracker)
self.assertTrue(
len(tracker.definitions['classes']['SomeClass']['methods']), 1)
def test_handle_class_scope_outside(self):
tree = ast.parse(
'def some_function():\n\tpass\nclass SomeClass:\n\tpass')
add_parents(tree)
tracker = DefinitionTracker.get_instance()
handled_node = handle_class_scope(tree.body[0], tracker)
self.assertTrue(
'some_function' not in tracker.definitions['classes']['SomeClass']['methods'])
def test_handle_function_scope(self):
tree = ast.parse(
'def global_function():\n\tdef function_function():\n\t\tpass')
add_parents(tree)
tracker = DefinitionTracker.get_instance()
handled_node = handle_function_scope(tree.body[0].body[0], tracker)
pass
def test_handle_function_scope_outside(self):
pass
| 42.79646
| 113
| 0.662531
|
f9468a2ad3928536f2c2c43536236706b8ea7a3a
| 1,477
|
py
|
Python
|
pcat2py/class/20c0e630-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/20c0e630-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/20c0e630-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
################################################################################
# 20c0e630-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20c0e630-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKCU:\Software\Policies\Microsoft\Office\14.0\powerpoint\options', 'DefaultFormat')
# Output Lines
self.output = [r'HKCU:\Software\Policies\Microsoft\Office\14.0\powerpoint\options', ('DefaultFormat=' + str(dword))]
if dword == 27:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\14.0'")
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\14.0\powerpoint'")
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\14.0\powerpoint\options'")
cli.powershell(r"Set-ItemProperty -path 'HKCU:\Software\Policies\Microsoft\Office\14.0\powerpoint\options' -name 'DefaultFormat' -value 27 -Type DWord")
| 38.868421
| 160
| 0.606635
|
7b2377da8aa227ef89f8505e063750c4d250b58b
| 2,125
|
py
|
Python
|
templates/zerotier_client/zerotier_client_test.py
|
threefoldtech/0-templates
|
4106bb3d4d1de305557bf4748a7d77ffeb302abb
|
[
"Apache-2.0"
] | 1
|
2019-01-20T17:50:53.000Z
|
2019-01-20T17:50:53.000Z
|
templates/zerotier_client/zerotier_client_test.py
|
threefoldtech/0-templates
|
4106bb3d4d1de305557bf4748a7d77ffeb302abb
|
[
"Apache-2.0"
] | 192
|
2018-08-01T13:31:16.000Z
|
2020-05-29T09:41:06.000Z
|
templates/zerotier_client/zerotier_client_test.py
|
threefoldtech/0-templates
|
4106bb3d4d1de305557bf4748a7d77ffeb302abb
|
[
"Apache-2.0"
] | 1
|
2018-08-09T12:30:52.000Z
|
2018-08-09T12:30:52.000Z
|
from unittest.mock import MagicMock, patch
import os
import pytest
from zerotier_client import ZerotierClient
from JumpscaleZrobot.test.utils import ZrobotBaseTest
class TestZerotierClientTemplate(ZrobotBaseTest):
@classmethod
def setUpClass(cls):
super().preTest(os.path.dirname(__file__), ZerotierClient)
def setUp(self):
self.list = patch('jumpscale.j.clients.zerotier.list', MagicMock(return_value=[])).start()
self.get = patch('jumpscale.j.clients.zerotier.get', MagicMock()).start()
def tearDown(self):
patch.stopall()
def test_create_data_none(self):
with pytest.raises(ValueError, message='template should fail to instantiate if data is None'):
ZerotierClient(name="zttest", data=None)
def test_create_data_no_token(self):
with pytest.raises(ValueError, message="template should fail to instantiate if data doesn't contain 'token'"):
ZerotierClient(name="zttest", data={'foo': 'bar'})
with pytest.raises(ValueError, message="template should fail to instantiate if data doesn't contain 'token'"):
ZerotierClient(name="zttest", data={'token': ''})
def test_create(self):
get = patch('jumpscale.j.clients.zerotier.get', MagicMock()).start()
data = {'token': 'foo'}
ZerotierClient(name="zttest", data=data)
self.list.assert_called_with()
get.assert_called_with("zttest", data={'token_': data['token']})
def test_create_already_exists(self):
patch('jumpscale.j.clients.zerotier.list', MagicMock(return_value=['zttest'])).start()
ZerotierClient(name='zttest', data={'token': 'foo'})
assert self.get.called is False
def test_uninstall(self):
uninstall = patch('jumpscale.j.clients.zerotier.delete', MagicMock()).start()
service = ZerotierClient(name='zttest', data={'token': 'foo'})
service.uninstall()
uninstall.assert_called_once_with('zttest')
def test_token(self):
service = ZerotierClient(name='zttest', data={'token': 'foo'})
assert service.token() == 'foo'
| 37.280702
| 118
| 0.676235
|
ce3194dccea8e81de00212a837d91672139f4eab
| 280
|
py
|
Python
|
tests/conftest.py
|
k-sriram/rotins
|
fbb398bf5a46b5869081bce149afdf2cfd5bc2dd
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
k-sriram/rotins
|
fbb398bf5a46b5869081bce149afdf2cfd5bc2dd
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
k-sriram/rotins
|
fbb398bf5a46b5869081bce149afdf2cfd5bc2dd
|
[
"MIT"
] | null | null | null |
import pytest
def pytest_addoption(parser):
parser.addoption(
"--makeplots",
action="store_true",
help="make diagnostic plots for manual inspection.",
)
@pytest.fixture
def makeplots(request):
return request.config.getoption("--makeplots")
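# With this fixture in place, a test can opt in from the command line, e.g.:
#     pytest tests/ --makeplots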
| 18.666667
| 60
| 0.671429
|
8dd30e34d94360c272ea6abf62b78413ec4e81b0
| 2,766
|
py
|
Python
|
lldb/test/API/macosx/find-dsym/deep-bundle/TestDeepBundle.py
|
AnthonyLatsis/llvm-project
|
2acd6cdb9a4bfb2c34b701527e04dd4ffe791d74
|
[
"Apache-2.0"
] | 34
|
2020-01-31T17:50:00.000Z
|
2022-02-16T20:19:29.000Z
|
lldb/test/API/macosx/find-dsym/deep-bundle/TestDeepBundle.py
|
coolstar/llvm-project
|
e21ccdd5b5667de50de65ee8903a89a21020e89a
|
[
"Apache-2.0"
] | 14
|
2020-02-03T23:39:51.000Z
|
2021-07-20T16:24:25.000Z
|
lldb/test/API/macosx/find-dsym/deep-bundle/TestDeepBundle.py
|
coolstar/llvm-project
|
e21ccdd5b5667de50de65ee8903a89a21020e89a
|
[
"Apache-2.0"
] | 6
|
2021-02-08T16:57:07.000Z
|
2022-01-13T11:32:34.000Z
|
"""Test that a dSYM can be found when a binary is in a deep bundle with multiple pathname components."""
import unittest
from time import sleep
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
exe_name = 'deep-bundle' # must match Makefile
class DeepBundleTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipIfRemote
@skipUnlessDarwin
# This test is explicitly a dSYM test, it doesn't need to run for any other config, but
# the following doesn't work, fixme.
# @skipIf(debug_info=no_match(["dsym"]), bugnumber="This test is looking explicitly for a dSYM")
def setUp(self):
TestBase.setUp(self)
self.source = 'main.c'
def tearDown(self):
# Destroy process before TestBase.tearDown()
self.dbg.GetSelectedTarget().GetProcess().Destroy()
# Call super's tearDown().
TestBase.tearDown(self)
def test_attach_and_check_dsyms(self):
"""Test attach to binary, see if the framework dSYM is found"""
exe = self.getBuildArtifact(exe_name)
self.build()
popen = self.spawnSubprocess(exe, [self.getBuildDir()])
self.addTearDownHook(self.cleanupSubprocesses)
# Give the inferior time to start up, dlopen a bundle, remove the bundle it linked in
sleep(5)
# Since the library that was dlopen()'ed is now removed, lldb will need to find the
# binary & dSYM via target.exec-search-paths
settings_str = "settings set target.exec-search-paths " + self.get_process_working_directory() + "/hide.app"
self.runCmd(settings_str)
self.runCmd("process attach -p " + str(popen.pid))
target = self.dbg.GetSelectedTarget()
self.assertTrue(target.IsValid(), 'Should have a valid Target after attaching to process')
setup_complete = target.FindFirstGlobalVariable("setup_is_complete")
self.assertEquals(setup_complete.GetValueAsUnsigned(), 1, 'Check that inferior process has completed setup')
# Find the bundle module, see if we found the dSYM too (they're both in "hide.app")
i = 0
found_module = False
while i < target.GetNumModules():
mod = target.GetModuleAtIndex(i)
if mod.GetFileSpec().GetFilename() == 'MyFramework':
found_module = True
dsym_name = mod.GetSymbolFileSpec().GetFilename()
self.assertTrue (dsym_name == 'MyFramework', "Check that we found the dSYM for the bundle that was loaded")
i=i+1
self.assertTrue(found_module, "Check that we found the framework loaded in lldb's image list")
if __name__ == '__main__':
unittest.main()
| 37.890411
| 123
| 0.67462
|
57e29633d6b75c8d369510dd6155c5eac34c5bf5
| 1,260
|
py
|
Python
|
pirates/piratesbase/TODDefs.py
|
ksmit799/POTCO-PS
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 8
|
2017-01-24T04:33:29.000Z
|
2020-11-01T08:36:24.000Z
|
pirates/piratesbase/TODDefs.py
|
ksmit799/Pirates-Online-Remake
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 1
|
2017-03-02T18:05:17.000Z
|
2017-03-14T06:47:10.000Z
|
pirates/piratesbase/TODDefs.py
|
ksmit799/Pirates-Online-Remake
|
520d38935ae8df4b452c733a82c94dddac01e275
|
[
"Apache-2.0"
] | 11
|
2017-03-02T18:46:07.000Z
|
2020-11-01T08:36:26.000Z
|
# File: T (Python 2.4)
SKY_OFF = 0
SKY_LAST = 1
SKY_DAWN = 2
SKY_DAY = 3
SKY_DUSK = 4
SKY_NIGHT = 5
SKY_STARS = 6
SKY_HALLOWEEN = 7
SKY_SWAMP = 8
SKY_INVASION = 9
SKY_OVERCAST = 10
SKY_OVERCASTNIGHT = 11
SKY_CODES = {
SKY_OFF: 'SKY_OFF',
SKY_LAST: 'SKY_LAST',
SKY_DAWN: 'SKY_DAWN',
SKY_DAY: 'SKY_DAY',
SKY_DUSK: 'SKY_DUSK',
SKY_NIGHT: 'SKY_NIGHT',
SKY_STARS: 'SKY_STARS',
SKY_HALLOWEEN: 'SKY_HALLOWEEN',
SKY_SWAMP: 'SKY_SWAMP',
SKY_OVERCAST: 'SKY_OVERCAST',
SKY_OVERCASTNIGHT: 'SKY_OVERCASTNIGHT',
SKY_INVASION: 'SKY_INVASION' }
TOD_ALL_CYCLE = 0
TOD_REGULAR_CYCLE = 1
TOD_HALLOWEEN_CYCLE = 2
TOD_JOLLYCURSE_CYCLE = 3
TOD_JOLLYINVASION_CYCLE = 4
TOD_NORMAL2JOLLY_CYCLE = 5
TOD_JOLLY2NIGHT_CYCLE = 6
TOD_VALENTINE_CYCLE = 7
ENV_DEFAULT = 0
ENV_OFF = 1
ENV_OPENSKY = 2
ENV_FOREST = 3
ENV_SWAMP = 4
ENV_CAVE = 5
ENV_LAVACAVE = 6
ENV_INTERIOR = 7
ENV_AVATARCHOOSER = 8
ENV_SAILING = 9
ENV_CANNONGAME = 10
ENV_CLOUDY = 11
ENV_INVASION = 12
ENV_HALLOWEEN = 13
ENV_VALENTINES = 14
ENV_CURSED_NIGHT = 15
ENV_EVER_NIGHT = 16
ENV_NO_HOLIDAY = 17
ENV_SAINT_PATRICKS = 18
ENV_DATAFILE = 255
FOG_OFF = 0
FOG_EXP = 1
FOG_LINEAR = 2
FOG_CODES = {
FOG_OFF: 'FOG_OFF',
FOG_EXP: 'FOG_EXP',
FOG_LINEAR: 'FOG_LINEAR' }
| 20
| 43
| 0.72619
|
2444014ac9779ce809ca9f0f3a9b0792dc7bad67
| 168,589
|
py
|
Python
|
pandas/core/index.py
|
ichuang/pandas
|
b31574a9ec6bccee427ed08d5b5b995cc6753439
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
pandas/core/index.py
|
ichuang/pandas
|
b31574a9ec6bccee427ed08d5b5b995cc6753439
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
pandas/core/index.py
|
ichuang/pandas
|
b31574a9ec6bccee427ed08d5b5b995cc6753439
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# pylint: disable=E1101,E1103,W0232
import datetime
import warnings
import operator
from functools import partial
from pandas.compat import range, zip, lrange, lzip, u, reduce, filter, map
from pandas import compat
import numpy as np
from sys import getsizeof
import pandas.tslib as tslib
import pandas.lib as lib
import pandas.algos as _algos
import pandas.index as _index
from pandas.lib import Timestamp, Timedelta, is_datetime_array
from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin, _shared_docs
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate)
from pandas.core.common import isnull, array_equivalent
import pandas.core.common as com
from pandas.core.common import (_values_from_object, is_float, is_integer,
ABCSeries, _ensure_object, _ensure_int64, is_bool_indexer,
is_list_like, is_bool_dtype, is_null_slice, is_integer_dtype)
from pandas.core.config import get_option
from pandas.io.common import PerformanceWarning
# simplify
default_pprint = lambda x: com.pprint_thing(x, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
duplicated='np.array')
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _indexOp(opname):
"""
Wrapper function for index comparison operations, to avoid
code duplication.
"""
def wrapper(self, other):
func = getattr(self._data.view(np.ndarray), opname)
result = func(np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except: # pragma: no cover
return result
return wrapper
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't have arguments
and breaks __new__ """
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_groupby = _algos.groupby_object
_arrmap = _algos.arrmap_object
_left_indexer_unique = _algos.left_join_indexer_unique_object
_left_indexer = _algos.left_join_indexer_object
_inner_indexer = _algos.inner_join_indexer_object
_outer_indexer = _algos.outer_join_indexer_object
_box_scalars = False
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_allow_index_ops = True
_allow_datetime_index_ops = False
_allow_period_index_ops = False
_is_numeric_dtype = False
_engine_type = _index.ObjectEngine
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
tupleize_cols=True, **kwargs):
# no class inference!
if fastpath:
return cls._simple_new(data, name)
from pandas.tseries.period import PeriodIndex
if isinstance(data, (np.ndarray, Index, ABCSeries)):
if issubclass(data.dtype.type, np.datetime64):
from pandas.tseries.index import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif issubclass(data.dtype.type, np.timedelta64):
from pandas.tseries.tdi import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
data = np.array(data, dtype=dtype, copy=copy)
except TypeError:
pass
# maybe coerce to a sub-class
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if issubclass(data.dtype.type, np.integer):
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.floating):
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or np.isscalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and isinstance(data, list) and data:
try:
sorted(data)
has_mixed_types = False
except (TypeError, UnicodeDecodeError):
has_mixed_types = True # python3 only
if isinstance(data[0], tuple) and not has_mixed_types:
try:
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
except (TypeError, KeyError):
pass # python2 - MultiIndex fails on mixed types
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
return Int64Index(subarr.astype('i8'), copy=copy, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'boolean':
                # don't support boolean explicitly ATM
pass
elif inferred != 'string':
if (inferred.startswith('datetime') or
tslib.is_timestamp_array(subarr)):
from pandas.tseries.index import DatetimeIndex
return DatetimeIndex(subarr, copy=copy, name=name, **kwargs)
elif (inferred.startswith('timedelta') or
lib.is_timedelta_array(subarr)):
from pandas.tseries.tdi import TimedeltaIndex
return TimedeltaIndex(subarr, copy=copy, name=name, **kwargs)
elif inferred == 'period':
return PeriodIndex(subarr, name=name, **kwargs)
return cls._simple_new(subarr, name)
@classmethod
def _simple_new(cls, values, name=None, **kwargs):
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result,k,v)
result._reset_identity()
return result
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(other, '_id', Ellipsis)
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, result=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
return self._shallow_copy(result)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
def get_values(self):
""" return the underlying data as an ndarray """
return self.values
def _array_values(self):
return self._data
# ops compat
def tolist(self):
"""
return a list of the Index values
"""
return list(self.values)
def repeat(self, n):
"""
return a new Index of the values repeated n times
See also
--------
numpy.ndarray.repeat
"""
return self._shallow_copy(self.values.repeat(n))
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self.values.ravel(order=order)
# construction helpers
@classmethod
def _scalar_data_error(cls, data):
raise TypeError(
'{0}(...) must be called with a collection of some kind, {1} was '
'passed'.format(cls.__name__, repr(data))
)
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays."""
if not isinstance(data, (np.ndarray, Index)):
if data is None or np.isscalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return dict([ (k,getattr(self,k,None)) for k in self._attributes])
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls,'_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _shallow_copy(self, values=None, **kwargs):
""" create a new Index, don't copy the data, use the same object attributes
with passed in attributes taking precedence """
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self.__class__._simple_new(values,**attributes)
def copy(self, names=None, name=None, dtype=None, deep=False):
"""
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
if deep:
from copy import deepcopy
new_index = self._shallow_copy(self._data.copy())
name = name or deepcopy(self.name)
else:
new_index = self._shallow_copy()
name = self.name
if name is not None:
names = [name]
if names:
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
__copy__ = copy
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
prepr = com.pprint_thing(self, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
def to_series(self, **kwargs):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
return Series(self._to_embed(), index=self, name=self.name)
def _to_embed(self, keep_tz=False):
"""
return an array repr of this object, potentially casting to object
This is for internal compat
"""
return self.values
def astype(self, dtype):
return Index(self.values.astype(dtype), name=self.name,
dtype=dtype)
def to_datetime(self, dayfirst=False):
"""
For an Index containing strings or datetime.datetime objects, attempt
conversion to DatetimeIndex
"""
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'string':
from dateutil.parser import parse
parser = lambda x: parse(x, dayfirst=dayfirst)
parsed = lib.try_parse_dates(self.values, parser=parser)
return DatetimeIndex(parsed)
else:
return DatetimeIndex(self.values)
def _assert_can_do_setop(self, other):
return True
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name,))
def _set_names(self, values, level=None):
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d'
% len(values))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int or level name, or sequence of int / level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels)
Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
if level is not None and self.nlevels == 1:
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def summary(self, name=None):
if len(self) > 0:
head = self[0]
if hasattr(head, 'format') and\
not isinstance(head, compat.string_types):
head = head.format()
tail = self[-1]
if hasattr(tail, 'format') and\
not isinstance(tail, compat.string_types):
tail = tail.format()
index_summary = ', %s to %s' % (com.pprint_thing(head),
com.pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
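    # Illustrative sketch (hypothetical labels, not from the original
    # source): ``summary`` renders a one-line description of the index.
    #
    #   >>> print(Index(['a', 'b', 'c']).summary())
    #   Index: 3 entries, a to c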
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self._engine.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
return self._engine.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly(allow_setting=True)
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return self.dtype == np.object_
def is_mixed(self):
return 'mixed' in self.inferred_type
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
def _convert_scalar_indexer(self, key, typ=None):
""" convert a scalar indexer, right now we are converting
floats -> ints if the index supports it
"""
def to_int():
ikey = int(key)
if ikey != key:
return self._convert_indexer_error(key, 'label')
return ikey
if typ == 'iloc':
if is_integer(key):
return key
elif is_float(key):
key = to_int()
warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
type(self).__name__),FutureWarning)
return key
return self._convert_indexer_error(key, 'label')
if is_float(key):
if not self.is_floating():
warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
type(self).__name__),FutureWarning)
return to_int()
return key
def _validate_slicer(self, key, f):
""" validate and raise if needed on a slice indexers according to the
passed in function """
for c in ['start','stop','step']:
if not f(getattr(key,c)):
self._convert_indexer_error(key.start, 'slice {0} value'.format(c))
def _convert_slice_indexer_getitem(self, key, is_index_slice=False):
""" called from the getitem slicers, determine how to treat the key
whether positional or not """
if self.is_integer() or is_index_slice:
return key
return self._convert_slice_indexer(key)
def _convert_slice_indexer(self, key, typ=None):
""" convert a slice indexer. disallow floats in the start/stop/step """
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if typ == 'iloc':
# need to coerce to_int if needed
def f(c):
v = getattr(key,c)
if v is None or is_integer(v):
return v
# warn if its a convertible float
if v == int(v):
warnings.warn("slice indexers when using iloc should be integers "
"and not floating point",FutureWarning)
return int(v)
self._convert_indexer_error(v, 'slice {0} value'.format(c))
return slice(*[ f(c) for c in ['start','stop','step']])
# validate slicers
def validate(v):
if v is None or is_integer(v):
return True
            # disallow floats
elif is_float(v):
return False
return True
self._validate_slicer(key, validate)
# figure out if this is a positional indexer
start, stop, step = key.start, key.stop, key.step
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if typ == 'getitem':
return self._convert_slice_indexer_getitem(
key, is_index_slice=is_index_slice)
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
if start is not None:
i = self.get_loc(start)
if stop is not None:
j = self.get_loc(stop)
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_list_indexer(self, key, typ=None):
""" convert a list indexer. these should be locations """
return key
def _convert_list_indexer_for_mixed(self, keyarr, typ=None):
""" passed a key that is tuplesafe that is integer based
and we have a mixed index (e.g. number/labels). figure out
the indexer. return None if we can't help
"""
if (typ is None or typ in ['iloc','ix']) and (is_integer_dtype(keyarr) and not self.is_floating()):
if self.inferred_type != 'integer':
keyarr = np.where(keyarr < 0,
len(self) + keyarr, keyarr)
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
from pandas.core.indexing import _maybe_convert_indices
return _maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
return keyarr
return None
def _convert_indexer_error(self, key, msg=None):
if msg is None:
msg = 'label'
raise TypeError("the {0} [{1}] is not a proper indexer for this index "
"type ({2})".format(msg, key, self.__class__.__name__))
def get_duplicates(self):
from collections import defaultdict
counter = defaultdict(lambda: 0)
for k in self.values:
counter[k] += 1
return sorted(k for k, v in compat.iteritems(counter) if v > 1)
_get_duplicates = get_duplicates
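    # Illustrative sketch (hypothetical values, not from the original
    # source): labels appearing more than once are returned sorted.
    #
    #   >>> Index([1, 2, 2, 3, 3, 3]).get_duplicates()
    #   [2, 3]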
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self.values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level,))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)'
% (level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def is_type_compatible(self, typ):
return typ == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __iter__(self):
return iter(self.values)
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __deepcopy__(self, memo={}):
return self.copy(deep=True)
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
def __contains__(self, key):
hash(key)
# work around some kind of odd cython bug
try:
return key in self._engine
except TypeError:
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Indexes does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
        (ndarrays only support ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if np.isscalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if is_bool_indexer(key):
key = np.asarray(key)
key = _values_from_object(key)
result = getitem(key)
if not np.isscalar(result):
return promote(result)
else:
return result
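    # Illustrative sketch (hypothetical labels, not from the original
    # source): scalar keys return an element, while list and boolean keys
    # return a new Index.
    #
    #   >>> idx = Index(['a', 'b', 'c'])
    #   >>> idx[1]
    #   'b'
    #   >>> idx[[True, False, True]].tolist()
    #   ['a', 'c']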
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
to_concat = [x.values if isinstance(x, Index) else x
for x in to_concat]
return Index(np.concatenate(to_concat), name=name)
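    # Illustrative sketch (hypothetical values, not from the original
    # source): appending concatenates the label arrays.
    #
    #   >>> Index([1, 2]).append(Index([3, 4])).tolist()
    #   [1, 2, 3, 4]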
@staticmethod
def _ensure_compat_concat(indexes):
from pandas.tseries.api import DatetimeIndex, PeriodIndex, TimedeltaIndex
klasses = DatetimeIndex, PeriodIndex, TimedeltaIndex
is_ts = [isinstance(idx, klasses) for idx in indexes]
if any(is_ts) and not all(is_ts):
return [_maybe_box(idx) for idx in indexes]
return indexes
def take(self, indexer, axis=0):
"""
return a new Index of the values selected by the indexer
See also
--------
numpy.ndarray.take
"""
indexer = com._ensure_platform_int(indexer)
taken = np.array(self).take(indexer)
        # by definition cannot propagate freq
return self._shallow_copy(taken, freq=None)
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
np.putmask(values, mask, value)
return self._shallow_copy(values)
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(com.pprint_thing(self.name,
escape_chars=('\t', '\r', '\n'))
if self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.core.format import format_array
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values, safe=1)
if values.dtype == np.object_:
result = [com.pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isnull(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
""" slice and dice then format """
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', **kwargs):
""" actually format my specific types """
mask = isnull(self)
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values.tolist()
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if type(other) != Index:
return other.equals(self)
return array_equivalent(_values_from_object(self), _values_from_object(other))
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found
"""
if isinstance(label, (Index, ABCSeries, np.ndarray)):
raise TypeError('%s' % type(label))
if not isinstance(label, Timestamp):
label = Timestamp(label)
if label not in self:
loc = self.searchsorted(label, side='left')
if loc > 0:
return self[loc - 1]
else:
return np.nan
return label
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
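    # Illustrative sketch (hypothetical values, not from the original
    # source): ``order`` returns a sorted copy and, optionally, the
    # argsort indexer.
    #
    #   >>> Index([3, 1, 2]).order().tolist()
    #   [1, 2, 3]
    #   >>> Index([3, 1, 2]).order(return_indexer=True)[1].tolist()
    #   [1, 2, 0]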
def sort(self, *args, **kwargs):
raise TypeError('Cannot sort an %r object' % self.__class__.__name__)
def shift(self, periods=1, freq=None):
"""
Shift Index containing datetime objects by input number of periods and
DateOffset
Returns
-------
shifted : Index
"""
if periods == 0:
# OK because immutable
return self
offset = periods * freq
return Index([idx + offset for idx in self], name=self.name)
def argsort(self, *args, **kwargs):
"""
return an ndarray indexer of the underlying data
See also
--------
numpy.ndarray.argsort
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
if isinstance(other, Index):
warnings.warn("using '+' to provide set union with Indexes is deprecated, "
"use '|' or .union()",FutureWarning)
return self.union(other)
return Index(np.array(self) + other)
__iadd__ = __add__
def __sub__(self, other):
if isinstance(other, Index):
warnings.warn("using '-' to provide set differences with Indexes is deprecated, "
"use .difference()",FutureWarning)
return self.difference(other)
__eq__ = _indexOp('__eq__')
__ne__ = _indexOp('__ne__')
__lt__ = _indexOp('__lt__')
__gt__ = _indexOp('__gt__')
__le__ = _indexOp('__le__')
__ge__ = _indexOp('__ge__')
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.sym_diff(other)
def union(self, other):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable.')
if len(other) == 0 or self.equals(other):
return self
if len(self) == 0:
return _ensure_index(other)
self._assert_can_do_setop(other)
if self.dtype != other.dtype:
this = self.astype('O')
other = other.astype('O')
return this.union(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(self.values, other.values)[0]
except TypeError:
# incomparable objects
result = list(self.values)
# worth making this faster? a very unusual case
value_set = set(self.values)
result.extend([x for x in other.values if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = com.take_nd(other.values, indexer,
allow_fill=False)
result = com._concat_compat((self.values, other_diff))
try:
self.values[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = self.values
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning)
# for subclasses
return self._wrap_union_result(other, result)
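    # Illustrative sketch (hypothetical values, not from the original
    # source): for comparable, monotonic inputs the union comes back
    # sorted.
    #
    #   >>> Index([1, 2, 3]).union(Index([2, 3, 4])).tolist()
    #   [1, 2, 3, 4]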
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(data=result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects. Sortedness of the result is
not guaranteed
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable!')
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self
if self.dtype != other.dtype:
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(self.values, other.values)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = self.get_indexer(other.values)
indexer = indexer.take((indexer != -1).nonzero()[0])
except:
# duplicates
indexer = self.get_indexer_non_unique(other.values)[0].unique()
indexer = indexer[indexer != -1]
taken = self.take(indexer)
if self.name != other.name:
taken.name = None
return taken
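    # Illustrative sketch (hypothetical values, not from the original
    # source):
    #
    #   >>> Index([1, 2, 3]).intersection(Index([2, 3, 4])).tolist()
    #   [2, 3]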
def difference(self, other):
"""
Compute sorted set difference of two Index objects
Parameters
----------
other : Index or array-like
Returns
-------
diff : Index
        Examples
        --------
        >>> index.difference(index2)
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable!')
if self.equals(other):
return Index([], name=self.name)
if not isinstance(other, Index):
other = np.asarray(other)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
theDiff = sorted(set(self) - set(other))
return Index(theDiff, name=result_name)
diff = deprecate('diff',difference)
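    # Illustrative sketch (hypothetical values, not from the original
    # source): elements of ``self`` not present in ``other``, sorted.
    #
    #   >>> Index([1, 2, 3, 4]).difference(Index([3, 4, 5])).tolist()
    #   [1, 2]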
def sym_diff(self, other, result_name=None):
"""
Compute the sorted symmetric difference of two Index objects.
Parameters
----------
other : array-like
result_name : str
Returns
-------
sym_diff : Index
Notes
-----
``sym_diff`` contains elements that appear in either ``idx1`` or
``idx2`` but not both. Equivalent to the Index created by
``(idx1 - idx2) + (idx2 - idx1)`` with duplicates dropped.
The sorting of a result containing ``NaN`` values is not guaranteed
across Python versions. See GitHub issue #6444.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.sym_diff(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
if not hasattr(other, '__iter__'):
raise TypeError('Input must be iterable!')
if not isinstance(other, Index):
other = Index(other)
result_name = result_name or self.name
the_diff = sorted(set((self.difference(other)).union(other.difference(self))))
return Index(the_diff, name=result_name)
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
return self._engine.get_loc(_values_from_object(key))
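    # Illustrative sketch (hypothetical labels, not from the original
    # source): with a unique index ``get_loc`` returns a plain integer.
    #
    #   >>> Index(['a', 'b', 'c']).get_loc('b')
    #   1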
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = _values_from_object(series)
k = _values_from_object(key)
# prevent integer truncation bug in indexing
if is_float(k) and not self.is_floating():
raise KeyError
try:
return self._engine.get_value(s, k)
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer','boolean']:
raise
try:
return tslib.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if com.is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if np.isscalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(
_values_from_object(arr), _values_from_object(key), value)
def get_level_values(self, level):
"""
Return vector of label values for requested level, equal to the length
of the index
Parameters
----------
level : int
Returns
-------
values : ndarray
"""
# checks that level number is actually just 1
self._validate_index_level(level)
return self
def get_indexer(self, target, method=None, limit=None):
"""
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index. The mask determines whether labels are
found or not in the current index
Parameters
----------
target : Index
method : {'pad', 'ffill', 'backfill', 'bfill'}
pad / ffill: propagate LAST valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
Notes
-----
        This is a low-level method and should be used at your own risk
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
Returns
-------
indexer : ndarray
"""
method = self._get_method(method)
target = _ensure_index(target)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit)
if self.dtype != target.dtype:
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad':
if not self.is_monotonic or not target.is_monotonic:
raise ValueError('Must be monotonic for forward fill')
indexer = self._engine.get_pad_indexer(target.values, limit)
elif method == 'backfill':
if not self.is_monotonic or not target.is_monotonic:
raise ValueError('Must be monotonic for backward fill')
indexer = self._engine.get_backfill_indexer(target.values, limit)
elif method is None:
indexer = self._engine.get_indexer(target.values)
else:
raise ValueError('unrecognized method: %s' % method)
return com._ensure_platform_int(indexer)
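    # Illustrative sketch (hypothetical labels, not from the original
    # source): labels missing from the index are marked with -1.
    #
    #   >>> Index(['a', 'b', 'c']).get_indexer(['b', 'd', 'a']).tolist()
    #   [1, -1, 0]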
def get_indexer_non_unique(self, target, **kwargs):
""" return an indexer suitable for taking from a non unique index
return the labels in the same order as the target, and
return a missing indexer into the target (missing are marked as -1
in the indexer); target must be an iterable """
target = _ensure_index(target)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target.values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return Index(indexer), missing
def get_indexer_for(self, target, **kwargs):
""" guaranteed return of an indexer even when non-unique """
if self.is_unique:
return self.get_indexer(target, **kwargs)
return self.get_indexer_non_unique(target, **kwargs)[0]
def _possibly_promote(self, other):
# A hack, but it works
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if self.dtype != 'object':
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, to_groupby):
"""
Group the index labels by a given array of values.
Parameters
----------
to_groupby : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
"""
return self._groupby(self.values, _values_from_object(to_groupby))
def map(self, mapper):
return self._arrmap(self.values, mapper)
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values
Parameters
----------
values : set or sequence of values
Sought values.
level : str or int, optional
Name or position of the index level to use (if the index is a
MultiIndex).
Notes
-----
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Returns
-------
is_contained : ndarray (boolean dtype)
"""
value_set = set(values)
if level is not None:
self._validate_index_level(level)
return lib.ismember(self._array_values(), value_set)
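    # Illustrative sketch (hypothetical values, not from the original
    # source): returns a boolean array aligned with the index.
    #
    #   >>> Index([1, 2, 3]).isin([1, 4]).tolist()
    #   [True, False, False]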
def _get_method(self, method):
if method:
method = method.lower()
aliases = {
'ffill': 'pad',
'bfill': 'backfill'
}
return aliases.get(method, method)
def reindex(self, target, method=None, level=None, limit=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(np.empty(0, dtype=self.dtype), **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
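    # Illustrative sketch (hypothetical labels, not from the original
    # source): the new index mirrors the target and the indexer marks
    # missing labels with -1.
    #
    #   >>> target, indexer = Index(['a', 'b', 'c']).reindex(['b', 'd'])
    #   >>> target.tolist(), indexer.tolist()
    #   (['b', 'd'], [1, -1])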
def join(self, other, how='left', level=None, return_indexers=False):
"""
Internal API method. Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
Returns
-------
join_index, (left_indexer, right_indexer)
"""
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if (level is None and (self_is_mi or other_is_mi)):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how, return_indexers=return_indexers)
# join on the level
if (level is not None and (self_is_mi or other_is_mi)):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if self.dtype != other.dtype:
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how,
return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
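    # Illustrative sketch (hypothetical values, not from the original
    # source): for unique, monotonic inputs the fast monotonic path is
    # taken.
    #
    #   >>> Index([1, 2, 3]).join(Index([2, 3, 4]), how='inner').tolist()
    #   [2, 3]
    #   >>> Index([1, 2, 3]).join(Index([2, 3, 4]), how='outer').tolist()
    #   [1, 2, 3, 4]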
def _join_multi(self, other, how, return_indexers=True):
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = [ n for n in self.names if n is not None ]
other_names = [ n for n in other.names if n is not None ]
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level overlap on a multi-index is not implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.tools.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self.values], [other.values],
how=how, sort=True)
left_idx = com._ensure_platform_int(left_idx)
right_idx = com._ensure_platform_int(right_idx)
join_index = self.values.take(left_idx)
mask = left_idx == -1
np.putmask(join_index, mask, other.values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left',
return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from pandas.algos import groupsort_indexer
def _get_leaf_sorter(labels):
'''
            returns sorter for the innermost level while preserving the
order of higher levels
'''
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = com._ensure_int64(labels[0])
sorter, _ = groupsort_indexer(lab, 1 + lab.max())
return sorter
            # find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = com._ensure_int64(labels[-1])
return lib.get_level_sorter(lab, com._ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = com._ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = com.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left))
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = groupsort_indexer(new_lev_labels,
ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels,
labels=new_labels,
names=left.names,
verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = com.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self.values
ov = other.values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
                join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def slice_indexer(self, start=None, end=None, step=None):
"""
For an ordered Index, compute the slice indexer for input labels and
step
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
Returns
-------
indexer : ndarray or slice
Notes
-----
This function assumes that the data is sorted, so use at your own peril
"""
start_slice, end_slice = self.slice_locs(start, end, step=step)
# return a slice
if not lib.isscalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not lib.isscalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_slice_bound(self, label, side):
"""
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
Notes
-----
Value of `side` parameter should be validated in caller.
"""
return label
def get_slice_bound(self, label, side):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
"""
if side not in ('left', 'right'):
raise ValueError(
"Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" % (side,))
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side)
try:
slc = self.get_loc(label)
except KeyError:
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(
                    label, side='right' if side == 'left' else 'left')
return len(self) - pos
# In all other cases, just re-raise the KeyError
raise
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
slc = lib.maybe_indices_to_slice(slc.astype('i8'))
if isinstance(slc, np.ndarray):
raise KeyError(
"Cannot get %s slice bound for non-unique label:"
" %r" % (side, original_label))
if isinstance(slc, slice):
if side == 'left':
return slc.start
else:
return slc.stop
else:
if side == 'right':
return slc + 1
else:
return slc
def slice_locs(self, start=None, end=None, step=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
Returns
-------
start, end : int
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left')
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right')
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
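    # Illustrative sketch (hypothetical labels, not from the original
    # source): both bounds are inclusive on the label side, so the right
    # bound returned is one past the matching position.
    #
    #   >>> Index(['a', 'b', 'c', 'd']).slice_locs(start='b', end='c')
    #   (1, 3)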
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return Index(np.delete(self._data, loc), name=self.name)
def insert(self, loc, item):
"""
        Make new Index inserting new item at location. Follows
        Python list.insert semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
_self = np.asarray(self)
item_idx = Index([item], dtype=self.dtype).values
idx = np.concatenate(
(_self[:loc], item_idx, _self[loc:]))
return Index(idx, name=self.name)
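    # Illustrative sketch (hypothetical labels, not from the original
    # source):
    #
    #   >>> Index(['a', 'c']).insert(1, 'b').tolist()
    #   ['a', 'b', 'c']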
def drop(self, labels):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
Returns
-------
dropped : Index
"""
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
raise ValueError('labels %s not contained in axis' % labels[mask])
return self.delete(indexer)
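    # Illustrative sketch (hypothetical labels, not from the original
    # source): dropping a label that is not present raises ValueError.
    #
    #   >>> Index(['a', 'b', 'c']).drop(['b']).tolist()
    #   ['a', 'c']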
@Appender(_shared_docs['drop_duplicates'] % _index_doc_kwargs)
def drop_duplicates(self, take_last=False):
result = super(Index, self).drop_duplicates(take_last=take_last)
return self._constructor(result)
@Appender(_shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, take_last=False):
return super(Index, self).duplicated(take_last=take_last)
def _evaluate_with_timedelta_like(self, other, op, opstr):
raise TypeError("can only perform ops with timedelta like values")
def _evaluate_with_datetime_like(self, other, op, opstr):
raise TypeError("can only perform ops with datetime like values")
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: {typ}".format(name=name,
typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.__mul__ = cls.__rmul__ = _make_invalid_op('__mul__')
cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('__floordiv__')
cls.__truediv__ = cls.__rtruediv__ = _make_invalid_op('__truediv__')
if not compat.PY3:
cls.__div__ = cls.__rdiv__ = _make_invalid_op('__div__')
cls.__neg__ = _make_invalid_op('__neg__')
cls.__pos__ = _make_invalid_op('__pos__')
cls.__abs__ = _make_invalid_op('__abs__')
cls.__inv__ = _make_invalid_op('__inv__')
@classmethod
def _add_numeric_methods(cls):
""" add in numeric methods """
def _make_evaluate_binop(op, opstr, reversed=False):
def _evaluate_numeric_binop(self, other):
import pandas.tseries.offsets as offsets
                # if we are an inheritor of numeric, but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} for type: {typ}".format(opstr=opstr,
typ=type(self)))
if isinstance(other, Index):
if not other._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} with type: {typ}".format(opstr=type(self),
typ=type(other)))
elif isinstance(other, np.ndarray) and not other.ndim:
other = other.item()
if isinstance(other, (Index, ABCSeries, np.ndarray)):
if len(self) != len(other):
raise ValueError("cannot evaluate a numeric op with unequal lengths")
other = _values_from_object(other)
if other.dtype.kind not in ['f','i']:
raise TypeError("cannot evaluate a numeric op with a non-numeric dtype")
elif isinstance(other, (offsets.DateOffset, np.timedelta64, Timedelta, datetime.timedelta)):
return self._evaluate_with_timedelta_like(other, op, opstr)
elif isinstance(other, (Timestamp, np.datetime64)):
return self._evaluate_with_datetime_like(other, op, opstr)
else:
if not (com.is_float(other) or com.is_integer(other)):
raise TypeError("can only perform ops with scalar values")
                # if we are a reversed non-commutative op
values = self.values
if reversed:
values, other = other, values
return self._shallow_copy(op(values, other))
return _evaluate_numeric_binop
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
                # if we are an inheritor of numeric, but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} for type: {typ}".format(opstr=opstr,
typ=type(self)))
return self._shallow_copy(op(self.values))
return _evaluate_numeric_unary
cls.__add__ = cls.__radd__ = _make_evaluate_binop(operator.add,'__add__')
cls.__sub__ = _make_evaluate_binop(operator.sub,'__sub__')
cls.__rsub__ = _make_evaluate_binop(operator.sub,'__sub__',reversed=True)
cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(operator.mul,'__mul__')
cls.__floordiv__ = _make_evaluate_binop(operator.floordiv,'__floordiv__')
cls.__rfloordiv__ = _make_evaluate_binop(operator.floordiv,'__floordiv__',reversed=True)
cls.__truediv__ = _make_evaluate_binop(operator.truediv,'__truediv__')
cls.__rtruediv__ = _make_evaluate_binop(operator.truediv,'__truediv__',reversed=True)
if not compat.PY3:
cls.__div__ = _make_evaluate_binop(operator.div,'__div__')
cls.__rdiv__ = _make_evaluate_binop(operator.div,'__div__',reversed=True)
cls.__neg__ = _make_evaluate_unary(lambda x: -x,'__neg__')
cls.__pos__ = _make_evaluate_unary(lambda x: x,'__pos__')
cls.__abs__ = _make_evaluate_unary(lambda x: np.abs(x),'__abs__')
cls.__inv__ = _make_evaluate_unary(lambda x: -x,'__inv__')
@classmethod
def _add_logical_methods(cls):
""" add in logical methods """
_doc = """
%(desc)s
Parameters
----------
All arguments to numpy.%(outname)s are accepted.
Returns
-------
%(outname)s : bool or array_like (if axis is specified)
A single element array_like may be converted to bool."""
def _make_logical_function(name, desc, f):
@Substitution(outname=name, desc=desc)
@Appender(_doc)
def logical_func(self, *args, **kwargs):
result = f(self.values)
if isinstance(result, (np.ndarray, com.ABCSeries, Index)) \
and result.ndim == 0:
# return NumPy type
return result.dtype.type(result.item())
else: # pragma: no cover
return result
logical_func.__name__ = name
return logical_func
cls.all = _make_logical_function(
'all', 'Return whether all elements are True', np.all)
cls.any = _make_logical_function(
'any', 'Return whether any element is True', np.any)
@classmethod
def _add_logical_methods_disabled(cls):
""" add in logical methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: {typ}".format(name=name,
typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.all = _make_invalid_op('all')
cls.any = _make_invalid_op('any')
Index._add_numeric_methods_disabled()
Index._add_logical_methods()
class NumericIndex(Index):
"""
Provide numeric type operations
This is an abstract class
"""
_is_numeric_dtype = True
class Int64Index(NumericIndex):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. Int64Index is a special case
of `Index` with purely integer labels. This is the default index type used
by the DataFrame and Series ctors when no explicit index is provided by the
user.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: int64)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Notes
-----
An Index instance can **only** contain hashable objects
"""
_typ = 'int64index'
_groupby = _algos.groupby_int64
_arrmap = _algos.arrmap_int64
_left_indexer_unique = _algos.left_join_indexer_unique_int64
_left_indexer = _algos.left_join_indexer_int64
_inner_indexer = _algos.inner_join_indexer_int64
_outer_indexer = _algos.outer_join_indexer_int64
_engine_type = _index.Int64Engine
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs):
if fastpath:
return cls._simple_new(data, name=name)
# isscalar, generators handled in coerce_to_ndarray
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
elif issubclass(data.dtype.type, np.integer):
# don't force the upcast as we may be dealing
# with a platform int
if dtype is None or not issubclass(np.dtype(dtype).type,
np.integer):
dtype = np.int64
subarr = np.array(data, dtype=dtype, copy=copy)
else:
subarr = np.array(data, dtype=np.int64, copy=copy)
if len(data) > 0:
if (subarr != data).any():
raise TypeError('Unsafe NumPy casting to integer, you must'
' explicitly cast')
return cls._simple_new(subarr, name=name)
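    # Illustrative sketch (hypothetical values, not from the original
    # source): float input that would lose information on the cast to
    # int64 raises rather than being silently truncated.
    #
    #   >>> Int64Index([1, 2, 3]).dtype
    #   dtype('int64')
    #   >>> Int64Index([1.5, 2.5])    # raises TypeError (unsafe cast)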
@property
def inferred_type(self):
return 'integer'
@cache_readonly
def hasnans(self):
# by definition
return False
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
# if not isinstance(other, Int64Index):
# return False
try:
return array_equivalent(_values_from_object(self), _values_from_object(other))
except TypeError:
# e.g. fails in numpy 1.6 with DatetimeIndex #1681
return False
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Int64Index(joined, name=name)
Int64Index._add_numeric_methods()
Int64Index._add_logical_methods()
class Float64Index(NumericIndex):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. Float64Index is a special case
of `Index` with purely floating point labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Notes
-----
    A Float64Index instance can **only** contain hashable objects
"""
_typ = 'float64index'
_engine_type = _index.Float64Engine
_groupby = _algos.groupby_float64
_arrmap = _algos.arrmap_float64
_left_indexer_unique = _algos.left_join_indexer_unique_float64
_left_indexer = _algos.left_join_indexer_float64
_inner_indexer = _algos.inner_join_indexer_float64
_outer_indexer = _algos.outer_join_indexer_float64
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs):
if fastpath:
return cls._simple_new(data, name)
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
if dtype is None:
dtype = np.float64
try:
subarr = np.array(data, dtype=dtype, copy=copy)
except:
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
# coerce to float64 for storage
if subarr.dtype != np.float64:
subarr = subarr.astype(np.float64)
return cls._simple_new(subarr, name)
@property
def inferred_type(self):
return 'floating'
def astype(self, dtype):
if np.dtype(dtype) not in (np.object, np.float64):
raise TypeError('Setting %s dtype to anything other than '
'float64 or object is not supported' %
self.__class__)
return Index(self.values, name=self.name, dtype=dtype)
def _convert_scalar_indexer(self, key, typ=None):
if typ == 'iloc':
return super(Float64Index, self)._convert_scalar_indexer(key,
typ=typ)
return key
def _convert_slice_indexer(self, key, typ=None):
""" convert a slice indexer, by definition these are labels
unless we are iloc """
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
if typ == 'iloc':
return super(Float64Index, self)._convert_slice_indexer(key,
typ=typ)
# allow floats here
validator = lambda v: v is None or is_integer(v) or is_float(v)
self._validate_slicer(key, validator)
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step)
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not np.isscalar(key):
raise InvalidIndexError
from pandas.core.indexing import maybe_droplevels
from pandas.core.series import Series
k = _values_from_object(key)
loc = self.get_loc(k)
new_values = _values_from_object(series)[loc]
if np.isscalar(new_values) or new_values is None:
return new_values
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return Series(new_values, index=new_index, name=series.name)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
# need to compare nans locations and make sure that they are the same
# since nans don't compare equal this is a bit tricky
try:
if not isinstance(other, Float64Index):
other = self._constructor(other)
if self.dtype != other.dtype or self.shape != other.shape:
return False
left, right = self.values, other.values
return ((left == right) | (self._isnan & other._isnan)).all()
except TypeError:
# e.g. fails in numpy 1.6 with DatetimeIndex #1681
return False
def __contains__(self, other):
if super(Float64Index, self).__contains__(other):
return True
try:
# if other is a sequence this throws a ValueError
return np.isnan(other) and self.hasnans
except ValueError:
try:
return len(other) <= 1 and _try_get_item(other) in self
except TypeError:
return False
except:
return False
def get_loc(self, key):
try:
if np.all(np.isnan(key)):
nan_idxs = self._nan_idxs
try:
return nan_idxs.item()
except (ValueError, IndexError):
# should only need to catch ValueError here but on numpy
# 1.7 .item() can raise IndexError when NaNs are present
return nan_idxs
except (TypeError, NotImplementedError):
pass
return super(Float64Index, self).get_loc(key)
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
@cache_readonly
def _nan_idxs(self):
w, = self._isnan.nonzero()
return w
@cache_readonly
def _isnan(self):
return np.isnan(self.values)
@cache_readonly
def hasnans(self):
return self._isnan.any()
@cache_readonly
def is_unique(self):
return super(Float64Index, self).is_unique and self._nan_idxs.size < 2
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
value_set = set(values)
if level is not None:
self._validate_index_level(level)
return lib.ismember_nans(self._array_values(), value_set,
isnull(list(value_set)).any())
Float64Index._add_numeric_methods()
Float64Index._add_logical_methods_disabled()
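# Illustrative sketch (hypothetical values): the NaN-aware helpers above make
# missing values usable for membership and lookup, e.g.
#   idx = Float64Index([1.5, np.nan, 3.0])
#   np.nan in idx          # __contains__ consults hasnans -> True
#   idx.get_loc(np.nan)    # resolved through _nan_idxs    -> 1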
class MultiIndex(Index):
"""
Implements multi-level, a.k.a. hierarchical, index object for pandas
objects
Parameters
----------
levels : sequence of arrays
The unique labels for each level
labels : sequence of arrays
Integers for each level designating which label at each location
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level)
names : optional sequence of objects
Names for each of the index levels.
copy : boolean, default False
Copy the meta-data
verify_integrity : boolean, default True
Check that the levels/labels are consistent and valid
"""
# initialize to zero-length tuples to make everything work
_typ = 'multiindex'
_names = FrozenList()
_levels = FrozenList()
_labels = FrozenList()
_comparables = ['names']
rename = Index.set_names
def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
copy=False, verify_integrity=True, _set_identity=True, **kwargs):
if levels is None or labels is None:
raise TypeError("Must pass both levels and labels")
if len(levels) != len(labels):
raise ValueError('Length of levels and labels must be the same.')
if len(levels) == 0:
raise ValueError('Must pass non-zero number of levels/labels')
if len(levels) == 1:
if names:
name = names[0]
else:
name = None
return Index(levels[0], name=name, copy=True).take(labels[0])
result = object.__new__(MultiIndex)
# we've already validated levels and labels, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_labels(labels, copy=copy, validate=False)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
result._verify_integrity()
if _set_identity:
result._reset_identity()
return result
def _verify_integrity(self):
"""Raises ValueError if length of levels and labels don't match or any
label would exceed level bounds"""
# NOTE: Currently does not check, among other things, that cached
        # nlevels matches nor that sortorder matches the actual sortorder.
labels, levels = self.labels, self.levels
if len(levels) != len(labels):
raise ValueError("Length of levels and labels must match. NOTE:"
" this index is in an inconsistent state.")
label_length = len(self.labels[0])
for i, (level, label) in enumerate(zip(levels, labels)):
if len(label) != label_length:
raise ValueError("Unequal label lengths: %s" % (
[len(lab) for lab in labels]))
if len(label) and label.max() >= len(level):
raise ValueError("On level %d, label max (%d) >= length of"
" level (%d). NOTE: this index is in an"
" inconsistent state" % (i, label.max(),
len(level)))
def _get_levels(self):
return self._levels
def _set_levels(self, levels, level=None, copy=False, validate=True,
verify_integrity=False):
        # This is NOT part of the levels property because it should not be
        # possible to set levels from outside the class. User beware if you
        # change _levels directly
if validate and len(levels) == 0:
raise ValueError('Must set non-zero number of levels.')
if validate and level is None and len(levels) != self.nlevels:
raise ValueError('Length of levels must match number of levels.')
if validate and level is not None and len(levels) != len(level):
raise ValueError('Length of levels must match length of level.')
if level is None:
new_levels = FrozenList(_ensure_index(lev, copy=copy)._shallow_copy()
for lev in levels)
else:
level = [self._get_level_number(l) for l in level]
new_levels = list(self._levels)
for l, v in zip(level, levels):
new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
if verify_integrity:
self._verify_integrity()
def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning
new index.
Parameters
----------
levels : sequence or list of sequence
new level(s) to apply
level : int or level name, or sequence of int / level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a','b'], [1,2]])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level=0)
MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level='bar')
MultiIndex(levels=[[1, 2], [u'a', u'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels([['a','b'], [1,2]], level=[0,1])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(levels, level=level, validate=True,
verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to read only property
__set_levels = deprecate("setting `levels` directly",
partial(set_levels, inplace=True,
verify_integrity=True),
alt_name="set_levels")
levels = property(fget=_get_levels, fset=__set_levels)
def _get_labels(self):
return self._labels
def _set_labels(self, labels, level=None, copy=False, validate=True,
verify_integrity=False):
if validate and level is None and len(labels) != self.nlevels:
raise ValueError("Length of labels must match number of levels")
if validate and level is not None and len(labels) != len(level):
raise ValueError('Length of labels must match length of levels.')
if level is None:
new_labels = FrozenList(_ensure_frozen(lab, lev, copy=copy)._shallow_copy()
for lev, lab in zip(self.levels, labels))
else:
level = [self._get_level_number(l) for l in level]
new_labels = list(self._labels)
for l, lev, lab in zip(level, self.levels, labels):
new_labels[l] = _ensure_frozen(lab, lev, copy=copy)._shallow_copy()
new_labels = FrozenList(new_labels)
self._labels = new_labels
self._tuples = None
self._reset_cache()
if verify_integrity:
self._verify_integrity()
def set_labels(self, labels, level=None, inplace=False, verify_integrity=True):
"""
Set new labels on MultiIndex. Defaults to returning
new index.
Parameters
----------
labels : sequence or list of sequence
new labels to apply
level : int or level name, or sequence of int / level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([1,0,1,0], level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([0,0,1,1], level='bar')
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]], level=[0,1])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(labels):
raise TypeError("Labels must be list-like")
if is_list_like(labels[0]):
raise TypeError("Labels must be list-like")
level = [level]
labels = [labels]
elif level is None or is_list_like(level):
if not is_list_like(labels) or not is_list_like(labels[0]):
raise TypeError("Labels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_labels(labels, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to readonly property
__set_labels = deprecate("setting labels directly",
partial(set_labels, inplace=True,
verify_integrity=True),
alt_name="set_labels")
labels = property(fget=_get_labels, fset=__set_labels)
def copy(self, names=None, dtype=None, levels=None, labels=None,
deep=False, _set_identity=False):
"""
Make a copy of this object. Names, dtype, levels and labels can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
levels : sequence, optional
labels : sequence, optional
Returns
-------
copy : MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
if deep:
from copy import deepcopy
levels = levels if levels is not None else deepcopy(self.levels)
labels = labels if labels is not None else deepcopy(self.labels)
names = names if names is not None else deepcopy(self.names)
else:
levels = self.levels
labels = self.labels
names = self.names
return MultiIndex(levels=levels,
labels=labels,
names=names,
sortorder=self.sortorder,
verify_integrity=False,
_set_identity=_set_identity)
def __array__(self, result=None):
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
_shallow_copy = view
def _array_values(self):
# hack for various methods
return self.values
@cache_readonly
def dtype(self):
return np.dtype('O')
@cache_readonly
def nbytes(self):
""" return the number of bytes in the underlying data """
level_nbytes = sum(( i.nbytes for i in self.levels ))
label_nbytes = sum(( i.nbytes for i in self.labels ))
names_nbytes = sum(( getsizeof(i) for i in self.names ))
return level_nbytes + label_nbytes + names_nbytes
def __repr__(self):
encoding = get_option('display.encoding')
attrs = [('levels', default_pprint(self.levels)),
('labels', default_pprint(self.labels))]
if not all(name is None for name in self.names):
attrs.append(('names', default_pprint(self.names)))
if self.sortorder is not None:
attrs.append(('sortorder', default_pprint(self.sortorder)))
space = ' ' * (len(self.__class__.__name__) + 1)
prepr = (u(",\n%s") % space).join([u("%s=%s") % (k, v)
for k, v in attrs])
res = u("%s(%s)") % (self.__class__.__name__, prepr)
if not compat.PY3:
# needs to be str in Python 2
res = res.encode(encoding)
return res
def __unicode__(self):
"""
Return a string representation for a particular Index
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
rows = self.format(names=True)
max_rows = get_option('display.max_rows')
if len(rows) > max_rows:
spaces = (len(rows[0]) - 3) // 2
centered = ' ' * spaces
half = max_rows // 2
rows = rows[:half] + [centered + '...' + centered] + rows[-half:]
return "\n".join(rows)
def __len__(self):
return len(self.labels[0])
def _get_names(self):
return FrozenList(level.name for level in self.levels)
def _set_names(self, names, level=None, validate=True):
"""
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
names = list(names)
if validate and level is not None and len(names) != len(level):
raise ValueError('Length of names must match length of level.')
if validate and level is None and len(names) != self.nlevels:
raise ValueError(
'Length of names must match number of levels in MultiIndex.')
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(l) for l in level]
# set the name
for l, name in zip(level, names):
self.levels[l].rename(name, inplace=True)
names = property(
fset=_set_names, fget=_get_names, doc="Names of levels in MultiIndex")
def _reference_duplicate_name(self, name):
"""
        Returns True if the name referred to in self.names is duplicated.
"""
# count the times name equals an element in self.names.
return np.sum(name == np.asarray(self.names)) > 1
def _format_native_types(self, **kwargs):
return self.tolist()
@property
def _constructor(self):
return MultiIndex.from_tuples
@cache_readonly
def inferred_type(self):
return 'mixed'
@staticmethod
def _from_elements(values, labels=None, levels=None, names=None,
sortorder=None):
        return MultiIndex(levels=levels, labels=labels, names=names,
                          sortorder=sortorder)
def _get_level_number(self, level):
try:
count = self.names.count(level)
if count > 1:
raise ValueError('The name %s occurs multiple times, use a '
'level number' % level)
level = self.names.index(level)
except ValueError:
if not isinstance(level, int):
raise KeyError('Level %s not found' % str(level))
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError(
'Too many levels: Index has only %d levels, '
'%d is not a valid level number' % (self.nlevels, orig_level)
)
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError('Too many levels: Index has only %d levels, '
'not %d' % (self.nlevels, level + 1))
return level
_tuples = None
@property
def values(self):
if self._tuples is not None:
return self._tuples
values = []
for lev, lab in zip(self.levels, self.labels):
# Need to box timestamps, etc.
box = hasattr(lev, '_box_values')
# Try to minimize boxing.
if box and len(lev) > len(lab):
taken = lev._box_values(com.take_1d(lev.values, lab))
elif box:
taken = com.take_1d(lev._box_values(lev.values), lab,
fill_value=_get_na_value(lev.dtype.type))
else:
taken = com.take_1d(lev.values, lab)
values.append(taken)
self._tuples = lib.fast_zip(values)
return self._tuples
# fml
@property
def _is_v1(self):
return False
@property
def _is_v2(self):
return False
@property
def _has_complex_internals(self):
# to disable groupby tricks
return True
@cache_readonly
def is_unique(self):
return not self.duplicated().any()
@Appender(_shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, take_last=False):
from pandas.core.groupby import get_group_index
from pandas.hashtable import duplicated_int64
shape = map(len, self.levels)
ids = get_group_index(self.labels, shape, sort=False, xnull=False)
return duplicated_int64(ids, take_last)
def get_value(self, series, key):
# somewhat broken encapsulation
from pandas.core.indexing import maybe_droplevels
from pandas.core.series import Series
# Label-based
s = _values_from_object(series)
k = _values_from_object(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series.values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return Series(new_values, index=new_index, name=series.name)
try:
return self._engine.get_value(s, k)
except KeyError as e1:
try:
return _try_mi(key)
except KeyError:
pass
try:
return _index.get_value_at(s, k)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if com.is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# a Timestamp will raise a TypeError in a multi-index
# rather than a KeyError, try it here
# note that a string that 'looks' like a Timestamp will raise
# a KeyError! (GH5725)
if isinstance(key, (datetime.datetime, np.datetime64)) or (
compat.PY3 and isinstance(key, compat.string_types)):
try:
return _try_mi(key)
except (KeyError):
raise
except:
pass
try:
return _try_mi(Timestamp(key))
except:
pass
raise InvalidIndexError(key)
def get_level_values(self, level):
"""
Return vector of label values for requested level, equal to the length
of the index
Parameters
----------
level : int or level name
Returns
-------
values : ndarray
"""
num = self._get_level_number(level)
unique = self.levels[num] # .values
labels = self.labels[num]
filled = com.take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._simple_new(filled, self.names[num],
freq=getattr(unique, 'freq', None),
tz=getattr(unique, 'tz', None))
return values
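    # Illustrative sketch (hypothetical data): for
    #   mi = MultiIndex.from_arrays([[1, 1, 2], ['a', 'b', 'a']],
    #                               names=['num', 'let'])
    # mi.get_level_values('let') returns an Index(['a', 'b', 'a']) of the same
    # length as mi, i.e. the level values expanded through the level's labels.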
def format(self, space=2, sparsify=None, adjoin=True, names=False,
na_rep=None, formatter=None):
if len(self) == 0:
return []
stringified_levels = []
for lev, lab in zip(self.levels, self.labels):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(lab).format(formatter=formatter)
# we have some NA
mask = lab == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [com.pprint_thing(na if isnull(x) else x,
escape_chars=('\t', '\r', '\n'))
for x in com.take_1d(lev.values, lab)]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(com.pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
if name is not None else '')
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ''
# GH3547
# use value of sparsify as sentinel, unless it's an obvious
# "Truthey" value
if sparsify not in [True, 1]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(result_levels,
start=int(names),
sentinel=sentinel)
if adjoin:
return com.adjoin(space, *result_levels).split('\n')
else:
return result_levels
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
shapes given by n_repeat and n_shuffle.
Useful to replicate and rearrange a MultiIndex for combination
with another Index with n_repeat items.
Parameters
----------
n_repeat : int
Number of times to repeat the labels on self
n_shuffle : int
Controls the reordering of the labels. If the result is going
to be an inner level in a MultiIndex, n_shuffle will need to be
        greater than one. The size of each label must be divisible by
n_shuffle.
Returns
-------
MultiIndex
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')])
>>> idx.to_hierarchical(3)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
"""
levels = self.levels
labels = [np.repeat(x, n_repeat) for x in self.labels]
# Assumes that each label is divisible by n_shuffle
labels = [x.reshape(n_shuffle, -1).ravel(1) for x in labels]
names = self.names
return MultiIndex(levels=levels, labels=labels, names=names)
@property
def is_all_dates(self):
return False
def is_lexsorted(self):
"""
Return True if the labels are lexicographically sorted
"""
return self.lexsort_depth == self.nlevels
def is_lexsorted_for_tuple(self, tup):
"""
Return True if we are correctly lexsorted given the passed tuple
"""
return len(tup) <= self.lexsort_depth
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
if self.sortorder == 0:
return self.nlevels
else:
return 0
int64_labels = [com._ensure_int64(lab) for lab in self.labels]
for k in range(self.nlevels, 0, -1):
if lib.is_lexsorted(int64_labels[:k]):
return k
return 0
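    # Illustrative sketch (hypothetical data): an index built from already
    # lexicographically sorted tuples reports full depth, e.g.
    #   MultiIndex.from_tuples([(1, 'a'), (1, 'b'), (2, 'a')]).is_lexsorted() -> True
    # whereas MultiIndex.from_tuples([(2, 'a'), (1, 'b')]) has lexsort_depth 0.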
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=None):
"""
Convert arrays to MultiIndex
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> MultiIndex.from_arrays(arrays, names=('number', 'color'))
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
from pandas.core.categorical import Categorical
if len(arrays) == 1:
name = None if names is None else names[0]
return Index(arrays[0], name=name)
cats = [Categorical.from_array(arr) for arr in arrays]
levels = [c.categories for c in cats]
labels = [c.codes for c in cats]
if names is None:
names = [c.name for c in cats]
return MultiIndex(levels=levels, labels=labels,
sortorder=sortorder, names=names,
verify_integrity=False)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
"""
Convert list of tuples to MultiIndex
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level)
Returns
-------
index : MultiIndex
Examples
--------
>>> tuples = [(1, u'red'), (1, u'blue'),
(2, u'red'), (2, u'blue')]
>>> MultiIndex.from_tuples(tuples, names=('number', 'color'))
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
if len(tuples) == 0:
# I think this is right? Not quite sure...
raise TypeError('Cannot infer number of levels from empty list')
if isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples.values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = lzip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder,
names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of strings or None
Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = [u'green', u'purple']
>>> MultiIndex.from_product([numbers, colors],
names=['number', 'color'])
MultiIndex(levels=[[0, 1, 2], [u'green', u'purple']],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=[u'number', u'color'])
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
"""
from pandas.core.categorical import Categorical
from pandas.tools.util import cartesian_product
categoricals = [Categorical.from_array(it) for it in iterables]
labels = cartesian_product([c.codes for c in categoricals])
return MultiIndex(levels=[c.categories for c in categoricals],
labels=labels, sortorder=sortorder, names=names)
@property
def nlevels(self):
return len(self.levels)
@property
def levshape(self):
return tuple(len(x) for x in self.levels)
def __contains__(self, key):
hash(key)
# work around some kind of odd cython bug
try:
self.get_loc(key)
return True
except KeyError:
return False
def __reduce__(self):
"""Necessary for making this object picklable"""
d = dict(levels = [lev for lev in self.levels],
labels = [label for label in self.labels],
sortorder = self.sortorder,
names = list(self.names))
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
levels = state.get('levels')
labels = state.get('labels')
sortorder = state.get('sortorder')
names = state.get('names')
elif isinstance(state, tuple):
nd_state, own_state = state
levels, labels, sortorder, names = own_state
self._set_levels([Index(x) for x in levels], validate=False)
self._set_labels(labels)
self._set_names(names)
self.sortorder = sortorder
self._verify_integrity()
self._reset_identity()
def __getitem__(self, key):
if np.isscalar(key):
retval = []
for lev, lab in zip(self.levels, self.labels):
if lab[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[lab[key]])
return tuple(retval)
else:
if is_bool_indexer(key):
key = np.asarray(key)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
new_labels = [lab[key] for lab in self.labels]
return MultiIndex(levels=self.levels,
labels=new_labels,
names=self.names,
sortorder=sortorder,
verify_integrity=False)
def take(self, indexer, axis=None):
indexer = com._ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, verify_integrity=False)
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other):
arrays = []
for i in range(self.nlevels):
label = self.get_level_values(i)
appended = [o.get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values,) + tuple(k.values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except:
return Index(new_tuples)
def argsort(self, *args, **kwargs):
return self.values.argsort()
def repeat(self, n):
return MultiIndex(levels=self.levels,
labels=[label.view(np.ndarray).repeat(n) for label in self.labels],
names=self.names,
sortorder=self.sortorder,
verify_integrity=False)
def drop(self, labels, level=None):
"""
Make new MultiIndex with passed list of labels deleted
Parameters
----------
labels : array-like
Must be a list of tuples
level : int or level name, default None
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(labels, level)
try:
if not isinstance(labels, (np.ndarray, Index)):
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
raise ValueError('labels %s not contained in axis'
% labels[mask])
return self.delete(indexer)
except Exception:
pass
inds = []
for label in labels:
loc = self.get_loc(label)
if isinstance(loc, int):
inds.append(loc)
else:
inds.extend(lrange(loc.start, loc.stop))
return self.delete(inds)
def _drop_from_level(self, labels, level):
labels = com._index_labels_to_array(labels)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(labels)
mask = ~lib.ismember(self.labels[i], set(values))
return self[mask]
def droplevel(self, level=0):
"""
Return Index with requested level removed. If MultiIndex has only 2
levels, the result will be of Index type not MultiIndex.
Parameters
----------
level : int/level name or list thereof
Notes
-----
Does not check if result index is unique or not
Returns
-------
index : Index or MultiIndex
"""
levels = level
if not isinstance(levels, (tuple, list)):
levels = [level]
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1]
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
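    # Illustrative sketch (hypothetical data):
    #   mi = MultiIndex.from_tuples([(1, 'a'), (2, 'b')], names=['x', 'y'])
    #   mi.droplevel('x')   ->   Index(['a', 'b'], name='y')
    # i.e. dropping down to a single remaining level degrades the result to a
    # plain Index, as noted above.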
def swaplevel(self, i, j):
"""
Swap level i with level j. Do not change the ordering of anything
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : MultiIndex
"""
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_labels[i], new_labels[j] = new_labels[j], new_labels[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
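    # Illustrative sketch (hypothetical data): swaplevel only permutes the level
    # metadata, the rows stay in place:
    #   MultiIndex.from_tuples([(1, 'a'), (2, 'b')], names=['x', 'y']).swaplevel(0, 1)
    # yields the tuples ('a', 1), ('b', 2) with names ['y', 'x'].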
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels
Parameters
----------
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError(('Length of order must be same as '
'number of levels (%d), got %d')
% (self.nlevels, len(order)))
new_levels = [self.levels[i] for i in order]
new_labels = [self.labels[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
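    # Illustrative sketch (hypothetical data): reorder_levels([1, 0]) (or
    # reorder_levels(['y', 'x']) by name) is equivalent to swaplevel(0, 1) on a
    # two-level index, while passing fewer entries than nlevels raises the
    # AssertionError above.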
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
        sort_remaining : bool, default True
            Sort by the remaining levels after the given level.
Returns
-------
sorted_index : MultiIndex
"""
from pandas.core.groupby import _indexer_from_factorized
labels = list(self.labels)
shape = list(self.levshape)
if isinstance(level, (str, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
# partition labels and shape
primary = tuple(labels.pop(lev - i) for i, lev in enumerate(level))
primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))
if sort_remaining:
primary += primary + tuple(labels)
primshp += primshp + tuple(shape)
sortorder = None
else:
sortorder = level[0]
indexer = _indexer_from_factorized(primary,
primshp,
compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = com._ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
new_index = MultiIndex(labels=new_labels, levels=self.levels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
return new_index, indexer
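    # Illustrative sketch (hypothetical data): sortlevel returns both the sorted
    # index and the permutation that produced it, so callers can realign data:
    #   mi = MultiIndex.from_tuples([(2, 'b'), (1, 'a')])
    #   sorted_mi, indexer = mi.sortlevel(0)    # indexer == array([1, 0])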
def get_indexer(self, target, method=None, limit=None):
"""
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index. The mask determines whether labels are
found or not in the current index
Parameters
----------
target : MultiIndex or Index (of tuples)
method : {'pad', 'ffill', 'backfill', 'bfill'}
pad / ffill: propagate LAST valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
Notes
-----
        This is a low-level method and should be used at your own risk
Examples
--------
>>> indexer, mask = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
>>> new_values[-mask] = np.nan
Returns
-------
(indexer, mask) : (ndarray, ndarray)
"""
method = self._get_method(method)
target = _ensure_index(target)
target_index = target
if isinstance(target, MultiIndex):
target_index = target._tuple_index
if target_index.dtype != object:
return np.ones(len(target_index)) * -1
if not self.is_unique:
raise Exception('Reindexing only valid with uniquely valued Index '
'objects')
self_index = self._tuple_index
if method == 'pad':
if not self.is_unique or not self.is_monotonic:
raise AssertionError(('Must be unique and monotonic to '
'use forward fill getting the indexer'))
indexer = self_index._engine.get_pad_indexer(target_index.values,
limit=limit)
elif method == 'backfill':
if not self.is_unique or not self.is_monotonic:
raise AssertionError(('Must be unique and monotonic to '
'use backward fill getting the indexer'))
indexer = self_index._engine.get_backfill_indexer(target_index.values,
limit=limit)
else:
indexer = self_index._engine.get_indexer(target_index.values)
return com._ensure_platform_int(indexer)
def reindex(self, target, method=None, level=None, limit=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'names')
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype),
**attrs)
else:
target = _ensure_index(target)
target, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True,
keep_order=False)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit)
else:
raise Exception(
"cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (preserve_names and target.nlevels == self.nlevels and
target.names != self.names):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
@cache_readonly
def _tuple_index(self):
"""
Convert MultiIndex to an Index of tuples
Returns
-------
index : Index
"""
return Index(self.values)
def get_slice_bound(self, label, side):
if not isinstance(label, tuple):
label = label,
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels. They can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
Returns
-------
(start, end) : (int, int)
Notes
-----
This function assumes that the data is sorted by the first level
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super(MultiIndex, self).slice_locs(start, end, step)
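    # Illustrative sketch (hypothetical data): on a lexsorted two-level index a
    # partial key addresses the whole block sharing that first-level label:
    #   mi = MultiIndex.from_arrays([[1, 1, 2, 2], ['a', 'b', 'a', 'b']])
    #   mi.slice_locs(1, 1)    ->   (0, 2)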
def _partial_tup_index(self, tup, side='left'):
if len(tup) > self.lexsort_depth:
raise KeyError('Key length (%d) was greater than MultiIndex'
' lexsort depth (%d)' %
(len(tup), self.lexsort_depth))
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.labels)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev:
if not lev.is_type_compatible(lib.infer_dtype([lab])):
raise TypeError('Level type mismatch: %s' % lab)
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == 'right' and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = lev.get_loc(lab)
if k < n - 1:
end = start + section.searchsorted(idx, side='right')
start = start + section.searchsorted(idx, side='left')
else:
return start + section.searchsorted(idx, side=side)
def get_loc(self, key):
"""
Get integer location, slice or boolean mask for requested label or tuple
If the key is past the lexsort depth, the return may be a boolean mask
array, otherwise it is always a slice or int.
Parameters
----------
key : label or tuple
Returns
-------
loc : int, slice object or boolean mask
"""
def _maybe_to_slice(loc):
'''convert integer indexer to boolean mask or slice if possible'''
if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
return loc
loc = lib.maybe_indices_to_slice(loc)
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype='bool')
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError('Key length ({0}) exceeds index depth ({1})'
''.format(keylen, self.nlevels))
if keylen == self.nlevels and self.is_unique:
def _maybe_str_to_time_stamp(key, lev):
if lev.is_all_dates and not isinstance(key, Timestamp):
try:
return Timestamp(key, tz=getattr(lev, 'tz', None))
except Exception:
pass
return key
key = _values_from_object(key)
key = tuple(map(_maybe_str_to_time_stamp, key, self.levels))
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = self.slice_locs(lead_key, lead_key) \
if lead_key else (0, len(self))
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn('indexing past lexsort depth may impact performance.',
PerformanceWarning)
loc = np.arange(start, stop, dtype='int64')
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.labels[i][loc] == self.levels[i].get_loc(k)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return _maybe_to_slice(loc) \
if len(loc) != stop - start \
else slice(start, stop)
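    # Illustrative sketch (hypothetical data): a complete key on a unique index
    # gives an int, a leading partial key gives a slice, and keys past the
    # lexsort depth fall back to the masked linear scan above:
    #   mi = MultiIndex.from_arrays([[1, 1, 2, 2], ['a', 'b', 'a', 'b']])
    #   mi.get_loc((1, 'b'))   ->   1
    #   mi.get_loc(1)          ->   slice(0, 2)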
def get_loc_level(self, key, level=0, drop_level=True):
"""
Get integer location slice for requested label or tuple
Parameters
----------
key : label or tuple
level : int/level name or list thereof
Returns
-------
loc : int or slice object
"""
def maybe_droplevels(indexer, levels, drop_level):
if not drop_level:
return self[indexer]
            # kludge around
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError('Key for location must have same '
'length as number of levels')
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_droplevels(result, level, drop_level)
level = self._get_level_number(level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_droplevels(indexer, [0], drop_level)
return indexer, new_index
except TypeError:
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels,
drop_level)
if len(key) == self.nlevels:
if self.is_unique:
# here we have a completely specified key, but are
# using some partial string matching here
# GH4758
can_index_exactly = any([
(l.is_all_dates and
not isinstance(k, compat.string_types))
for k, l in zip(key, self.levels)
])
if any([
l.is_all_dates for k, l in zip(key, self.levels)
]) and not can_index_exactly:
indexer = self.get_loc(key)
# we have a multiple selection here
if not isinstance(indexer, slice) \
or indexer.stop - indexer.start != 1:
return partial_selection(key, indexer)
key = tuple(self[indexer].tolist()[0])
return (self._engine.get_loc(_values_from_object(key)),
None)
else:
return partial_selection(key)
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels,
drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_droplevels(indexer, [level], drop_level)
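    # Illustrative sketch (hypothetical data): selecting on an inner level
    # returns the locations plus the index with that level dropped:
    #   mi = MultiIndex.from_arrays([[1, 1, 2], ['a', 'b', 'a']])
    #   loc, rest = mi.get_loc_level('a', level=1)
    #   # loc is a boolean mask picking rows 0 and 2; rest is Int64Index([1, 2])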
def _get_level_indexer(self, key, level=0):
# return a boolean indexer or a slice showing where the key is
# in the totality of values
level_index = self.levels[level]
labels = self.labels[level]
if isinstance(key, slice):
            # handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index)-1
step = key.step
except (KeyError):
# we have a partial slice (like looking up a partial date string)
start = stop = level_index.slice_indexer(key.start, key.stop, key.step)
step = start.step
if isinstance(start,slice) or isinstance(stop,slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
m = np.zeros(len(labels),dtype=bool)
m[np.in1d(labels,np.arange(start.start,stop.stop,step))] = True
return m
elif level > 0 or self.lexsort_depth == 0 or step is not None:
                # need the same right-inclusive semantics here as when we are
                # searching with a slice,
                # so include the stop+1 (so we include stop)
m = np.zeros(len(labels),dtype=bool)
m[np.in1d(labels,np.arange(start,stop+1,step))] = True
return m
else:
# sorted, so can return slice object -> view
i = labels.searchsorted(start, side='left')
j = labels.searchsorted(stop, side='right')
return slice(i, j, step)
else:
loc = level_index.get_loc(key)
if level > 0 or self.lexsort_depth == 0:
return np.array(labels == loc,dtype=bool)
else:
# sorted, so can return slice object -> view
i = labels.searchsorted(loc, side='left')
j = labels.searchsorted(loc, side='right')
return slice(i, j)
def get_locs(self, tup):
"""
Given a tuple of slices/lists/labels/boolean indexer to a level-wise spec
produce an indexer to extract those locations
Parameters
----------
key : tuple of (slices/list/labels)
Returns
-------
locs : integer list of locations or boolean indexer suitable
for passing to iloc
"""
# must be lexsorted to at least as many levels
if not self.is_lexsorted_for_tuple(tup):
raise KeyError('MultiIndex Slicing requires the index to be fully lexsorted'
' tuple len ({0}), lexsort depth ({1})'.format(len(tup), self.lexsort_depth))
def _convert_indexer(r):
if isinstance(r, slice):
m = np.zeros(len(self),dtype=bool)
m[r] = True
return m
return r
ranges = []
for i,k in enumerate(tup):
if is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
if len(k) != len(self):
raise ValueError("cannot index with a boolean indexer that is"
" not the same length as the index")
ranges.append(k)
elif is_list_like(k):
# a collection of labels to include from this level (these are or'd)
indexers = []
for x in k:
try:
indexers.append(_convert_indexer(self._get_level_indexer(x, level=i)))
except (KeyError):
# ignore not founds
continue
if len(k):
ranges.append(reduce(np.logical_or, indexers))
else:
ranges.append(np.zeros(self.labels[i].shape, dtype=bool))
elif is_null_slice(k):
# empty slice
pass
elif isinstance(k,slice):
# a slice, include BOTH of the labels
ranges.append(self._get_level_indexer(k,level=i))
else:
# a single label
ranges.append(self.get_loc_level(k,level=i,drop_level=False)[0])
# identity
if len(ranges) == 0:
return slice(0,len(self))
elif len(ranges) == 1:
return ranges[0]
# construct a boolean indexer if we have a slice or boolean indexer
return reduce(np.logical_and,[ _convert_indexer(r) for r in ranges ])
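    # Illustrative sketch (hypothetical data): one spec per level, combined with
    # a logical-and across levels:
    #   mi = MultiIndex.from_arrays([[1, 1, 2, 2], ['a', 'b', 'a', 'b']])
    #   mi.get_locs((slice(None), ['a']))   # -> indexer selecting rows 0 and 2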
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError('after < before')
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_labels = [lab[left:right] for lab in self.labels]
new_labels[0] = new_labels[0] - i
return MultiIndex(levels=new_levels, labels=new_labels,
verify_integrity=False)
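    # Illustrative sketch (hypothetical data): on
    #   mi = MultiIndex.from_arrays([[1, 1, 2, 2], ['a', 'b', 'a', 'b']])
    # mi.truncate(before=1, after=1) keeps only the rows whose first level is 1
    # and trims the now-unused entries out of level 0.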
def equals(self, other):
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, MultiIndex):
return array_equivalent(self.values,
_values_from_object(_ensure_index(other)))
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
svalues = com.take_nd(self.levels[i].values, self.labels[i],
allow_fill=False)
ovalues = com.take_nd(other.levels[i].values, other.labels[i],
allow_fill=False)
if not array_equivalent(svalues, ovalues):
return False
return True
def equal_levels(self, other):
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
def union(self, other):
"""
Form the union of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
>>> index.union(index2)
"""
self._assert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
result_names = self.names if self.names == other.names else None
uniq_tuples = lib.fast_unique_multiple([self.values, other.values])
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def intersection(self, other):
"""
Form the intersection of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
"""
self._assert_can_do_setop(other)
if self.equals(other):
return self
result_names = self.names if self.names == other.names else None
self_tuples = self.values
other_tuples = other.values
uniq_tuples = sorted(set(self_tuples) & set(other_tuples))
if len(uniq_tuples) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def difference(self, other):
"""
Compute sorted set difference of two MultiIndex objects
Returns
-------
diff : MultiIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, MultiIndex):
if len(other) == 0:
return self
try:
other = MultiIndex.from_tuples(other)
except:
raise TypeError('other must be a MultiIndex or a list of'
' tuples')
result_names = self.names
else:
result_names = self.names if self.names == other.names else None
if self.equals(other):
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
difference = sorted(set(self.values) - set(other.values))
if len(difference) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_tuples(difference, sortorder=0,
names=result_names)
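    # Illustrative sketch (hypothetical data): the three set operations above
    # compare whole tuples, e.g. with
    #   a = MultiIndex.from_tuples([(1, 'a'), (2, 'b')])
    #   b = MultiIndex.from_tuples([(2, 'b'), (3, 'c')])
    #   a.union(b)         -> (1, 'a'), (2, 'b'), (3, 'c')
    #   a.intersection(b)  -> (2, 'b')
    #   a.difference(b)    -> (1, 'a')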
def _assert_can_do_setop(self, other):
pass
def astype(self, dtype):
if np.dtype(dtype) != np.object_:
raise TypeError('Setting %s dtype to anything other than object '
'is not supported' % self.__class__)
return self._shallow_copy()
def insert(self, loc, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(item, tuple):
item = (item,) + ('',) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError(
'Item must have length equal to number of levels.')
new_levels = []
new_labels = []
for k, level, labels in zip(item, self.levels, self.labels):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other labels
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_labels.append(np.insert(_ensure_int64(labels), loc, lev_loc))
return MultiIndex(levels=new_levels, labels=new_labels,
names=self.names, verify_integrity=False)
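    # Illustrative sketch (hypothetical data): scalar keys are padded with empty
    # strings, so on a two-level index mi.insert(0, 'new') prepends the tuple
    # ('new', '') and, where necessary, appends 'new' and '' to the ends of the
    # corresponding levels as handled above.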
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_labels = [np.delete(lab, loc) for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, verify_integrity=False)
get_major_bounds = slice_locs
__bounds = None
@property
def _bounds(self):
"""
Return or compute and return slice points for level 0, assuming
sortedness
"""
if self.__bounds is None:
inds = np.arange(len(self.levels[0]))
self.__bounds = self.labels[0].searchsorted(inds)
return self.__bounds
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
return lib.ismember(self._array_values(), set(values))
else:
num = self._get_level_number(level)
levs = self.levels[num]
labs = self.labels[num]
sought_labels = levs.isin(values).nonzero()[0]
if levs.size == 0:
return np.zeros(len(labs), dtype=np.bool_)
else:
return np.lib.arraysetops.in1d(labs, sought_labels)
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_logical_methods_disabled()
# For utility purposes
def _sparsify(label_list, start=0, sentinel=''):
pivoted = lzip(*label_list)
k = len(label_list)
result = pivoted[:start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1:]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return lzip(*result)
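# Illustrative sketch (hypothetical data) of the helper above, as used by
# MultiIndex.format:
#   _sparsify([['a', 'a', 'b'], ['x', 'y', 'z']])
#   -> [('a', '', 'b'), ('x', 'y', 'z')]
# i.e. repeated leading labels are blanked out per level for display.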
def _ensure_index(index_like, copy=False):
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, 'name'):
return Index(index_like, name=index_like.name, copy=copy)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
# 2200 ?
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
def _ensure_frozen(array_like, categories, copy=False):
array_like = com._coerce_indexer_dtype(array_like, categories)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
return array_like
def _validate_join_method(method):
if method not in ['left', 'right', 'inner', 'outer']:
raise ValueError('do not recognize join method %s' % method)
# TODO: handle index names!
def _get_combined_index(indexes, intersect=False):
indexes = _get_distinct_indexes(indexes)
if len(indexes) == 0:
return Index([])
if len(indexes) == 1:
return indexes[0]
if intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.intersection(other)
return index
union = _union_indexes(indexes)
return _ensure_index(union)
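# Illustrative sketch (hypothetical data):
#   _get_combined_index([Index([1, 2]), Index([2, 3])])                   -> Index([1, 2, 3])
#   _get_combined_index([Index([1, 2]), Index([2, 3])], intersect=True)   -> Index([2])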
def _get_distinct_indexes(indexes):
return list(dict((id(x), x) for x in indexes).values())
def _union_indexes(indexes):
if len(indexes) == 0:
raise AssertionError('Must have at least 1 Index to union')
if len(indexes) == 1:
result = indexes[0]
if isinstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _unique_indices(inds):
def conv(i):
if isinstance(i, Index):
i = i.tolist()
return i
return Index(lib.fast_unique_multiple_list([ conv(i) for i in inds ]))
if kind == 'special':
result = indexes[0]
if hasattr(result, 'union_many'):
return result.union_many(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == 'array':
index = indexes[0]
for other in indexes[1:]:
if not index.equals(other):
return _unique_indices(indexes)
return index
else:
return _unique_indices(indexes)
def _trim_front(strings):
"""
    Trims leading spaces that are common to all strings
"""
trimmed = strings
while len(strings) > 0 and all([x[0] == ' ' for x in trimmed]):
trimmed = [x[1:] for x in trimmed]
return trimmed
def _sanitize_and_check(indexes):
kinds = list(set([type(index) for index in indexes]))
if list in kinds:
if len(kinds) > 1:
indexes = [Index(com._try_sort(x))
if not isinstance(x, Index) else x
for x in indexes]
kinds.remove(list)
else:
return indexes, 'list'
if len(kinds) > 1 or Index not in kinds:
return indexes, 'special'
else:
return indexes, 'array'
def _get_consensus_names(indexes):
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = set([
tuple(i.names) for i in indexes if all(n is not None for n in i.names)
])
if len(consensus_names) == 1:
return list(list(consensus_names)[0])
return [None] * indexes[0].nlevels
def _maybe_box(idx):
from pandas.tseries.api import DatetimeIndex, PeriodIndex, TimedeltaIndex
klasses = DatetimeIndex, PeriodIndex, TimedeltaIndex
if isinstance(idx, klasses):
return idx.asobject
return idx
def _all_indexes_same(indexes):
first = indexes[0]
for index in indexes[1:]:
if not first.equals(index):
return False
return True
def _get_na_rep(dtype):
return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN')
def _get_na_value(dtype):
return {np.datetime64: tslib.NaT, np.timedelta64: tslib.NaT}.get(dtype,
np.nan)
def _ensure_has_len(seq):
"""If seq is an iterator, put its values into a list."""
try:
len(seq)
except TypeError:
return list(seq)
else:
return seq
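# --- Illustrative sketch (added for clarity; not part of the original module) ---
# The private helpers above (_union_indexes, _get_combined_index and friends)
# implement the machinery behind combining several indexes. A minimal
# demonstration of their union/intersection semantics, using the Index class
# and helper defined in this module; the function name is hypothetical:
def _example_combined_index():
    left = Index(['a', 'b', 'c'])
    right = Index(['b', 'c', 'd'])
    union = _get_combined_index([left, right], intersect=False)        # all distinct labels
    intersection = _get_combined_index([left, right], intersect=True)  # shared labels only
    return union, intersection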
| 34.252133
| 120
| 0.55019
|
4bff0c55c149369feb494481f1d99d07bae1e231
| 43,410
|
py
|
Python
|
pysnmp-with-texts/APPIAN-SERVICES-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/APPIAN-SERVICES-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/APPIAN-SERVICES-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module APPIAN-SERVICES-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/APPIAN-SERVICES-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:23:56 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
acChassisRingId, acChassisCurrentTime = mibBuilder.importSymbols("APPIAN-CHASSIS-MIB", "acChassisRingId", "acChassisCurrentTime")
AcNodeId, AcOpStatus, acServices, AcSlotNumber, AcAdminStatus, AcPortNumber = mibBuilder.importSymbols("APPIAN-SMI-MIB", "AcNodeId", "AcOpStatus", "acServices", "AcSlotNumber", "AcAdminStatus", "AcPortNumber")
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, Bits, Gauge32, Counter32, ModuleIdentity, Unsigned32, iso, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, NotificationType, IpAddress, Integer32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Bits", "Gauge32", "Counter32", "ModuleIdentity", "Unsigned32", "iso", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "NotificationType", "IpAddress", "Integer32", "TimeTicks")
TextualConvention, DisplayString, MacAddress, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "MacAddress", "TruthValue")
acServicesCommon = ModuleIdentity((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1))
acServicesCommon.setRevisions(('1900-01-31 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: acServicesCommon.setRevisionsDescriptions(('Engineering draft version, not for release.',))
if mibBuilder.loadTexts: acServicesCommon.setLastUpdated('0001310000Z')
if mibBuilder.loadTexts: acServicesCommon.setOrganization('Appian Communications, Inc.')
if mibBuilder.loadTexts: acServicesCommon.setContactInfo('Douglas Theriault')
if mibBuilder.loadTexts: acServicesCommon.setDescription('Appian Communications OSAP Services MIB contains the definitions for Internet Access (IAS) and Transparent LAN (TLS) services.')
class AcQueueWeights(TextualConvention, OctetString):
description = "The weighted values control relative bandwidth for each queue of the four queues. Each byte represents the proportional weight out of 100 for a queue. The sum of these bytes must not exceed 100. For example, if 2 queues are used, and the relative weights (or priorities) are 90 and 10, then acQosQueueWeights should be set to '\\0x5A\\0x0A\\0x00\\0x00'. Another example, if we want a FIFO queueing, we would set acQosQueueWeights to '\\0x64\\0x00\\0x00\\0x00'."
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
fixedLength = 4
class AcQueueBufferingCapacity(TextualConvention, OctetString):
description = "The values control relative queue buffering capacity for each of the four queues. Each byte represents the proportional buffering capacity out of 100 for a queue. The sum of these bytes must not exceed 100. For example, if 2 queues are used, and the relative buffering capacities (or queue depths) are 90 and 10, then acQosQueueBufferingCapacity should be set to '\\0x5A\\0x0A\\0x00\\0x00'. Another example, if we want all the buffering capacity to be allocated to a best effort queue we would set acQosQueueBufferingCapacity to '\\0x64\\0x00\\0x00\\0x00'."
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
fixedLength = 4
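# --- Illustrative sketch (added for clarity; not part of the generated MIB module) ---
# AcQueueWeights and AcQueueBufferingCapacity above both encode four per-queue
# percentages as a fixed 4-byte OCTET STRING whose bytes sum to at most 100.
# A hypothetical helper that packs such a value from plain integers:
def _pack_queue_percentages(q1, q2, q3, q4):
    """Pack four per-queue percentages into the 4-byte format described above."""
    values = (q1, q2, q3, q4)
    if any(v < 0 or v > 100 for v in values) or sum(values) > 100:
        raise ValueError('per-queue percentages must be 0..100 and sum to at most 100')
    # (90, 10, 0, 0) -> bytes 0x5A 0x0A 0x00 0x00, the example given in the DESCRIPTION
    return bytes(bytearray(values))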
class AcClassMapping(TextualConvention, OctetString):
description = 'This octet string defines a mapping between a field within a packet payload and a 2-bit internal queue number. The octet string is a list of queue numbers. Each octet contains a single 2-bit queue number. In this product, 6-bits are wasted in each octet. In the future we may support more queues per service in which case we will extend the bit field in each octet. When this map is used for 802.1p and MPLS, it must be exactly 8-octets in length. For IP TOS, it must be 256 octets in length.'
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 256)
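# --- Illustrative sketch (added for clarity; not part of the generated MIB module) ---
# AcClassMapping above is one octet per classifiable field value, each octet
# carrying a 2-bit internal queue number (0..3). A hypothetical helper that
# builds the 8-octet map used for 802.1p (one queue number per priority code point):
def _build_dot1p_class_map(queue_per_priority):
    """queue_per_priority: sequence of 8 queue numbers (0..3), indexed by 802.1p priority."""
    if len(queue_per_priority) != 8 or any(q not in (0, 1, 2, 3) for q in queue_per_priority):
        raise ValueError('expected 8 queue numbers in the range 0..3')
    # e.g. priorities 0-3 -> queue 0, priorities 4-7 -> queue 3:
    #     _build_dot1p_class_map([0, 0, 0, 0, 3, 3, 3, 3])
    return bytes(bytearray(queue_per_priority))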
acServiceTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1), )
if mibBuilder.loadTexts: acServiceTable.setStatus('current')
if mibBuilder.loadTexts: acServiceTable.setDescription("This table is the common thread of all services, regardless of service type; in other words, every service has an entry in this table. A service is identified by a tuple of {node, slot, port, channel}. The {slot, port} pair identifies the access port of a subscriber. The port can support multiple services, and each such service is identified by a 'service channel'. To create a service, a row should be created in this table. When the service type is set, a row is automatically 'created' in the appropriate service type specific table. For example, to create an IAS service, one first creates a row in this common table, then sets acServiceType to IAS (which triggers the automatic creation of an entry in the IAS service table), and then sets all the necessary parameters. To remove a service, remove the row in this common table only.")
acServiceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1), ).setIndexNames((0, "APPIAN-SERVICES-MIB", "acServiceNodeId"), (0, "APPIAN-SERVICES-MIB", "acServiceSlot"), (0, "APPIAN-SERVICES-MIB", "acServicePort"), (0, "APPIAN-SERVICES-MIB", "acServiceChannel"))
if mibBuilder.loadTexts: acServiceEntry.setStatus('current')
if mibBuilder.loadTexts: acServiceEntry.setDescription('A row equates to a service defined on a particular port and channel.')
acServiceNodeId = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 1), AcNodeId()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acServiceNodeId.setStatus('current')
if mibBuilder.loadTexts: acServiceNodeId.setDescription('A unique node identification number assigned by the EMS to uniquely identify a node within an Appian Ring.')
acServiceSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 2), AcSlotNumber()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acServiceSlot.setStatus('current')
if mibBuilder.loadTexts: acServiceSlot.setDescription('The slot number within the chassis where the physical i/o port can be found for customer access.')
acServicePort = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 3), AcPortNumber()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acServicePort.setStatus('current')
if mibBuilder.loadTexts: acServicePort.setDescription('The port number ranging from (1..8) where the physical i/o connection for this customer service can be found.')
acServiceChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acServiceChannel.setStatus('current')
if mibBuilder.loadTexts: acServiceChannel.setDescription('The service channel amongst the potentially multiple service channels on a data access port. If only one service channel is provisioned, this channel number should be a value of 1. If multiple service channels are provisioned, the channel number should follow the ranges described below. If multiple service channels are provisioned, each channel maps to a unique 802.1q VLAN tag used to effectively channelize the Ethernet. 4 service channels (1..4) are supported on the 100baseX Ethernet access board. 255 service channels (1..255) are supported on the Gigabit, 1000baseX, Ethernet access board.')
acServiceAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 5), AcAdminStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceAdminStatus.setStatus('current')
if mibBuilder.loadTexts: acServiceAdminStatus.setDescription('The administrative or desired status.')
acServiceOpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 6), AcOpStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acServiceOpStatus.setStatus('current')
if mibBuilder.loadTexts: acServiceOpStatus.setDescription('The operational or actual status.')
acServiceType = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("unknown", 0), ("ias", 1), ("tls", 2))).clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceType.setStatus('current')
if mibBuilder.loadTexts: acServiceType.setDescription('Specifies the service type. Each row in this table has an associated row in one of the service type specific tables. This object indicates which table, IAS or TLS, has a row at the same index containing additional parameters associated with this service.')
acServiceVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4095))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceVlanId.setStatus('current')
if mibBuilder.loadTexts: acServiceVlanId.setDescription('The VLAN ID attribute is only used when the enetEnableMultiService flag has been set to true. The VLAN ID is a 12-bit VLAN ID and is used when the OSAP is processing packets to differentiate between the four service channels. This object must be set if an access port supports multiple services. VLAN ID is a 12-bit value and therefore the value range is 0 to 4095. However, value 0 is not a VLAN ID per se, but rather a value indicating VLAN tagging is not used.')
acServiceTrunkNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 9), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceTrunkNumber.setStatus('current')
if mibBuilder.loadTexts: acServiceTrunkNumber.setDescription('Identifies the trunk over which this access service is to be carried onto the network. Maps to a row in the trunk table.')
acServiceQosTemplate = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 10), Integer32().clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceQosTemplate.setStatus('current')
if mibBuilder.loadTexts: acServiceQosTemplate.setDescription('This object specifies the QOS template to use. A QOS template dictates the QOS model assigned to this subscriber service. If zero(0), no differentiation is done and packets flow through a single queue.')
acServiceGBR = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000000)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceGBR.setStatus('current')
if mibBuilder.loadTexts: acServiceGBR.setDescription("The guaranteed bit rate in 1-kbps increments. On the access side of the service, it should not exceed the maximum bandwidth of the access port. On the network side, the sum of GBRs of all the services carried by a trunk should not exceed the trunk capacity. **NOADMINDOWN**")
acServiceMBR = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000000)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceMBR.setStatus('current')
if mibBuilder.loadTexts: acServiceMBR.setDescription('The maximum bit rate in 1-kbps increments. A service may burst up to this rate, but there is no guarantee to the data transmitted above the GBR. The MBR should not exceed the access port capacity nor the trunk capacity. **NOADMINDOWN**')
acServiceResetStats = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 13), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceResetStats.setStatus('current')
if mibBuilder.loadTexts: acServiceResetStats.setDescription('Setting this variable to TRUE causes all service statistics counters to reset. This variable always returns FALSE as its value.')
acServiceUpstreamBuffCapWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 14), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(5)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceUpstreamBuffCapWeight.setStatus('current')
if mibBuilder.loadTexts: acServiceUpstreamBuffCapWeight.setDescription('The weight associated with the upstream traffic on this service relating to buffering capacity. Upstream traffic is defined as traffic that enters the system from an access port and exits the system on a trunk. A value of 1 provides the least amount of buffering capacity while a value of 10 provides the most buffering capacity. This value is relative in that the number of buffers actually assigned to the upstream traffic depends on how many services share the buffer pool associated with this service.')
acServiceDownstreamBuffCapWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 15), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceDownstreamBuffCapWeight.setStatus('current')
if mibBuilder.loadTexts: acServiceDownstreamBuffCapWeight.setDescription('The weight associated with the downstream traffic on this service relating to buffering capacity. Downstream traffic is defined as traffic that enters the system from a trunk and exits the system on an access port. A value of 1 provides the least amount of buffering capacity while a value of 10 provides the most buffering capacity. This value is relative in that the number of buffers actually assigned to the downstream traffic depends on how many services share the buffer pool associated with this service.')
acServiceLocalBuffCapWeight = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 16), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 10)).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceLocalBuffCapWeight.setStatus('current')
if mibBuilder.loadTexts: acServiceLocalBuffCapWeight.setDescription('The weight associated with local traffic on this service relating to buffering capacity. Local traffic is defined as traffic which enters the system on an access port and exits the system on an access port. This value is therefore only relevant for transparent LAN services with multiple local access ports. This value is relative in that the number of buffers actually assigned to the local traffic depends on how many services share the buffer pool associated with this service.')
acServiceBufferPool = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 1, 1, 17), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acServiceBufferPool.setStatus('current')
if mibBuilder.loadTexts: acServiceBufferPool.setDescription('The buffer pool associated with this service. The default is 1 which indicates the default buffer pool will be used. The value 0 is not allowed and values greater than 1 are user defined buffer pools.')
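# --- Illustrative sketch (added for clarity; not part of the generated MIB module) ---
# The acServiceTable DESCRIPTION above outlines the provisioning workflow: address a
# row by its (node, slot, port, channel) index, set acServiceType to select IAS or TLS,
# then fill in the remaining columns. One such SET, issued with the classic synchronous
# pysnmp hlapi, is sketched below; the agent address, community string and index values
# are placeholders, and this compiled module is assumed to be on the pysnmp MIB path.
def _example_set_service_type_to_ias():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, Integer32, setCmd)
    node_id, slot, port, channel = 1, 2, 1, 1   # hypothetical service index
    error_indication, error_status, error_index, var_binds = next(
        setCmd(SnmpEngine(),
               CommunityData('private'),
               UdpTransportTarget(('192.0.2.1', 161)),
               ContextData(),
               ObjectType(ObjectIdentity('APPIAN-SERVICES-MIB', 'acServiceType',
                                         node_id, slot, port, channel),
                          Integer32(1))))      # 1 == ias in the acServiceType enumeration
    return error_indication or error_status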
acServiceStatTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4), )
if mibBuilder.loadTexts: acServiceStatTable.setStatus('current')
if mibBuilder.loadTexts: acServiceStatTable.setDescription('The service statistics table. Statistics are provided for each queue priority of a service. Consequently, the table is indexed by node, slot, port, channel, queue.')
acServiceStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1), ).setIndexNames((0, "APPIAN-SERVICES-MIB", "acServiceStatNodeId"), (0, "APPIAN-SERVICES-MIB", "acServiceStatSlot"), (0, "APPIAN-SERVICES-MIB", "acServiceStatPort"), (0, "APPIAN-SERVICES-MIB", "acServiceStatChannel"), (0, "APPIAN-SERVICES-MIB", "acServiceStatQueue"))
if mibBuilder.loadTexts: acServiceStatEntry.setStatus('current')
if mibBuilder.loadTexts: acServiceStatEntry.setDescription('A set of counters collected for a service queue priority.')
acServiceStatNodeId = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 1), AcNodeId()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acServiceStatNodeId.setStatus('current')
if mibBuilder.loadTexts: acServiceStatNodeId.setDescription('A unique node identification number assigned by the EMS to uniquely identify a node within an Appian Ring.')
acServiceStatSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 2), AcSlotNumber()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acServiceStatSlot.setStatus('current')
if mibBuilder.loadTexts: acServiceStatSlot.setDescription('The slot number within the chassis where the physical i/o port can be found for customer access.')
acServiceStatPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 3), AcPortNumber()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acServiceStatPort.setStatus('current')
if mibBuilder.loadTexts: acServiceStatPort.setDescription('The port number ranging from (1..8) where the physical i/o connection for this customer service can be found.')
acServiceStatChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acServiceStatChannel.setStatus('current')
if mibBuilder.loadTexts: acServiceStatChannel.setDescription('The service queue is one of 4 queues where a service is provisioned. Currently each (data) access port supports 4 service queues, each of which can support a service. The first release of our Ethernet access card supports a max of 4 services/port. In the future this range may increase but will always be dependent upon the access card being provisioned .')
acServiceStatQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acServiceStatQueue.setStatus('current')
if mibBuilder.loadTexts: acServiceStatQueue.setDescription("The service queuing priority. Service statistics are collected for each service queuing priority. The current OSAP supports up to 4 priorities. If a service doesn't use a priority, the corresponding entry has 0s for all counters.")
acServiceStatUpstreamFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acServiceStatUpstreamFrames.setStatus('current')
if mibBuilder.loadTexts: acServiceStatUpstreamFrames.setDescription('The number of frames transported from the access port to the network.')
acServiceStatUpstreamBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acServiceStatUpstreamBytes.setStatus('current')
if mibBuilder.loadTexts: acServiceStatUpstreamBytes.setDescription('The number of bytes transported from the access port to the network.')
acServiceStatUpstreamDroppedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acServiceStatUpstreamDroppedFrames.setStatus('current')
if mibBuilder.loadTexts: acServiceStatUpstreamDroppedFrames.setDescription('The number of frames dropped in the direction of the access port to the network.')
acServiceStatUpstreamDroppedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acServiceStatUpstreamDroppedBytes.setStatus('current')
if mibBuilder.loadTexts: acServiceStatUpstreamDroppedBytes.setDescription('The number of bytes dropped in the direction of the access port to the network.')
acServiceStatUpstreamUnexpectedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acServiceStatUpstreamUnexpectedFrames.setStatus('current')
if mibBuilder.loadTexts: acServiceStatUpstreamUnexpectedFrames.setDescription('The number of frames that are unexpected. An example would be a frame of the wrong protocol type. Such frames are silently dropped but not counted in the dropped frames counter.')
acServiceStatDownstreamFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acServiceStatDownstreamFrames.setStatus('current')
if mibBuilder.loadTexts: acServiceStatDownstreamFrames.setDescription('The number of frames transported from the network to the access port.')
acServiceStatDownstreamBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acServiceStatDownstreamBytes.setStatus('current')
if mibBuilder.loadTexts: acServiceStatDownstreamBytes.setDescription('The number of bytes transported from the network to the access port.')
acServiceStatDownstreamDroppedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acServiceStatDownstreamDroppedFrames.setStatus('current')
if mibBuilder.loadTexts: acServiceStatDownstreamDroppedFrames.setDescription('The number of frames dropped in the direction of the network to the access port.')
acServiceStatDownstreamDroppedBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acServiceStatDownstreamDroppedBytes.setStatus('current')
if mibBuilder.loadTexts: acServiceStatDownstreamDroppedBytes.setDescription('The number of bytes dropped in the direction of the network to the access port.')
acServiceStatDownstreamUnexpectedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 4, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acServiceStatDownstreamUnexpectedFrames.setStatus('current')
if mibBuilder.loadTexts: acServiceStatDownstreamUnexpectedFrames.setDescription('The number of frames that are unexpected. An example would be a frame of the wrong protocol type. Such frames are silently dropped but not counted in the dropped frames counter.')
acIas = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2))
acIasTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1), )
if mibBuilder.loadTexts: acIasTable.setStatus('current')
if mibBuilder.loadTexts: acIasTable.setDescription('The Internet Access Service table.')
acIasEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1), ).setIndexNames((0, "APPIAN-SERVICES-MIB", "acIasNodeId"), (0, "APPIAN-SERVICES-MIB", "acIasSlot"), (0, "APPIAN-SERVICES-MIB", "acIasPort"), (0, "APPIAN-SERVICES-MIB", "acIasChannel"))
if mibBuilder.loadTexts: acIasEntry.setStatus('current')
if mibBuilder.loadTexts: acIasEntry.setDescription('A row within the IAS provisioning table.')
acIasNodeId = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1, 1), AcNodeId()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acIasNodeId.setStatus('current')
if mibBuilder.loadTexts: acIasNodeId.setDescription('The unique node identification number assigned by the EMS which identifies the node where the IAS service is being configured.')
acIasSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1, 2), AcSlotNumber()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acIasSlot.setStatus('current')
if mibBuilder.loadTexts: acIasSlot.setDescription('The slot number where the physical data i/o port is located in the OSAP chassis, which will have the IAS service configured.')
acIasPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1, 3), AcPortNumber()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acIasPort.setStatus('current')
if mibBuilder.loadTexts: acIasPort.setDescription("The physical port number on the i/o slot where this customer's data interface will reside.")
acIasChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acIasChannel.setStatus('current')
if mibBuilder.loadTexts: acIasChannel.setDescription('The service channel.')
acIasDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(16, 1022)).clone(16)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acIasDlci.setStatus('current')
if mibBuilder.loadTexts: acIasDlci.setDescription('When an IAS service is carried over a FrameRelay trunk, this attribute dictates the frame relay circuit (DLCI) to be used for the service. When other types of trunks are used, this object has no meaning and has the value of 0.')
acIasRespondToArp = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1, 6), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acIasRespondToArp.setStatus('current')
if mibBuilder.loadTexts: acIasRespondToArp.setDescription('Flag to indicate we should respond to ARP requests.')
acIasRemoteIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acIasRemoteIpAddress.setStatus('current')
if mibBuilder.loadTexts: acIasRemoteIpAddress.setDescription('The IP address at the network side. The object is optional. If set, ARP requests from the user are responded to only if the target IP address matches this object. If this object is not set, all ARP requests from the user are responded to. If this object is not set, it may be learned from the inverse ARP if the service is carried on a frame relay circuit.')
acIasCpeIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acIasCpeIpAddress.setStatus('current')
if mibBuilder.loadTexts: acIasCpeIpAddress.setDescription('The IP address of the CPE device. This object is optional and if not set, it will be learned by looking into traffic from the CPE device.')
acIasCpeMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1, 9), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: acIasCpeMacAddress.setStatus('current')
if mibBuilder.loadTexts: acIasCpeMacAddress.setDescription('The MAC Address of the CPE router.')
acIasCpeEncapsMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("learning", 1), ("enet", 2), ("snap", 3))).clone('learning')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acIasCpeEncapsMode.setStatus('current')
if mibBuilder.loadTexts: acIasCpeEncapsMode.setDescription('The type of encapsulation used for packets destined for the CPE. unknown(0) - encapsulation mode is not set and is the same as learning(1) mode. learning(1) - learn the encapsulation mode from the packets generated by the CPE. enet(2) - ethernet encapsulation. snap(3) - SNAP encapsulation mode.')
acIasPerformInverseArp = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 2, 1, 1, 11), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acIasPerformInverseArp.setStatus('current')
if mibBuilder.loadTexts: acIasPerformInverseArp.setDescription('Flag to indicate whether to perform Inverse ARP request to learn the remote IP address. This is applicable if the service is carried on a frame relay circuit. **-DInitiate Inverse Arp**')
acTls = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 8, 3))
acTlsTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 8, 3, 1), )
if mibBuilder.loadTexts: acTlsTable.setStatus('current')
if mibBuilder.loadTexts: acTlsTable.setDescription('The Transparent LAN Services table.')
acTlsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 8, 3, 1, 1), ).setIndexNames((0, "APPIAN-SERVICES-MIB", "acTlsNodeId"), (0, "APPIAN-SERVICES-MIB", "acTlsSlot"), (0, "APPIAN-SERVICES-MIB", "acTlsPort"), (0, "APPIAN-SERVICES-MIB", "acTlsChannel"))
if mibBuilder.loadTexts: acTlsEntry.setStatus('current')
if mibBuilder.loadTexts: acTlsEntry.setDescription('A row in the TLS table.')
acTlsNodeId = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 3, 1, 1, 1), AcNodeId()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acTlsNodeId.setStatus('current')
if mibBuilder.loadTexts: acTlsNodeId.setDescription("The unique node number assigned by the EMS to uniquely identify this node within an Appian Ring of OSAP's which is configured for TLS service.")
acTlsSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 3, 1, 1, 2), AcSlotNumber()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acTlsSlot.setStatus('current')
if mibBuilder.loadTexts: acTlsSlot.setDescription('The slot upon which the TLS service is being provisioned.')
acTlsPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 3, 1, 1, 3), AcPortNumber()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acTlsPort.setStatus('current')
if mibBuilder.loadTexts: acTlsPort.setDescription('The physical port number on the slot on which the TLS service is provisioned.')
acTlsChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acTlsChannel.setStatus('current')
if mibBuilder.loadTexts: acTlsChannel.setDescription('The service channel on which this TLS service is provisioned.')
acTlsTlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(16, 4095))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acTlsTlanId.setStatus('current')
if mibBuilder.loadTexts: acTlsTlanId.setDescription("Identifies the Transparent LAN on which this virtual port, subscriber port and channel, is participating. This is a 12-bit value and is assigned by the EMS to ensure it is global to all TLS services and TLS trunks which share any common OSAPs or L2 switches. Note: It is prudent to keep the TlanId unique within the carrier's entire TLS service offering until the carrier starts another 'TLS service domain'. WARNING: TLAN ID of zero(0) is invalid. TlanId's 1..15 are reserved for management access. TlanId's 16..4095 are legal values.")
acTlsServiceId = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1023))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acTlsServiceId.setStatus('current')
if mibBuilder.loadTexts: acTlsServiceId.setDescription('An ID which is used within a TLS service to identify which service packets are associated with as they traverse the TLS trunks. Each Tls service, virtual port, configured needs an Id assigned to it. Each packet received on this virtual port, subscriber port and channel, is stamped with this value for its trip around a TLS trunk. These tags are assigned by the EMS and must be unique on a given TLS trunk. This value is a 10-bit number providing a range of 1k. Values 1..15 are reserved. Value 0 is used for TLS services that are not provisioned over a TLS trunk. These trunkless TLS services perform only local switching.')
acTlsPointToPointEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 3, 1, 1, 7), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acTlsPointToPointEnable.setStatus('current')
if mibBuilder.loadTexts: acTlsPointToPointEnable.setDescription('Flag to indicate whether or not the TLAN over which this Tls service is provisioned is a point to point TLAN. A point to point TLAN is a TLAN with two and only two Tls services, virtual ports.')
acTlsServiceIdSharingEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 3, 1, 1, 8), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acTlsServiceIdSharingEnable.setStatus('current')
if mibBuilder.loadTexts: acTlsServiceIdSharingEnable.setDescription('Flag to indicate whether or not the Tls Service Id is shared between this service and other Tls Services provisioned on the same TLS trunk either on this OSAP or different OSAPs.')
acQosTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 2), )
if mibBuilder.loadTexts: acQosTable.setStatus('current')
if mibBuilder.loadTexts: acQosTable.setDescription('Table of QOS templates.')
acQosEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 2, 1), ).setIndexNames((0, "APPIAN-SERVICES-MIB", "acQosNodeId"), (0, "APPIAN-SERVICES-MIB", "acQosTemplateNumber"))
if mibBuilder.loadTexts: acQosEntry.setStatus('current')
if mibBuilder.loadTexts: acQosEntry.setDescription('A row in the QOS template table.')
acQosNodeId = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 2, 1, 1), AcNodeId()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acQosNodeId.setStatus('current')
if mibBuilder.loadTexts: acQosNodeId.setDescription('A unique node identification number assigned by the EMS to uniquely identify a node within an Appian Ring.')
acQosTemplateNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 2, 1, 2), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acQosTemplateNumber.setStatus('current')
if mibBuilder.loadTexts: acQosTemplateNumber.setDescription('A number identifies a QOS template. Also used as the index into the QOS template table.')
acQosAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 2, 1, 3), AcAdminStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acQosAdminStatus.setStatus('current')
if mibBuilder.loadTexts: acQosAdminStatus.setDescription('The administrative status.')
acQosTemplateName = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acQosTemplateName.setStatus('current')
if mibBuilder.loadTexts: acQosTemplateName.setDescription('A textual string solely for easy identification of a template.')
acQosQueueWeights = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 2, 1, 5), AcQueueWeights()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acQosQueueWeights.setStatus('current')
if mibBuilder.loadTexts: acQosQueueWeights.setDescription("The weighted values control relative bandwidth for each queue of the four queues. Each byte represents the proportional weight out of 100 for a queue. The sum of these bytes must equal 100. For example, if 2 queues are used, and the relative weights (or priorities) are 90 and 10, then acQosQueueWeights should be set to '\\0x5A\\0x0A\\0x00\\0x00'.")
acQosClassMapping = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 12))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acQosClassMapping.setStatus('current')
if mibBuilder.loadTexts: acQosClassMapping.setDescription('The object identifies a row in the classification mapping table.')
acQosQueueBuffCaps = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 2, 1, 7), AcQueueBufferingCapacity()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acQosQueueBuffCaps.setStatus('current')
if mibBuilder.loadTexts: acQosQueueBuffCaps.setDescription("These values control the relative buffering capacities of the four queues. Each byte represents the proportional buffering capacity out of 100 for a queue. The sum of these bytes must equal 100. For example, if 2 queues are used, and the buffering capacities are 90 and 10, then acQosQueueBuffCaps should be set to '\\0x5A\\0x0A\\0x00\\0x00'.")
acClassMapTable = MibTable((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 3), )
if mibBuilder.loadTexts: acClassMapTable.setStatus('current')
if mibBuilder.loadTexts: acClassMapTable.setDescription('Classification Map table.')
acClassMapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 3, 1), ).setIndexNames((0, "APPIAN-SERVICES-MIB", "acClassMapNumber"))
if mibBuilder.loadTexts: acClassMapEntry.setStatus('current')
if mibBuilder.loadTexts: acClassMapEntry.setDescription('A row in the class map provisioning table.')
acClassMapNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 12))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: acClassMapNumber.setStatus('current')
if mibBuilder.loadTexts: acClassMapNumber.setDescription('A number to identify a classification mapping. There can be at most four(4) rows for IP TOS based classification, four(4) additional rows for MPLS experimental bits based classification, and four(4) additional rows for IEEE 802.1p based classification. This table can have at most 12 rows for this version of the Ethernet access board.')
acClassMapAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 3, 1, 2), AcAdminStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acClassMapAdminStatus.setStatus('current')
if mibBuilder.loadTexts: acClassMapAdminStatus.setDescription('The administrative status.')
acClassMapName = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acClassMapName.setStatus('current')
if mibBuilder.loadTexts: acClassMapName.setDescription('A textual string for easy naming of the classification mapping.')
acClassMapType = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("iptos", 1), ("mpls", 2), ("dot1p", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acClassMapType.setStatus('current')
if mibBuilder.loadTexts: acClassMapType.setDescription('The type of classification used. This parameter specifies what fields in the packets are to be used for classification. This in turn dictates the width of the mapping used in acClassMapMapping.')
acClassMapMapping = MibTableColumn((1, 3, 6, 1, 4, 1, 2785, 2, 8, 1, 3, 1, 5), AcClassMapping()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: acClassMapMapping.setStatus('current')
if mibBuilder.loadTexts: acClassMapMapping.setDescription('This octet string defines a mapping between a field within a packet payload and a 2-bit internal queue number. The octet string is a list of queue numbers. Each octet contains a single 2-bit queue number. In this product, 6-bits are wasted in each octet. In the future we may support more queues per service in which case we will extend the bit field in each octet. When this map is used for 802.1p and MPLS, it must be exactly 8-octets in length. For IP TOS, it must be 64-octets in length.')
acServiceTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 2785, 2, 8, 0))
acServiceUpTrap = NotificationType((1, 3, 6, 1, 4, 1, 2785, 2, 8, 0, 1)).setObjects(("APPIAN-CHASSIS-MIB", "acChassisCurrentTime"), ("APPIAN-CHASSIS-MIB", "acChassisRingId"), ("APPIAN-SERVICES-MIB", "acServiceNodeId"), ("APPIAN-SERVICES-MIB", "acServiceSlot"), ("APPIAN-SERVICES-MIB", "acServicePort"), ("APPIAN-SERVICES-MIB", "acServiceChannel"), ("APPIAN-SERVICES-MIB", "acServiceType"))
if mibBuilder.loadTexts: acServiceUpTrap.setStatus('current')
if mibBuilder.loadTexts: acServiceUpTrap.setDescription('This trap indicates a service has become active.')
acServiceDownTrap = NotificationType((1, 3, 6, 1, 4, 1, 2785, 2, 8, 0, 2)).setObjects(("APPIAN-CHASSIS-MIB", "acChassisCurrentTime"), ("APPIAN-CHASSIS-MIB", "acChassisRingId"), ("APPIAN-SERVICES-MIB", "acServiceNodeId"), ("APPIAN-SERVICES-MIB", "acServiceSlot"), ("APPIAN-SERVICES-MIB", "acServicePort"), ("APPIAN-SERVICES-MIB", "acServiceChannel"), ("APPIAN-SERVICES-MIB", "acServiceType"))
if mibBuilder.loadTexts: acServiceDownTrap.setStatus('current')
if mibBuilder.loadTexts: acServiceDownTrap.setDescription('This trap indicates a service has become deactivated. The detailed code indicates the reason why.')
mibBuilder.exportSymbols("APPIAN-SERVICES-MIB", acTlsTlanId=acTlsTlanId, acServicesCommon=acServicesCommon, acServicePort=acServicePort, acTls=acTls, acClassMapEntry=acClassMapEntry, acTlsChannel=acTlsChannel, acServiceStatTable=acServiceStatTable, acServiceStatSlot=acServiceStatSlot, acServiceNodeId=acServiceNodeId, acClassMapNumber=acClassMapNumber, acServiceQosTemplate=acServiceQosTemplate, acServiceGBR=acServiceGBR, acServiceVlanId=acServiceVlanId, acTlsNodeId=acTlsNodeId, acServiceStatDownstreamDroppedFrames=acServiceStatDownstreamDroppedFrames, acQosClassMapping=acQosClassMapping, acClassMapAdminStatus=acClassMapAdminStatus, acIasTable=acIasTable, acIasPerformInverseArp=acIasPerformInverseArp, acServiceResetStats=acServiceResetStats, acIasSlot=acIasSlot, acServiceStatUpstreamDroppedFrames=acServiceStatUpstreamDroppedFrames, acTlsServiceIdSharingEnable=acTlsServiceIdSharingEnable, acServiceDownTrap=acServiceDownTrap, acClassMapType=acClassMapType, acServiceUpstreamBuffCapWeight=acServiceUpstreamBuffCapWeight, acServiceStatDownstreamFrames=acServiceStatDownstreamFrames, acServiceStatEntry=acServiceStatEntry, acIasNodeId=acIasNodeId, acClassMapTable=acClassMapTable, acIasChannel=acIasChannel, acClassMapMapping=acClassMapMapping, acClassMapName=acClassMapName, acIasDlci=acIasDlci, acServiceStatQueue=acServiceStatQueue, AcClassMapping=AcClassMapping, acServiceStatUpstreamBytes=acServiceStatUpstreamBytes, acServiceStatChannel=acServiceStatChannel, acServiceDownstreamBuffCapWeight=acServiceDownstreamBuffCapWeight, acServiceStatUpstreamUnexpectedFrames=acServiceStatUpstreamUnexpectedFrames, AcQueueBufferingCapacity=AcQueueBufferingCapacity, acTlsTable=acTlsTable, acQosNodeId=acQosNodeId, acIasCpeEncapsMode=acIasCpeEncapsMode, acIasCpeIpAddress=acIasCpeIpAddress, acTlsPointToPointEnable=acTlsPointToPointEnable, acTlsPort=acTlsPort, acIasRespondToArp=acIasRespondToArp, acServiceUpTrap=acServiceUpTrap, acIasEntry=acIasEntry, acIasCpeMacAddress=acIasCpeMacAddress, acTlsSlot=acTlsSlot, acServiceStatUpstreamDroppedBytes=acServiceStatUpstreamDroppedBytes, acTlsEntry=acTlsEntry, acQosTemplateName=acQosTemplateName, acQosTemplateNumber=acQosTemplateNumber, PYSNMP_MODULE_ID=acServicesCommon, acServiceChannel=acServiceChannel, acServiceStatPort=acServiceStatPort, acServiceLocalBuffCapWeight=acServiceLocalBuffCapWeight, acServiceStatUpstreamFrames=acServiceStatUpstreamFrames, acServiceStatDownstreamUnexpectedFrames=acServiceStatDownstreamUnexpectedFrames, acServiceEntry=acServiceEntry, acServiceStatDownstreamBytes=acServiceStatDownstreamBytes, acServiceStatNodeId=acServiceStatNodeId, acQosEntry=acQosEntry, AcQueueWeights=AcQueueWeights, acServiceBufferPool=acServiceBufferPool, acServiceTrunkNumber=acServiceTrunkNumber, acQosAdminStatus=acQosAdminStatus, acQosQueueWeights=acQosQueueWeights, acServiceSlot=acServiceSlot, acServiceTable=acServiceTable, acTlsServiceId=acTlsServiceId, acServiceAdminStatus=acServiceAdminStatus, acIasRemoteIpAddress=acIasRemoteIpAddress, acServiceType=acServiceType, acServiceTraps=acServiceTraps, acIasPort=acIasPort, acServiceOpStatus=acServiceOpStatus, acServiceStatDownstreamDroppedBytes=acServiceStatDownstreamDroppedBytes, acIas=acIas, acQosTable=acQosTable, acQosQueueBuffCaps=acQosQueueBuffCaps, acServiceMBR=acServiceMBR)
| 156.714801
| 3,296
| 0.786317
|
f63bdf0b2ad799ec15897527cbda561052434faa
| 248
|
py
|
Python
|
Ipython/Requests_1.py
|
shantnu/WebScraping
|
3f8c2cc486c31cdadfaf3edd7efa917d13766ffc
|
[
"MIT"
] | 2
|
2020-07-28T02:19:26.000Z
|
2021-03-14T19:19:01.000Z
|
Ipython/Requests_1.py
|
shantnu/WebScraping
|
3f8c2cc486c31cdadfaf3edd7efa917d13766ffc
|
[
"MIT"
] | null | null | null |
Ipython/Requests_1.py
|
shantnu/WebScraping
|
3f8c2cc486c31cdadfaf3edd7efa917d13766ffc
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# In[15]:
import requests
from bs4 import BeautifulSoup
# In[16]:
r = requests.get("http://pythonforengineers.com/pythonforengineersbook/")
# Parse the downloaded page so the links can be extracted below.
soup = BeautifulSoup(r.text, "html.parser")
# In[ ]:
for link in soup.find_all('a'):
    print(link.get('href'))
# In[22]:
| 9.92
| 73
| 0.649194
|
0fccb7967793055c2dd7118ca1572f8573660899
| 166
|
py
|
Python
|
blog_post/forms.py
|
ManukumarNM/Django_Blog
|
96c3d666c8bb897a2529c2bdb6ec5186d0e2d98c
|
[
"MIT"
] | null | null | null |
blog_post/forms.py
|
ManukumarNM/Django_Blog
|
96c3d666c8bb897a2529c2bdb6ec5186d0e2d98c
|
[
"MIT"
] | null | null | null |
blog_post/forms.py
|
ManukumarNM/Django_Blog
|
96c3d666c8bb897a2529c2bdb6ec5186d0e2d98c
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
        model = Post
        fields = ["title", "content", "image"]
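# --- Illustrative sketch (added for clarity; not part of the original file) ---
# A typical way this ModelForm would be consumed from a view. The view name,
# template path and redirect target are hypothetical; only the standard Django
# form API (is_valid/save) is assumed.
def _example_create_post_view(request):
    from django.shortcuts import redirect, render
    if request.method == "POST":
        # request.FILES is required because the form includes the "image" field
        form = PostForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect("/")
    else:
        form = PostForm()
    return render(request, "blog_post/post_form.html", {"form": form})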
| 20.75
| 45
| 0.656627
|
80989a9d2bfc4b765af9ab7a71205e71d8338971
| 5,535
|
py
|
Python
|
src/python/lstm_seqlabel_load_data.py
|
dptam/neural_wfst
|
c0cd3e340a45c13fbe47310753432d496c20eb83
|
[
"MIT"
] | null | null | null |
src/python/lstm_seqlabel_load_data.py
|
dptam/neural_wfst
|
c0cd3e340a45c13fbe47310753432d496c20eb83
|
[
"MIT"
] | null | null | null |
src/python/lstm_seqlabel_load_data.py
|
dptam/neural_wfst
|
c0cd3e340a45c13fbe47310753432d496c20eb83
|
[
"MIT"
] | null | null | null |
'''
| Filename : lstm_seqlabel_load_data.py
| Description : The load data function used in lstm_seqlabel.py
| Author : Pushpendre Rastogi
| Created : Mon Oct 26 19:36:47 2015 (-0400)
| Last-Updated: Tue Nov 24 03:57:11 2015 (-0500)
| By: Pushpendre Rastogi
| Update #: 25
'''
from collections import defaultdict
import functools
from rasengan import Namespace
import util_lstm_seqlabel
def create_vocabulary(train_lex, valid_lex, test_lex):
vocab = {}
for row in (train_lex + valid_lex + test_lex):
for word in row:
vocab[word] = None
return vocab
def get_loaddata_for_task(args):
if args.task == 'slu':
from data.atis import loaddata
elif args.task == 'chunking':
from data.conll2003_ner import loaddata_chunking
loaddata = functools.partial(loaddata_chunking,
lc=args.lower_case_input,
oov_thresh=args.chunking_oovthresh,
d2z=args.digit_to_zero,
also_add_test_file=True,
care_about_OOV=False)
elif args.task == 'ner':
from data.conll2003_ner import loaddata_ner
loaddata = functools.partial(loaddata_ner,
lc=args.lower_case_input,
oov_thresh=args.ner_oovthresh,
d2z=args.digit_to_zero,
also_add_test_file=True,
care_about_OOV=False)
elif args.task == 'postag':
from data.conll_postag import conll_pos
loaddata = functools.partial(conll_pos,
lc=args.lower_case_input,
oov=args.pos_oovthresh)
elif args.task == 'pdp':
from data.conll_pdp import loaddata as loaddata_pdp
loaddata = functools.partial(loaddata_pdp,
binary_arclabel=args.binary_arclabel,
size_limit=(None
if args.limit_corpus == 0
else args.limit_corpus))
else:
raise NotImplementedError
return loaddata
def load_data(args, summarize=False):
''' This function loads data according to `args` and fills the data
namespace.
'''
loaddata = get_loaddata_for_task(args)
if args.task in ['slu', 'postag']:
train_set, valid_set, test_set, dic = loaddata()
word2idx = dic['words2idx']
label2idx = dic['labels2idx']
train_lex, train_y = train_set
valid_lex, valid_y = valid_set
test_lex, test_y = test_set
elif args.task in ['chunking', 'ner', 'pdp']:
train_lex, train_y, valid_lex, valid_y, test_lex, test_y, word2idx, label2idx = loaddata()
else:
raise NotImplementedError
# Reverse dictionaries from indices to words.
idx2word = dict((k, v) for v, k in word2idx.iteritems())
idx2label = dict((k, v) for v, k in label2idx.iteritems())
# Delete slice of data, in case we want to run fast.
if args.limit_corpus:
for e in [train_lex, train_y, valid_lex, valid_y, test_lex, test_y]:
del e[(args.limit_corpus + 1):]
vocsize = len(word2idx)
nsentences = len(train_lex)
nclasses = len(label2idx)
if args.task != 'pdp':
valid_y = util_lstm_seqlabel.convert_id_to_word(valid_y, idx2label)
test_y = util_lstm_seqlabel.convert_id_to_word(test_y, idx2label)
#--------------------------------------------------------------------------#
# We mix validation into training in case we want to train on all the data #
# and skip validation and instead train for a fixed number of epochs #
#--------------------------------------------------------------------------#
if args.mix_validation_into_training:
print '\n', "We'll mix validation into training data", '\n'
train_lex = train_lex + valid_lex
train_y = train_y + valid_y
#---------------------------------------------------------------------#
# Some times it is nice to validate on the entire training set, so we #
# may replace the validation set by the entire set in this portion #
#---------------------------------------------------------------------#
if args.replace_validation_by_training:
valid_lex = train_lex
valid_y = train_y
#----------------------------------------------#
# Update args and data from the loaded data. #
#----------------------------------------------#
data = Namespace()
data.nsentences = nsentences
data.vocsize = vocsize
data.nclasses = nclasses
data.train_lex = train_lex
data.train_y = train_y
data.valid_lex = valid_lex
data.valid_y = valid_y
data.test_lex = test_lex
data.test_y = test_y
data.idx2label = idx2label
data.idx2word = idx2word
data.words_train = util_lstm_seqlabel.convert_id_to_word(
train_lex, idx2word)
data.words_valid = util_lstm_seqlabel.convert_id_to_word(
valid_lex, idx2word)
data.words_test = util_lstm_seqlabel.convert_id_to_word(
test_lex, idx2word)
if summarize:
print 'Data summary', 'vocsize', vocsize, 'nclasses', nclasses, \
'len(train_lex)', len(train_lex), 'len(valid_lex)', len(valid_lex)
return data
| 41
| 98
| 0.560614
|
47904585755d391dff3ae9c18f453c7c88138a21
| 639
|
py
|
Python
|
tests/vkscript_converter_test.py
|
van-burgerberg/vkbottle
|
134eb76e6289b7674142316ca72646ce999d9388
|
[
"MIT"
] | 2
|
2020-11-12T09:26:16.000Z
|
2020-11-14T16:52:10.000Z
|
tests/vkscript_converter_test.py
|
van-burgerberg/vkbottle
|
134eb76e6289b7674142316ca72646ce999d9388
|
[
"MIT"
] | null | null | null |
tests/vkscript_converter_test.py
|
van-burgerberg/vkbottle
|
134eb76e6289b7674142316ca72646ce999d9388
|
[
"MIT"
] | null | null | null |
from vkbottle import vkscript
BASIC_CYCLE = "var a=%A%;var some_list=[];while(a<100){API.users.get({user_id:a});a = a + 1;};return some_list;"
@vkscript
def basic_cycle(api, a: int = 10):
some_list = []
while a < 100:
api.users.get(user_id=a)
a += 1
return some_list
@vkscript
def types(api):
a = 5.1
b = 5 * a
results = [b, b - 2]
for i in results:
results.append(i ** 2)
results.pop()
return results
def test_vkscript():
assert basic_cycle(a=10) == BASIC_CYCLE.replace("%A%", "10")
assert basic_cycle(a=94) == BASIC_CYCLE.replace("%A%", "94")
assert types()
| 21.3
| 112
| 0.594679
|
90600d4431aefdc66f1f18eaf1e3ddbb24bb6df6
| 4,053
|
py
|
Python
|
util/logger.py
|
zhaolongkzz/DeepMimic_configuration
|
48824e44aa1470dc01956faf83f66d59488d0dce
|
[
"MIT"
] | 9
|
2019-07-06T14:14:06.000Z
|
2021-08-16T12:58:44.000Z
|
util/logger.py
|
zhaolongkzz/DeepMimic_configuration
|
48824e44aa1470dc01956faf83f66d59488d0dce
|
[
"MIT"
] | 1
|
2019-09-11T12:10:28.000Z
|
2019-09-11T12:10:28.000Z
|
util/logger.py
|
zhaolongkzz/DeepMimic_configuration
|
48824e44aa1470dc01956faf83f66d59488d0dce
|
[
"MIT"
] | 1
|
2021-07-27T08:55:57.000Z
|
2021-07-27T08:55:57.000Z
|
import util.mpi_util as MPIUtil
"""
Some simple logging functionality, inspired by rllab's logging.
Assumes that each diagnostic gets logged each iteration
Call logz.configure_output_file() to start logging to a
tab-separated-values file (some_file_name.txt)
To load the learning curves, you can do, for example
A = np.genfromtxt('/tmp/expt_1468984536/log.txt',delimiter='\t',dtype=None, names=True)
A['EpRewMean']
"""
import os.path as osp, shutil, time, atexit, os, subprocess
class Logger:
def print(str):
# rank == ROOT_PROC_RANK (0) is true
if (MPIUtil.is_root_proc()):
print(str)
return
def __init__(self):
self.output_file = None
self.first_row = True
self.log_headers = []
self.log_current_row = {}
self._dump_str_template = ""
return
def reset(self):
self.first_row = True
self.log_headers = []
self.log_current_row = {}
        if self.output_file is not None:
            # Re-open (and truncate) the file this logger was already writing to.
            self.output_file = open(self.output_file.name, 'w')
return
def configure_output_file(self, filename=None):
"""
Set output directory to d, or to /tmp/somerandomnumber if d is None
"""
self.first_row = True
self.log_headers = []
self.log_current_row = {}
output_path = filename or "output/log_%i.txt"%int(time.time())
out_dir = os.path.dirname(output_path)
if not os.path.exists(out_dir) and MPIUtil.is_root_proc():
os.makedirs(out_dir)
if (MPIUtil.is_root_proc()):
self.output_file = open(output_path, 'w')
assert osp.exists(output_path)
atexit.register(self.output_file.close)
Logger.print("Logging data to " + self.output_file.name)
return
def log_tabular(self, key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
"""
if self.first_row and key not in self.log_headers:
self.log_headers.append(key)
else:
assert key in self.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration"%key
self.log_current_row[key] = val
return
def get_num_keys(self):
return len(self.log_headers)
def print_tabular(self):
"""
Print all of the diagnostics from the current iteration
"""
if (MPIUtil.is_root_proc()):
vals = []
Logger.print("-"*37)
for key in self.log_headers:
val = self.log_current_row.get(key, "")
if isinstance(val, float):
valstr = "%8.3g"%val
elif isinstance(val, int):
valstr = str(val)
else:
valstr = val
Logger.print("| %15s | %15s |"%(key, valstr))
vals.append(val)
Logger.print("-" * 37)
return
def dump_tabular(self):
"""
Write all of the diagnostics from the current iteration
"""
if (MPIUtil.is_root_proc()):
if (self.first_row):
self._dump_str_template = self._build_str_template()
vals = []
for key in self.log_headers:
val = self.log_current_row.get(key, "")
vals.append(val)
if self.output_file is not None:
if self.first_row:
header_str = self._dump_str_template.format(*self.log_headers)
self.output_file.write(header_str + "\n")
val_str = self._dump_str_template.format(*map(str,vals))
self.output_file.write(val_str + "\n")
self.output_file.flush()
self.log_current_row.clear()
self.first_row=False
return
def _build_str_template(self):
num_keys = self.get_num_keys()
template = "{:<25}" * num_keys
return template
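# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Typical per-iteration use of the Logger defined above; the output file name and
# the logged keys are placeholders.
def _example_logger_usage():
    logger = Logger()
    logger.configure_output_file('output/example_log.txt')
    for iteration in range(3):
        logger.log_tabular('Iteration', iteration)
        logger.log_tabular('EpRewMean', 1.0 * iteration)
        logger.print_tabular()   # pretty-print the current row (root MPI process only)
        logger.dump_tabular()    # append the row to the output file and clear it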
| 31.418605
| 129
| 0.574389
|
a12476eb7ed899b357626fd0790a6c9b99a9c1ea
| 2,505
|
py
|
Python
|
tornado/translate_tornado_4_2_1/demos/benchmark/benchmark.py
|
noogel/xyzStudyPython
|
521cee4dc033e2ab1032b561a3229eed75d8067a
|
[
"Apache-2.0"
] | 2
|
2016-07-19T09:23:27.000Z
|
2016-09-27T17:21:24.000Z
|
tornado/translate_tornado_4_2_1/demos/benchmark/benchmark.py
|
noogel/xyzStudyPython
|
521cee4dc033e2ab1032b561a3229eed75d8067a
|
[
"Apache-2.0"
] | null | null | null |
tornado/translate_tornado_4_2_1/demos/benchmark/benchmark.py
|
noogel/xyzStudyPython
|
521cee4dc033e2ab1032b561a3229eed75d8067a
|
[
"Apache-2.0"
] | 1
|
2020-07-23T17:09:19.000Z
|
2020-07-23T17:09:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A simple benchmark of tornado's HTTP stack.
# A simple, basic Tornado HTTP stack.
# Requires 'ab' to be installed.
#
# Running without profiling:
# demos/benchmark/benchmark.py
# demos/benchmark/benchmark.py --quiet --num_runs=5|grep "Requests per second"
#
# Running with profiling:
#
# python -m cProfile -o /tmp/prof demos/benchmark/benchmark.py
# python -m pstats /tmp/prof
# % sort time
# % stats 20
from tornado.ioloop import IOLoop
from tornado.options import define, options, parse_command_line
from tornado.web import RequestHandler, Application
import random
import signal
import subprocess
try:
xrange
except NameError:
xrange = range
# choose a random port to avoid colliding with TIME_WAIT sockets left over
# from previous runs.
# Pick the port at random.
define("min_port", type=int, default=8000)
define("max_port", type=int, default=9000)
# Increasing --n without --keepalive will eventually run into problems
# due to TIME_WAIT sockets
define("n", type=int, default=15000)
define("c", type=int, default=25)
define("keepalive", type=bool, default=False)
define("quiet", type=bool, default=False)
# Repeat the entire benchmark this many times (on different ports)
# This gives JITs time to warm up, etc. Pypy needs 3-5 runs at
# --n=15000 for its JIT to reach full effectiveness
define("num_runs", type=int, default=1)
define("ioloop", type=str, default=None)
class RootHandler(RequestHandler):
def get(self):
self.write("Hello, world")
def _log(self):
pass
def handle_sigchld(sig, frame):
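    # SIGCHLD means the 'ab' child process has exited; stop the IOLoop from the signal handler.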
IOLoop.instance().add_callback_from_signal(IOLoop.instance().stop)
def main():
parse_command_line()
if options.ioloop:
IOLoop.configure(options.ioloop)
for i in xrange(options.num_runs):
run()
def run():
app = Application([("/", RootHandler)])
port = random.randrange(options.min_port, options.max_port)
app.listen(port, address='127.0.0.1')
signal.signal(signal.SIGCHLD, handle_sigchld)
args = ["ab"]
args.extend(["-n", str(options.n)])
args.extend(["-c", str(options.c)])
if options.keepalive:
args.append("-k")
if options.quiet:
# just stops the progress messages printed to stderr
args.append("-q")
args.append("http://127.0.0.1:%d/" % port)
subprocess.Popen(args)
IOLoop.instance().start()
IOLoop.instance().close()
del IOLoop._instance
assert not IOLoop.initialized()
if __name__ == '__main__':
main()
| 27.527473
| 78
| 0.698204
|
5377892aa151e0c140875e319fe3fb4019d8d09f
| 511
|
py
|
Python
|
main.py
|
ttw2514/compute_task
|
7744300be06a47c750cc7d7dfe8a25a38b4d067e
|
[
"MIT"
] | 1
|
2018-05-03T02:32:31.000Z
|
2018-05-03T02:32:31.000Z
|
main.py
|
ttw2514/compute_task
|
7744300be06a47c750cc7d7dfe8a25a38b4d067e
|
[
"MIT"
] | null | null | null |
main.py
|
ttw2514/compute_task
|
7744300be06a47c750cc7d7dfe8a25a38b4d067e
|
[
"MIT"
] | 1
|
2018-05-03T02:32:34.000Z
|
2018-05-03T02:32:34.000Z
|
import schedule
import time
import logging
from ml_task import model_update
from compute import daily_compute
FORMAT = '%(asctime)-15s %(levelname)s %(module)s %(filename)s:%(lineno)d - %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG, filename='./test.log', filemode='a')
def main():
schedule.every(1).minutes.do(model_update)
schedule.every(1).minutes.do(daily_compute)
while True:
schedule.run_pending()
time.sleep(1)
if __name__ == '__main__':
main()
| 24.333333
| 92
| 0.708415
|
5248718dd786927ddc70148821c7363bbbe1987c
| 281
|
py
|
Python
|
tests/compiler/test_opcodes.py
|
Solexplorer/vyper
|
135edd6a91d47c72de105066d6e6c1bdfe9ea66e
|
[
"MIT"
] | 1
|
2021-04-23T21:48:20.000Z
|
2021-04-23T21:48:20.000Z
|
tests/compiler/test_opcodes.py
|
Solexplorer/vyper
|
135edd6a91d47c72de105066d6e6c1bdfe9ea66e
|
[
"MIT"
] | null | null | null |
tests/compiler/test_opcodes.py
|
Solexplorer/vyper
|
135edd6a91d47c72de105066d6e6c1bdfe9ea66e
|
[
"MIT"
] | null | null | null |
import vyper
def test_opcodes():
code = """
@public
def a() -> bool:
return True
"""
out = vyper.compile_code(code, ['opcodes_runtime', 'opcodes'])
assert len(out['opcodes']) > len(out['opcodes_runtime'])
assert out['opcodes_runtime'] in out['opcodes']
| 18.733333
| 66
| 0.626335
|
5b02a7a231427421a39fbfce53255ed3869cd49e
| 1,441
|
py
|
Python
|
examples/script/eval_subnet_noise_ratiorand_insuper.py
|
zyh1999/pytorch-quantum
|
c00bd564a99001fee2fd6b30e5e34562ab981e28
|
[
"MIT"
] | 98
|
2021-07-23T07:11:32.000Z
|
2021-12-19T14:04:58.000Z
|
examples/script/eval_subnet_noise_ratiorand_insuper.py
|
zyh1999/pytorch-quantum
|
c00bd564a99001fee2fd6b30e5e34562ab981e28
|
[
"MIT"
] | 2
|
2021-02-11T19:01:48.000Z
|
2021-04-04T20:29:57.000Z
|
examples/script/eval_subnet_noise_ratiorand_insuper.py
|
zyh1999/pytorch-quantum
|
c00bd564a99001fee2fd6b30e5e34562ab981e28
|
[
"MIT"
] | 12
|
2021-07-23T07:10:47.000Z
|
2021-12-16T23:44:44.000Z
|
import subprocess
from torchpack.utils.logging import logger
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--supernet', type=str)
parser.add_argument('--gpu', type=int)
args = parser.parse_args()
pres = ['python',
'examples/eval.py',
'examples/configs/mnist/four0123/eval/x2/noise/opt2/valid_500.yml',
'--run-dir=runs/mnist.four0123.train.super'
f'.{args.supernet}',
'--ckpt.name',
'checkpoints/step-18400.pt',
f'--gpu={args.gpu}',
'--dataset.split=valid']
with open(f'logs/sfsuper/eval_subnet_noise_x2_opt2_ratio_500'
f'insuper_{args.supernet}.txt',
'w') as \
wfid:
for blk in range(1, 9):
# for ratio in ['0', '0.25', '0.5', '0.75', '1']:
for ratio in ['0', '0.3', '0.6', '1']:
exp = f"--model.arch.sample_arch=blk{blk}_ratio{ratio}"
logger.info(f"running command {pres + [exp]}")
subprocess.run(pres + [exp], stderr=wfid)
# for blk in range(1, 9):
# for rand in range(4):
# exp = f"--model.arch.sample_arch=sharefront0_blk{blk}_ran" \
# f"d{rand}"
# logger.info(f"running command {pres + [exp]}")
#
# subprocess.run(pres + [exp], stderr=wfid)
| 36.948718
| 79
| 0.528105
|
1843962330a18d0c095a4fe1f2427fd560d278c8
| 44,872
|
py
|
Python
|
mytrain.py
|
xuritian317/pytorch-image-models
|
034139e4871cddd0f6c24931b6ac380ba9f28711
|
[
"Apache-2.0"
] | null | null | null |
mytrain.py
|
xuritian317/pytorch-image-models
|
034139e4871cddd0f6c24931b6ac380ba9f28711
|
[
"Apache-2.0"
] | null | null | null |
mytrain.py
|
xuritian317/pytorch-image-models
|
034139e4871cddd0f6c24931b6ac380ba9f28711
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import time
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint, \
convert_splitbn_model, model_parameters
from timm.utils import *
from timm.loss import *
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
from tensorboardX import SummaryWriter
import tsensor
from main.ctfg.ctfg import *
from main.old.cct.src import *
from main.old.transfg_ctfg.modelingv0 import *
is_nni = False
if False:
import nni
from nni.utils import merge_parameter
is_nni = True
is_debug = False
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('train')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset parameters
parser.add_argument('--data_dir', metavar='DIR', default='', type=str,
help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--train-split', metavar='NAME', default='train',
help='dataset train split (default: train)')
parser.add_argument('--val-split', metavar='NAME', default='validation',
help='dataset validation split (default: validation)')
parser.add_argument('--dataset-download', action='store_true', default=False,
help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
# Model parameters
parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL',
                    help='Name of model to train (default: "resnet50")')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
help='number of label classes (Model default if None)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
help='Image patch size (default: None => model default)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N',
help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N',
help='validation batch size override (default: None)')
# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=2e-5,
help='weight decay (default: 2e-5)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--clip-mode', type=str, default='norm',
help='Gradient clipping mode. One of ("norm", "value", "agc")')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
help='learning rate (default: 0.05)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT',
help='amount to decay each learning rate cycle (default: 0.5)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit, cycles enabled if > 1')
parser.add_argument('--lr-k-decay', type=float, default=1.0,
help='learning rate k-decay for cosine/poly (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
help='warmup learning rate (default: 0.0001)')
parser.add_argument('--min-lr', type=float, default=1e-6, metavar='LR',
                    help='lower lr bound for cyclic schedulers that hit 0 (default: 1e-6)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
help='number of epochs to train (default: 300)')
parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N',
help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=100, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                    help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3. / 4., 4. / 3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default=None, metavar='NAME',
                    help='Use AutoAugment policy. "v0" or "original". (default: None)')
parser.add_argument('--aug-repeats', type=int, default=0,
help='Number of augmentation repetitions (distributed training only) (default: 0)')
parser.add_argument('--aug-splits', type=int, default=0,
help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--jsd-loss', action='store_true', default=False,
help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
parser.add_argument('--bce-loss', action='store_true', default=False,
help='Enable BCE loss w/ Mixup/CutMix use.')
parser.add_argument('--bce-target-thresh', type=float, default=None,
help='Threshold for binarizing softened BCE targets (default: None, disabled)')
parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
help='Random erase prob (default: 0.)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.0,
help='mixup alpha, mixup enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix', type=float, default=0.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-tf', action='store_true', default=False,
help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='reduce',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--worker-seeding', type=str, default='all',
help='worker seed mode (default: all)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('--checkpoint-hist', type=int, default=2, metavar='N',
                    help='number of checkpoints to keep (default: 2)')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
help='how many training processes to use (default: 4)')
parser.add_argument('--save-images', action='store_true', default=False,
                    help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--no-ddp-bb', action='store_true', default=False,
help='Force broadcast buffers for native DDP to off.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--experiment', default='', type=str, metavar='NAME',
help='name of train experiment, name of sub-folder for output')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                    help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--log-wandb', action='store_true', default=False,
help='log training and validation metrics to wandb')
parser.add_argument('--is_con_loss', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
parser.add_argument('--is_need_da', action='store_true', default=False,
help='whether to need data enhancement')
parser.add_argument('--is_use_timm_model', action='store_true', default=False,
help='whether to use original model ')
parser.add_argument('--pretrained_dir', type=str, default='',
help='pretrained_dir')
parser.add_argument('--is_changeSize', action='store_true', default=False,
help='If you have pretrained checkpoint, whether to change size')
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
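# A minimal usage sketch of the --config mechanism above (file name, keys, and paths are
# illustrative): the YAML keys must match argparse dest names; they become parser defaults,
# and explicit command-line flags still override them.
#
#   # config/cub.yaml
#   model: ctfg_14_7x2_384_heads
#   batch_size: 12
#   epochs: 100
#
#   python mytrain.py -c config/cub.yaml --data_dir /path/to/cub2 --lr 5e-4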
def main():
setup_default_logging()
args, args_text = _parse_args()
# _logger.info(args_text)
if is_debug:
args.is_need_da = True
args.is_con_loss = True
args.experiment = 'ubuntu240'
args.data_dir = '/home/ubuntu/xu/cub2'
args.pretrained_dir = '/home/ubuntu/xu/cct_14_7x2_384_imagenet.pth'
args.model = 'ctfg_14_7x2_384_heads'
args.batch_size = 12
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
if is_nni:
tuner_params = nni.get_next_parameter()
print("get nni parameter")
args = merge_parameter(args, tuner_params)
writer = SummaryWriter(log_dir=os.path.join('output/nni', os.environ['NNI_OUTPUT_DIR'], "tensorboard"))
lr = args.lr
args.min_lr = lr / 50
args.warmup_lr = lr / 500
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
else:
writer = SummaryWriter(log_dir="logs")
_logger.info(args_text)
if args.log_wandb:
if has_wandb:
wandb.init(project=args.experiment, config=args)
else:
_logger.warning("You've requested to log metrics to wandb but package not found. "
"Metrics not being logged to wandb, try `pip install wandb`")
args.prefetcher = not args.no_prefetcher
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
_logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.rank, args.world_size))
else:
        _logger.info('Training with a single process on 1 GPU.')
assert args.rank >= 0
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
if args.amp:
# `--amp` chooses native amp before apex (APEX ver not actively maintained)
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
if args.apex_amp and has_apex:
use_amp = 'apex'
elif args.native_amp and has_native_amp:
use_amp = 'native'
elif args.apex_amp or args.native_amp:
_logger.warning("Neither APEX or native Torch AMP is available, using float32. "
"Install NVIDA apex or upgrade to PyTorch 1.6")
random_seed(args.seed, args.rank)
if not args.is_use_timm_model:
model = create_model(
args.model,
pretrained=args.pretrained,
pretrained_dir=args.pretrained_dir,
is_changeSize=args.is_changeSize,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint)
else:
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly
if args.local_rank == 0:
_logger.info(
f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}')
data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)
# setup augmentation batch splits for contrastive loss or split bn
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, 'A split of 1 makes no sense'
num_aug_splits = args.aug_splits
# enable split bn (separate bn stats per batch-portion)
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
# move model to GPU, enable channels last layout if set
model.cuda()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
# setup synchronized BatchNorm for distributed training
if args.distributed and args.sync_bn:
assert not args.split_bn
if has_apex and use_amp == 'apex':
# Apex SyncBN preferred unless native amp is activated
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
_logger.info(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
                'zero initialized BN layers (enabled by default for ResNets) while sync-bn is enabled.')
if args.torchscript:
assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model'
model = torch.jit.script(model)
optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))
# setup automatic mixed-precision (AMP) loss scaling and op casting
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == 'apex':
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
loss_scaler = ApexScaler()
if args.local_rank == 0:
_logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
elif use_amp == 'native':
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.local_rank == 0:
_logger.info('Using native Torch AMP. Training in mixed precision.')
else:
if args.local_rank == 0:
_logger.info('AMP not enabled. Training in float32.')
# optionally resume from a checkpoint
resume_epoch = None
if args.resume:
resume_epoch = resume_checkpoint(
model, args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=args.local_rank == 0)
# setup exponential moving average of model weights, SWA could be used here too
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEmaV2(
model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None)
if args.resume:
load_checkpoint(model_ema.module, args.resume, use_ema=True)
# setup distributed training
if args.distributed:
if has_apex and use_amp == 'apex':
# Apex DDP preferred unless native amp is activated
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if args.local_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(model, device_ids=[args.local_rank], broadcast_buffers=not args.no_ddp_bb)
# NOTE: EMA model does not need to be wrapped by DDP
# setup learning rate schedule and starting epoch
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
_logger.info('Scheduled epochs: {}'.format(num_epochs))
# create the train and eval datasets
dataset_train = create_dataset(
args.dataset, root=args.data_dir, split=args.train_split, is_training=True,
class_map=args.class_map,
download=args.dataset_download,
batch_size=args.batch_size,
repeats=args.epoch_repeats)
dataset_eval = create_dataset(
args.dataset, root=args.data_dir, split=args.val_split, is_training=False,
class_map=args.class_map,
download=args.dataset_download,
batch_size=args.batch_size)
# setup mixup / cutmix
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes)
if args.prefetcher:
assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
# wrap dataset in AugMix helper
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
    # create data loaders w/ augmentation pipeline
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config['interpolation']
loader_train = create_loader(
dataset_train,
input_size=data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
num_aug_repeats=args.aug_repeats,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader,
worker_seeding=args.worker_seeding,
)
loader_eval = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=args.validation_batch_size or args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config['crop_pct'],
pin_memory=args.pin_mem,
)
# setup loss function
if args.jsd_loss:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing)
elif mixup_active:
# smoothing is handled with mixup target transform which outputs sparse, soft targets
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh)
else:
train_loss_fn = SoftTargetCrossEntropy()
elif args.smoothing:
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing, target_threshold=args.bce_target_thresh)
else:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
train_loss_fn = nn.CrossEntropyLoss()
train_loss_fn = train_loss_fn.cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
con_loss = ConLossEntropy().cuda()
# setup checkpoint saver and eval metric tracking
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
saver = None
output_dir = None
if args.rank == 0:
if args.experiment and is_nni:
exp_name = '_'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
str(format(args.lr, '.1e')),
str(format(args.warmup_lr, '.1e')),
args.experiment
])
else:
exp_name = '_'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
safe_model_name(args.model),
str(format(args.lr, '.1e')),
args.dataset,
args.experiment
])
output_dir = get_outdir(args.output if args.output else './output/train', exp_name)
decreasing = True if eval_metric == 'loss' else False
saver = CheckpointSaver(
model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,
checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist)
with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
f.write(args_text)
try:
for epoch in range(start_epoch, num_epochs):
if args.distributed and hasattr(loader_train.sampler, 'set_epoch'):
loader_train.sampler.set_epoch(epoch)
train_metrics = train_one_epoch(
epoch, model, loader_train, optimizer, train_loss_fn, args,
lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
amp_autocast=amp_autocast, loss_scaler=loss_scaler,
model_ema=model_ema, mixup_fn=mixup_fn, con_loss=con_loss)
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
if args.local_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
ema_eval_metrics = validate(
model_ema.module, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast,
log_suffix=' (EMA)')
eval_metrics = ema_eval_metrics
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
if output_dir is not None:
update_summary(
epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)
writer.add_scalar("train/loss", scalar_value=train_metrics['loss'], global_step=epoch)
writer.add_scalar("train/lr", scalar_value=train_metrics['lr'][0], global_step=epoch)
writer.add_scalar("test/loss", scalar_value=eval_metrics['loss'], global_step=epoch)
writer.add_scalar("test/acc_Top1", scalar_value=eval_metrics['top1'], global_step=epoch)
writer.add_scalar("test/acc_Top5", scalar_value=eval_metrics['top5'], global_step=epoch)
except KeyboardInterrupt:
pass
if best_metric is not None:
_logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
if is_nni:
nni.report_final_result(best_metric)
# writer.export_scalars_to_json(os.path.join("logs", "all_scalars.json"))
writer.close()
def train_one_epoch(
epoch, model, loader, optimizer, loss_fn, args,
lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress,
loss_scaler=None, model_ema=None, mixup_fn=None, writer=None, con_loss=None):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
model.train()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
if not args.prefetcher:
input, target = input.cuda(), target.cuda()
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
if args.is_con_loss:
output, part_token = model(input, True)
loss = loss_fn(output, target)
if args.is_need_da:
contrast_loss = con_loss(part_token, target[:, 0])
else:
contrast_loss = con_loss(part_token, target)
loss = loss + contrast_loss
else:
output = model(input)
loss = loss_fn(output, target)
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(
loss, optimizer,
clip_grad=args.clip_grad, clip_mode=args.clip_mode,
parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),
create_graph=second_order)
else:
loss.backward(create_graph=second_order)
if args.clip_grad is not None:
dispatch_clip_grad(
model_parameters(model, exclude_head='agc' in args.clip_mode),
value=args.clip_grad, mode=args.clip_mode)
optimizer.step()
if model_ema is not None:
model_ema.update(model)
torch.cuda.synchronize()
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
if args.local_rank == 0:
_logger.info(
'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) '
'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
'({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'LR: {lr:.3e} '
'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
epoch,
batch_idx, len(loader),
100. * batch_idx / last_idx,
loss=losses_m,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m))
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
padding=0,
normalize=True)
if saver is not None and args.recovery_interval and (
last_batch or (batch_idx + 1) % args.recovery_interval == 0):
saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg), ('lr', lr_scheduler.get_epoch_values(epoch))])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0:target.size(0):reduce_factor]
loss = loss_fn(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
_logger.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
log_name, batch_idx, last_idx, batch_time=batch_time_m,
loss=losses_m, top1=top1_m, top5=top5_m))
metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
if is_nni:
nni.report_intermediate_result(top1_m.avg)
return metrics
if __name__ == '__main__':
main()
| 47.184017
| 120
| 0.639553
|
5cf8ed5bac1ca41cf21aead137b275864868297d
| 562
|
py
|
Python
|
enaml/enaml/web/hyperlink.py
|
ContinuumIO/ashiba
|
a93e7785d1fcf397baeb8a0b687a162a2b2aef3d
|
[
"BSD-3-Clause"
] | 11
|
2015-03-14T14:30:51.000Z
|
2022-03-15T13:01:44.000Z
|
enaml/enaml/web/hyperlink.py
|
ContinuumIO/ashiba
|
a93e7785d1fcf397baeb8a0b687a162a2b2aef3d
|
[
"BSD-3-Clause"
] | 3
|
2015-01-31T11:12:56.000Z
|
2022-03-14T00:53:25.000Z
|
enaml/enaml/web/hyperlink.py
|
ContinuumIO/ashiba
|
a93e7785d1fcf397baeb8a0b687a162a2b2aef3d
|
[
"BSD-3-Clause"
] | 4
|
2015-01-27T01:56:14.000Z
|
2021-02-23T07:21:20.000Z
|
from atom.api import Unicode
from enaml.core.declarative import d_
from html_object import HTMLObject
from lxml.html import builder as E
class Hyperlink(HTMLObject):
tag = E.A
text = d_(Unicode())
id = d_(Unicode())
type = d_(Unicode())
def initialize(self):
super(Hyperlink, self).initialize()
def buildHTML(self, *args):
self.addTags()
self.addText(self.text)
kwargs = {'class': self.type, 'id': self.id}
self.addAttributes(**kwargs)
return super(Hyperlink, self).buildHTML(*args)
| 22.48
| 54
| 0.645907
|
2e213fa8fb3d7a787ca0e0556eadcceaff035628
| 1,942
|
py
|
Python
|
spacq/tests/tool/box.py
|
bleutooth65/SpanishAcquisition3
|
50d1445c57f7ecf3bbf03a2cb28befedba1bd57a
|
[
"BSD-2-Clause"
] | null | null | null |
spacq/tests/tool/box.py
|
bleutooth65/SpanishAcquisition3
|
50d1445c57f7ecf3bbf03a2cb28befedba1bd57a
|
[
"BSD-2-Clause"
] | null | null | null |
spacq/tests/tool/box.py
|
bleutooth65/SpanishAcquisition3
|
50d1445c57f7ecf3bbf03a2cb28befedba1bd57a
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import logging.handlers
log = logging.getLogger(__name__)
from nose.plugins.skip import SkipTest
import re
from unittest import TestCase
from testconfig import config as tc
class AssertHandler(logging.handlers.BufferingHandler):
"""
A logging handler that allows making assertions based on its contents.
"""
def __init__(self, capacity=100, *args, **kwargs):
"""
Add ourselves to the main logger.
"""
logging.handlers.BufferingHandler.__init__(self, capacity, *args, **kwargs)
logging.getLogger().addHandler(self)
def assert_logged(self, level, msg, ignore_case=True, literal=False):
"""
Assert that a message matching the level and regular expression has been logged.
"""
level = level.lower()
re_flags = 0
if ignore_case:
re_flags |= re.IGNORECASE
for record in self.buffer:
if record.levelname.lower() == level:
if (literal and msg == record.msg or
not literal and re.search(msg, record.msg, re_flags)):
return
assert False, 'Log message not found at level "{0}": {1}'.format(level, msg)
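# A minimal usage sketch of AssertHandler (logger name and messages are illustrative): the
# handler attaches itself to the root logger on construction, so anything logged by the code
# under test can be asserted on afterwards.
#
#     handler = AssertHandler()
#     log.warning('device at "mock" unreachable')
#     handler.assert_logged('warning', 'unreachable')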
class DeviceServerTestCase(TestCase):
"""
Class for a device server test.
"""
mock = False
def obtain_device(self, impl=None, manufacturer=None, model=None):
"""
Try to get a handle for a physical device.
"""
if self.mock:
return impl()
all_devices = list(tc['devices'].values())
if manufacturer is None or model is None:
if impl is not None:
return impl(**all_devices[-1]['address'])
else:
return all_devices[-1]
potential_devices = [dev for dev in all_devices if 'address' in dev and
dev['manufacturer'] == manufacturer and dev['model'] == model]
for device in potential_devices:
try:
if impl is not None:
return impl(**device['address'])
else:
return device
except Exception as e:
log.info('Could not connect to device at "{0}": {1}'.format(device['address'], e))
raise SkipTest('Could not connect to device.')
| 23.975309
| 86
| 0.692585
|
d56e5a741e7672b87fed4e665ba078dd851d4764
| 1,325
|
py
|
Python
|
pymillerrabin.py
|
milanboers/pymillerrabin
|
f42e4a8de934b1906dfe8000da3a1b09b976c560
|
[
"MIT"
] | 1
|
2021-06-01T22:59:18.000Z
|
2021-06-01T22:59:18.000Z
|
pymillerrabin.py
|
milanboers/pymillerrabin
|
f42e4a8de934b1906dfe8000da3a1b09b976c560
|
[
"MIT"
] | null | null | null |
pymillerrabin.py
|
milanboers/pymillerrabin
|
f42e4a8de934b1906dfe8000da3a1b09b976c560
|
[
"MIT"
] | null | null | null |
def findsd(n):
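    # Decompose n - 1 as 2**s * d with d odd; (s, d) drive the Miller-Rabin rounds below.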
d = n - 1
s = 0
while d % 2 == 0:
d >>= 1
s += 1
return (s, d)
# Is composite?
def MillerRabin(n, a, s, d):
if pow(a, d, n) == 1:
return False #might be prime
for r in range(0, s, 1):
if pow(a, 2**r*d, n) == n-1:
return False #might be prime
return True #definitely not prime
# Is n prime? Works for n < 3317044064679887385961981
def isPrime(n):
    if n < 2:
        return False
    if n == 2:
        return True
elif n < 2047:
asl = [2]
elif n < 1373653:
asl = [2,3]
elif n < 9080191:
asl = [31, 73]
elif n < 25326001:
asl = [2, 3, 5]
elif n < 3215031751:
asl = [2, 3, 5, 7]
elif n < 4759123141:
asl = [2, 7, 61]
elif n < 1122004669633:
asl = [2, 13, 23, 1662803]
elif n < 2152302898747:
asl = [2, 3, 5, 7, 11]
elif n < 3474749660383:
asl = [2, 3, 5, 7, 11, 13]
elif n < 341550071728321:
asl = [2, 3, 5, 7, 11, 13, 17]
elif n < 3825123056546413051:
asl = [2, 3, 5, 7, 11, 13, 17, 19, 23]
elif n < 318665857834031151167461:
asl = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
elif n < 3317044064679887385961981:
asl = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
else:
raise Exception("n too big. Needs to be < 3317044064679887385961981")
(s, d) = findsd(n)
for a in asl:
if MillerRabin(n, a, s, d):
# definitely not prime
return False
# definitely prime
return True
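# A minimal usage sketch (values chosen for illustration):
#
#   isPrime(97)          # True
#   isPrime(221)         # False (13 * 17)
#   isPrime(2**61 - 1)   # True  (Mersenne prime, well below the supported bound)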
| 21.721311
| 71
| 0.581132
|
f8b40307423db0804310b9650782734b139fb697
| 498
|
py
|
Python
|
modules/order_strategy/models/logic/static.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | null | null | null |
modules/order_strategy/models/logic/static.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | 11
|
2019-11-12T23:26:45.000Z
|
2021-06-10T17:37:23.000Z
|
modules/order_strategy/models/logic/static.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | null | null | null |
from .base import BaseStrategyLogic
class StaticStrategyLogic(BaseStrategyLogic):
def next(self):
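        # Serve items in their static 'order'; skip items this user has already annotated unless multiple annotations are allowed.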
items = self.task.items
if not self.task.multiple_annotations:
items = self.task.exclude_items_with_user_annotations(items, self.user)
if self.item:
return items.filter(order__gt=self.item.order).first()
else:
return items.first()
def prev(self):
return self.task.items.filter(order__lt=self.item.order).last()
| 27.666667
| 83
| 0.664659
|
486ad788737a7dce7e043b56cb56dd402a61e4b7
| 12,841
|
py
|
Python
|
ebirdtaiwan/dash_apps/ShowWinners.py
|
even311379/EbirdTaiwan2020
|
2c1aa4d7346b5ade909d45f7c245fa4988394124
|
[
"MIT"
] | null | null | null |
ebirdtaiwan/dash_apps/ShowWinners.py
|
even311379/EbirdTaiwan2020
|
2c1aa4d7346b5ade909d45f7c245fa4988394124
|
[
"MIT"
] | null | null | null |
ebirdtaiwan/dash_apps/ShowWinners.py
|
even311379/EbirdTaiwan2020
|
2c1aa4d7346b5ade909d45f7c245fa4988394124
|
[
"MIT"
] | null | null | null |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from django_plotly_dash import DjangoDash
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import dash_table
import datetime, re
import pandas as pd
from fall.models import PredictionData, Survey, SurveyObs
app = DjangoDash(
'ShowWinners',
add_bootstrap_links=True,
)
page0 = html.Div([
html.Div('台北觀鳥大賽'),
html.Div('得獎名單'),
], className='winner_title', id='page0', style={'display':'block'})
page1 = html.Div([
html.Div('三隊總成績', className='winner_stitle'),
dbc.Row([
dbc.Col(
dbc.Card([
dbc.CardImg(src="/static/img/fall/farmbird.png", top=True, className='winner_card_img'),
html.H2('彩鷸隊',className='card-title my-3'),
html.Table([
html.Tr([html.Td('鳥種數',style={'text-align':'left'}),html.Td('',style={'text-align':'right'},id='t1sn')]),
html.Tr([html.Td('鳥隻數',style={'text-align':'left'}),html.Td('',style={'text-align':'right'}, id='t1sc')]),
html.Tr([html.Td('清單數',style={'text-align':'left'}),html.Td('',style={'text-align':'right'}, id='t1nl')]),
], className='winner_table'),
html.P(''),
html.P(''),
]),width=4),
dbc.Col(
dbc.Card([
dbc.CardImg(src="/static/img/fall/citybird.png", top=True, className='winner_card_img'),
html.H2('家燕隊',className='card-title my-3'),
html.Table([
html.Tr([html.Td('鳥種數',style={'text-align':'left'}),html.Td('',style={'text-align':'right'},id='t2sn')]),
html.Tr([html.Td('鳥隻數',style={'text-align':'left'}),html.Td('',style={'text-align':'right'}, id='t2sc')]),
html.Tr([html.Td('清單數',style={'text-align':'left'}),html.Td('',style={'text-align':'right'}, id='t2nl')]),
], className='winner_table'),
html.P(''),
html.P(''),
]),width=4),
dbc.Col(
dbc.Card([
dbc.CardImg(src="/static/img/fall/forestbird.png", top=True, className='winner_card_img'),
html.H2('大冠鷲隊',className='card-title my-3'),
html.Table([
html.Tr([html.Td('鳥種數',style={'text-align':'left'}),html.Td('',style={'text-align':'right'}, id='t3sn')]),
html.Tr([html.Td('鳥隻數',style={'text-align':'left'}),html.Td('',style={'text-align':'right'}, id='t3sc')]),
html.Tr([html.Td('清單數',style={'text-align':'left'}),html.Td('',style={'text-align':'right'}, id='t3nl')]),
], className='winner_table'),
html.P(''),
html.P(''),
]),width=4),
], className='fff'),
], id='page1', style={'display':'none'})
page2 = html.Div([
html.Div('個人獎', className='winner_stitle'),
html.Table([
html.Tr([html.Td('鳥種數',style={'text-align':'left'}) ,html.Td('', id='pw1'), html.Td('',id='pw1d',style={'text-align':'right'})]),
html.Tr([html.Td('鳥隻數',style={'text-align':'left'}) ,html.Td('', id='pw2'), html.Td('',id='pw2d',style={'text-align':'right'})]),
html.Tr([html.Td('清單數',style={'text-align':'left'}) ,html.Td('', id='pw3'), html.Td('',id='pw3d',style={'text-align':'right'})]),
], className='winner_table2'),
], id='page2', style={'display':'none'})
page3 = html.Div([
html.Div('團體獎', className='winner_stitle'),
dbc.Row([
dbc.Col(
dbc.Card([
dbc.CardImg(src="", top=True, className='winner_card_img',id='sn_img'),
dbc.CardBody([
html.H1("鳥種數"),
html.Br(),
html.H1("",id='snwt'),
html.H1("",id='snwtd'),
]),
]),width=4),
dbc.Col(
dbc.Card([
dbc.CardImg(src="", top=True, className='winner_card_img',id='sc_img'),
dbc.CardBody([
html.H1("鳥隻數"),
html.Br(),
html.H1("",id='scwt'),
html.H1("",id='scwtd'),
]),
]),width=4),
dbc.Col(
dbc.Card([
dbc.CardImg(src="", top=True, className='winner_card_img',id='nl_img'),
dbc.CardBody([
html.H1("清單數"),
html.Br(),
html.H1("",id='nlwt'),
html.H1("",id='nlwtd'),
]),
]),width=4),
], className='fff'),
],id='page3', style={'display':'none'})
page4 = html.Div([
html.Div('猜猜樂得獎名單', className='winner_stitle'),
dbc.Row("", className='fff', id='guess_result'),
],id='page4', style={'display':'none'})
page_index = 0
pages = [page0, page1, page2, page3, page4]
app.layout = html.Button([
html.Div(pages,id='page_content'),
dcc.Location(id='url'),
html.Div('',id='empty',style={'display':'none'}),
html.Div('',id='empty2',style={'display':'none'})
], id='Bg_btn', n_clicks=0)
app.clientside_callback(
"""
function(path) {
return String(window.innerWidth) + ',' + String(window.innerHeight);
}
""",
Output('empty', 'children'),
[Input('url', 'pathname')]
)
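# The clientside callback above writes the browser window size into the hidden 'empty' div;
# updating it is what triggers init_pages below to populate every results page.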
@app.callback(
[Output('t1sn','children'),
Output('t1sc','children'),
Output('t1nl','children'),
Output('t2sn','children'),
Output('t2sc','children'),
Output('t2nl','children'),
Output('t3sn','children'),
Output('t3sc','children'),
Output('t3nl','children'),
Output('pw1','children'),
Output('pw2','children'),
Output('pw3','children'),
Output('pw1d','children'),
Output('pw2d','children'),
Output('pw3d','children'),
Output('sn_img','src'),
Output('snwt','children'),
Output('snwtd','children'),
Output('sc_img','src'),
Output('scwt','children'),
Output('scwtd','children'),
Output('nl_img','src'),
Output('nlwt','children'),
Output('nlwtd','children'),
Output('guess_result','children'),],
[Input('empty', 'children')],
prevent_initial_call = True
)
def init_pages(h):
global page_index
page_index = 0
    # Overall results for the three teams
t1_rsn = SurveyObs.objects.filter(survey__team = '彩鷸隊', survey__is_valid=True).values_list('species_name', flat=True)
t1sn = len(set([re.sub(r' ?\(.*?\)','',s) for s in t1_rsn]))
t1sc = sum(SurveyObs.objects.filter(survey__team = '彩鷸隊', survey__is_valid=True).values_list('amount', flat=True))
t1nl = len(Survey.objects.filter(team='彩鷸隊', is_valid=True))
t2_rsn = SurveyObs.objects.filter(survey__team = '家燕隊', survey__is_valid=True).values_list('species_name', flat=True)
t2sn = len(set([re.sub(r' ?\(.*?\)','',s) for s in t2_rsn]))
t2sc = sum(SurveyObs.objects.filter(survey__team = '家燕隊', survey__is_valid=True).values_list('amount', flat=True))
t2nl = len(Survey.objects.filter(team='家燕隊', is_valid=True))
t3_rsn = SurveyObs.objects.filter(survey__team = '大冠鷲隊', survey__is_valid=True).values_list('species_name', flat=True)
t3sn = len(set([re.sub(r' ?\(.*?\)','',s) for s in t3_rsn]))
t3sc = sum(SurveyObs.objects.filter(survey__team = '大冠鷲隊', survey__is_valid=True).values_list('amount', flat=True))
t3nl = len(Survey.objects.filter(team='大冠鷲隊', is_valid=True))
    # Individual awards
all_participants = list(set(Survey.objects.filter(is_valid=True).values_list('creator', flat=True)))
participants_sn = []
participants_sc = []
participants_nl = []
for participant in all_participants:
rsn = SurveyObs.objects.filter(survey__creator=participant, survey__is_valid=True).values_list('species_name', flat=True)
participants_sn.append(len(set([re.sub(r' ?\(.*?\)','',s) for s in rsn])))
participants_sc.append(sum(SurveyObs.objects.filter(survey__creator=participant, survey__is_valid=True).values_list('amount', flat=True)))
participants_nl.append(len(Survey.objects.filter(creator=participant, is_valid=True)))
pw1d = max(participants_sn)
pw1 = all_participants[participants_sn.index(pw1d)]
pw2d = max(participants_sc)
pw2 = all_participants[participants_sc.index(pw2d)]
pw3d = max(participants_nl)
pw3 = all_participants[participants_nl.index(pw3d)]
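    # Note: list.index() returns the first occurrence, so if two participants tie
    # on a count, the one appearing earlier in all_participants takes the award.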
# 團體獎
img_srcs = ["/static/img/fall/farmbird.png", "/static/img/fall/citybird.png", "/static/img/fall/forestbird.png"]
team_names = ['彩鷸隊', '家燕隊', '大冠鷲隊']
sns = [t1sn, t2sn, t3sn]
sn_img = img_srcs[sns.index(max(sns))]
snwt = team_names[sns.index(max(sns))]
snwtd = max(sns)
scs = [t1sc, t2sc, t3sc]
sc_img = img_srcs[scs.index(max(scs))]
scwt = team_names[scs.index(max(scs))]
scwtd = max(scs)
    nls = [t1nl, t2nl, t3nl]
nl_img = img_srcs[nls.index(max(nls))]
nlwt = team_names[nls.index(max(nls))]
nlwtd = max(nls)
# 我要猜
rts = SurveyObs.objects.filter(survey__is_valid=True).values_list('species_name', flat=True)
total_species = len(set([re.sub(r' ?\(.*?\)','',s) for s in rts]))
total_count = sum(SurveyObs.objects.filter(survey__is_valid=True).values_list('amount', flat=True))
df = pd.DataFrame.from_records(PredictionData.objects.all().values('participant_name','guess_n_species','guess_total_individual'))
df['tsrd'] = (df.guess_n_species - total_species).tolist()
df['abs_tsrd'] = [abs(i) for i in df.tsrd]
df['tcrd'] = (df.guess_total_individual - total_count).tolist()
df['abs_tcrd'] = [abs(i) for i in df.tcrd]
ts = df.sort_values(by=['abs_tsrd']).participant_name.tolist()[:10]
    tsd = [f'+{i}' if i > 0 else f'{i}' for i in df.sort_values(by=['abs_tsrd']).tsrd.tolist()[:10]]
tc = df.sort_values(by=['abs_tcrd']).participant_name.tolist()[:10]
    tcd = [f'+{i}' if i > 0 else f'{i}' for i in df.sort_values(by=['abs_tcrd']).tcrd.tolist()[:10]]
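    # Illustration (hypothetical numbers): with an actual total of 115 species,
    # a guess of 120 is shown as "+5", a guess of 110 as "-5", and an exact guess as "0".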
guess_result = [
dbc.Col([
html.H1('最終數值:'),
html.H3(f'總鳥種數: {total_species}',className='ml-5'),
html.H3(f'總鳥隻數: {total_count}',className='ml-5'),
],width=4,style={'text-align':'left'}),
dbc.Col([
html.H1('預測鳥種數最接近:'),
html.H3(f'(1) {ts[0]} ({tsd[0]})',className='ml-5'),
html.H3(f'(2) {ts[1]} ({tsd[1]})',className='ml-5'),
html.H3(f'(3) {ts[2]} ({tsd[2]})',className='ml-5'),
html.H3(f'(4) {ts[3]} ({tsd[3]})',className='ml-5'),
html.H3(f'(5) {ts[4]} ({tsd[4]})',className='ml-5'),
html.H3(f'(6) {ts[5]} ({tsd[5]})',className='ml-5'),
html.H3(f'(7) {ts[6]} ({tsd[6]})',className='ml-5'),
html.H3(f'(8) {ts[7]} ({tsd[7]})',className='ml-5'),
html.H3(f'(9) {ts[8]} ({tsd[8]})',className='ml-5'),
html.H3(f'(10) {ts[9]} ({tsd[9]})',className='ml-5'),
],width=4,style={'text-align':'left'}),
dbc.Col([
html.H1('預測鳥隻數最接近:'),
html.H3(f'(1) {tc[0]} ({tcd[0]})',className='ml-5'),
html.H3(f'(2) {tc[1]} ({tcd[1]})',className='ml-5'),
html.H3(f'(3) {tc[2]} ({tcd[2]})',className='ml-5'),
html.H3(f'(4) {tc[3]} ({tcd[3]})',className='ml-5'),
html.H3(f'(5) {tc[4]} ({tcd[4]})',className='ml-5'),
html.H3(f'(6) {tc[5]} ({tcd[5]})',className='ml-5'),
html.H3(f'(7) {tc[6]} ({tcd[6]})',className='ml-5'),
html.H3(f'(8) {tc[7]} ({tcd[7]})',className='ml-5'),
html.H3(f'(9) {tc[8]} ({tcd[8]})',className='ml-5'),
html.H3(f'(10) {tc[9]} ({tcd[9]})',className='ml-5'),
],width=4,style={'text-align':'left'}),
]
return [t1sn, t1sc, t1nl, t2sn, t2sc,\
t2nl, t3sn, t3sc, t3nl, pw1, pw2, pw3, \
pw1d, pw2d, pw3d, sn_img, snwt, snwtd, \
sc_img, scwt, scwtd, nl_img, nlwt, nlwtd, guess_result]
# @app.callback(
# [Output('page0','style'),
# Output('page1','style'),
# Output('page2','style'),
# Output('page3','style'),
# Output('page4','style'),],
# [Input('Bg_btn','n_clicks')],
# prevent_initial_call = True
# )
# def Update_Page(nc):
# global page_index
# all_none = [{'display':'none'}, {'display':'none'}, {'display':'none'}, {'display':'none'}, {'display':'none'}]
# '''
#     the plain `if` comparison behaved unreliably on the server,
#     so the page index is advanced with this explicit elif chain; worth revisiting.
# '''
# if page_index == 0:
# page_index = 1
# elif page_index == 1:
# page_index = 2
# elif page_index == 2:
# page_index = 3
# elif page_index == 3:
# page_index = 4
# all_none[page_index] = {'display':'block'}
# return all_none
| 42.66113
| 146
| 0.551515
|
3fa502faf70c398bf078b702343f7e9a67252400
| 781
|
pyde
|
Python
|
sketch_191218d_list79_zadanie/sketch_191218d_list79_zadanie.pyde
|
takoe-sebe/2019-fall-polytech-cs
|
a98472c7689cfdc42da710bf6ef8aef7278b276f
|
[
"MIT"
] | null | null | null |
sketch_191218d_list79_zadanie/sketch_191218d_list79_zadanie.pyde
|
takoe-sebe/2019-fall-polytech-cs
|
a98472c7689cfdc42da710bf6ef8aef7278b276f
|
[
"MIT"
] | null | null | null |
sketch_191218d_list79_zadanie/sketch_191218d_list79_zadanie.pyde
|
takoe-sebe/2019-fall-polytech-cs
|
a98472c7689cfdc42da710bf6ef8aef7278b276f
|
[
"MIT"
] | null | null | null |
lg_diam = 0
lg_rad = 0
lg_circ = 0
sm_diam = 0
cx = 0
cy = 0
def setup():
    global lg_diam, lg_rad, lg_circ, sm_diam, cx, cy
    background(100)
    smooth()
    size(500, 400)
    noStroke()
    lg_diam = width * .55
    lg_rad = lg_diam / 2
    lg_circ = PI * lg_diam
    cx = width / 2
    cy = height / 2
    colorMode(HSB)
def draw():
    fill(0, 10)
    rect(0, 0, width, height)
    nbr_circles = int(map(mouseX, 0, width, 6, 50))
    sm_diam = lg_circ / nbr_circles
    myColor = int(map(mouseY, 0, height, 150, 255))
    fill(myColor, 180, 190, 100)
    for i in range(int(nbr_circles)):
        angle = i * TWO_PI / nbr_circles
        x = cx + cos(angle) * lg_rad
        y = cy + sin(angle) * lg_rad
        ellipse(x, y, sm_diam, sm_diam)
    filter(BLUR, 3)
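# Note: map(v, a, b, c, d) rescales v linearly from the range [a, b] to [c, d];
# e.g. map(mouseX, 0, width, 6, 50) with the mouse at mid-screen yields
# 6 + 0.5 * (50 - 6) = 28, i.e. roughly 28 circles around the ring.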
| 23.666667
| 57
| 0.573624
|
cb670a8d0da408c0175f99a7ecd067395b0d35c3
| 141
|
py
|
Python
|
att_app/templatetags/filters.py
|
tunir27/django-Attendance
|
4075c93bce56f02b06de126349bcc63294e07f0b
|
[
"MIT"
] | 3
|
2019-07-05T16:03:39.000Z
|
2019-11-06T07:20:29.000Z
|
att_app/templatetags/filters.py
|
tunir27/django-Attendance
|
4075c93bce56f02b06de126349bcc63294e07f0b
|
[
"MIT"
] | 6
|
2020-06-05T17:53:31.000Z
|
2021-09-07T23:50:09.000Z
|
att_app/templatetags/filters.py
|
tunir27/django-Attendance
|
4075c93bce56f02b06de126349bcc63294e07f0b
|
[
"MIT"
] | 3
|
2018-04-30T15:09:04.000Z
|
2018-12-15T12:45:14.000Z
|
from django import template
register = template.Library()
@register.filter(name='subtract')
def subtract(value, arg):
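    """
    Subtract ``arg`` from ``value``.

    Illustrative template usage (variable names are hypothetical):
        {{ total|subtract:discount }}
    """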
return value - arg
| 23.5
| 33
| 0.751773
|
db65b3be21fa4680000310bd6ddb51617e24adb2
| 303
|
py
|
Python
|
sql/sql_sql.py
|
Zuoxiaoxian/Excel_Oracle_conf_log
|
e4ded91c333af466542ba25dfd162f192347bc82
|
[
"MIT"
] | null | null | null |
sql/sql_sql.py
|
Zuoxiaoxian/Excel_Oracle_conf_log
|
e4ded91c333af466542ba25dfd162f192347bc82
|
[
"MIT"
] | null | null | null |
sql/sql_sql.py
|
Zuoxiaoxian/Excel_Oracle_conf_log
|
e4ded91c333af466542ba25dfd162f192347bc82
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2018/5/25 10:45
# @Author : xiao xian.zuo
# @Email : 1980179070@163.com
# @File : sql_sql.py
# @Software : PyCharm
# select_sql = """select * from ACTION_FLAG_TYPE"""
def select():
return """select * from CST_EXCEL_DETAIL_SAMPLE where INSPECTID=2"""
| 25.25
| 72
| 0.627063
|
6ad36266c800e7b4e38302c183644bea968fa162
| 5,613
|
py
|
Python
|
models/cells/modelDCN2011Luthman.py
|
HarshKhilawala/cerebmodels
|
d2a2f2ef947ef9dc23ddce6e55159240cd3233cb
|
[
"BSD-3-Clause"
] | null | null | null |
models/cells/modelDCN2011Luthman.py
|
HarshKhilawala/cerebmodels
|
d2a2f2ef947ef9dc23ddce6e55159240cd3233cb
|
[
"BSD-3-Clause"
] | 9
|
2020-03-24T17:09:03.000Z
|
2021-05-17T16:11:17.000Z
|
models/cells/modelDCN2011Luthman.py
|
myHBPwork/cerebmodels
|
371ea7f1bbe388f1acade17c7128b8ca6ab8fb7a
|
[
"BSD-3-Clause"
] | 1
|
2021-05-21T03:08:41.000Z
|
2021-05-21T03:08:41.000Z
|
# ~/models/cells/modelDCN2011Luthman.py
import os
pwd = os.getcwd() # record root directory path ~/cerebmodels
path_to_files = pwd + os.sep + "models" + os.sep + "cells" + os.sep + \
"DCN2011Luthman" + os.sep # record path to this model/folder
from models.cells.DCN2011Luthman.ExcitatoryDCNneuron import ExcitatoryDCNneuron
from executive import ExecutiveControl
from managers.simulation import SimulationManager as sm
from managers.read import ReadManager as rm
from managers.signalprocessing import SignalProcessingManager as spm
import sciunit
from cerebunit.capabilities.cells.response import ProducesElectricalResponse
from cerebunit.capabilities.cells.measurements import ProducesEphysMeasurement
class DeepCerebellarNucleiCell( sciunit.Model,
ProducesElectricalResponse,
ProducesEphysMeasurement ):
"""USE CASE:
"""
# AFTER the model is in the HBP Validation Framework Model catalog, set the generated uuid
#uuid = "22dc8fd3-c62b-4e07-9e47-f5829e038d6d"
def __init__(self):
### ===================== Descriptive Attributes ======================
self.modelscale = "cells"
self.modelname = "DCN2011Luthman"
# ------specify cell-regions from with response are recorded-------
self.regions = {"soma": ["v"]}
self.recordingunits = {"v": "mV"}
# -----------attributed inheritance from sciunit.Model--------------
self.name = "Luthman et al. 2011 model of DeepCerebellarNucleiCell"
self.description = "Luthman 2011 model of a neuron in Deep Cerebellar Nuclei (DCN) and published in 10.1007/s12311-011-0295-9 This is a multi-compartment (517) model. This model is the SciUnit wrapped version of the NEURON model in modelDB accession # 144523."
#
### =================== Instantiate cell template ====================
sm.lock_and_load_model_libraries(modelscale=self.modelscale,
modelname=self.modelname)
os.chdir(path_to_files)
self.cell = ExcitatoryDCNneuron()
os.chdir(pwd)
### ===============================================================
self.fullfilename = "nil"
self.prediction = "nil"
#
# =======================================================================
# +++++++++++++++++++++++ MODEL CAPABILITIES ++++++++++++++++++++++++++++
# =======================================================================
# --------------------- produce_voltage_response ------------------------
def produce_voltage_response(self, **kwargs):
"""generic/essential model response
**Keyword Arguments:**
kwargs = { "parameters": dictionary with keys,
"stimparameters": None or dictionary with keys "type" and "stimlist",
"onmodel": instantiated model }
"""
#ExecutiveControl.launch_model_raw("cells")
print("Simulation produce_voltage_response starting ...")
ec = ExecutiveControl() # only works when in ~/cerebmodels
model = ec.launch_model( parameters = kwargs["parameters"],
stimparameters = kwargs["stimparameters"],
stimloc = kwargs["stimloc"],
onmodel = kwargs["onmodel"], mode = "raw" )
print("File saving ...")
fullfilename = ec.save_response()
setattr(model, "fullfilename", fullfilename)
print("File saved.")
print("Simulation produce_voltage_response Done.")
return model
# ----------------------- produce_restingVm -----------------------------
def produce_restingVm(self, **kwargs):
"""
kwargs = { "parameters": dictionary with keys,
"stimparameters": dictionary with keys "type" and "stimlist",
"onmodel": instantiated model }
"""
print("Sim produce_restingVm starting ...")
ec = ExecutiveControl() # only works when in ~/cerebmodels
model = ec.launch_model( parameters = kwargs["parameters"],
stimparameters = kwargs["stimparameters"],
stimloc = kwargs["stimloc"], onmodel = kwargs["onmodel"],
capabilities = {"model": "produce_voltage_response",
"vtest": ProducesElectricalResponse},
mode="capability")
#self.fullfilename # already saved by invoking produce_voltage_response above
#print("Signal Processing ...")
nwbfile = rm.load_nwbfile(model.fullfilename)
orderedepochs = rm.order_all_epochs_for_region(nwbfile=nwbfile, region="soma")
timestamps_over_epochs = [ rm.timestamps_for_epoch( orderedepochs[i] )
for i in range(len(orderedepochs)) ]
data_over_epochs = [ rm.data_for_epoch( orderedepochs[i] )
for i in range(len(orderedepochs)) ]
baseVms = spm.distill_Vm_pre_epoch( timestamps = timestamps_over_epochs,
datavalues = data_over_epochs )
#print("Signal Processing Done.")
setattr(model, "prediction", baseVms)
print("Simulation produce_restingVm Done.")
return model
# ----------------------- produce_spike_train ---------------------------
def produce_spike_train(self, **kwargs):
"""
Use case:
"""
pass
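# Illustrative instantiation (hypothetical; assumes the cerebmodels environment with
# NEURON and the compiled DCN2011Luthman mechanisms available):
#     dcn = DeepCerebellarNucleiCell()
#     dcn.regions          # -> {"soma": ["v"]}
#     dcn.recordingunits   # -> {"v": "mV"}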
| 50.116071
| 268
| 0.554605
|
9323cc3e8650367cb28a40007fd2fcbf37b5a04a
| 79,581
|
py
|
Python
|
controllers/inv.py
|
ramdesh/eden
|
501eab1782ba97417afb8d236b211dbdd6750dfb
|
[
"MIT"
] | 2
|
2019-11-25T20:34:52.000Z
|
2021-06-04T20:05:46.000Z
|
controllers/inv.py
|
ramdesh/eden
|
501eab1782ba97417afb8d236b211dbdd6750dfb
|
[
"MIT"
] | 1
|
2020-01-29T15:33:17.000Z
|
2020-01-29T15:33:17.000Z
|
controllers/inv.py
|
ramdesh/eden
|
501eab1782ba97417afb8d236b211dbdd6750dfb
|
[
"MIT"
] | 3
|
2019-11-30T20:25:55.000Z
|
2022-02-03T17:12:16.000Z
|
# -*- coding: utf-8 -*-
"""
Inventory Management
A module to record inventories of items at a locations (sites),
including Warehouses, Offices, Shelters & Hospitals
"""
module = request.controller
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
return settings.customise_home(module, alt_function="index_alt")
# -----------------------------------------------------------------------------
def index_alt():
"""
Module homepage for non-Admin users when no CMS content found
"""
# Just redirect to the Warehouse Summary View
s3_redirect_default(URL(f="warehouse", args="summary"))
# -----------------------------------------------------------------------------
def index2():
"""
Alternative Application Home page
- custom View
"""
# Need CRUD String
table = s3db.table("cr_shelter", None)
module_name = settings.modules[module].get("name_nice")
response.title = module_name
response.view = "inv/index.html"
if s3.debug:
# Start of TEST CODE for multiple dataTables,
        # this also requires views/inv/index.html to be modified
from s3.s3data import S3DataTable
representation = request.extension
if representation == "html" or get_vars.id == "warehouse_list_1":
resource = s3db.resource("inv_warehouse")
totalrows = resource.count()
list_fields = ["id",
"name",
"organisation_id",
]
orderby = "inv_warehouse.name asc"
if representation == "aadata":
query, orderby, left = resource.datatable_filter(list_fields, get_vars)
if orderby is None:
orderby = default_orderby
start = int(get_vars.displayStart) if get_vars.displayStart else 0
limit = int(get_vars.pageLength) if get_vars.pageLength else s3.ROWSPERPAGE
data = resource.select(list_fields,
start = start,
limit = limit,
orderby = orderby,
count = True,
represent = True)
filteredrows = data["numrows"]
if totalrows is None:
totalrows = filteredrows
rfields = data["rfields"]
rows = data["rows"]
dt = S3DataTable(rfields, rows)
dt.defaultActionButtons(resource)
if representation == "html":
warehouses = dt.html(totalrows,
filteredrows,
"warehouse_list_1",
dt_ajax_url = URL(c = "inv",
f = "index2",
extension = "aadata",
vars = {"id":"warehouse_list_1"},
),
dt_group = 2,
dt_searching = "true",
)
else:
warehouse = dt.json(totalrows,
filteredrows,
"warehouse_list_1",
int(get_vars.draw),
)
return warehouse
# Second Table
if representation == "html" or get_vars.id == "inventory_list_1":
if "Adjust" in request.post_vars:
if request.post_vars.selected == "":
inventory = "Well you could have selected something :("
else:
inventory = "Adjustment not currently supported... :-) you selected the following items: %s" % request.post_vars.selected
else:
resource = s3db.resource("inv_inv_item")
totalrows = resource.count()
table = resource.table
stable = s3db.supply_item
list_fields = ["id",
"site_id",
"item_id$name",
"quantity",
"pack_value",
"total_value",
]
orderby = "inv_inv_item.site_id asc"
if representation == "aadata":
query, orderby, left = resource.datatable_filter(list_fields, get_vars)
if orderby is None:
orderby = default_orderby
site_list = {}
data = resource.select(list_fields,
limit = None,
orderby = orderby,
count = True
)
filteredrows = data["numrows"]
if totalrows is None:
totalrows = filteredrows
rows = data["rows"]
for row in rows:
site_id = row["inv_inv_item.site_id"]
if site_id not in site_list:
site_list[site_id] = 1
else:
site_list[site_id] += 1
formatted_site_list = {}
repr = table.site_id.represent
for (key,value) in site_list.items():
formatted_site_list[str(repr(key))] = value
if isinstance(orderby, bool):
orderby = [table.site_id, stable.name, ~table.quantity]
start = int(get_vars.displayStart) if get_vars.displayStart else 0
limit = int(get_vars.pageLength) if get_vars.pageLength else s3.ROWSPERPAGE
data = resource.select(list_fields,
orderby = orderby,
start = start,
limit = limit,
represent = True)
rfields = data["rfields"]
rows = data["rows"]
dt = S3DataTable(rfields,
rows,
orderby = orderby,
)
custom_actions = [{"label": s3_str(T("Warehouse")),
"_class": "action-icon",
"img": "/%s/static/img/markers/gis_marker.image.Agri_Commercial_Food_Distribution_Center_S1.png" % appname,
"url": URL(c = "inv",
f = "warehouse",
args = ["[id]", "update"]
)
},
]
dt.defaultActionButtons(resource, custom_actions)
if representation == "html":
rows = current.db(table.quantity < 100.0).select(table.id, table.quantity)
errorList = []
warningList = []
alertList = []
for row in rows:
if row.quantity < 0.0:
errorList.append(row.id)
elif row.quantity == 0.0:
warningList.append(row.id)
else:
alertList.append(row.id)
inventory = dt.html(totalrows,
filteredrows,
"inventory_list_1",
dt_action_col = -1,
dt_ajax_url = URL(c = "inv",
f = "index2",
extension = "aadata",
vars = {"id":"inventory_list_1"},
),
dt_bulk_actions = "Adjust",
dt_group = [1, 2],
dt_group_totals = [formatted_site_list],
dt_searching = "true",
dt_styles = {"dtdisable": errorList,
"dtwarning": warningList,
"dtalert": alertList,
},
#dt_text_maximum_len = 10,
#dt_text_condense_len = 8,
#dt_group_space = True,
dt_shrink_groups = "accordion",
#dt_shrink_groups = "individual",
)
s3.actions = None
elif representation == "aadata":
inventory = dt.json(totalrows,
filteredrows,
"inventory_list_1",
int(get_vars.draw),
dt_action_col = -1,
dt_bulk_actions = "Adjust",
dt_group_totals = [formatted_site_list],
)
return inventory
else:
# Probably not the way to do it.... but
s3db.configure("inv_inv_item",
list_fields = list_fields,
report_groupby = "site_id",
pdf_groupby = "site_id",
)
s3.filter = filter
r = s3_request("inv", "inv_item",
vars={"orderby" : orderby})
r.resource = resource
output = r(pdf_groupby = "site_id",
dt_group = 1,
)
return output
# Third table
if representation == "html" or get_vars.id == "supply_list_1":
resource = s3db.resource("supply_item")
list_fields = ["id",
"name",
"um",
"model",
]
orderby = "inv_inv_item.site_id asc"
if representation == "aadata":
query, orderby, left = resource.datatable_filter(list_fields, get_vars)
if orderby is None:
orderby = default_orderby
data = resource.select(list_fields,
limit = None,
orderby = orderby,
count = True,
represent = True
)
rows = data["rows"]
rfields = data["rfields"]
numrows = data["numrows"]
dt = S3DataTable(rfields, rows)
dt.defaultActionButtons(resource)
if representation == "html":
supply_items = dt.html(numrows,
numrows,
"supply_list_1",
dt_action_col = 1,
dt_ajax_url = URL(c = "inv",
f = "index2",
extension = "aadata",
vars = {"id": "supply_list_1"},
),
dt_pageLength = 10,
)
else:
supply_items = dt.json(numrows,
numrows,
"supply_list_1",
int(get_vars.draw),
dt_action_col = 1,
)
return supply_items
r = s3_request(prefix = "inv", name = "inv_item")
return {"module_name": module_name,
"warehouses": warehouses,
"inventory": inventory,
"supply_items": supply_items,
"r": r,
}
# End of TEST CODE
return {"module_name": module_name,
}
# -----------------------------------------------------------------------------
def warehouse():
"""
RESTful CRUD controller
"""
request_args = request.args
if "viewing" in get_vars:
viewing = get_vars.viewing
tn, id = viewing.split(".", 1)
if tn == "inv_warehouse":
request_args.insert(0, id)
# CRUD pre-process
def prep(r):
if r.component:
component_name = r.component_name
if component_name == "inv_item":
# Filter out items which are already in this inventory
s3db.inv_prep(r)
# Remove the Warehouse Name from the list_fields
list_fields = s3db.get_config("inv_inv_item", "list_fields")
try:
list_fields.remove("site_id")
s3db.configure("inv_inv_item",
list_fields = list_fields,
)
except:
pass
elif component_name == "recv":
# Filter out items which are already in this inventory
s3db.inv_prep(r)
# Configure which fields in inv_recv are readable/writable
# depending on status
recvtable = s3db.inv_recv
if r.component_id:
record = db(recvtable.id == r.component_id).select(recvtable.status,
limitby = (0, 1)
).first()
set_recv_attr(record.status)
else:
set_recv_attr(s3db.inv_ship_status["IN_PROCESS"])
recvtable.recv_ref.readable = False
if r.method and r.method != "read":
# Don't want to see in Create forms
recvtable.status.readable = False
elif component_name == "send":
# Filter out items which are already in this inventory
s3db.inv_prep(r)
elif component_name == "human_resource":
s3db.org_site_staff_config(r)
elif component_name == "req":
s3db.req_prep(r)
if r.method != "update" and r.method != "read":
# Hide fields which don't make sense in a Create form
# inc list_create (list_fields over-rides)
s3db.req_create_form_mods()
elif component_name == "asset":
# Default/Hide the Organisation & Site fields
record = r.record
atable = s3db.asset_asset
field = atable.organisation_id
field.default = record.organisation_id
field.readable = field.writable = False
field = atable.site_id
field.default = record.site_id
field.readable = field.writable = False
# Stay within Warehouse tab
s3db.configure("asset_asset",
create_next = None,
)
elif r.id:
r.table.obsolete.readable = r.table.obsolete.writable = True
# "show_obsolete" var option can be added (btn?) later to
# disable this filter
if r.method in [None, "list"] and \
not r.vars.get("show_obsolete", False):
r.resource.add_filter(db.inv_warehouse.obsolete != True)
if r.representation == "xls":
list_fields = r.resource.get_config("list_fields")
list_fields += ["location_id$lat",
"location_id$lon",
"location_id$inherited",
]
return True
s3.prep = prep
# CRUD post-process
def postp(r, output):
if r.interactive and not r.component and r.method != "import":
if auth.s3_has_permission("read", "inv_inv_item"):
# Change Action buttons to open Stock Tab by default
read_url = URL(f="warehouse", args=["[id]", "inv_item"])
update_url = URL(f="warehouse", args=["[id]", "inv_item"])
s3_action_buttons(r,
read_url = read_url,
update_url = update_url)
else:
cname = r.component_name
if cname == "human_resource":
# Modify action button to open staff instead of human_resource
read_url = URL(c="hrm", f="staff", args=["[id]"])
update_url = URL(c="hrm", f="staff", args=["[id]", "update"])
s3_action_buttons(r, read_url = read_url,
#delete_url = delete_url,
update_url = update_url)
if isinstance(output, dict) and \
"add_btn" in output:
del output["add_btn"]
return output
s3.postp = postp
if "extra_data" in get_vars:
resourcename = "inv_item"
else:
resourcename = "warehouse"
csv_stylesheet = "%s.xsl" % resourcename
if len(request_args) > 1 and request_args[1] in ("req", "send", "recv"):
# Sends/Receives should break out of Component Tabs
# To allow access to action buttons in inv_recv rheader
native = True
else:
native = False
output = s3_rest_controller(module, resourcename,
#hide_filter = {"inv_item": False,
# "_default": True,
# },
# Extra fields for CSV uploads:
#csv_extra_fields = [
# dict(label="Organisation",
# field=s3db.org_organisation_id(comment=None))
#]
csv_stylesheet = csv_stylesheet,
csv_template = resourcename,
native = native,
rheader = s3db.inv_rheader,
)
return output
# -----------------------------------------------------------------------------
def warehouse_type():
"""
RESTful CRUD controller
"""
return s3_rest_controller()
# -----------------------------------------------------------------------------
def supplier():
"""
Filtered version of the organisation() REST controller
"""
get_vars["organisation_type.name"] = "Supplier"
# Load model (including normal CRUD strings)
table = s3db.org_organisation
# Modify CRUD Strings
s3.crud_strings.org_organisation = Storage(
label_create = T("Create Supplier"),
title_display = T("Supplier Details"),
title_list = T("Suppliers"),
title_update = T("Edit Supplier"),
title_upload = T("Import Suppliers"),
label_list_button = T("List Suppliers"),
label_delete_button = T("Delete Supplier"),
msg_record_created = T("Supplier added"),
msg_record_modified = T("Supplier updated"),
msg_record_deleted = T("Supplier deleted"),
msg_list_empty = T("No Suppliers currently registered")
)
# Open record in this controller after creation
s3db.configure("org_organisation",
create_next = URL(c="inv", f="supplier",
args = ["[id]", "read"]),
)
# NB Type gets defaulted in the Custom CRUD form
# - user needs create permissions for org_organisation_organisation_type
return s3db.org_organisation_controller()
# =============================================================================
def inv_item():
""" REST Controller """
# If this url has a viewing track items then redirect to track_movement
viewing = get_vars.get("viewing", None)
if viewing:
tn, id = viewing.split(".", 1)
if tn == "inv_track_item":
table = s3db.inv_track_item
record = db(table.id == id).select(table.item_id,
limitby = (0, 1)
).first()
redirect(URL(c = "inv",
f = "track_movement",
args = [],
vars = {"viewing" : "%s.%s" % ("inv_inv_item", record.item_id)}
))
tablename = "inv_inv_item"
# Load model to be able to override CRUD string(s)
table = s3db[tablename]
# Limit site_id to sites the user has permissions for
auth.permitted_facilities(table = table,
error_msg = T("You do not have permission for any site to add an inventory item."))
s3.crud_strings[tablename].msg_list_empty = T("No Stock currently registered")
report = get_vars.get("report")
if report == "mon":
s3.crud_strings[tablename].update({"title_list": T("Monetization Report"),
"subtitle_list": T("Monetization Details"),
#"msg_list_empty": T("No Stock currently registered"),
})
s3db.configure(tablename,
list_fields = ["id",
(T("Donor"), "supply_org_id"),
(T("Items/Description"), "item_id"),
(T("Quantity"), "quantity"),
(T("Unit"), "item_pack_id"),
(T("Unit Value"), "pack_value"),
(T("Total Value"), "total_value"),
(T("Remarks"), "comments"),
"status",
]
)
else:
s3db.configure(tablename,
insertable = settings.get_inv_direct_stock_edits(),
list_fields = ["id",
"site_id",
"item_id",
"item_id$code",
"item_id$item_category_id",
"quantity",
"pack_value",
#(T("Total Value"), "total_value"),
]
)
if len(request.args) > 1 and request.args[1] == "track_item":
# remove CRUD generated buttons in the tabs
s3db.configure("inv_track_item",
create = False,
listadd = False,
editable = False,
deletable = False,
)
else:
s3.filter = (table.quantity != 0)
def prep(r):
if r.method != "report":
s3.dataTable_group = 1
return True
s3.prep = prep
# Import pre-process
def import_prep(data):
"""
Deletes all Stock records of the organisation/branch
before processing a new data import
"""
resource, tree = data
xml = current.xml
tag = xml.TAG
att = xml.ATTRIBUTE
if s3.importerReplace:
if tree is not None:
root = tree.getroot()
expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
(tag.root, tag.resource, att.name, tag.data, att.field)
orgs = root.xpath(expr)
otable = s3db.org_organisation
stable = s3db.org_site
itable = s3db.inv_inv_item
for org in orgs:
org_name = org.get("value", None) or org.text
if org_name:
try:
org_name = json.loads(xml.xml_decode(org_name))
except:
pass
if org_name:
query = (otable.name == org_name) & \
(stable.organisation_id == otable.id) & \
(itable.site_id == stable.id)
resource = s3db.resource("inv_inv_item", filter=query)
# Use cascade=True so that the deletion gets
# rolled back if the import fails:
resource.delete(format="xml", cascade=True)
resource.skip_import = True
s3.import_prep = import_prep
# Upload for configuration (add replace option)
s3.importerPrep = lambda: {"ReplaceOption": T("Remove existing data before import")}
output = s3_rest_controller(#csv_extra_fields = [{"label": "Organisation",
# "field": s3db.org_organisation_id(comment = None)
# },
# ],
pdf_orientation = "Landscape",
pdf_table_autogrow = "B",
pdf_groupby = "site_id, item_id",
pdf_orderby = "expiry_date, supply_org_id",
rheader = s3db.inv_rheader,
)
if not settings.get_inv_direct_stock_edits() and \
isinstance(output, dict) and \
"add_btn" in output:
del output["add_btn"]
return output
# -----------------------------------------------------------------------------
def track_movement():
""" REST Controller """
table = s3db.inv_track_item
s3db.configure("inv_track_item",
create = False,
deletable = False,
editable = False,
listadd = False,
)
def prep(r):
if r.interactive:
if "viewing" in get_vars:
dummy, item_id = get_vars.viewing.split(".")
if item_id != "None":
query = (table.send_inv_item_id == item_id ) | \
(table.recv_inv_item_id == item_id)
r.resource.add_filter(query)
return True
s3.prep = prep
output = s3_rest_controller("inv", "track_item",
rheader = s3db.inv_rheader,
)
if isinstance(output, dict) and \
"add_btn" in output:
del output["add_btn"]
return output
# -----------------------------------------------------------------------------
def inv_item_quantity():
"""
Access via the .json representation to avoid work rendering menus, etc
"""
try:
item_id = request.args[0]
except:
raise HTTP(400, current.xml.json_message(False, 400, "No value provided!"))
table = s3db.inv_inv_item
ptable = db.supply_item_pack
query = (table.id == item_id) & \
(table.item_pack_id == ptable.id)
record = db(query).select(table.quantity,
ptable.quantity,
limitby = (0, 1)
).first()
d = {"iquantity" : record.inv_inv_item.quantity,
"pquantity" : record.supply_item_pack.quantity,
}
output = json.dumps(d)
response.headers["Content-Type"] = "application/json"
return output
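# Illustrative JSON payload returned above (figures hypothetical):
#     {"iquantity": 30.0, "pquantity": 12.0}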
# -----------------------------------------------------------------------------
def inv_item_packs():
"""
Called by S3OptionsFilter to provide the pack options for a
particular Item
Access via the .json representation to avoid work rendering menus, etc
"""
try:
item_id = request.args[0]
except:
raise HTTP(400, current.xml.json_message(False, 400, "No value provided!"))
table = s3db.inv_inv_item
ptable = db.supply_item_pack
query = (table.id == item_id) & \
(table.item_id == ptable.item_id)
records = db(query).select(ptable.id,
ptable.name,
ptable.quantity)
output = records.json()
response.headers["Content-Type"] = "application/json"
return output
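# Illustrative JSON payload (ids and figures hypothetical), one entry per pack:
#     [{"id": 3, "name": "Box of 12", "quantity": 12.0}, {"id": 4, "name": "Each", "quantity": 1.0}]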
# =============================================================================
def send():
""" RESTful CRUD controller """
return s3db.inv_send_controller()
# ==============================================================================
def send_commit():
"""
Send a Shipment containing all items in a Commitment
"""
return s3db.req_send_commit()
# -----------------------------------------------------------------------------
def send_process():
""" Process a Shipment """
return s3db.inv_send_process()
# -----------------------------------------------------------------------------
def send_returns():
"""
This will cancel a shipment that has been sent
@todo need to roll back commitments
"""
try:
send_id = request.args[0]
except:
redirect(f="send")
stable = s3db.inv_send
if not auth.s3_has_permission("update", stable, record_id=send_id):
session.error = T("You do not have permission to return this sent shipment.")
send_record = db(stable.id == send_id).select(stable.status,
limitby = (0, 1)
).first()
inv_ship_status = s3db.inv_ship_status
if send_record.status == inv_ship_status["IN_PROCESS"]:
session.error = T("This shipment has not been sent - it cannot be returned because it can still be edited.")
if session.error:
redirect(URL(c="inv", f="send",
args=[send_id]))
rtable = s3db.inv_recv
tracktable = s3db.inv_track_item
# Okay no error so far, change the status to Returning
ADMIN = auth.get_system_roles().ADMIN
stable[send_id] = dict(status = inv_ship_status["RETURNING"],
owned_by_user = None,
owned_by_group = ADMIN)
recv_row = db(tracktable.send_id == send_id).select(tracktable.recv_id,
limitby = (0, 1)
).first()
if recv_row:
recv_id = recv_row.recv_id
rtable[recv_id] = dict(date = request.utcnow,
status = inv_ship_status["RETURNING"],
owned_by_user = None,
owned_by_group = ADMIN)
# Set all track items to status of returning
db(tracktable.send_id == send_id).update(status = s3db.inv_tracking_status["RETURNING"])
session.confirmation = T("Sent Shipment has returned, indicate how many items will be returned to Warehouse.")
redirect(URL(c="inv", f="send",
args=[send_id, "track_item"]))
# -----------------------------------------------------------------------------
def return_process():
"""
Return some stock from a shipment back into the warehouse
"""
try:
send_id = request.args[0]
except:
redirect(f="send")
stable = s3db.inv_send
if not auth.s3_has_permission("update", stable, record_id=send_id):
session.error = T("You do not have permission to return this sent shipment.")
send_record = db(stable.id == send_id).select(stable.status,
limitby = (0, 1)
).first()
inv_ship_status = s3db.inv_ship_status
if send_record.status != inv_ship_status["RETURNING"]:
session.error = T("This shipment has not been returned.")
if session.error:
redirect(URL(c="inv", f="send",
args=[send_id]))
invtable = s3db.inv_inv_item
rtable = s3db.inv_recv
tracktable = s3db.inv_track_item
# Okay no error so far, let's move the goods back into the warehouse
# and then change the status to received
# Update Receive record & lock for editing
# Move each item to the site
track_rows = db(tracktable.send_id == send_id).select(tracktable.id,
tracktable.quantity,
tracktable.return_quantity,
tracktable.send_inv_item_id,
)
for track_item in track_rows:
send_inv_id = track_item.send_inv_item_id
return_qnty = track_item.return_quantity
if return_qnty == None:
return_qnty = 0
# update the receive quantity in the tracking record
tracktable[track_item.id] = dict(recv_quantity = track_item.quantity - return_qnty)
if return_qnty:
db(invtable.id == send_inv_id).update(quantity = invtable.quantity + return_qnty)
ADMIN = auth.get_system_roles().ADMIN
stable[send_id] = dict(status = inv_ship_status["RECEIVED"],
owned_by_user = None,
owned_by_group = ADMIN)
recv_row = db(tracktable.send_id == send_id).select(tracktable.recv_id,
limitby = (0, 1)
).first()
if recv_row:
recv_id = recv_row.recv_id
rtable[recv_id] = dict(date = request.utcnow,
status = inv_ship_status["RECEIVED"],
owned_by_user = None,
owned_by_group = ADMIN)
# Change the status for all track items in this shipment to Received
db(tracktable.send_id == send_id).update(status = s3db.inv_tracking_status["RECEIVED"])
redirect(URL(f = "send",
args = [send_id]))
# -----------------------------------------------------------------------------
def send_cancel():
"""
This will cancel a shipment that has been sent
@todo need to roll back commitments
"""
try:
send_id = request.args[0]
except:
redirect(f="send")
stable = s3db.inv_send
if not auth.s3_has_permission("delete", stable, record_id=send_id):
session.error = T("You do not have permission to cancel this sent shipment.")
send_record = db(stable.id == send_id).select(stable.status,
limitby = (0, 1)
).first()
inv_ship_status = s3db.inv_ship_status
if send_record.status != inv_ship_status["SENT"]:
session.error = T("This shipment has not been sent - it has NOT been canceled because it can still be edited.")
if session.error:
redirect(URL(c="inv", f="send",
args=[send_id]))
rtable = s3db.inv_recv
tracktable = s3db.inv_track_item
# Okay no error so far, let's delete that baby
# Change the send and recv status to cancelled
ADMIN = auth.get_system_roles().ADMIN
db(stable.id == send_id).update(status = inv_ship_status["CANCEL"],
owned_by_user = None,
owned_by_group = ADMIN)
recv_row = db(tracktable.send_id == send_id).select(tracktable.recv_id,
limitby = (0, 1)
).first()
if recv_row:
recv_id = recv_row.recv_id
db(rtable.id == recv_id).update(date = request.utcnow,
status = inv_ship_status["CANCEL"],
owned_by_user = None,
owned_by_group = ADMIN)
# Change the track items status to canceled and then delete them
# If they are linked to a request then the in transit total will also be reduced
# Records can only be deleted if the status is In Process (or preparing)
# so change the status before we delete
tracking_status = s3db.inv_tracking_status
db(tracktable.send_id == send_id).update(status = tracking_status["IN_PROCESS"])
track_rows = db(tracktable.send_id == send_id).select(tracktable.id)
for track_item in track_rows:
s3db.inv_track_item_deleting(track_item.id)
# Now change the status to (cancelled)
db(tracktable.send_id == send_id).update(status = tracking_status["CANCEL"])
session.confirmation = T("Sent Shipment canceled and items returned to Warehouse")
redirect(URL(f = "send",
args = [send_id]))
# =============================================================================
def set_recv_attr(status):
"""
Set field attributes for inv_recv table
"""
recvtable = s3db.inv_recv
ship_status = s3db.inv_ship_status
recvtable.sender_id.readable = recvtable.sender_id.writable = False
recvtable.grn_status.readable = recvtable.grn_status.writable = False
recvtable.cert_status.readable = recvtable.cert_status.writable = False
recvtable.eta.readable = False
recvtable.req_ref.writable = True
if status == ship_status["IN_PROCESS"]:
recvtable.send_ref.writable = True
recvtable.recv_ref.readable = False
recvtable.sender_id.readable = False
else:
# Make all fields writable False
for field in recvtable.fields:
recvtable[field].writable = False
if status == ship_status["SENT"]:
recvtable.date.writable = True
recvtable.recipient_id.readable = recvtable.recipient_id.writable = True
recvtable.comments.writable = True
# -----------------------------------------------------------------------------
def recv():
""" RESTful CRUD controller """
recvtable = s3db.inv_recv
# Limit site_id to sites the user has permissions for
if settings.get_inv_shipment_name() == "order":
error_msg = T("You do not have permission for any facility to add an order.")
else:
error_msg = T("You do not have permission for any facility to receive a shipment.")
auth.permitted_facilities(table=recvtable, error_msg=error_msg)
tracktable = s3db.inv_track_item
atable = s3db.inv_adj_item
    # The inv_recv record might be created when the shipment is sent and so it
# might not have the recipient identified. If it is null then set it to
# the person who is logged in (the default)
id = request.args(0)
if id and isinstance(id, int):
record = db(recvtable.id == id).select(recvtable.recipient_id,
limitby = (0, 1)
).first()
try:
if record.recipient_id is None:
db(recvtable.id == id).update(recipient_id = auth.s3_logged_in_person())
except:
pass
status = s3db.inv_ship_status
SHIP_STATUS_IN_PROCESS = status["IN_PROCESS"]
SHIP_STATUS_SENT = status["SENT"]
SHIP_STATUS_RECEIVED = status["RECEIVED"]
SHIP_STATUS_CANCEL = status["CANCEL"]
status = s3db.inv_tracking_status
TRACK_STATUS_UNKNOWN = status["UNKNOWN"]
TRACK_STATUS_PREPARING = status["IN_PROCESS"]
TRACK_STATUS_TRANSIT = status["SENT"]
TRACK_STATUS_UNLOADING = status["UNLOADING"]
TRACK_STATUS_ARRIVED = status["RECEIVED"]
TRACK_STATUS_CANCELED = status["CANCEL"]
def set_track_attr(status):
# By default Make all fields writable False
for field in tracktable.fields:
tracktable[field].writable = False
# Hide some fields
tracktable.send_id.readable = False
tracktable.recv_id.readable = False
tracktable.bin.readable = False
tracktable.adj_item_id.readable = False
tracktable.recv_quantity.readable = True
if status == TRACK_STATUS_PREPARING:
# Show some fields
tracktable.item_source_no.writable = True
tracktable.item_id.writable = True
tracktable.item_pack_id.writable = True
tracktable.quantity.writable = True
tracktable.currency.writable = True
tracktable.pack_value.writable = True
tracktable.expiry_date.writable = True
tracktable.recv_bin.writable = True
tracktable.owner_org_id.writable = True
tracktable.supply_org_id.writable = True
tracktable.inv_item_status.writable = True
tracktable.comments.writable = True
tracktable.recv_quantity.readable = False
# Hide some fields
tracktable.send_inv_item_id.readable = False
# Change some labels - NO - use consistent labels
#tracktable.quantity.label = T("Quantity Delivered")
tracktable.recv_bin.label = T("Bin")
elif status == TRACK_STATUS_TRANSIT:
# Hide the values that will be copied from the inv_inv_item record
tracktable.send_inv_item_id.readable = False
tracktable.send_inv_item_id.writable = False
tracktable.item_source_no.readable = True
tracktable.item_source_no.writable = False
# Display the values that can only be entered on create
tracktable.recv_quantity.writable = True
tracktable.recv_bin.readable = True
tracktable.recv_bin.writable = True
tracktable.comments.writable = True
# This is a received purchase so change the label to reflect this - NO - use consistent labels
#tracktable.quantity.label = T("Quantity Delivered")
elif status == TRACK_STATUS_ARRIVED:
tracktable.item_source_no.readable = True
tracktable.item_source_no.writable = False
tracktable.item_id.writable = False
tracktable.send_inv_item_id.writable = False
tracktable.item_pack_id.writable = False
tracktable.quantity.writable = False
tracktable.currency.writable = False
tracktable.pack_value.writable = False
tracktable.expiry_date.writable = False
tracktable.owner_org_id.writable = False
tracktable.supply_org_id.writable = False
tracktable.recv_bin.readable = True
tracktable.recv_bin.writable = True
def prep(r):
record = r.record
if record and \
record.status not in (SHIP_STATUS_IN_PROCESS, SHIP_STATUS_SENT):
# Now that the shipment has been sent
# lock the record so that it can't be meddled with
s3db.configure("inv_recv",
create = False,
deletable = False,
editable = False,
listadd = False,
)
component = r.component
if record and component and component.name == "track_item":
# Can only create or delete track items for a recv record
# if the status is preparing:
if r.method == "create" or r.method == "delete":
if record.status != SHIP_STATUS_IN_PROCESS:
return False
# Configure which fields in track_item are readable/writable
# depending on status:
if r.component_id:
track_record = db(tracktable.id == r.component_id).select(tracktable.status,
limitby = (0, 1)
).first()
set_track_attr(track_record.status)
else:
set_track_attr(TRACK_STATUS_PREPARING)
tracktable.status.readable = False
# Adjust CRUD strings
if record.status == SHIP_STATUS_IN_PROCESS:
s3.crud_strings.inv_recv.title_update = \
s3.crud_strings.inv_recv.title_display = T("Process Received Shipment")
# Default the Supplier/Donor to the Org sending the shipment
tracktable.supply_org_id.default = record.organisation_id
else:
# Configure which fields in inv_recv are readable/writable
# depending on status
if r.id:
record = db(recvtable.id == r.id).select(recvtable.status,
limitby = (0, 1)
).first()
set_recv_attr(record.status)
else:
set_recv_attr(SHIP_STATUS_IN_PROCESS)
recvtable.recv_ref.readable = False
if r.method and r.method != "read":
# Don't want to see in Create forms
recvtable.status.readable = False
return True
s3.prep = prep
if len(request.args) > 1 and request.args[1] == "track_item":
record = db(recvtable.id == request.args[0]).select(recvtable.status,
limitby = (0, 1)
).first()
status = record.status if record else None
if status == SHIP_STATUS_SENT:
list_fields = ["id",
"status",
"item_id",
"item_pack_id",
"quantity",
"currency",
"pack_value",
"recv_quantity",
"recv_bin",
"owner_org_id",
"supply_org_id",
]
s3db.configure("inv_track_item",
# Remove CRUD generated buttons in the tabs
create = False,
deletable = False,
editable = True,
listadd = False,
list_fields = list_fields,
)
elif status:
# Remove CRUD generated buttons in the tabs
s3db.configure("inv_track_item",
create = False,
deletable = False,
editable = False,
listadd = False,
)
output = s3_rest_controller(rheader = s3db.inv_recv_rheader,
)
return output
# -----------------------------------------------------------------------------
def req_items_for_inv(site_id, quantity_type):
"""
Used by recv_process & send_process
returns a dict of unique req items (with min db.req_req.date_required | db.req_req.date)
key = item_id
@param site_id: The inventory to find the req_items from
@param quantity_type: str ("commit", "transit" or "fulfil) The
quantity type which will be used to determine if this item is still outstanding
"""
if not settings.has_module("req"):
return Storage()
table = s3db.req_req
itable = s3db.req_req_item
query = (table.site_id == site_id) & \
(table.id == itable.req_id) & \
(itable.item_pack_id == itable.item_pack_id) & \
(itable["quantity_%s" % quantity_type] < itable.quantity) & \
(table.cancel == False) & \
(table.deleted == False) & \
(itable.deleted == False)
req_items = db(query).select(itable.id,
itable.req_id,
itable.item_id,
itable.quantity,
itable["quantity_%s" % quantity_type],
itable.item_pack_id,
orderby = table.date_required | table.date,
#groupby = itable.item_id
)
# Because groupby doesn't follow the orderby, this will remove any
# duplicate req_item, using the first record according to the orderby
    # req_items = req_items.as_dict( key = "req_req_item.item_id") <- doesn't work
# @todo: web2py Rows.as_dict function could be extended to enable this functionality instead
req_item_ids = []
unique_req_items = Storage()
for req_item in req_items:
if req_item.item_id not in req_item_ids:
# This item is not already in the dict
unique_req_items[req_item.item_id] = Storage( req_item.as_dict() )
req_item_ids.append(req_item.item_id)
return unique_req_items
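# Illustrative return value for quantity_type="transit" (ids and figures hypothetical),
# keyed by item_id:
#     Storage({42: Storage(id=7, req_id=3, item_id=42, quantity=100,
#                          quantity_transit=40, item_pack_id=1)})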
# -----------------------------------------------------------------------------
def req_item_in_shipment(shipment_item,
shipment_type,
req_items,
):
"""
Checks if a shipment item is in a request and updates req_item
and the shipment.
"""
shipment_item_table = "inv_%s_item" % shipment_type
try:
item_id = shipment_item[shipment_item_table].item_id
except:
item_id = shipment_item.inv_inv_item.item_id
# Check for req_items
if item_id in req_items:
shipment_to_req_type = {"recv": "fulfil",
"send": "transit",
}
quantity_req_type = "quantity_%s" % shipment_to_req_type[shipment_type]
# This item has been requested from this inv
req_item = req_items[item_id]
req_item_id = req_item.id
# Update the req quantity
        # convert the shipment item's quantity into the req_item.quantity_fulfil (according to pack)
quantity = req_item[quantity_req_type] + \
(shipment_item[shipment_item_table].pack_quantity / \
req_item.pack_quantity) * \
shipment_item[shipment_item_table].quantity
quantity = min(quantity, req_item.quantity) #Cap at req. quantity
s3db.req_req_item[req_item_id] = {quantity_req_type: quantity}
# Link the shipment_item to the req_item
s3db[shipment_item_table][shipment_item[shipment_item_table].id] = \
{"req_item_id": req_item_id}
# Flag req record to update status_fulfil
return req_item.req_id, req_item.id
else:
return None, None
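# Worked example (hypothetical figures): a shipment line of 2 boxes whose pack holds
# 10 units, matched against a request tracked in single units, adds 2 * 10 / 1 = 20
# to the request item's quantity_transit (or quantity_fulfil on receive), capped at
# the requested quantity.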
# -----------------------------------------------------------------------------
def recv_process():
""" Receive a Shipment """
try:
recv_id = long(request.args[0])
except (IndexError, ValueError):
# recv_id missing from URL or invalid
redirect(URL(f="recv"))
rtable = s3db.inv_recv
if not auth.s3_has_permission("update", rtable, record_id=recv_id):
session.error = T("You do not have permission to receive this shipment.")
redirect(URL(c="inv", f="recv", args=[recv_id]))
recv_record = db(rtable.id == recv_id).select(rtable.date,
rtable.status,
rtable.site_id,
rtable.recv_ref,
limitby = (0, 1)
).first()
# Check status
status = recv_record.status
inv_ship_status = s3db.inv_ship_status
if status == inv_ship_status["RECEIVED"]:
session.error = T("This shipment has already been received.")
redirect(URL(c="inv", f="recv", args=[recv_id]))
elif status == inv_ship_status["CANCEL"]:
session.error = T("This shipment has already been received & subsequently canceled.")
redirect(URL(c="inv", f="recv", args=[recv_id]))
# Update Receive record & lock for editing
ADMIN = auth.get_system_roles().ADMIN
data = {"status": inv_ship_status["RECEIVED"],
"owned_by_user": None,
"owned_by_group": ADMIN,
}
if not recv_record.recv_ref:
# No recv_ref yet? => add one now
code = s3db.supply_get_shipping_code(settings.get_inv_recv_shortname(),
recv_record.site_id,
s3db.inv_recv.recv_ref,
)
data["recv_ref"] = code
if not recv_record.date:
# Date not set? => set to now
data["date"] = request.utcnow
db(rtable.id == recv_id).update(**data)
# Update the Send record & lock for editing
stable = db.inv_send
tracktable = db.inv_track_item
send_row = db(tracktable.recv_id == recv_id).select(tracktable.send_id,
limitby = (0, 1)
).first()
if send_row:
send_id = send_row.send_id
db(stable.id == send_id).update(status = inv_ship_status["RECEIVED"],
owned_by_user = None,
owned_by_group = ADMIN,
)
# Change the status for all track items in this shipment to Unloading
# the onaccept will then move the values into the site, update any request
# record, create any adjustment if needed and change the status to Arrived
db(tracktable.recv_id == recv_id).update(status = 3)
# Move each item to the site
track_rows = db(tracktable.recv_id == recv_id).select()
for track_item in track_rows:
row = Storage(track_item)
s3db.inv_track_item_onaccept(Storage(vars = Storage(id=row.id),
record = row,
))
# Done => confirmation message, open the record
session.confirmation = T("Shipment Items Received")
redirect(URL(c="inv", f="recv", args=[recv_id]))
# -----------------------------------------------------------------------------
def recv_cancel():
"""
Cancel a Received Shipment
@todo what to do if the quantity cancelled doesn't exist?
"""
try:
recv_id = request.args[0]
except:
redirect(URL(f="recv"))
rtable = s3db.inv_recv
if not auth.s3_has_permission("delete", rtable, record_id=recv_id):
session.error = T("You do not have permission to cancel this received shipment.")
redirect(URL(c="inv", f="recv", args=[recv_id]))
recv_record = db(rtable.id == recv_id).select(rtable.status,
limitby = (0, 1)
).first()
inv_ship_status = s3db.inv_ship_status
if recv_record.status != inv_ship_status["RECEIVED"]:
session.error = T("This shipment has not been received - it has NOT been canceled because it can still be edited.")
redirect(URL(c="inv", f="recv", args=[recv_id]))
stable = s3db.inv_send
tracktable = s3db.inv_track_item
inv_item_table = s3db.inv_inv_item
ritable = s3db.req_req_item
siptable = s3db.supply_item_pack
# Go through each item in the shipment remove them from the site store
# and put them back in the track item record
query = (tracktable.recv_id == recv_id) & \
(tracktable.deleted == False)
recv_items = db(query).select(tracktable.recv_inv_item_id,
tracktable.recv_quantity,
tracktable.send_id,
)
send_id = None
for recv_item in recv_items:
inv_item_id = recv_item.recv_inv_item_id
# This assumes that the inv_item has the quantity
quantity = inv_item_table.quantity - recv_item.recv_quantity
if quantity == 0:
db(inv_item_table.id == inv_item_id).delete()
else:
db(inv_item_table.id == inv_item_id).update(quantity = quantity)
db(tracktable.recv_id == recv_id).update(status = 2) # In transit
# @todo potential problem in that the send id should be the same for all track items but is not explicitly checked
if send_id is None and recv_item.send_id is not None:
send_id = recv_item.send_id
track_rows = db(tracktable.recv_id == recv_id).select(tracktable.req_item_id,
tracktable.item_pack_id,
tracktable.recv_quantity,
)
for track_item in track_rows:
# If this is linked to a request
# then remove these items from the quantity in fulfil
if track_item.req_item_id:
req_id = track_item.req_item_id
req_item = db(ritable.id == req_id).select(ritable.quantity_fulfil,
ritable.item_pack_id,
limitby = (0, 1)
).first()
req_quantity = req_item.quantity_fulfil
# @ToDo: Optimise by reading these 2 in a single DB query
req_pack_quantity = db(siptable.id == req_item.item_pack_id).select(siptable.quantity,
limitby = (0, 1)
).first().quantity
track_pack_quantity = db(siptable.id == track_item.item_pack_id).select(siptable.quantity,
limitby = (0, 1)
).first().quantity
quantity_fulfil = s3db.supply_item_add(req_quantity,
req_pack_quantity,
- track_item.recv_quantity,
track_pack_quantity
)
db(ritable.id == req_id).update(quantity_fulfil = quantity_fulfil)
s3db.req_update_status(req_id)
# Now set the recv record to cancelled and the send record to sent
ADMIN = auth.get_system_roles().ADMIN
db(rtable.id == recv_id).update(date = request.utcnow,
status = inv_ship_status["CANCEL"],
owned_by_user = None,
owned_by_group = ADMIN
)
if send_id != None:
# The sent record is now set back to SENT so the source warehouse can
# now cancel this record to get the stock back into their warehouse.
# IMPORTANT reports need to locate this record otherwise it can be
# a mechanism to circumvent the auditing of stock
db(stable.id == send_id).update(status = inv_ship_status["SENT"],
owned_by_user = None,
owned_by_group = ADMIN
)
redirect(URL(c="inv", f="recv",
args = [recv_id]
))
# =============================================================================
def track_item():
""" RESTful CRUD controller """
table = s3db.inv_track_item
s3db.configure("inv_track_item",
create = False,
deletable = False,
editable = False,
insertable = False,
listadd = False,
)
report = get_vars.get("report")
if report == "rel":
# Summary of Releases
s3.crud_strings["inv_track_item"] = Storage(title_list = T("Summary of Releases"),
subtitle_list = T("Summary Details"),
)
s3db.configure("inv_track_item",
list_fields = ["id",
#"send_id",
#"req_item_id",
(T("Date Released"), "send_id$date"),
(T("Beneficiary"), "send_id$site_id"),
(settings.get_inv_send_shortname(), "send_id$send_ref"),
(settings.get_req_shortname(), "send_id$req_ref"),
(T("Items/Description"), "item_id"),
(T("Source"), "supply_org_id"),
(T("Unit"), "item_pack_id"),
(T("Quantity"), "quantity"),
(T("Unit Cost"), "pack_value"),
(T("Total Cost"), "total_value"),
],
orderby = "inv_send.site_id",
sort = True
)
s3.filter = (FS("send_id") != None)
elif report == "inc":
# Summary of Incoming Supplies
s3.crud_strings["inv_track_item"] = Storage(title_list = T("Summary of Incoming Supplies"),
subtitle_list = T("Summary Details"),
)
s3db.configure("inv_track_item",
list_fields = ["id",
(T("Date Received"), "recv_id$date"),
(T("Received By"), "recv_id$recipient_id"),
(settings.get_inv_send_shortname(), "recv_id$send_ref"),
(settings.get_inv_recv_shortname(), "recv_id$recv_ref"),
(settings.get_proc_shortname(), "recv_id$purchase_ref"),
(T("Item/Description"), "item_id"),
(T("Unit"), "item_pack_id"),
(T("Quantity"), "quantity"),
(T("Unit Cost"), "pack_value"),
(T("Total Cost"), "total_value"),
(T("Source"), "supply_org_id"),
(T("Remarks"), "comments"),
],
orderby = "inv_recv.recipient_id",
)
s3.filter = (FS("recv_id") != None)
elif report == "util":
# Utilization Report
s3.crud_strings["inv_track_item"] = Storage(title_list = T("Utilization Report"),
subtitle_list = T("Utilization Details"),
)
s3db.configure("inv_track_item",
list_fields = ["id",
(T("Item/Description"), "item_id$name"),
(T("Beneficiary"), "send_id$site_id"),
(settings.get_inv_send_shortname(), "send_id$send_ref"),
(settings.get_req_shortname(), "send_id$req_ref"),
(T("Items/Description"), "item_id"),
(T("Source"), "supply_org_id"),
(T("Unit"), "item_pack_id"),
(T("Quantity"), "quantity"),
(T("Unit Cost"), "pack_value"),
(T("Total Cost"), "total_value"),
]
)
s3.filter = (FS("item_id") != None)
elif report == "exp":
# Expiration Report
s3.crud_strings["inv_track_item"] = Storage(title_list = T("Expiration Report"),
subtitle_list = T("Expiration Details"),
)
s3db.configure("inv_track_item",
list_fields = ["id",
"recv_inv_item_id$site_id",
(T("Item/Description"), "item_id"),
(T("Expiration Date"), "expiry_date"),
(T("Source"), "supply_org_id"),
(T("Unit"), "item_pack_id"),
(T("Quantity"), "quantity"),
(T("Unit Cost"), "pack_value"),
(T("Total Cost"), "total_value"),
]
)
s3.filter = (FS("expiry_date") != None)
output = s3_rest_controller(rheader = s3db.inv_rheader,
)
return output
# =============================================================================
def adj():
""" RESTful CRUD controller """
table = s3db.inv_adj
# Limit site_id to sites the user has permissions for
error_msg = T("You do not have permission to adjust the stock level in this warehouse.")
auth.permitted_facilities(table=table, error_msg=error_msg)
def prep(r):
if r.interactive:
if r.component:
if r.component_name == "adj_item":
if r.component_id:
aitable = s3db.inv_adj_item
if r.record.status == 0:
aitable.reason.writable = True
record = db(aitable.id == r.component_id).select(aitable.inv_item_id,
limitby = (0, 1)
).first()
if record.inv_item_id:
aitable.item_id.writable = False
aitable.item_id.comment = None
aitable.item_pack_id.writable = False
elif r.component_name == "image":
doc_table = s3db.doc_image
doc_table.organisation_id.readable = doc_table.organisation_id.writable = False
doc_table.person_id.readable = doc_table.person_id.writable = False
doc_table.location_id.readable = doc_table.location_id.writable = False
else:
# if an adjustment has been selected and it has been completed
# then make the fields read only
if r.record and r.record.status:
table.adjuster_id.writable = False
table.site_id.writable = False
table.comments.writable = False
else:
if "item" in get_vars and "site" in get_vars:
                        # create an adj record with a single adj_item record
adj_id = table.insert(adjuster_id = auth.s3_logged_in_person(),
site_id = get_vars.site,
adjustment_date = request.utcnow,
status = 0,
category = 1,
comments = "Single item adjustment"
)
inv_item_table = s3db.inv_inv_item
inv_item = inv_item_table[get_vars.item]
adjitemtable = s3db.inv_adj_item
adj_item_id = adjitemtable.insert(reason = 0,
adj_id = adj_id,
inv_item_id = inv_item.id, # original source inv_item
item_id = inv_item.item_id, # the supply item
item_pack_id = inv_item.item_pack_id,
old_quantity = inv_item.quantity,
currency = inv_item.currency,
old_status = inv_item.status,
new_status = inv_item.status,
old_pack_value = inv_item.pack_value,
new_pack_value = inv_item.pack_value,
expiry_date = inv_item.expiry_date,
bin = inv_item.bin,
old_owner_org_id = inv_item.owner_org_id,
new_owner_org_id = inv_item.owner_org_id,
)
redirect(URL(c = "inv",
f = "adj",
args = [adj_id,
"adj_item",
adj_item_id,
"update"]
))
else:
table.comments.default = "Complete Stock Adjustment"
if "site" in get_vars:
table.site_id.writable = True
table.site_id.default = get_vars.site
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
s3_action_buttons(r, deletable=False)
return output
s3.postp = postp
args = request.args
if len(args) > 1 and args[1] == "adj_item" and \
table[args[0]].status:
# remove CRUD generated buttons in the tabs
s3db.configure("inv_adj_item",
create = False,
deletable = False,
editable = False,
listadd = False,
)
output = s3_rest_controller(rheader = s3db.inv_adj_rheader,
)
return output
# -----------------------------------------------------------------------------
def adj_close():
""" RESTful CRUD controller """
try:
adj_id = request.args[0]
except:
redirect(URL(f="adj"))
atable = s3db.inv_adj
# Limit site_id to sites the user has permissions for
error_msg = T("You do not have permission to adjust the stock level in this warehouse.")
auth.permitted_facilities(table=atable, error_msg=error_msg)
adj_rec = db(atable.id == adj_id).select(atable.status,
atable.site_id,
limitby = (0, 1)
).first()
if adj_rec.status != 0:
session.error = T("This adjustment has already been closed.")
if session.error:
redirect(URL(c="inv", f="adj",
args = [adj_id]
))
aitable = s3db.inv_adj_item
inv_item_table = s3db.inv_inv_item
get_realm_entity = auth.get_realm_entity
site_id = adj_rec.site_id
# Go through all the adj_items
query = (aitable.adj_id == adj_id) & \
(aitable.deleted == False)
adj_items = db(query).select()
for adj_item in adj_items:
if adj_item.inv_item_id is None:
# Create a new stock item
inv_item = {"site_id": site_id,
"item_id": adj_item.item_id,
"item_pack_id": adj_item.item_pack_id,
"currency": adj_item.currency,
"bin": adj_item.bin,
"pack_value": adj_item.old_pack_value,
"expiry_date": adj_item.expiry_date,
"quantity": adj_item.new_quantity,
"owner_org_id": adj_item.old_owner_org_id,
}
inv_item_id = inv_item_table.insert(**inv_item)
# Apply the realm entity
inv_item["id"] = inv_item_id
realm_entity = get_realm_entity(inv_item_table, inv_item)
db(inv_item_table.id == inv_item_id).update(realm_entity = realm_entity)
# Add the inventory item id to the adjustment record
db(aitable.id == adj_item.id).update(inv_item_id = inv_item_id)
elif adj_item.new_quantity is not None:
# Update the existing stock item
db(inv_item_table.id == adj_item.inv_item_id).update(item_pack_id = adj_item.item_pack_id,
bin = adj_item.bin,
pack_value = adj_item.old_pack_value,
expiry_date = adj_item.expiry_date,
quantity = adj_item.new_quantity,
owner_org_id = adj_item.new_owner_org_id,
status = adj_item.new_status,
)
# Change the status of the adj record to Complete
db(atable.id == adj_id).update(status = 1)
# Go to the Inventory of the Site which has adjusted these items
(prefix, resourcename, id) = s3db.get_instance(s3db.org_site,
site_id)
redirect(URL(c = prefix,
f = resourcename,
args = [id, "inv_item"],
))
# =============================================================================
def recv_item_json():
"""
Used by s3.supply.js
        Access via the .json representation to avoid the work of rendering menus, etc
"""
try:
item_id = request.args[0]
except:
raise HTTP(400, current.xml.json_message(False, 400, "No value provided!"))
stable = s3db.org_site
rtable = s3db.inv_recv
ittable = s3db.inv_track_item
rtable.date.represent = lambda dt: dt[:10]
query = (ittable.req_item_id == item_id) & \
(rtable.id == ittable.recv_id) & \
(rtable.site_id == stable.id) & \
(rtable.status == s3db.inv_ship_status["RECEIVED"]) & \
(ittable.deleted == False)
records = db(query).select(rtable.id,
rtable.date,
stable.name,
ittable.quantity)
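    # Build a JSON array whose first element is a header row (series label
    # plus a "#" quantity placeholder), followed by the matching records
    # (records.json() with its opening bracket stripped).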
output = "[%s,%s" % (json.dumps(dict(id = str(T("Received")),
quantity = "#"
)),
records.json()[1:])
response.headers["Content-Type"] = "application/json"
return output
# -----------------------------------------------------------------------------
def send_item_json():
"""
Used by s3.supply.js
        Access via the .json representation to avoid the work of rendering menus, etc
"""
try:
item_id = request.args[0]
except:
raise HTTP(400, current.xml.json_message(False, 400, "No value provided!"))
stable = s3db.org_site
istable = s3db.inv_send
ittable = s3db.inv_track_item
inv_ship_status = s3db.inv_ship_status
istable.date.represent = lambda dt: dt[:10]
query = (ittable.req_item_id == item_id) & \
(istable.id == ittable.send_id) & \
(istable.site_id == stable.id) & \
((istable.status == inv_ship_status["SENT"]) | \
(istable.status == inv_ship_status["RECEIVED"])) & \
(ittable.deleted == False)
records = db(query).select(istable.id,
istable.date,
stable.name,
ittable.quantity)
output = "[%s,%s" % (json.dumps(dict(id = str(T("Sent")),
quantity = "#"
)),
records.json()[1:])
response.headers["Content-Type"] = "application/json"
return output
# -----------------------------------------------------------------------------
def kitting():
return s3_rest_controller(rheader = s3db.inv_rheader,
)
# -----------------------------------------------------------------------------
def facility():
# Open record in this controller after creation
s3db.configure("org_facility",
create_next = URL(c="inv", f="facility",
args = ["[id]", "read"]),
)
return s3db.org_facility_controller()
# -----------------------------------------------------------------------------
def facility_type():
return s3_rest_controller("org")
# -----------------------------------------------------------------------------
def incoming():
"""
Incoming Shipments for Sites
Used from Requests rheader when looking at Transport Status
"""
# @ToDo: Create this function!
return s3db.inv_incoming()
# -----------------------------------------------------------------------------
def req_match():
""" Match Requests """
return s3db.req_match()
# END =========================================================================
| 43.749863
| 142
| 0.464244
|
1ced76791b4e15b5dc10f620fcd5f06c71cd7b7f
| 2,852
|
py
|
Python
|
project/apps/adjudication/tests/factories.py
|
dbinetti/barberscore
|
13c3d8193834bd2bb79922e28d3f5ab1675bdffd
|
[
"BSD-2-Clause"
] | 13
|
2017-08-07T15:45:49.000Z
|
2019-07-03T13:58:50.000Z
|
project/apps/adjudication/tests/factories.py
|
barberscore/barberscore-api
|
2aa9f8598c18c28ba1d4a294f76fd055619f803e
|
[
"BSD-2-Clause"
] | 309
|
2017-07-14T02:34:12.000Z
|
2022-01-14T21:37:02.000Z
|
project/apps/adjudication/tests/factories.py
|
dbinetti/barberscore-django
|
16fbd9945becda0a765bbdf52ad459a63655128f
|
[
"BSD-2-Clause"
] | 5
|
2017-08-07T14:01:07.000Z
|
2019-06-24T19:44:55.000Z
|
# Standard Library
import datetime
import rest_framework_jwt
# Third-Party
from factory import Faker # post_generation,
from factory import Iterator
from factory import LazyAttribute
from factory import PostGenerationMethodCall
from factory import RelatedFactory
from factory import Sequence
from factory import SubFactory
from factory.django import DjangoModelFactory
from factory.django import mute_signals
from factory.fuzzy import FuzzyInteger
# Django
from django.db.models.signals import pre_delete
from django.db.models.signals import pre_save
from django.db.models.signals import m2m_changed
from django_fsm.signals import post_transition
# First-Party
from apps.adjudication.models import Appearance
from apps.adjudication.models import Outcome
from apps.adjudication.models import Panelist
from apps.adjudication.models import Round
from apps.adjudication.models import Score
from apps.adjudication.models import Song
from rest_framework_jwt.models import User
@mute_signals(post_transition)
class AppearanceFactory(DjangoModelFactory):
status = Appearance.STATUS.new
num = 1
actual_start = None
actual_finish = None
round = SubFactory('apps.adjudication.tests.factories.RoundFactory')
# group = SubFactory('factories.GroupFactory')
class Meta:
model = Appearance
class OutcomeFactory(DjangoModelFactory):
round = SubFactory('apps.adjudication.tests.factories.RoundFactory')
# award = SubFactory('factories.AwardFactory')
class Meta:
model = Outcome
@mute_signals(post_transition)
class PanelistFactory(DjangoModelFactory):
status = Panelist.STATUS.new
kind = Panelist.KIND.official
category = Panelist.CATEGORY.drcj
round = SubFactory('apps.adjudication.tests.factories.RoundFactory')
# person = SubFactory('factories.PersonFactory')
class Meta:
model = Panelist
@mute_signals(post_transition)
class RoundFactory(DjangoModelFactory):
status = Round.STATUS.new
kind = Round.KIND.finals
num = 1
class Meta:
model = Round
class ScoreFactory(DjangoModelFactory):
status = Score.STATUS.new
points = FuzzyInteger(50, 90)
song = SubFactory('apps.adjudication.tests.factories.SongFactory')
panelist = SubFactory('apps.adjudication.tests.factories.PanelistFactory')
class Meta:
model = Score
class SongFactory(DjangoModelFactory):
status = Song.STATUS.new
num = 1
appearance = SubFactory('apps.adjudication.tests.factories.AppearanceFactory')
# chart = None
class Meta:
model = Song
@mute_signals(pre_delete, pre_save, m2m_changed)
class UserFactory(DjangoModelFactory):
name = Faker('name_male')
email = Faker('email')
password = PostGenerationMethodCall('set_password', 'password')
is_staff = False
class Meta:
model = User
| 26.407407
| 82
| 0.761571
|
4faf69786777daab9c3debdefa35f5c67125dca4
| 4,597
|
py
|
Python
|
tests/test_misc.py
|
cswartzvi/conda
|
b2e0ed6b6119b7623d8f64c47d4d04f56e9cf137
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_misc.py
|
cswartzvi/conda
|
b2e0ed6b6119b7623d8f64c47d4d04f56e9cf137
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_misc.py
|
cswartzvi/conda
|
b2e0ed6b6119b7623d8f64c47d4d04f56e9cf137
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import tempfile
import unittest
from conda.core.subdir_data import cache_fn_url
from conda.misc import url_pat, walk_prefix
from conda.utils import Utf8NamedTemporaryFile
class TestMisc(unittest.TestCase):
def test_Utf8NamedTemporaryFile(self):
test_string = 'ōγђ家固한áêñßôç'
try:
with Utf8NamedTemporaryFile(delete=False) as tf:
tf.write(test_string.encode('utf-8') if hasattr(test_string, 'encode') else test_string)
fname = tf.name
with codecs.open(fname, mode='rb', encoding='utf-8') as fh:
value = fh.read()
assert value == test_string
except Exception as e:
raise e
def test_using_system_temp(self):
tempdir = tempfile.gettempdir()
saved_temp_env = os.environ.get("CONDA_USE_PREFIX_TEMP")
if saved_temp_env:
del os.environ["CONDA_USE_PREFIX_TEMP"]
try:
with Utf8NamedTemporaryFile(delete=False) as tf:
fname = tf.name
assert os.path.commonprefix([tempdir, fname]) == tempdir
finally:
if saved_temp_env:
os.environ["CONDA_USE_PREFIX_TEMP"] = saved_temp_env
def test_using_prefix_temp(self):
tempdir = tempfile.gettempdir()
saved_temp_env = os.environ.get("CONDA_USE_PREFIX_TEMP")
if not saved_temp_env:
os.environ["CONDA_USE_PREFIX_TEMP"] = "TRUE"
try:
with Utf8NamedTemporaryFile(delete=False) as tf:
fname = tf.name
assert os.path.commonprefix([tempdir, fname]) != tempdir
assert os.path.commonprefix([sys.prefix, fname]) == sys.prefix
finally:
if not saved_temp_env:
del os.environ["CONDA_USE_PREFIX_TEMP"]
def test_cache_fn_url(self):
url = "http://repo.continuum.io/pkgs/pro/osx-64/"
# implicit repodata.json
self.assertEqual(cache_fn_url(url), '7618c8b6.json')
# explicit repodata.json
self.assertEqual(cache_fn_url(url, 'repodata.json'), '7618c8b6.json')
# explicit current_repodata.json
self.assertEqual(cache_fn_url(url, "current_repodata.json"), '8be5dc16.json')
url = "http://repo.anaconda.com/pkgs/pro/osx-64/"
self.assertEqual(cache_fn_url(url), 'e42afea8.json')
def test_url_pat_1(self):
m = url_pat.match('http://www.cont.io/pkgs/linux-64/foo.tar.bz2'
'#d6918b03927360aa1e57c0188dcb781b')
self.assertEqual(m.group('url_p'), 'http://www.cont.io/pkgs/linux-64')
self.assertEqual(m.group('fn'), 'foo.tar.bz2')
self.assertEqual(m.group('md5'), 'd6918b03927360aa1e57c0188dcb781b')
def test_url_pat_2(self):
m = url_pat.match('http://www.cont.io/pkgs/linux-64/foo.tar.bz2')
self.assertEqual(m.group('url_p'), 'http://www.cont.io/pkgs/linux-64')
self.assertEqual(m.group('fn'), 'foo.tar.bz2')
self.assertEqual(m.group('md5'), None)
def test_url_pat_3(self):
m = url_pat.match('http://www.cont.io/pkgs/linux-64/foo.tar.bz2#1234')
self.assertEqual(m, None)
def make_mock_directory(tmpdir, mock_directory):
for key, value in mock_directory.items():
if value is None:
tmpdir.join(key).write("TEST")
else:
make_mock_directory(tmpdir.mkdir(key), value)
def test_walk_prefix(tmpdir): # tmpdir is a py.test utility
# Each directory is a dict whose keys are names. If the value is
# None, then that key represents a file. If it's another dict, that key is
    # a directory
mock_directory = {
"LICENSE.txt": None,
"envs": {"ignore1": None,
"ignore2": None},
"python.app": None,
"bin": {"activate": None,
"conda": None,
"deactivate": None,
"testfile": None},
"testdir1": {"testfile": None,
"testdir2": {"testfile": None}},
"testfile1": None,
}
make_mock_directory(tmpdir, mock_directory)
    # walk_prefix has windows_forward_slashes on by default, so we don't need
# any special-casing there
answer = {"testfile1", "bin/testfile", "testdir1/testfile",
"testdir1/testdir2/testfile"}
if sys.platform != "darwin":
answer.add("python.app")
assert walk_prefix(tmpdir.strpath) == answer
if __name__ == '__main__':
unittest.main()
| 35.361538
| 104
| 0.621057
|
2e303b9e4b92b8d4727af1d75cc1a9cc053be5c4
| 1,403
|
py
|
Python
|
atg_project/urls.py
|
zaryab2000/DJANGO_BLOG_ATG
|
b09f2624eff3d5a807b1e9242aa29072980e92eb
|
[
"MIT"
] | null | null | null |
atg_project/urls.py
|
zaryab2000/DJANGO_BLOG_ATG
|
b09f2624eff3d5a807b1e9242aa29072980e92eb
|
[
"MIT"
] | null | null | null |
atg_project/urls.py
|
zaryab2000/DJANGO_BLOG_ATG
|
b09f2624eff3d5a807b1e9242aa29072980e92eb
|
[
"MIT"
] | null | null | null |
"""atg_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
from users import views as user_views
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('blog.urls')),
path('register/', user_views.register, name='register'),
path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'),name='logout'),
# path('profileupdate/',user_views.profileupdate, name='profileupdate'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 40.085714
| 100
| 0.73129
|
783c97aec79ed0f418674b2b9218d2ffe84a5b9e
| 184
|
py
|
Python
|
python/Exercicios/ex097.py
|
Robert-Marchinhaki/primeiros-passos-Python
|
515c2c418bfb941bd9af14cf598eca7fe2985592
|
[
"MIT"
] | null | null | null |
python/Exercicios/ex097.py
|
Robert-Marchinhaki/primeiros-passos-Python
|
515c2c418bfb941bd9af14cf598eca7fe2985592
|
[
"MIT"
] | null | null | null |
python/Exercicios/ex097.py
|
Robert-Marchinhaki/primeiros-passos-Python
|
515c2c418bfb941bd9af14cf598eca7fe2985592
|
[
"MIT"
] | null | null | null |
def mensagem(num, txt):
print('-' * (num + 2))
print('', txt)
print('-' * (num + 2))
frase = str(input('Digite sua frase: ')).strip()
mensagem(num=len(frase), txt=frase)
| 20.444444
| 48
| 0.554348
|
d1e10e2bd4cb53a9d5ea81b4a80dd4f2ed3ca1b3
| 13,328
|
py
|
Python
|
mayan/apps/documents/models/document_models.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 343
|
2015-01-05T14:19:35.000Z
|
2018-12-10T19:07:48.000Z
|
mayan/apps/documents/models/document_models.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 191
|
2015-01-03T00:48:19.000Z
|
2018-11-30T09:10:25.000Z
|
mayan/apps/documents/models/document_models.py
|
bonitobonita24/Mayan-EDMS
|
7845fe0e1e83c81f5d227a16116397a3d3883b85
|
[
"Apache-2.0"
] | 114
|
2015-01-08T20:21:05.000Z
|
2018-12-10T19:07:53.000Z
|
import logging
from pathlib import Path
import uuid
from django.apps import apps
from django.core.files import File
from django.db import models, transaction
from django.urls import reverse
from django.utils.timezone import now
from django.utils.translation import ugettext, ugettext_lazy as _
from mayan.apps.converter.exceptions import AppImageError
from mayan.apps.databases.model_mixins import ExtraDataModelMixin
from mayan.apps.common.signals import signal_mayan_pre_save
from mayan.apps.events.classes import EventManagerSave
from mayan.apps.events.decorators import method_event
from mayan.apps.storage.compressed_files import Archive
from mayan.apps.storage.exceptions import NoMIMETypeMatch
from ..events import (
event_document_created, event_document_edited,
event_document_trashed, event_document_type_changed,
event_trashed_document_deleted
)
from ..literals import (
DEFAULT_LANGUAGE, DOCUMENT_FILE_ACTION_PAGES_APPEND,
DOCUMENT_FILE_ACTION_PAGES_KEEP, DOCUMENT_FILE_ACTION_PAGES_NEW,
IMAGE_ERROR_NO_ACTIVE_VERSION
)
from ..managers import (
DocumentManager, TrashCanManager, ValidDocumentManager,
ValidRecentlyCreatedDocumentManager
)
from ..signals import signal_post_document_type_change
from .document_type_models import DocumentType
from .mixins import HooksModelMixin
__all__ = ('Document', 'DocumentSearchResult',)
logger = logging.getLogger(name=__name__)
class Document(
ExtraDataModelMixin, HooksModelMixin, models.Model
):
"""
    Defines a single document with its fields and properties
    Fields:
    * uuid - UUID of a document, a universally unique ID. A unique identifier
    generated for each document. No two documents can ever have the same UUID.
This ID is generated automatically.
"""
_hooks_pre_create = []
uuid = models.UUIDField(
default=uuid.uuid4, editable=False, help_text=_(
'UUID of a document, universally Unique ID. An unique identifier '
'generated for each document.'
), verbose_name=_('UUID')
)
document_type = models.ForeignKey(
help_text=_('The document type of the document.'),
on_delete=models.CASCADE, related_name='documents', to=DocumentType,
verbose_name=_('Document type')
)
label = models.CharField(
blank=True, db_index=True, default='', max_length=255,
help_text=_(
'A short text identifying the document. By default, will be '
'set to the filename of the first file uploaded to the document.'
),
verbose_name=_('Label')
)
description = models.TextField(
blank=True, default='', help_text=_(
'An optional short text describing a document.'
), verbose_name=_('Description')
)
datetime_created = models.DateTimeField(
auto_now_add=True, db_index=True, help_text=_(
'The date and time of the document creation.'
), verbose_name=_('Created')
)
language = models.CharField(
blank=True, default=DEFAULT_LANGUAGE, help_text=_(
'The primary language in the document.'
), max_length=8, verbose_name=_('Language')
)
in_trash = models.BooleanField(
db_index=True, default=False, help_text=_(
'Whether or not this document is in the trash.'
), editable=False, verbose_name=_('In trash?')
)
trashed_date_time = models.DateTimeField(
blank=True, editable=True, help_text=_(
'The server date and time when the document was moved to the '
'trash.'
), null=True, verbose_name=_('Date and time trashed')
)
is_stub = models.BooleanField(
db_index=True, default=True, editable=False, help_text=_(
'A document stub is a document with an entry on the database but '
'no file uploaded. This could be an interrupted upload or a '
'deferred upload via the API.'
), verbose_name=_('Is stub?')
)
objects = DocumentManager()
trash = TrashCanManager()
valid = ValidDocumentManager()
@classmethod
def execute_pre_create_hooks(cls, kwargs=None):
"""
Helper method to allow checking if it is possible to create
a new document.
"""
cls._execute_hooks(
hook_list=cls._hooks_pre_create, kwargs=kwargs
)
@classmethod
def register_pre_create_hook(cls, func, order=None):
cls._insert_hook_entry(
hook_list=cls._hooks_pre_create, func=func, order=order
)
class Meta:
ordering = ('label',)
verbose_name = _('Document')
verbose_name_plural = _('Documents')
def __str__(self):
return self.get_label()
def add_as_recent_document_for_user(self, user):
RecentlyAccessedDocument = apps.get_model(
app_label='documents', model_name='RecentlyAccessedDocument'
)
return RecentlyAccessedDocument.valid.add_document_for_user(
document=self, user=user
)
def delete(self, *args, **kwargs):
to_trash = kwargs.pop('to_trash', True)
user = kwargs.pop('_user', self.__dict__.pop('_event_actor', None))
if not self.in_trash and to_trash:
self.in_trash = True
self.trashed_date_time = now()
with transaction.atomic():
self._event_ignore = True
self.save(update_fields=('in_trash', 'trashed_date_time'))
event_document_trashed.commit(actor=user, target=self)
else:
with transaction.atomic():
for document_file in self.files.all():
document_file.delete()
super().delete(*args, **kwargs)
event_trashed_document_deleted.commit(
actor=user, target=self.document_type
)
def document_type_change(self, document_type, force=False, _user=None):
has_changed = self.document_type != document_type
if has_changed or force:
self.document_type = document_type
self._event_ignore = True
self.save(update_fields=('document_type',))
if _user:
self.add_as_recent_document_for_user(user=_user)
signal_post_document_type_change.send(
sender=self.__class__, instance=self
)
event_document_type_changed.commit(
action_object=document_type, actor=_user, target=self
)
@property
def file_latest(self):
return self.files.order_by('timestamp').last()
def file_new(
self, file_object, action=None, comment=None, filename=None,
expand=False, _user=None
):
logger.info('Creating new document file for document: %s', self)
if not action:
action = DOCUMENT_FILE_ACTION_PAGES_NEW
if not comment:
comment = ''
DocumentFile = apps.get_model(
app_label='documents', model_name='DocumentFile'
)
if expand:
try:
compressed_file = Archive.open(file_object=file_object)
for compressed_file_member in compressed_file.members():
with compressed_file.open_member(filename=compressed_file_member) as compressed_file_member_file_object:
                        # Recursive call, one per archive member. Passing
                        # expand=True here would also expand nested
                        # compressed files, but that might cause problems
                        # with office files inside a compressed file, so
                        # expand=False is used.
# Don't use keyword arguments for Path to allow
# partials.
self.file_new(
action=action, comment=comment, expand=False,
file_object=compressed_file_member_file_object,
filename=Path(compressed_file_member).name,
_user=_user
)
# Avoid executing the expand=False code path.
return
except NoMIMETypeMatch:
logger.debug(msg='No expanding; Exception: NoMIMETypeMatch')
# Fall through to same code path as expand=False to avoid
# duplicating code.
DocumentVersion = apps.get_model(
app_label='documents', model_name='DocumentVersion'
)
try:
document_file = DocumentFile(
document=self, comment=comment, file=File(file=file_object),
filename=filename or Path(file_object.name).name
)
document_file._event_actor = _user
document_file.save()
except Exception as exception:
logger.error(
'Error creating new file for document: %s; %s', self,
exception, exc_info=True
)
raise
else:
logger.info('New document file queued for document: %s', self)
DocumentVersion = apps.get_model(
app_label='documents', model_name='DocumentVersion'
)
if action == DOCUMENT_FILE_ACTION_PAGES_NEW:
document_version = DocumentVersion(
document=self, comment=comment
)
document_version._event_actor = _user
document_version.save()
annotated_content_object_list = DocumentVersion.annotate_content_object_list(
content_object_list=document_file.pages.all()
)
document_version.pages_remap(
annotated_content_object_list=annotated_content_object_list,
_user=_user
)
elif action == DOCUMENT_FILE_ACTION_PAGES_APPEND:
annotated_content_object_list = []
annotated_content_object_list.extend(
DocumentVersion.annotate_content_object_list(
content_object_list=self.version_active.page_content_objects
)
)
annotated_content_object_list.extend(
DocumentVersion.annotate_content_object_list(
content_object_list=document_file.pages.all(),
start_page_number=self.version_active.pages.count() + 1
)
)
document_version = DocumentVersion(
document=self, comment=comment
)
document_version._event_actor = _user
document_version.save()
document_version.pages_remap(
annotated_content_object_list=annotated_content_object_list,
_user=_user
)
elif action == DOCUMENT_FILE_ACTION_PAGES_KEEP:
return document_file
return document_file
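    # Illustrative call (hypothetical names): given an open file handle
    # ``fh`` and a requesting ``user``, ``document.file_new(file_object=fh,
    # filename='report.pdf', _user=user)`` attaches the file and, with the
    # default DOCUMENT_FILE_ACTION_PAGES_NEW action, creates a new
    # DocumentVersion mapped to the uploaded file's pages.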
def get_absolute_url(self):
return reverse(
viewname='documents:document_preview', kwargs={
'document_id': self.pk
}
)
def get_api_image_url(self, *args, **kwargs):
version_active = self.version_active
if version_active:
return version_active.get_api_image_url(*args, **kwargs)
else:
raise AppImageError(error_name=IMAGE_ERROR_NO_ACTIVE_VERSION)
def get_label(self):
return self.label or ugettext('Document stub, id: %d') % self.pk
get_label.short_description = _('Label')
@property
def is_in_trash(self):
return self.in_trash
def natural_key(self):
return (self.uuid,)
natural_key.dependencies = ['documents.DocumentType']
@property
def pages(self):
try:
return self.version_active.pages
except AttributeError:
# Document has no version yet.
DocumentVersionPage = apps.get_model(
app_label='documents', model_name='DocumentVersionPage'
)
return DocumentVersionPage.objects.none()
@method_event(
event_manager_class=EventManagerSave,
created={
'event': event_document_created,
'action_object': 'document_type',
'target': 'self'
},
edited={
'event': event_document_edited,
'target': 'self'
}
)
def save(self, *args, **kwargs):
user = self.__dict__.pop('_event_actor', None)
new_document = not self.pk
signal_mayan_pre_save.send(
sender=Document, instance=self, user=user
)
super().save(*args, **kwargs)
if new_document:
if user:
self.add_as_recent_document_for_user(user=user)
@property
def version_active(self):
try:
return self.versions.filter(active=True).first()
except self.versions.model.DoesNotExist:
return self.versions.none()
class DocumentSearchResult(Document):
class Meta:
proxy = True
class RecentlyCreatedDocument(Document):
objects = models.Manager()
valid = ValidRecentlyCreatedDocumentManager()
class Meta:
proxy = True
| 34.618182
| 124
| 0.617347
|
6c8ebbd9c639c8240b718362149058a4d7e7642a
| 2,126
|
py
|
Python
|
gmail.py
|
ramrom/haus
|
6ad300be0c1dd0818248503ffe70695a878a1ace
|
[
"MIT"
] | 1
|
2019-11-30T03:45:38.000Z
|
2019-11-30T03:45:38.000Z
|
gmail.py
|
ramrom/haus
|
6ad300be0c1dd0818248503ffe70695a878a1ace
|
[
"MIT"
] | null | null | null |
gmail.py
|
ramrom/haus
|
6ad300be0c1dd0818248503ffe70695a878a1ace
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python
import httplib2
import os, pdb
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
CLIENT_SECRET_FILE = '/Users/smittapalli/.creds/gcloud_oauth2_webapp_haus.json'
APPLICATION_NAME = 'Gmail API Python Quickstart'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.creds')
credential_path = os.path.join(credential_dir, 'gmail-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def main():
"""Shows basic usage of the Gmail API.
Creates a Gmail API service object and outputs a list of label names
of the user's Gmail account.
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
results = service.users().labels().list(userId='me').execute()
labels = results.get('labels', [])
if not labels:
print('No labels found.')
else:
print('Labels:')
for label in labels:
print(label['name'])
if __name__ == '__main__':
pdb.set_trace()
#main()
| 29.527778
| 80
| 0.731421
|
1bb93f7f6281968c55a969271822f25589f2cd04
| 6,178
|
py
|
Python
|
tests/mutations/test_mediaobject.py
|
trompamusic/ce-queries-template
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | 1
|
2020-06-18T15:43:18.000Z
|
2020-06-18T15:43:18.000Z
|
tests/mutations/test_mediaobject.py
|
trompamusic/ce-queries-template
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | 60
|
2019-12-17T11:08:28.000Z
|
2021-03-02T16:19:41.000Z
|
tests/mutations/test_mediaobject.py
|
trompamusic/trompace-client
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
from tests import CeTestCase
from trompace.exceptions import UnsupportedLanguageException, NotAMimeTypeException
from trompace.mutations import mediaobject
class TestMediaObject(CeTestCase):
def setUp(self) -> None:
        super().setUp()
self.data_dir = os.path.join(self.test_directory, "data", "mediaobject")
def test_create(self):
expected = self.read_file(os.path.join(self.data_dir, "create_mediaobject.txt"))
created_mediaobject = mediaobject.mutation_create_media_object(
title="Rossinyol", description="Traditional choir piece", date="1972", creator="trompamusic.eu",
contributor="www.upf.edu", format_="text/html", encodingformat="text/html", source="https://www.cpdl.org/wiki/index.php/Rossinyol",
contenturl="https://www.cpdl.org/wiki/index.php/Rossinyol", language="en", inlanguage="ca"
)
self.assert_queries_equal(created_mediaobject, expected)
def test_update_name(self):
expected = self.read_file(os.path.join(self.data_dir, "update_mediaobject_name.txt"))
created_update = mediaobject.mutation_update_media_object(
'2eeca6dd-c62c-490e-beb0-2e3899fca74f',
name="Rossinyol")
self.assert_queries_equal(created_update, expected)
def test_update_all(self):
expected = self.read_file(os.path.join(self.data_dir, "update_mediaobject_all.txt"))
created_update = mediaobject.mutation_update_media_object(
'2eeca6dd-c62c-490e-beb0-2e3899fca74f', name="Rossinyol", description="Traditional choir piece",
date="1972", creator="trompamusic.eu", contributor="www.upf.edu", format_="text/html", encodingformat="text/html",
source="https://www.cpdl.org/wiki/index.php/Rossinyol", contenturl="https://www.cpdl.org/wiki/index.php/Rossinyol", language="en"
)
self.assert_queries_equal(created_update, expected)
def test_delete(self):
expected = self.read_file(os.path.join(self.data_dir, "delete_mediaobject.txt"))
created_delete = mediaobject.mutation_delete_media_object('2eeca6dd-c62c-490e-beb0-2e3899fca74f')
print(created_delete)
self.assert_queries_equal(created_delete, expected)
def test_add_broad_match(self):
expected = self.read_file(os.path.join(self.data_dir, "merge_exampleofwork.txt"))
created_match = mediaobject.mutation_merge_mediaobject_example_of_work(
"ff562d2e-2265-4f61-b340-561c92e797e9",
"59ce8093-5e0e-4d59-bfa6-805edb11e396")
self.assert_queries_equal(created_match, expected)
def test_remove_broad_match(self):
expected = self.read_file(os.path.join(self.data_dir, "remove_exampleofwork.txt"))
created_match = mediaobject.mutation_remove_mediaobject_example_of_work(
"ff562d2e-2265-4f61-b340-561c92e797e9",
"59ce8093-5e0e-4d59-bfa6-805edb11e396")
self.assert_queries_equal(created_match, expected)
def test_invalid_language(self):
with pytest.raises(UnsupportedLanguageException):
mediaobject.mutation_update_media_object('2eeca6dd-c62c-490e-beb0-2e3899fca74f', language="ja")
with pytest.raises(UnsupportedLanguageException):
mediaobject.mutation_create_media_object(title="Rossinyol", description="Traditional choir piece", date="1972", creator="trompamusic.eu",
contributor="www.upf.edu", format_="text/html", encodingformat="text/html", source="https://www.cpdl.org/wiki/index.php/Rossinyol",
contenturl="https://www.cpdl.org/wiki/index.php/Rossinyol", language="ja", inlanguage="ca")
def test_invalid_format(self):
with pytest.raises(NotAMimeTypeException):
mediaobject.mutation_update_media_object('2eeca6dd-c62c-490e-beb0-2e3899fca74f', format_="test,html")
with pytest.raises(NotAMimeTypeException):
mediaobject.mutation_update_media_object('2eeca6dd-c62c-490e-beb0-2e3899fca74f', encodingformat="test,html")
with pytest.raises(NotAMimeTypeException):
mediaobject.mutation_update_media_object('2eeca6dd-c62c-490e-beb0-2e3899fca74f', name="Rossinyol", description="Traditional choir piece", \
date="1972", creator="trompamusic.eu", contributor="www.upf.edu", format_="text,html", encodingformat="text/html", \
source="https://www.cpdl.org/wiki/index.php/Rossinyol", subject="Catalan choir piece", contenturl="https://www.cpdl.org/wiki/index.php/Rossinyol", \
language="en", inlanguage="ca")
with pytest.raises(NotAMimeTypeException):
mediaobject.mutation_update_media_object('2eeca6dd-c62c-490e-beb0-2e3899fca74f', name="Rossinyol", description="Traditional choir piece", \
date="1972", creator="trompamusic.eu", contributor="www.upf.edu", format_="text/html", encodingformat="text,html", \
source="https://www.cpdl.org/wiki/index.php/Rossinyol", subject="Catalan choir piece", contenturl="https://www.cpdl.org/wiki/index.php/Rossinyol", language="en", \
inlanguage="ca")
def test_merge_exampleOf(self):
expected = self.read_file(os.path.join(self.data_dir, "merge_object_encoding.txt"))
created_match = mediaobject.mutation_merge_media_object_encoding(
"ff562d2e-2265-4f61-b340-561c92e797e9",
"59ce8093-5e0e-4d59-bfa6-805edb11e396")
self.assert_queries_equal(created_match, expected)
def test_remove_exampleOf(self):
expected = self.read_file(os.path.join(self.data_dir, "remove_object_encoding.txt"))
created_match = mediaobject.mutation_remove_media_object_encoding(
"ff562d2e-2265-4f61-b340-561c92e797e9",
"59ce8093-5e0e-4d59-bfa6-805edb11e396")
self.assert_queries_equal(created_match, expected)
| 58.283019
| 216
| 0.67708
|
04f4519ff2edf4745d63b96935346215aa4b19bf
| 230
|
py
|
Python
|
scripts/mic.py
|
georgsp/mdmTerminal2
|
50255ff72ec35d6a7d567f0c15d02d82a0476670
|
[
"Apache-2.0",
"MIT"
] | 24
|
2018-08-28T10:02:56.000Z
|
2021-12-27T15:10:16.000Z
|
scripts/mic.py
|
georgsp/mdmTerminal2
|
50255ff72ec35d6a7d567f0c15d02d82a0476670
|
[
"Apache-2.0",
"MIT"
] | 14
|
2018-11-10T15:28:57.000Z
|
2021-01-22T23:17:14.000Z
|
scripts/mic.py
|
georgsp/mdmTerminal2
|
50255ff72ec35d6a7d567f0c15d02d82a0476670
|
[
"Apache-2.0",
"MIT"
] | 10
|
2018-10-30T14:28:32.000Z
|
2021-01-22T05:30:47.000Z
|
#!/usr/bin/env python3
import speech_recognition as sr
for index, name in enumerate(sr.Microphone.list_microphone_names()):
print("Microphone with name \"{1}\" found for `Microphone(device_index={0})`".format(index, name))
| 28.75
| 102
| 0.73913
|
c5af33b2193203efa7ab1ff071afe9a56d3af6ae
| 35,090
|
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/network/cloudengine/ce_ospf.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/network/cloudengine/ce_ospf.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/network/cloudengine/ce_ospf.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_ospf
short_description: Manages configuration of an OSPF instance on HUAWEI CloudEngine switches.
description:
- Manages configuration of an OSPF instance on HUAWEI CloudEngine switches.
author: QijunPan (@QijunPan)
notes:
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
process_id:
description:
- Specifies a process ID.
The value is an integer ranging from 1 to 4294967295.
required: true
area:
description:
- Specifies the area ID. The area with the area-id being 0 is a backbone area.
Valid values are a string, formatted as an IP address
(i.e. "0.0.0.0") or as an integer between 1 and 4294967295.
addr:
description:
- Specifies the address of the network segment where the interface resides.
The value is in dotted decimal notation.
mask:
description:
- IP network wildcard bits in decimal format between 0 and 32.
auth_mode:
description:
- Specifies the authentication type.
choices: ['none', 'hmac-sha256', 'md5', 'hmac-md5', 'simple']
auth_text_simple:
description:
- Specifies a password for simple authentication.
The value is a string of 1 to 8 characters.
auth_key_id:
description:
            - Authentication key id when C(auth_mode) is 'hmac-sha256', 'md5' or 'hmac-md5'.
Valid value is an integer is in the range from 1 to 255.
auth_text_md5:
description:
- Specifies a password for MD5, HMAC-MD5, or HMAC-SHA256 authentication.
The value is a string of 1 to 255 case-sensitive characters, spaces not supported.
nexthop_addr:
description:
            - IPv4 address for configuring the next-hop address's weight.
Valid values are a string, formatted as an IP address.
nexthop_weight:
description:
- Indicates the weight of the next hop.
The smaller the value is, the higher the preference of the route is.
It is an integer that ranges from 1 to 254.
max_load_balance:
description:
            - The maximum number of paths used to forward packets over multiple paths.
Valid value is an integer in the range from 1 to 64.
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: ospf module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure ospf
ce_ospf:
process_id: 1
area: 100
state: present
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"process_id": "1", "area": "100"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"process_id": "1", "areas": [], "nexthops":[], "max_load_balance": "32"}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"process_id": "1",
"areas": [{"areaId": "0.0.0.100", "areaType": "Normal"}],
"nexthops":[], "max_load_balance": "32"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["ospf 1", "area 0.0.0.100"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_OSPF = """
<filter type="subtree">
<ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ospfv2comm>
<ospfSites>
<ospfSite>
<processId>%s</processId>
<routerId></routerId>
<vrfName></vrfName>
<ProcessTopologys>
<ProcessTopology>
<nexthopMTs></nexthopMTs>
<maxLoadBalancing></maxLoadBalancing>
</ProcessTopology>
</ProcessTopologys>
<areas>
<area>
<areaId></areaId>
<areaType></areaType>
<authenticationMode></authenticationMode>
<authTextSimple></authTextSimple>
<keyId></keyId>
<authTextMd5></authTextMd5>
<networks>
<network>
<ipAddress></ipAddress>
<wildcardMask></wildcardMask>
</network>
</networks>
</area>
</areas>
</ospfSite>
</ospfSites>
</ospfv2comm>
</ospfv2>
</filter>
"""
CE_NC_CREATE_PROCESS = """
<config>
<ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ospfv2comm>
<ospfSites>
<ospfSite operation="merge">
<processId>%s</processId>
</ospfSite>
</ospfSites>
</ospfv2comm>
</ospfv2>
</config>
"""
CE_NC_DELETE_PROCESS = """
<config>
<ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ospfv2comm>
<ospfSites>
<ospfSite operation="delete">
<processId>%s</processId>
</ospfSite>
</ospfSites>
</ospfv2comm>
</ospfv2>
</config>
"""
CE_NC_XML_BUILD_MERGE_PROCESS = """
<config>
<ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ospfv2comm>
<ospfSites>
<ospfSite operation="merge">
<processId>%s</processId>
%s
</ospfSite>
</ospfSites>
</ospfv2comm>
</ospfv2>
</config>
"""
CE_NC_XML_BUILD_PROCESS = """
<config>
<ospfv2 xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ospfv2comm>
<ospfSites>
<ospfSite>
<processId>%s</processId>
%s
</ospfSite>
</ospfSites>
</ospfv2comm>
</ospfv2>
</config>
"""
CE_NC_XML_BUILD_MERGE_AREA = """
<areas>
<area operation="merge">
<areaId>%s</areaId>
%s
</area>
</areas>
"""
CE_NC_XML_BUILD_DELETE_AREA = """
<areas>
<area operation="delete">
<areaId>%s</areaId>
%s
</area>
</areas>
"""
CE_NC_XML_BUILD_AREA = """
<areas>
<area>
<areaId>%s</areaId>
%s
</area>
</areas>
"""
CE_NC_XML_SET_AUTH_MODE = """
<authenticationMode>%s</authenticationMode>
"""
CE_NC_XML_SET_AUTH_TEXT_SIMPLE = """
<authTextSimple>%s</authTextSimple>
"""
CE_NC_XML_SET_AUTH_MD5 = """
<keyId>%s</keyId>
<authTextMd5>%s</authTextMd5>
"""
CE_NC_XML_MERGE_NETWORKS = """
<networks>
<network operation="merge">
<ipAddress>%s</ipAddress>
<wildcardMask>%s</wildcardMask>
</network>
</networks>
"""
CE_NC_XML_DELETE_NETWORKS = """
<networks>
<network operation="delete">
<ipAddress>%s</ipAddress>
<wildcardMask>%s</wildcardMask>
</network>
</networks>
"""
CE_NC_XML_SET_LB = """
<maxLoadBalancing>%s</maxLoadBalancing>
"""
CE_NC_XML_BUILD_MERGE_TOPO = """
<ProcessTopologys>
<ProcessTopology operation="merge">
<topoName>base</topoName>
%s
</ProcessTopology>
</ProcessTopologys>
"""
CE_NC_XML_BUILD_TOPO = """
<ProcessTopologys>
<ProcessTopology >
<topoName>base</topoName>
%s
</ProcessTopology>
</ProcessTopologys>
"""
CE_NC_XML_MERGE_NEXTHOP = """
<nexthopMTs>
<nexthopMT operation="merge">
<ipAddress>%s</ipAddress>
<weight>%s</weight>
</nexthopMT>
</nexthopMTs>
"""
CE_NC_XML_DELETE_NEXTHOP = """
<nexthopMTs>
<nexthopMT operation="delete">
<ipAddress>%s</ipAddress>
</nexthopMT>
</nexthopMTs>
"""
class OSPF(object):
"""
Manages configuration of an ospf instance.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.process_id = self.module.params['process_id']
self.area = self.module.params['area']
self.addr = self.module.params['addr']
self.mask = self.module.params['mask']
self.auth_mode = self.module.params['auth_mode']
self.auth_text_simple = self.module.params['auth_text_simple']
self.auth_key_id = self.module.params['auth_key_id']
self.auth_text_md5 = self.module.params['auth_text_md5']
self.nexthop_addr = self.module.params['nexthop_addr']
self.nexthop_weight = self.module.params['nexthop_weight']
self.max_load_balance = self.module.params['max_load_balance']
self.state = self.module.params['state']
# ospf info
self.ospf_info = dict()
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def init_module(self):
""" init module """
required_together = [
("addr", "mask"),
("auth_key_id", "auth_text_md5"),
("nexthop_addr", "nexthop_weight")
]
self.module = AnsibleModule(
argument_spec=self.spec, required_together=required_together, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_wildcard_mask(self):
"""convert mask length to ip address wildcard mask, i.e. 24 to 0.0.0.255"""
mask_int = ["255"] * 4
length = int(self.mask)
if length > 32:
self.module.fail_json(msg='IPv4 ipaddress mask length is invalid')
if length < 8:
mask_int[0] = str(int(~(0xFF << (8 - length % 8)) & 0xFF))
if length >= 8:
mask_int[0] = '0'
mask_int[1] = str(int(~(0xFF << (16 - (length % 16))) & 0xFF))
if length >= 16:
mask_int[1] = '0'
mask_int[2] = str(int(~(0xFF << (24 - (length % 24))) & 0xFF))
if length >= 24:
mask_int[2] = '0'
mask_int[3] = str(int(~(0xFF << (32 - (length % 32))) & 0xFF))
if length == 32:
mask_int[3] = '0'
return '.'.join(mask_int)
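    # For illustration: a prefix length of 24 yields "0.0.0.255", 16 yields
    # "0.0.255.255" and 32 yields "0.0.0.0".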
def get_area_ip(self):
"""convert integer to ip address"""
if not self.area.isdigit():
return self.area
addr_int = ['0'] * 4
addr_int[0] = str(((int(self.area) & 0xFF000000) >> 24) & 0xFF)
addr_int[1] = str(((int(self.area) & 0x00FF0000) >> 16) & 0xFF)
addr_int[2] = str(((int(self.area) & 0x0000FF00) >> 8) & 0XFF)
addr_int[3] = str(int(self.area) & 0xFF)
return '.'.join(addr_int)
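    # For illustration: area "100" becomes "0.0.0.100"; a dotted-decimal
    # area id is returned unchanged.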
def get_ospf_dict(self, process_id):
""" get one ospf attributes dict."""
ospf_info = dict()
conf_str = CE_NC_GET_OSPF % process_id
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return ospf_info
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
# get process base info
root = ElementTree.fromstring(xml_str)
ospfsite = root.find("ospfv2/ospfv2comm/ospfSites/ospfSite")
if ospfsite:
for site in ospfsite:
if site.tag in ["processId", "routerId", "vrfName"]:
ospf_info[site.tag] = site.text
# get Topology info
topo = root.find(
"ospfv2/ospfv2comm/ospfSites/ospfSite/ProcessTopologys/ProcessTopology")
if topo:
for eles in topo:
if eles.tag in ["maxLoadBalancing"]:
ospf_info[eles.tag] = eles.text
# get nexthop info
ospf_info["nexthops"] = list()
nexthops = root.findall(
"ospfv2/ospfv2comm/ospfSites/ospfSite/ProcessTopologys/ProcessTopology/nexthopMTs/nexthopMT")
if nexthops:
for nexthop in nexthops:
nh_dict = dict()
for ele in nexthop:
if ele.tag in ["ipAddress", "weight"]:
nh_dict[ele.tag] = ele.text
ospf_info["nexthops"].append(nh_dict)
# get areas info
ospf_info["areas"] = list()
areas = root.findall(
"ospfv2/ospfv2comm/ospfSites/ospfSite/areas/area")
if areas:
for area in areas:
area_dict = dict()
for ele in area:
if ele.tag in ["areaId", "authTextSimple", "areaType",
"authenticationMode", "keyId", "authTextMd5"]:
area_dict[ele.tag] = ele.text
if ele.tag == "networks":
# get networks info
area_dict["networks"] = list()
for net in ele:
net_dict = dict()
for net_ele in net:
if net_ele.tag in ["ipAddress", "wildcardMask"]:
net_dict[net_ele.tag] = net_ele.text
area_dict["networks"].append(net_dict)
ospf_info["areas"].append(area_dict)
return ospf_info
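    # The returned dict mirrors the XML parsed above: "processId",
    # "routerId", "vrfName", optionally "maxLoadBalancing", "nexthops"
    # (a list of {"ipAddress", "weight"} dicts) and "areas" (a list of
    # per-area dicts, each optionally carrying a "networks" list of
    # {"ipAddress", "wildcardMask"} entries).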
def is_area_exist(self):
"""is ospf area exist"""
if not self.ospf_info:
return False
for area in self.ospf_info["areas"]:
if area["areaId"] == self.get_area_ip():
return True
return False
def is_network_exist(self):
"""is ospf area network exist"""
if not self.ospf_info:
return False
for area in self.ospf_info["areas"]:
if area["areaId"] == self.get_area_ip():
if not area.get("networks"):
return False
for network in area.get("networks"):
if network["ipAddress"] == self.addr and network["wildcardMask"] == self.get_wildcard_mask():
return True
return False
def is_nexthop_exist(self):
"""is ospf nexthop exist"""
if not self.ospf_info:
return False
for nexthop in self.ospf_info["nexthops"]:
if nexthop["ipAddress"] == self.nexthop_addr:
return True
return False
def is_nexthop_change(self):
"""is ospf nexthop change"""
if not self.ospf_info:
return True
for nexthop in self.ospf_info["nexthops"]:
if nexthop["ipAddress"] == self.nexthop_addr:
if nexthop["weight"] == self.nexthop_weight:
return False
else:
return True
return True
def create_process(self):
"""Create ospf process"""
xml_area = ""
self.updates_cmd.append("ospf %s" % self.process_id)
xml_create = CE_NC_CREATE_PROCESS % self.process_id
set_nc_config(self.module, xml_create)
# nexthop weight
xml_nh = ""
if self.nexthop_addr:
xml_nh = CE_NC_XML_MERGE_NEXTHOP % (
self.nexthop_addr, self.nexthop_weight)
self.updates_cmd.append("nexthop %s weight %s" % (
self.nexthop_addr, self.nexthop_weight))
# max load balance
xml_lb = ""
if self.max_load_balance:
xml_lb = CE_NC_XML_SET_LB % self.max_load_balance
self.updates_cmd.append(
"maximum load-balancing %s" % self.max_load_balance)
xml_topo = ""
if xml_lb or xml_nh:
xml_topo = CE_NC_XML_BUILD_TOPO % (xml_nh + xml_lb)
if self.area:
self.updates_cmd.append("area %s" % self.get_area_ip())
xml_auth = ""
xml_network = ""
# networks
if self.addr and self.mask:
xml_network = CE_NC_XML_MERGE_NETWORKS % (
self.addr, self.get_wildcard_mask())
self.updates_cmd.append("network %s %s" % (
self.addr, self.get_wildcard_mask()))
# authentication mode
if self.auth_mode:
xml_auth += CE_NC_XML_SET_AUTH_MODE % self.auth_mode
if self.auth_mode == "none":
self.updates_cmd.append("undo authentication-mode")
else:
self.updates_cmd.append(
"authentication-mode %s" % self.auth_mode)
if self.auth_mode == "simple" and self.auth_text_simple:
xml_auth += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple
self.updates_cmd.pop()
self.updates_cmd.append(
"authentication-mode %s %s" % (self.auth_mode, self.auth_text_simple))
            if self.auth_mode in ["hmac-sha256", "hmac-md5", "md5"]:
if self.auth_key_id and self.auth_text_md5:
xml_auth += CE_NC_XML_SET_AUTH_MD5 % (
self.auth_key_id, self.auth_text_md5)
self.updates_cmd.pop()
self.updates_cmd.append(
"authentication-mode %s %s %s" % (self.auth_mode, self.auth_key_id, self.auth_text_md5))
if xml_network or xml_auth or not self.is_area_exist():
xml_area += CE_NC_XML_BUILD_MERGE_AREA % (
self.get_area_ip(), xml_network + xml_auth)
xml_str = CE_NC_XML_BUILD_MERGE_PROCESS % (
self.process_id, xml_topo + xml_area)
recv_xml = set_nc_config(self.module, xml_str)
self.check_response(recv_xml, "CREATE_PROCESS")
self.changed = True
def delete_process(self):
"""Delete ospf process"""
xml_str = CE_NC_DELETE_PROCESS % self.process_id
recv_xml = set_nc_config(self.module, xml_str)
self.check_response(recv_xml, "DELETE_PROCESS")
self.updates_cmd.append("undo ospf %s" % self.process_id)
self.changed = True
def merge_process(self):
"""merge ospf process"""
xml_area = ""
xml_str = ""
self.updates_cmd.append("ospf %s" % self.process_id)
# nexthop weight
xml_nh = ""
if self.nexthop_addr and self.is_nexthop_change():
xml_nh = CE_NC_XML_MERGE_NEXTHOP % (
self.nexthop_addr, self.nexthop_weight)
self.updates_cmd.append("nexthop %s weight %s" % (
self.nexthop_addr, self.nexthop_weight))
# max load balance
xml_lb = ""
if self.max_load_balance and self.ospf_info.get("maxLoadBalancing") != self.max_load_balance:
xml_lb = CE_NC_XML_SET_LB % self.max_load_balance
self.updates_cmd.append(
"maximum load-balancing %s" % self.max_load_balance)
xml_topo = ""
if xml_lb or xml_nh:
xml_topo = CE_NC_XML_BUILD_MERGE_TOPO % (xml_nh + xml_lb)
if self.area:
self.updates_cmd.append("area %s" % self.get_area_ip())
xml_network = ""
xml_auth = ""
if self.addr and self.mask:
if not self.is_network_exist():
xml_network += CE_NC_XML_MERGE_NETWORKS % (
self.addr, self.get_wildcard_mask())
self.updates_cmd.append("network %s %s" % (
self.addr, self.get_wildcard_mask()))
            # NOTE: for security, authentication config will always be updated
if self.auth_mode:
xml_auth += CE_NC_XML_SET_AUTH_MODE % self.auth_mode
if self.auth_mode == "none":
self.updates_cmd.append("undo authentication-mode")
else:
self.updates_cmd.append(
"authentication-mode %s" % self.auth_mode)
if self.auth_mode == "simple" and self.auth_text_simple:
xml_auth += CE_NC_XML_SET_AUTH_TEXT_SIMPLE % self.auth_text_simple
self.updates_cmd.pop()
self.updates_cmd.append(
"authentication-mode %s %s" % (self.auth_mode, self.auth_text_simple))
            if self.auth_mode in ["hmac-sha256", "hmac-md5", "md5"]:
if self.auth_key_id and self.auth_text_md5:
xml_auth += CE_NC_XML_SET_AUTH_MD5 % (
self.auth_key_id, self.auth_text_md5)
self.updates_cmd.pop()
self.updates_cmd.append(
"authentication-mode %s %s %s" % (self.auth_mode, self.auth_key_id, self.auth_text_md5))
if xml_network or xml_auth or not self.is_area_exist():
xml_area += CE_NC_XML_BUILD_MERGE_AREA % (
self.get_area_ip(), xml_network + xml_auth)
elif self.is_area_exist():
self.updates_cmd.pop() # remove command: area
else:
pass
if xml_area or xml_topo:
xml_str = CE_NC_XML_BUILD_MERGE_PROCESS % (
self.process_id, xml_topo + xml_area)
recv_xml = set_nc_config(self.module, xml_str)
self.check_response(recv_xml, "MERGE_PROCESS")
self.changed = True
def remove_area_network(self):
"""remvoe ospf area network"""
if not self.is_network_exist():
return
xml_network = CE_NC_XML_DELETE_NETWORKS % (
self.addr, self.get_wildcard_mask())
xml_area = CE_NC_XML_BUILD_AREA % (self.get_area_ip(), xml_network)
xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, xml_area)
recv_xml = set_nc_config(self.module, xml_str)
self.check_response(recv_xml, "DELETE_AREA_NETWORK")
self.updates_cmd.append("ospf %s" % self.process_id)
self.updates_cmd.append("area %s" % self.get_area_ip())
self.updates_cmd.append("undo network %s %s" %
(self.addr, self.get_wildcard_mask()))
self.changed = True
def remove_area(self):
"""remove ospf area"""
if not self.is_area_exist():
return
xml_area = CE_NC_XML_BUILD_DELETE_AREA % (self.get_area_ip(), "")
xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, xml_area)
recv_xml = set_nc_config(self.module, xml_str)
self.check_response(recv_xml, "DELETE_AREA")
self.updates_cmd.append("ospf %s" % self.process_id)
self.updates_cmd.append("undo area %s" % self.get_area_ip())
self.changed = True
def remove_nexthop(self):
"""remove ospf nexthop weight"""
if not self.is_nexthop_exist():
return
xml_nh = CE_NC_XML_DELETE_NEXTHOP % self.nexthop_addr
xml_topo = CE_NC_XML_BUILD_TOPO % xml_nh
xml_str = CE_NC_XML_BUILD_PROCESS % (self.process_id, xml_topo)
recv_xml = set_nc_config(self.module, xml_str)
self.check_response(recv_xml, "DELETE_NEXTHOP_WEIGHT")
self.updates_cmd.append("ospf %s" % self.process_id)
self.updates_cmd.append("undo nexthop %s" % self.nexthop_addr)
self.changed = True
def is_valid_v4addr(self, addr):
"""check is ipv4 addr is valid"""
if addr.find('.') != -1:
addr_list = addr.split('.')
if len(addr_list) != 4:
return False
for each_num in addr_list:
if not each_num.isdigit():
return False
if int(each_num) > 255:
return False
return True
return False
def convert_ip_to_network(self):
"""convert ip to subnet address"""
ip_list = self.addr.split('.')
mask_list = self.get_wildcard_mask().split('.')
for i in range(len(ip_list)):
ip_list[i] = str((int(ip_list[i]) & (~int(mask_list[i]))) & 0xff)
self.addr = '.'.join(ip_list)
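    # Worked example for convert_ip_to_network above (an illustrative note; the
    # numbers are assumptions, not taken from this module): with addr
    # "192.168.1.77" and mask "24" the wildcard mask is "0.0.0.255", each octet
    # is ANDed with the inverted wildcard octet, and addr becomes the network
    # address "192.168.1.0".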
def check_params(self):
"""Check all input params"""
# process_id check
if not self.process_id.isdigit():
self.module.fail_json(msg="Error: process_id is not digit.")
if int(self.process_id) < 1 or int(self.process_id) > 4294967295:
self.module.fail_json(
msg="Error: process_id must be an integer between 1 and 4294967295.")
if self.area:
# area check
if self.area.isdigit():
if int(self.area) < 0 or int(self.area) > 4294967295:
self.module.fail_json(
msg="Error: area id (Integer) must be between 0 and 4294967295.")
else:
if not self.is_valid_v4addr(self.area):
self.module.fail_json(msg="Error: area id is invalid.")
# area network check
if self.addr:
if not self.is_valid_v4addr(self.addr):
self.module.fail_json(
msg="Error: network addr is invalid.")
if not self.mask.isdigit():
self.module.fail_json(
msg="Error: network mask is not digit.")
if int(self.mask) < 0 or int(self.mask) > 32:
self.module.fail_json(
msg="Error: network mask is invalid.")
# area authentication check
if self.state == "present" and self.auth_mode:
if self.auth_mode == "simple":
if self.auth_text_simple and len(self.auth_text_simple) > 8:
self.module.fail_json(
msg="Error: auth_text_simple is not in the range from 1 to 8.")
if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]:
if self.auth_key_id:
if not self.auth_key_id.isdigit():
self.module.fail_json(
msg="Error: auth_key_id is not digit.")
if int(self.auth_key_id) < 1 or int(self.auth_key_id) > 255:
self.module.fail_json(
msg="Error: auth_key_id is not in the range from 1 to 255.")
if self.auth_text_md5 and len(self.auth_text_md5) > 255:
self.module.fail_json(
msg="Error: auth_text_md5 is not in the range from 1 to 255.")
# process max load balance check
if self.state == "present" and self.max_load_balance:
if not self.max_load_balance.isdigit():
self.module.fail_json(
msg="Error: max_load_balance is not digit.")
if int(self.max_load_balance) < 1 or int(self.max_load_balance) > 64:
self.module.fail_json(
msg="Error: max_load_balance is not in the range from 1 to 64.")
# process nexthop weight check
if self.nexthop_addr:
if not self.is_valid_v4addr(self.nexthop_addr):
self.module.fail_json(msg="Error: nexthop_addr is invalid.")
if not self.nexthop_weight.isdigit():
self.module.fail_json(
msg="Error: nexthop_weight is not digit.")
if int(self.nexthop_weight) < 1 or int(self.nexthop_weight) > 254:
self.module.fail_json(
msg="Error: nexthop_weight is not in the range from 1 to 254.")
if self.addr:
self.convert_ip_to_network()
def get_proposed(self):
"""get proposed info"""
self.proposed["process_id"] = self.process_id
self.proposed["area"] = self.area
if self.area:
self.proposed["addr"] = self.addr
self.proposed["mask"] = self.mask
if self.auth_mode:
self.proposed["auth_mode"] = self.auth_mode
if self.auth_mode == "simple":
self.proposed["auth_text_simple"] = self.auth_text_simple
if self.auth_mode in ["hmac-sha256", "hmac-sha256", "md5"]:
self.proposed["auth_key_id"] = self.auth_key_id
self.proposed["auth_text_md5"] = self.auth_text_md5
if self.nexthop_addr:
self.proposed["nexthop_addr"] = self.nexthop_addr
self.proposed["nexthop_weight"] = self.nexthop_weight
self.proposed["max_load_balance"] = self.max_load_balance
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if not self.ospf_info:
return
self.existing["process_id"] = self.process_id
self.existing["areas"] = self.ospf_info["areas"]
self.existing["nexthops"] = self.ospf_info["nexthops"]
self.existing["max_load_balance"] = self.ospf_info.get(
"maxLoadBalancing")
def get_end_state(self):
"""get end state info"""
ospf_info = self.get_ospf_dict(self.process_id)
if not ospf_info:
return
self.end_state["process_id"] = self.process_id
self.end_state["areas"] = ospf_info["areas"]
self.end_state["nexthops"] = ospf_info["nexthops"]
self.end_state["max_load_balance"] = ospf_info.get("maxLoadBalancing")
if self.end_state == self.existing:
if not self.auth_text_simple and not self.auth_text_md5:
self.changed = False
def work(self):
"""worker"""
self.check_params()
self.ospf_info = self.get_ospf_dict(self.process_id)
self.get_existing()
self.get_proposed()
        # handle present or absent state
if self.state == "present":
if not self.ospf_info:
# create ospf process
self.create_process()
else:
# merge ospf
self.merge_process()
else:
if self.ospf_info:
if self.area:
if self.addr:
# remove ospf area network
self.remove_area_network()
else:
# remove ospf area
self.remove_area()
if self.nexthop_addr:
# remove ospf nexthop weight
self.remove_nexthop()
if not self.area and not self.nexthop_addr:
# remove ospf process
self.delete_process()
else:
self.module.fail_json(msg='Error: ospf process does not exist')
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
process_id=dict(required=True, type='str'),
area=dict(required=False, type='str'),
addr=dict(required=False, type='str'),
mask=dict(required=False, type='str'),
auth_mode=dict(required=False,
choices=['none', 'hmac-sha256', 'md5', 'hmac-md5', 'simple'], type='str'),
auth_text_simple=dict(required=False, type='str', no_log=True),
auth_key_id=dict(required=False, type='str'),
auth_text_md5=dict(required=False, type='str', no_log=True),
nexthop_addr=dict(required=False, type='str'),
nexthop_weight=dict(required=False, type='str'),
max_load_balance=dict(required=False, type='str'),
state=dict(required=False, default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = OSPF(argument_spec)
module.work()
if __name__ == '__main__':
main()
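# Illustrative playbook usage for this module (a sketch only; the module name
# and the connection details of the CloudEngine device are assumptions that
# are not defined in this file):
#
#   - name: Advertise a network in OSPF area 100
#     ce_ospf:
#       process_id: 1
#       area: 100
#       addr: 192.168.100.0
#       mask: 24
#       state: present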
| 36.06372
| 140
| 0.552978
|
a8fe6262ad03fc409436f437e392415ab3c346f1
| 6,398
|
py
|
Python
|
monetio/obs/openaq.py
|
aring1988/monetio
|
c14a0d9fbf82a4d4048ebde71add88c7e1c3de08
|
[
"MIT"
] | 10
|
2020-03-12T19:01:15.000Z
|
2022-03-08T20:19:55.000Z
|
monetio/obs/openaq.py
|
aring1988/monetio
|
c14a0d9fbf82a4d4048ebde71add88c7e1c3de08
|
[
"MIT"
] | 42
|
2020-03-13T19:26:17.000Z
|
2022-03-30T20:35:31.000Z
|
monetio/obs/openaq.py
|
aring1988/monetio
|
c14a0d9fbf82a4d4048ebde71add88c7e1c3de08
|
[
"MIT"
] | 11
|
2020-03-30T20:23:24.000Z
|
2022-03-04T18:46:24.000Z
|
"""Short summary.
Attributes
----------
url : type
Description of attribute `url`.
dates : type
Description of attribute `dates`.
df : type
Description of attribute `df`.
daily : type
Description of attribute `daily`.
objtype : type
Description of attribute `objtype`.
filelist : type
Description of attribute `filelist`.
monitor_file : type
Description of attribute `monitor_file`.
__class__ : type
Description of attribute `__class__`.
monitor_df : type
Description of attribute `monitor_df`.
savecols : type
Description of attribute `savecols`.
"""
import json
import dask
import dask.dataframe as dd
import pandas as pd
from numpy import NaN, vectorize
def add_data(dates, n_procs=1):
"""add openaq data from the amazon s3 server.
Parameters
----------
dates : pd.DateTimeIndex or list of datatime objects
this is a list of dates to download
n_procs : type
Description of parameter `n_procs`.
Returns
-------
type
Description of returned object.
"""
a = OPENAQ()
return a.add_data(dates, num_workers=n_procs)
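# Example usage (an illustrative sketch, not part of the original module; the
# date range is arbitrary and the import path is inferred from this file's
# location in the repository):
#
#   import pandas as pd
#   from monetio.obs import openaq
#   dates = pd.date_range(start='2019-09-01', end='2019-09-02', freq='H')
#   df = openaq.add_data(dates, n_procs=4)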
class OPENAQ():
def __init__(self):
import s3fs
from numpy import vectorize
self.fs = s3fs.S3FileSystem(anon=True)
self.s3bucket = 'openaq-fetches/realtime'
def _get_available_days(self, dates):
folders = self.fs.ls(self.s3bucket)
days = [j.split('/')[2] for j in folders]
avail_dates = pd.to_datetime(days, format='%Y-%m-%d', errors='coerce')
dates = pd.to_datetime(dates).floor(freq='D')
d = pd.Series(dates, name='dates').drop_duplicates()
ad = pd.Series(avail_dates, name='dates')
return pd.merge(d, ad, how='inner')
def _get_files_in_day(self, date):
files = self.fs.ls("{}/{}".format(self.s3bucket,
date.strftime('%Y-%m-%d')))
return files
def build_urls(self, dates):
d = self._get_available_days(dates)
urls = pd.Series([], name='url')
for i in d.dates:
files = self._get_files_in_day(i)
furls = pd.Series([
f.replace('openaq-fetches',
'https://openaq-fetches.s3.amazonaws.com')
for f in files
],
name='url')
urls = pd.merge(urls, furls, how='outer')
return urls.url.values
def add_data(self, dates, num_workers=1):
import dask.dataframe as dd
import dask
urls = self.build_urls(dates).tolist()
# z = dd.read_json(urls).compute()
dfs = [dask.delayed(self.read_json)(f) for f in urls]
dff = dd.from_delayed(dfs)
z = dff.compute(num_workers=num_workers)
z.coordinates.replace(to_replace=[None], value=pd.np.nan, inplace=True)
z = z.dropna().reset_index(drop=True)
js = json.loads(z[['coordinates', 'date']].to_json(orient='records'))
dff = pd.io.json.json_normalize(js)
dff.columns = dff.columns.str.split('.').str[1]
dff.rename({
'local': 'time_local',
'utc': 'time'
},
axis=1,
inplace=True)
dff['time'] = pd.to_datetime(dff.time)
dff['time_local'] = pd.to_datetime(dff.time_local)
zzz = z.join(dff).drop(
columns=['coordinates', 'date', 'attribution', 'averagingPeriod'])
zp = self._pivot_table(zzz)
zp['siteid'] = zp.country + '_' + zp.latitude.round(3).astype(
str) + 'N_' + zp.longitude.round(3).astype(str) + 'E'
zp['time'] = zp.time.dt.tz_localize(None)
tzinfo = zp.time_local.apply(lambda x: x.tzinfo.utcoffset(x))
zp['time_local'] = zp['time'] + tzinfo
return zp.loc[zp.time >= dates.min()]
def read_json(self, url):
return pd.read_json(url, lines=True).dropna().sort_index(axis=1)
# def read_json(self, url):
# df = pd.read_json(url, lines=True).dropna()
# df.coordinates.replace(to_replace=[None],
# value=pd.np.nan,
# inplace=True)
# df = df.dropna(subset=['coordinates'])
# # df = self._parse_latlon(df)
# # json_struct = json.loads(df.coordinates.to_json(orient='records'))
# # df_flat = pd.io.json.json_normalize(json_struct)
# # df = self._parse_datetime(df)
# # df = self._fix_units(df)
# # df = self._pivot_table(df)
# return df
def _parse_latlon(self, df):
# lat = vectorize(lambda x: x['latitude'])
# lon = vectorize(lambda x: x['longitude'])
def lat(x):
return x['latitude']
def lon(x):
return x['longitude']
df['latitude'] = df.coordinates.apply(lat)
df['longitude'] = df.coordinates.apply(lon)
return df.drop(columns='coordinates')
def _parse_datetime(self, df):
def utc(x):
return pd.to_datetime(x['utc'])
def local(x):
return pd.to_datetime(x['local'])
df['time'] = df.date.apply(utc)
df['time_local'] = df.date.apply(local)
return df.drop(columns='date')
def _fix_units(self, df):
df.loc[df.value <= 0] = NaN
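        # The divisors below appear to be approximate ug/m3-per-ppm conversion
        # factors at ~25 degC (molar mass / 24.45 L/mol * 1000): CO ~1145,
        # O3 ~1963 (rounded to 2000 here), SO2 ~2620, NO2 ~1880, so values not
        # already reported in ppm are converted to ppm.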
df.loc[(df.parameter == 'co') & (df.unit != 'ppm'), 'value'] /= 1145
df.loc[(df.parameter == 'o3') & (df.unit != 'ppm'), 'value'] /= 2000
df.loc[(df.parameter == 'so2') & (df.unit != 'ppm'), 'value'] /= 2620
df.loc[(df.parameter == 'no2') & (df.unit != 'ppm'), 'value'] /= 1880
return df
def _pivot_table(self, df):
w = df.pivot_table(values='value',
index=[
'time', 'latitude', 'longitude', 'sourceName',
'sourceType', 'city', 'country', 'time_local'
],
columns='parameter').reset_index()
w = w.rename(dict(co='co_ppm',
o3='o3_ppm',
no2='no2_ppm',
so2='so2_ppm',
bc='bc_umg3',
pm25='pm25_ugm3',
pm10='pm10_ugm3'),
axis=1)
return w
| 34.031915
| 79
| 0.539075
|
ded4d628cf3a808fe2070cdb8fb3e2a3ab3d5e20
| 1,367
|
py
|
Python
|
pictures/mahuika.py
|
pletzer/fidibench
|
d6465445d6fb3ffd20b53419dc7f833650071e93
|
[
"MIT"
] | 7
|
2018-02-02T21:12:56.000Z
|
2020-09-10T01:07:18.000Z
|
pictures/mahuika.py
|
pletzer/fidibench
|
d6465445d6fb3ffd20b53419dc7f833650071e93
|
[
"MIT"
] | 3
|
2020-09-09T23:17:00.000Z
|
2020-09-15T02:11:33.000Z
|
pictures/mahuika.py
|
pletzer/fidibench
|
d6465445d6fb3ffd20b53419dc7f833650071e93
|
[
"MIT"
] | 2
|
2019-01-31T22:15:40.000Z
|
2022-03-30T02:08:30.000Z
|
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
font = {'family' : 'normal',
'size' : 18}
matplotlib.rc('font', **font)
from matplotlib import pylab
# for nth in 36 24 18 12 4 2 1; do export OMP_NUM_THREADS=$nth; time ./upwind/cxx/upwindCxx -numCells 512 -numSteps 10 ; done
# upwindCxx -numCells 512 -numSteps 10
nthreads_mahuika = [32, 16, 8, 4, 2, 1]
# Release
intel_times = [6.5, 10.5, 18.7, 34.9, 1*60+7.8, 2*60+13.5]
gnu_times = [39.6, 59.8, 1*60+16.3, 1*60+26.4, 3*60+11.6, 2*60+52.8]
gnuO3_times = [6.6, 10.4, 18.4, 34.4, 1*60+6.4, 2*60+10.7]
cray_times = [6.8, 11.0, 19.2, 37.1, 1*60+9.9, 2*60+17.5]
pylab.figure(1)
pylab.plot(nthreads_mahuika, intel_times, 'r-')
pylab.plot(nthreads_mahuika, gnu_times, 'g--')
pylab.plot(nthreads_mahuika, gnuO3_times, 'g--')
pylab.plot(nthreads_mahuika, cray_times, 'b-')
pylab.legend(['intel 19.1.0.166', 'gnu 9.2.0', 'gnu -O3 9.2.0', 'cray 8.7.7'])
pylab.xlabel('number of OpenMP threads')
pylab.ylabel('execution time [s]')
pylab.title('mahuika upwindCxx -numCells 512 -numSteps 10')
pylab.plot(nthreads_mahuika, intel_times, 'ko', markerfacecolor='None')
pylab.plot(nthreads_mahuika, gnu_times, 'ko', markerfacecolor='None')
pylab.plot(nthreads_mahuika, gnuO3_times, 'ko', markerfacecolor='None')
pylab.plot(nthreads_mahuika, cray_times, 'ko', markerfacecolor='None')
pylab.show()
| 36.945946
| 125
| 0.697879
|
b6c5d4f1680ef76decd76176686d9563176a1322
| 2,038
|
py
|
Python
|
python_modules/dagster/dagster_tests/core_tests/test_solid_with_config.py
|
shahvineet98/dagster
|
2471d39c52f660e23e8c0d8e8ded873ddc3df036
|
[
"Apache-2.0"
] | 3
|
2020-09-09T04:10:23.000Z
|
2021-11-08T02:10:42.000Z
|
python_modules/dagster/dagster_tests/core_tests/test_solid_with_config.py
|
shahvineet98/dagster
|
2471d39c52f660e23e8c0d8e8ded873ddc3df036
|
[
"Apache-2.0"
] | 2
|
2021-05-11T13:36:27.000Z
|
2021-09-03T01:53:11.000Z
|
python_modules/dagster/dagster_tests/core_tests/test_solid_with_config.py
|
shahvineet98/dagster
|
2471d39c52f660e23e8c0d8e8ded873ddc3df036
|
[
"Apache-2.0"
] | 1
|
2021-02-21T12:16:47.000Z
|
2021-02-21T12:16:47.000Z
|
import pytest
from dagster import DagsterInvalidConfigError, Field, String, execute_pipeline, pipeline, solid
def test_basic_solid_with_config():
did_get = {}
@solid(
name='solid_with_context',
input_defs=[],
output_defs=[],
config={'some_config': Field(String)},
)
def solid_with_context(context):
did_get['yep'] = context.solid_config
@pipeline
def pipeline_def():
solid_with_context()
execute_pipeline(
pipeline_def, {'solids': {'solid_with_context': {'config': {'some_config': 'foo'}}}}
)
assert 'yep' in did_get
assert 'some_config' in did_get['yep']
def test_config_arg_mismatch():
def _t_fn(*_args):
raise Exception('should not reach')
@solid(
name='solid_with_context',
input_defs=[],
output_defs=[],
config={'some_config': Field(String)},
)
def solid_with_context(context):
raise Exception('should not reach')
@pipeline
def pipeline_def():
solid_with_context()
with pytest.raises(DagsterInvalidConfigError):
execute_pipeline(
pipeline_def, {'solids': {'solid_with_context': {'config': {'some_config': 1}}}}
)
def test_solid_not_found():
@solid(name='find_me_solid', input_defs=[], output_defs=[])
def find_me_solid(_):
raise Exception('should not reach')
@pipeline
def pipeline_def():
find_me_solid()
with pytest.raises(DagsterInvalidConfigError):
execute_pipeline(pipeline_def, {'solids': {'not_found': {'config': {'some_config': 1}}}})
def test_config_for_no_config():
@solid(name='no_config_solid', input_defs=[], output_defs=[])
def no_config_solid(_):
raise Exception('should not reach')
@pipeline
def pipeline_def():
return no_config_solid()
with pytest.raises(DagsterInvalidConfigError):
execute_pipeline(
pipeline_def, {'solids': {'no_config_solid': {'config': {'some_config': 1}}}}
)
| 25.797468
| 97
| 0.636899
|
2d194dfd1d958a6ae83a0cb283b19882b473a40a
| 13,280
|
py
|
Python
|
fs_image/compiler/tests/test_image_layer.py
|
singhaditya28/fs_image
|
3d122da48eab8b26e5add6754cc1f91296139c58
|
[
"MIT"
] | null | null | null |
fs_image/compiler/tests/test_image_layer.py
|
singhaditya28/fs_image
|
3d122da48eab8b26e5add6754cc1f91296139c58
|
[
"MIT"
] | null | null | null |
fs_image/compiler/tests/test_image_layer.py
|
singhaditya28/fs_image
|
3d122da48eab8b26e5add6754cc1f91296139c58
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import json
import os
import unittest
from contextlib import contextmanager
from grp import getgrnam
from pwd import getpwnam
from fs_image.artifacts_dir import find_repo_root
from fs_image.btrfs_diff.tests.demo_sendstreams_expected import (
render_demo_subvols
)
from fs_image.btrfs_diff.tests.render_subvols import (
check_common_rpm_render, render_sendstream, pop_path
)
from fs_image.find_built_subvol import find_built_subvol
from ..procfs_serde import deserialize_int
TARGET_ENV_VAR_PREFIX = 'test_image_layer_path_to_'
TARGET_TO_PATH = {
target[len(TARGET_ENV_VAR_PREFIX):]: path
for target, path in os.environ.items()
if target.startswith(TARGET_ENV_VAR_PREFIX)
}
class ImageLayerTestCase(unittest.TestCase):
def setUp(self):
# More output for easier debugging
unittest.util._MAX_LENGTH = 12345
self.maxDiff = 12345
@contextmanager
def target_subvol(self, target, mount_config=None):
with self.subTest(target):
# The mount configuration is very uniform, so we can check it here.
expected_config = {
'is_directory': True,
'build_source': {
'type': 'layer',
'source': '//fs_image/compiler/test_images:' + target,
},
}
if mount_config:
expected_config.update(mount_config)
with open(TARGET_TO_PATH[target] + '/mountconfig.json') as infile:
self.assertEqual(expected_config, json.load(infile))
yield find_built_subvol(TARGET_TO_PATH[target])
def _check_hello(self, subvol_path):
with open(os.path.join(subvol_path, b'hello_world')) as hello:
self.assertEqual('', hello.read())
def _check_parent(self, subvol_path):
self._check_hello(subvol_path)
# :parent_layer
for path in [
b'rpm_test/hello_world.tar',
b'foo/bar/even_more_hello_world.tar',
]:
self.assertTrue(
os.path.isfile(os.path.join(subvol_path, path)),
path,
)
# :feature_dirs not tested by :parent_layer
self.assertTrue(
os.path.isdir(os.path.join(subvol_path, b'foo/bar/baz')),
)
# :hello_world_base was mounted here
self.assertTrue(os.path.exists(
os.path.join(subvol_path, b'mounted_hello/hello_world')
))
# :feature_symlinks
for source, dest in [
(b'bar', b'foo/fighter'),
(b'bar', b'foo/face'),
(b'..', b'foo/bar/baz/bar'),
(b'hello_world.tar', b'foo/symlink_to_hello_world.tar'),
]:
self.assertTrue(os.path.exists(os.path.join(
subvol_path, os.path.dirname(dest), source,
)), (dest, source))
self.assertTrue(
os.path.islink(os.path.join(subvol_path, dest)),
dest
)
self.assertEqual(
source, os.readlink(os.path.join(subvol_path, dest))
)
def _check_child(self, subvol_path):
self._check_parent(subvol_path)
for path in [
# :feature_tar_and_rpms
b'foo/borf/hello_world',
b'foo/hello_world',
b'rpm_test/mice.txt',
b'rpm_test/cheese2.txt',
# :child/layer
b'foo/extracted_hello/hello_world',
b'foo/more_extracted_hello/hello_world',
]:
self.assertTrue(os.path.isfile(os.path.join(subvol_path, path)))
for path in [
# :feature_tar_and_rpms ensures these are absent
b'rpm_test/carrot.txt',
b'rpm_test/milk.txt',
]:
self.assertFalse(os.path.exists(os.path.join(subvol_path, path)))
def test_image_layer_targets(self):
# Future: replace these checks by a more comprehensive test of the
# image's data & metadata using our `btrfs_diff` library.
with self.target_subvol(
'hello_world_base',
mount_config={'runtime_source': {'type': 'chicken'}},
) as subvol:
self._check_hello(subvol.path())
with self.target_subvol(
'parent_layer',
mount_config={'runtime_source': {'type': 'turkey'}},
) as subvol:
self._check_parent(subvol.path())
# Cannot check this in `_check_parent`, since that gets called
# by `_check_child`, but the RPM gets removed in the child.
self.assertTrue(os.path.isfile(
subvol.path('rpm_test/carrot.txt')
))
with self.target_subvol('child/layer') as subvol:
self._check_child(subvol.path())
with self.target_subvol('base_cheese_layer') as subvol:
self.assertTrue(os.path.isfile(
subvol.path('/rpm_test/cheese2.txt')
))
with self.target_subvol('older_cheese_layer') as subvol:
self.assertTrue(os.path.isfile(
subvol.path('/rpm_test/cheese1.txt')
))
# Make sure the original file is removed when the RPM is downgraded
self.assertFalse(os.path.isfile(
subvol.path('/rpm_test/cheese2.txt')
))
with self.target_subvol('newer_cheese_layer') as subvol:
self.assertTrue(os.path.isfile(
subvol.path('/rpm_test/cheese3.txt')
))
# Make sure the original file is removed when the RPM is upgraded
self.assertFalse(os.path.isfile(
subvol.path('/rpm_test/cheese2.txt')
))
def test_layer_from_demo_sendstreams(self):
# `btrfs_diff.demo_sendstream` produces a subvolume send-stream with
# fairly thorough coverage of filesystem features. This test grabs
# that send-stream, receives it into an `image_layer`, and validates
# that the send-stream of the **received** volume has the same
# rendering as the original send-stream was supposed to have.
#
# In other words, besides testing `image_sendstream_layer`, this is
# also a test of idempotence for btrfs send+receive.
#
# Notes:
# - `compiler/tests/TARGETS` explains why `mutate_ops` is not here.
# - Currently, `mutate_ops` also uses `--no-data`, which would
# break this test of idempotence.
for original_name, subvol_name, mount_config in [
('create_ops', 'create_ops', None),
('create_ops', 'create_ops-from-dir', None),
('create_ops', 'create_ops-from-layer', None),
('create_ops', 'create_ops-alias', {
'build_source': {
'type': 'layer',
'source': '//fs_image/compiler/test_images:create_ops',
}
}),
]:
with self.target_subvol(
subvol_name, mount_config=mount_config,
) as sv:
self.assertEqual(
render_demo_subvols(**{original_name: original_name}),
render_sendstream(sv.mark_readonly_and_get_sendstream()),
)
# This is reused by `test_foreign_layer` because we currently lack
# rendering for incremental sendstreams.
@contextmanager
def _check_build_appliance(self, rsrc_name, yum_dnf):
with self.target_subvol(rsrc_name) as sv:
r = render_sendstream(sv.mark_readonly_and_get_sendstream())
ino, = pop_path(r, 'bin/sh') # Busybox from `rpm-test-milk`
# NB: We changed permissions on this at some point, but after
# the migration diffs land, the [75] can become a 5.
self.assertRegex(ino, r'^\(File m[75]55 d[0-9]+\)$')
self.assertEqual(['(Dir)', {
'milk.txt': ['(File d12)'],
# From the `rpm-test-milk` post-install script
'post.txt': ['(File d6)'],
}], pop_path(r, 'rpm_test'))
ino, _ = pop_path(r, 'usr/lib/.build-id')
self.assertEqual('(Dir)', ino)
self.assertEqual(['(Dir)', {}], pop_path(r, 'bin'))
yield sv, r
self.assertEqual(['(Dir)', {}], pop_path(r, 'var/tmp'))
self.assertEqual(['(Dir)', {}], pop_path(r, 'usr'))
check_common_rpm_render(self, r, yum_dnf)
def test_dnf_build_appliance(self):
with self._check_build_appliance(
'validates-dnf-build-appliance', 'dnf',
) as (_, r):
self.assertEqual(['(Dir)', {}], pop_path(r, 'usr/lib'))
def test_yum_build_appliance(self):
with self._check_build_appliance(
'validates-yum-build-appliance', 'yum',
) as (_, r):
self.assertEqual(['(Dir)', {}], pop_path(r, 'usr/lib'))
def test_foreign_layer(self):
with self._check_build_appliance('foreign-layer', 'dnf') as (sv, r):
# The desired side effect of the run:
self.assertEqual(['(File)'], pop_path(r, 'I_AM_FOREIGN_LAYER'))
# Fixme: This `os-release` is an artifact of `nspawn_in_subvol`.
# We should probably not be leaking this into the layer, but
# it's unlikely to show up in real-world examples.
self.assertEqual(
['(Dir)', {'os-release': ['(File)']}],
pop_path(r, 'usr/lib')
)
# Maybe fixme: `nspawn_in_subvol` could potentially clean this
# up but it seems unlikely to affect prod since it's only a
# thing in `@mode/dev`, which should never ship prod artifacts.
if deserialize_int(
sv, '/meta/private/opts/artifacts_may_require_repo',
):
# Assume that the prefix of the repo (e.g. /home or /data)
# is not one of the normal FHS-type directories below.
d = os.path.abspath(find_repo_root())
while d != '/':
self.assertEqual(['(Dir)', {}], pop_path(r, d))
d = os.path.dirname(d)
# Clean other, less sketchy side effects of `nspawn_in_subvol`:
# empty LFS directories. (`/logs` is not LFS, but an FB-ism)
for d in ('logs', 'proc', 'root', 'run', 'sys', 'tmp'):
self.assertEqual(['(Dir)', {}], pop_path(r, d))
# This nspawn-created symlink isn't great, but, again, it
# shouldn't affect production use-cases.
self.assertEqual(['(Symlink usr/lib)'], pop_path(r, 'lib'))
def test_non_default_rpm_snapshot(self):
with self.target_subvol('layer-with-non-default-snapshot-rpm') as sv:
r = render_sendstream(sv.mark_readonly_and_get_sendstream())
self.assertEqual(['(Dir)', {
'cake.txt': ['(File d17)'],
'cheese.txt': ['(File d11)'],
}], pop_path(r, 'rpm_test'))
check_common_rpm_render(self, r, 'yum')
def _check_installed_files_bar(self, r):
# We don't know the exact sizes because these 2 may be wrapped
ino, = pop_path(r, 'installed/print-ok')
self.assertRegex(ino, r'^\(File m555 d[0-9]+\)$')
ino, = pop_path(r, 'installed/print-ok-too')
self.assertRegex(ino, r'^\(File m555 d[0-9]+\)$')
uid = getpwnam('nobody').pw_uid
gid = getgrnam('nobody').gr_gid
self.assertEqual(['(Dir)', {
'baz': ['(Dir)', {}],
'hello_world.tar': ['(File m444 d10240)'],
'hello_world_again.tar': [f'(File m444 o{uid}:{gid} d10240)'],
'installed': ['(Dir)', {
'yittal-kitteh': ['(File m444 d5)'],
'script-dir': ['(Dir)', {
'subdir': ['(Dir)', {'exe.sh': ['(File m555 d21)']}],
'data.txt': ['(File m444 d6)'],
}],
'solo-exe.sh': ['(File m555 d21)'],
}],
}], r)
def test_installed_files(self):
with self.target_subvol('installed-files') as sv:
r = render_sendstream(sv.mark_readonly_and_get_sendstream())
self._check_installed_files_bar(pop_path(r, 'foo/bar'))
self.assertEqual(['(Dir)', {
'foo': ['(Dir)', {}],
'meta': ['(Dir)', {'private': ['(Dir)', {'opts': ['(Dir)', {
'artifacts_may_require_repo': ['(File d2)'],
}]}]}],
}], r)
def test_cloned_files(self):
with self.target_subvol('cloned-files') as sv:
r = render_sendstream(sv.mark_readonly_and_get_sendstream())
for bar in ['bar', 'bar2', 'bar3']:
self._check_installed_files_bar(pop_path(r, bar))
self.assertEqual(['(Dir)', {
'meta': ['(Dir)', {'private': ['(Dir)', {'opts': ['(Dir)', {
'artifacts_may_require_repo': ['(File d2)'],
}]}]}],
}], r)
| 40.736196
| 79
| 0.564684
|
01259b0dbf285619e49513bfdb8365c7a7dca42d
| 1,396
|
py
|
Python
|
aliyun-python-sdk-rdc/aliyunsdkrdc/request/v20180821/JoinCompanyRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-rdc/aliyunsdkrdc/request/v20180821/JoinCompanyRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-rdc/aliyunsdkrdc/request/v20180821/JoinCompanyRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrdc.endpoint import endpoint_data
class JoinCompanyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rdc', '2018-08-21', 'JoinCompany','rdc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Code(self):
return self.get_body_params().get('Code')
def set_Code(self,Code):
self.add_body_params('Code', Code)
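# Illustrative usage sketch (not part of the SDK source; the credentials,
# region and invitation code below are placeholders):
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = JoinCompanyRequest()
#   request.set_Code('<invitation-code>')
#   response = client.do_action_with_exception(request)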
| 36.736842
| 74
| 0.760029
|
47b6d115bdd1491145b3b37e4faca1134f5cd754
| 482
|
py
|
Python
|
tests/extension/thread_/stream_fifo/test_thread_stream_fifo.py
|
jesseclin/veriloggen
|
a645f2c53f04e5b88213eef17779d212192ea2b5
|
[
"Apache-2.0"
] | 232
|
2015-09-01T16:07:48.000Z
|
2022-03-28T14:53:28.000Z
|
tests/extension/thread_/stream_fifo/test_thread_stream_fifo.py
|
jesseclin/veriloggen
|
a645f2c53f04e5b88213eef17779d212192ea2b5
|
[
"Apache-2.0"
] | 34
|
2015-08-21T09:13:03.000Z
|
2022-03-21T23:52:44.000Z
|
tests/extension/thread_/stream_fifo/test_thread_stream_fifo.py
|
jesseclin/veriloggen
|
a645f2c53f04e5b88213eef17779d212192ea2b5
|
[
"Apache-2.0"
] | 46
|
2015-09-24T14:39:57.000Z
|
2022-02-23T21:59:56.000Z
|
from __future__ import absolute_import
from __future__ import print_function
import os
import veriloggen
import thread_stream_fifo
def test(request):
veriloggen.reset()
simtype = request.config.getoption('--sim')
rslt = thread_stream_fifo.run(filename=None, simtype=simtype,
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
verify_rslt = rslt.splitlines()[-1]
assert(verify_rslt == '# verify: PASSED')
| 25.368421
| 102
| 0.692946
|
8ca60eedf8faaa4acc60e1a3d4e37ad872c192bf
| 1,997
|
py
|
Python
|
linkedlist/Reference_code/q6.py
|
pengfei-chen/algorithm_qa
|
c2ccdcb77004e88279d61e4e433ee49527fc34d6
|
[
"MIT"
] | 79
|
2018-03-27T12:37:49.000Z
|
2022-01-21T10:18:17.000Z
|
linkedlist/Reference_code/q6.py
|
pengfei-chen/algorithm_qa
|
c2ccdcb77004e88279d61e4e433ee49527fc34d6
|
[
"MIT"
] | null | null | null |
linkedlist/Reference_code/q6.py
|
pengfei-chen/algorithm_qa
|
c2ccdcb77004e88279d61e4e433ee49527fc34d6
|
[
"MIT"
] | 27
|
2018-04-08T03:07:06.000Z
|
2021-10-30T00:01:50.000Z
|
"""
问题描述:41个人排成一个圈,由第一个人开始报数,报数到3的人就自杀,
然后再由下一个人重新报1,报数到3的人再自杀,这样依次下去,直到剩下
最后一个人时,他可以自由选择自己的命运。这就是著名的约瑟夫问题,现在
请用单向环形链表描述该结构并呈现整个自杀过程
要求:输入一个环形单向链表的头结点head和报数的值m,返回最后生存的节
点,该节点自己组成一个单向环形链表,其他节点都删掉
进阶:
如果链表头结点数为N,想在时间复杂度为O(N)时完成原问题的要求,如何实现
思路:参见https://blog.oldj.net/2010/05/27/joseph-ring/
"""
from linkedlist.toolcls import Node, PrintMixin
class JosephusCircle(PrintMixin):
@classmethod
def kill_1(cls, head, m):
if head is None or head.next == head or m < 1:
return head
        # locate the node just before head so nodes can be unlinked as we count
        pre = head
        while pre.next != head:
            pre = pre.next
        count = 0
        node = head
        while node.next != node:
            count += 1
            if count == m:
                # this node counted off m: unlink it and restart the count
                pre.next = node.next
                count = 0
            else:
                pre = node
            node = pre.next
        return node
@classmethod
def kill_2(cls, head, m):
if head is None or head.next == head or m < 1:
return head
length = 1
node = head.next
while node != head:
length += 1
node = node.next
alive_num = cls.get_alive(length, m)
count = 1
node = head
while count < alive_num:
count += 1
node = node.next
        node.next = node  # the survivor forms a circular list by itself
        return node
@staticmethod
def get_alive(n, m):
if n == 1:
return 1
        # standard Josephus recurrence, kept 1-based to match the node values
        return (JosephusCircle.get_alive(n - 1, m) + m - 1) % n + 1
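# Worked example of the recurrence above (an illustrative note, not from the
# original file): for n=5, m=3 the 1-based survivor position evolves as
# f(1)=1, f(2)=2, f(3)=2, f(4)=1, f(5)=4, which matches the simulation in
# kill_1 (nodes 3, 1, 5 and 2 are removed and node 4 survives).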
if __name__ == '__main__':
cur_node = Node(1)
cur_node.next = Node(2)
cur_node.next.next = Node(3)
cur_node.next.next.next = Node(4)
cur_node.next.next.next.next = Node(5)
cur_node.next.next.next.next.next = cur_node
cur_node = JosephusCircle.kill_1(cur_node, 3)
print(cur_node.value)
cur_node = Node(1)
cur_node.next = Node(2)
cur_node.next.next = Node(3)
cur_node.next.next.next = Node(4)
cur_node.next.next.next.next = Node(5)
cur_node.next.next.next.next.next = cur_node
cur_node = JosephusCircle.kill_2(cur_node, 3)
print(cur_node.value)
| 23.494118
| 57
| 0.587882
|
64e503cc24246a8ce05d2cecc4894e56a7f9c57f
| 770
|
py
|
Python
|
ib_trading_calendars/exchange_calendar_bvl.py
|
alexanu/ib-trading-calendars
|
5a92770d106542968e856aa54ae48d48b306d7f3
|
[
"Apache-2.0"
] | 9
|
2019-02-04T19:42:12.000Z
|
2021-08-04T18:36:43.000Z
|
ib_trading_calendars/exchange_calendar_bvl.py
|
alexanu/ib-trading-calendars
|
5a92770d106542968e856aa54ae48d48b306d7f3
|
[
"Apache-2.0"
] | 1
|
2020-03-12T17:32:38.000Z
|
2020-03-12T17:32:38.000Z
|
ib_trading_calendars/exchange_calendar_bvl.py
|
alexanu/ib-trading-calendars
|
5a92770d106542968e856aa54ae48d48b306d7f3
|
[
"Apache-2.0"
] | 8
|
2019-02-04T21:08:38.000Z
|
2021-08-04T18:36:45.000Z
|
# Copyright 2019 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from trading_calendars.exchange_calendar_xlis import XLISExchangeCalendar
class BVLExchangeCalendar(XLISExchangeCalendar):
name = "BVL"
| 36.666667
| 74
| 0.781818
|
36d1de7eff4868d89e95fd1d67526e1ae69ed1b7
| 1,148
|
py
|
Python
|
cancer-immune/EMEWS-scripts/python/test/py_tests.py
|
rheiland/PhysiCell-EMEWS-2
|
ec6ae7dab314b839f46a152ce9f5905155012d48
|
[
"BSD-3-Clause"
] | null | null | null |
cancer-immune/EMEWS-scripts/python/test/py_tests.py
|
rheiland/PhysiCell-EMEWS-2
|
ec6ae7dab314b839f46a152ce9f5905155012d48
|
[
"BSD-3-Clause"
] | null | null | null |
cancer-immune/EMEWS-scripts/python/test/py_tests.py
|
rheiland/PhysiCell-EMEWS-2
|
ec6ae7dab314b839f46a152ce9f5905155012d48
|
[
"BSD-3-Clause"
] | 2
|
2019-05-24T02:42:11.000Z
|
2021-07-12T12:19:46.000Z
|
import unittest
import xml.etree.ElementTree as ET
import params2xml
class TestParamsToXML(unittest.TestCase):
def test_params_to_xml(self):
params = {'user_parameters.tumor_radius' : 'foo:bar:100.32',
'user_parameters.number_of_immune_cells' : '10',
'overall.max_time' : '2'}
xml_file = './test/test_data/PhysiCell.xml'
xml_out = './test/test_data/xml_out.xml'
params2xml.params_to_xml(params, xml_file, xml_out)
root = ET.parse(xml_out)
tumor_radius = root.findall("./user_parameters/tumor_radius")[0]
self.assertEqual("foo", tumor_radius.get('type'))
self.assertEqual("bar", tumor_radius.get('units'))
self.assertEqual("100.32", tumor_radius.text)
cells = root.findall("./user_parameters/number_of_immune_cells")[0]
self.assertEqual("int", cells.get('type'))
self.assertEqual("dimensionless", cells.get('units'))
self.assertEqual("10", cells.text)
max_time = root.findall("./overall/max_time")[0]
self.assertEqual('2', max_time.text)
if __name__ == '__main__':
unittest.main()
| 34.787879
| 75
| 0.65331
|
275aa5b5abd42c2c7578ceeca816e0b73d784da0
| 578
|
py
|
Python
|
drf_api_sample/drfapi/models.py
|
yuekui/drf-api-tracking
|
c9264e8c9486288130ea014798d235bc84e94ca2
|
[
"ISC"
] | 191
|
2020-02-23T21:51:48.000Z
|
2022-03-31T15:23:26.000Z
|
drf_api_sample/drfapi/models.py
|
yuekui/drf-api-tracking
|
c9264e8c9486288130ea014798d235bc84e94ca2
|
[
"ISC"
] | 61
|
2020-02-27T17:03:03.000Z
|
2022-03-28T10:59:36.000Z
|
drf_api_sample/drfapi/models.py
|
yuekui/drf-api-tracking
|
c9264e8c9486288130ea014798d235bc84e94ca2
|
[
"ISC"
] | 45
|
2020-02-27T18:46:09.000Z
|
2022-03-11T03:41:11.000Z
|
from django.db import models
# Create your models here.
from django.contrib.auth.models import User
from rest_framework import serializers, viewsets
from rest_framework_tracking.mixins import LoggingMixin
# Serializers define the API representation.
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ['url', 'username', 'email', 'is_staff']
# ViewSets define the view behavior.
class UserViewSet(LoggingMixin, viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
| 27.52381
| 61
| 0.769896
|
5f66153e2359b645e9fe5d3ebb197700a314cc93
| 1,410
|
py
|
Python
|
beakerx/beakerx/beakerx_server.py
|
altavir/beakerx
|
06fb4200d8042fc2a52e3a1ce8be8aa4b72d3743
|
[
"Apache-2.0"
] | 1
|
2018-10-16T18:59:59.000Z
|
2018-10-16T18:59:59.000Z
|
beakerx/beakerx/beakerx_server.py
|
altavir/beakerx
|
06fb4200d8042fc2a52e3a1ce8be8aa4b72d3743
|
[
"Apache-2.0"
] | 6
|
2020-05-20T17:44:02.000Z
|
2020-05-20T17:58:57.000Z
|
beakerx/beakerx/beakerx_server.py
|
altavir/beakerx
|
06fb4200d8042fc2a52e3a1ce8be8aa4b72d3743
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import zmq
import threading
class BeakerxZMQServer:
def __init__(self, beakerXQueue):
self.queue = beakerXQueue
self.url = "tcp://127.0.0.1:" + BeakerxZMQServer.get_free_tcp_port()
thread = threading.Thread(target=self.threaded_function, daemon=True)
thread.start()
def threaded_function(self):
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind(self.url)
while True:
message = socket.recv()
self.queue.put(message)
socket.send_string("Ok")
@staticmethod
def get_free_tcp_port():
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind(('localhost', 0))
addr, port = tcp.getsockname()
tcp.close()
return str(port)
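# Illustrative usage sketch (the queue object is an assumption; any object with
# a put() method would work):
#
#   import queue
#   q = queue.Queue()
#   server = BeakerxZMQServer(q)
#   # a ZMQ REQ client can now connect to server.url; every message it sends is
#   # pushed onto q and answered with the string "Ok".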
| 32.045455
| 77
| 0.678723
|
698b2ac1b92f2be1eb73b13e8f72b45496aa096c
| 444
|
py
|
Python
|
pypipe/lib/__init__.py
|
AGrigis/pypipe
|
a77fc2c81cb469535b650c79718f811c5c056238
|
[
"CECILL-B"
] | null | null | null |
pypipe/lib/__init__.py
|
AGrigis/pypipe
|
a77fc2c81cb469535b650c79718f811c5c056238
|
[
"CECILL-B"
] | null | null | null |
pypipe/lib/__init__.py
|
AGrigis/pypipe
|
a77fc2c81cb469535b650c79718f811c5c056238
|
[
"CECILL-B"
] | null | null | null |
##########################################################################
# PyPipe - Copyright (C) AGrigis, 2017
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that defines all the package tools.
"""
| 34.153846
| 74
| 0.488739
|
c70c144dae1ae89f815263eb79999e4629d33c34
| 2,564
|
py
|
Python
|
src/container/task.py
|
eebook/jianshu2e-book
|
d638fb8c2f47cf8e91e9f74e2e1e5f61f3c98a48
|
[
"MIT"
] | 7
|
2019-01-02T14:52:48.000Z
|
2021-11-05T06:11:46.000Z
|
src/container/task.py
|
knarfeh/jianshu2e-book
|
d638fb8c2f47cf8e91e9f74e2e1e5f61f3c98a48
|
[
"MIT"
] | 2
|
2021-03-22T17:11:32.000Z
|
2021-12-13T19:36:17.000Z
|
src/container/task.py
|
ee-book/jianshu2e-book
|
d638fb8c2f47cf8e91e9f74e2e1e5f61f3c98a48
|
[
"MIT"
] | 2
|
2019-04-18T05:44:24.000Z
|
2021-06-10T09:35:44.000Z
|
# -*- coding: utf-8 -*-
from src.tools.type import Type
from src.container.initialbook import InitialBook
class Spider(object):
def __init__(self):
self.href = ''
return
class SingleTask(object):
u"""
任务信息以对象属性方式进行存储
"""
def __init__(self):
self.kind = ''
self.spider = Spider()
self.book = InitialBook()
return
class TaskPackage(object):
u"""
work_list: kind->single_task.spider_href
book_list: kind->single_task.book
"""
def __init__(self):
self.work_list = {}
self.book_list = {}
return
def add_task(self, single_task=SingleTask()):
if single_task.kind not in self.work_list:
self.work_list[single_task.kind] = []
self.work_list[single_task.kind].append(single_task.spider.href)
if single_task.kind not in self.book_list:
self.book_list[single_task.kind] = []
self.book_list[single_task.kind].append(single_task.book)
return
def get_task(self):
u"""
:return: TaskPackage
"""
if Type.jianshu in self.book_list:
self.merge_jianshu_article_book_list(Type.jianshu)
return self
def merge_jianshu_article_book_list(self, book_type):
u"""
相同类型的 book_type 不同 id merge到一条sql语句中, 比如book_list中,有info_extra='A', info_extra='B'
那么merge之后的sql语句为, select * from jianshu_article where info_extra='A' or info_extra='B'
:param book_type:
:return:
"""
book_list = self.book_list[Type.jianshu]
book = InitialBook()
info_extra = [item.sql.info_extra for item in book_list]
article_extra = [item.sql.article_extra for item in book_list]
book.kind = book_type
book.author_id = book_list[0].author_id
book.sql.info = 'select * from jianshu_info where ({})'.format(' or '.join(info_extra))
book.sql.article = 'select * from jianshu_article where ({})'.format(' or '.join(article_extra))
book.sql.answer = 'select * from jianshu_article where ({})'.format(' or '.join(article_extra))
self.book_list[book_type] = [book]
return
def is_work_list_empty(self):
        for kind in Type.jianshu_type_list:  # currently only latest_article
if self.work_list.get(kind):
return False
return True
def is_book_list_empty(self):
for kind in Type.jianshu_type_list:
if self.book_list.get(kind):
return False
return True
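# Illustrative usage sketch (the href and kind below are placeholders; the book
# attributes are left at their InitialBook defaults):
#
#   package = TaskPackage()
#   task = SingleTask()
#   task.kind = Type.jianshu
#   task.spider.href = 'https://www.jianshu.com/u/example_author_id'
#   package.add_task(task)
#   merged = package.get_task()  # books of the same kind are merged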
| 30.891566
| 104
| 0.620905
|
503936d53eae6b38f64fc9d3cf14cb902ff82a89
| 264
|
py
|
Python
|
filer/test_utils/custom_image/models.py
|
PeterW-LWL/django-filer
|
472a0419bfa185a8b0a861bd0779ac6d817082c7
|
[
"BSD-3-Clause"
] | 134
|
2015-01-01T17:57:03.000Z
|
2021-11-01T15:21:47.000Z
|
filer/test_utils/custom_image/models.py
|
PeterW-LWL/django-filer
|
472a0419bfa185a8b0a861bd0779ac6d817082c7
|
[
"BSD-3-Clause"
] | 143
|
2015-01-05T04:53:01.000Z
|
2015-11-27T14:44:29.000Z
|
filer/test_utils/custom_image/models.py
|
PeterW-LWL/django-filer
|
472a0419bfa185a8b0a861bd0779ac6d817082c7
|
[
"BSD-3-Clause"
] | 86
|
2015-01-05T13:05:25.000Z
|
2021-04-03T01:36:15.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.db import models
from ...models.abstract import BaseImage
class Image(BaseImage):
extra_description = models.TextField()
class Meta(object):
app_label = 'custom_image'
| 18.857143
| 42
| 0.715909
|
72a27ff16b57bad4f9472eb182ecd486526c1596
| 88
|
py
|
Python
|
danny_project/stats.py
|
df3960/APW1
|
2a7c4da19c6077cddbe24d172df281b59c2a8fef
|
[
"CC0-1.0"
] | null | null | null |
danny_project/stats.py
|
df3960/APW1
|
2a7c4da19c6077cddbe24d172df281b59c2a8fef
|
[
"CC0-1.0"
] | null | null | null |
danny_project/stats.py
|
df3960/APW1
|
2a7c4da19c6077cddbe24d172df281b59c2a8fef
|
[
"CC0-1.0"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.read_csv("")
| 14.666667
| 31
| 0.772727
|
40c72b235206093afcfcf615e0b8c3359ce07edb
| 7,405
|
py
|
Python
|
contrib/runners/windows_command_runner/tests/unit/test_windows_runners.py
|
magiceses/st2
|
a048ba92a8a1a5d272f277bf8fab0951df903306
|
[
"Apache-2.0"
] | null | null | null |
contrib/runners/windows_command_runner/tests/unit/test_windows_runners.py
|
magiceses/st2
|
a048ba92a8a1a5d272f277bf8fab0951df903306
|
[
"Apache-2.0"
] | 2
|
2020-03-04T08:33:36.000Z
|
2020-03-04T08:34:14.000Z
|
contrib/runners/windows_command_runner/tests/unit/test_windows_runners.py
|
magiceses/st2
|
a048ba92a8a1a5d272f277bf8fab0951df903306
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest2 import TestCase
import mock
from windows_command_runner.windows_command_runner import BaseWindowsRunner
from windows_script_runner.windows_script_runner import WindowsScriptRunner
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.abspath(os.path.join(BASE_DIR, '../fixtures/windows'))
class WindowsRunnerTestCase(TestCase):
def test_get_winexe_command_args(self):
arguments = [
{
'host': 'localhost',
'username': 'Administrator1',
'password': 'bar1',
'command': 'powershell.exe "C:\\\\myscript.ps1"'
},
{
'host': '127.0.0.1',
'username': 'Administrator2',
'password': 'bar2',
'command': 'dir'
},
{
'host': 'localhost',
'username': 'Administrator3',
'password': 'bar3',
'command': 'dir',
'domain': 'MyDomain'
}
]
expected_values = [
[
'winexe',
'--interactive', '0',
'-U', 'Administrator1%bar1',
'//localhost',
'powershell.exe "C:\\\\myscript.ps1"'
],
[
'winexe',
'--interactive', '0',
'-U', 'Administrator2%bar2',
'//127.0.0.1',
'dir'
],
[
'winexe',
'--interactive', '0',
'-U', 'MyDomain\Administrator3%bar3',
'//localhost',
'dir'
]
]
runner = self._get_base_runner()
for arguments, expected_value in zip(arguments, expected_values):
actual_value = runner._get_winexe_command_args(**arguments)
self.assertEqual(actual_value, expected_value)
def test_get_smbclient_command_args(self):
arguments = [
{
'host': 'localhost',
'username': 'Administrator1',
'password': 'bar1',
'command': 'put /home/1.txt 1.txt',
'share': 'C$'
},
{
'host': 'localhost',
'username': 'Administrator2',
'password': 'bar2',
'command': 'put /home/2.txt 2.txt',
'share': 'D$'
},
{
'host': 'localhost',
'username': 'Administrator3',
'password': 'bar3',
'command': 'dir',
'share': 'E$',
'domain': 'MyDomain'
}
]
expected_values = [
[
'smbclient',
'-U', 'Administrator1%bar1',
'//localhost/C$',
'-c', 'put /home/1.txt 1.txt'
],
[
'smbclient',
'-U', 'Administrator2%bar2',
'//localhost/D$',
'-c', 'put /home/2.txt 2.txt'
],
[
'smbclient',
'-U', 'MyDomain\Administrator3%bar3',
'//localhost/E$',
'-c', 'dir'
],
]
runner = self._get_base_runner()
for arguments, expected_value in zip(arguments, expected_values):
actual_value = runner._get_smbclient_command_args(**arguments)
self.assertEqual(actual_value, expected_value)
def test_get_script_args(self):
arguments = [
{
'positional_args': 'a b c',
'named_args': {
'arg1': 'value1',
'arg2': 'value2'
}
},
{
'positional_args': 'a b c',
'named_args': {
'arg1': 'value1',
'arg2': True,
'arg3': False,
'arg4': ['foo', 'bar', 'baz']
}
}
]
expected_values = [
'a b c -arg1 value1 -arg2 value2',
'a b c -arg1 value1 -arg2 -arg3:$false -arg4 foo,bar,baz'
]
runner = self._get_script_runner()
for arguments, expected_value in zip(arguments, expected_values):
actual_value = runner._get_script_arguments(**arguments)
self.assertEqual(actual_value, expected_value)
def test_parse_share_information(self):
runner = self._get_script_runner()
fixture_path = os.path.join(FIXTURES_DIR, 'net_share_C_stdout.txt')
with open(fixture_path, 'r') as fp:
stdout = fp.read()
result = runner._parse_share_information(stdout=stdout)
expected_keys = ['share_name', 'path', 'remark', 'maximum_users', 'users', 'caching',
'permission']
for key in expected_keys:
self.assertTrue(key in result)
self.assertEqual(result['share_name'], 'C$')
self.assertEqual(result['path'], 'C:\\')
self.assertEqual(result['users'], None)
@mock.patch('windows_script_runner.windows_script_runner.run_command')
def test_get_share_absolute_path(self, mock_run_command):
runner = self._get_script_runner()
fixture_path = os.path.join(FIXTURES_DIR, 'net_share_C_stdout.txt')
with open(fixture_path, 'r') as fp:
stdout = fp.read()
# Failure, non-zero status code
mock_run_command.return_value = (2, '', '', False)
self.assertRaises(Exception, runner._get_share_absolute_path, share='C$')
# Failure, missing / corrupted data
mock_run_command.return_value = (0, '', '', False)
self.assertRaises(Exception, runner._get_share_absolute_path, share='C$')
# Success, everything OK
mock_run_command.return_value = (0, stdout, '', False)
share_path = runner._get_share_absolute_path(share='C$')
self.assertEqual(share_path, 'C:\\')
def test_shell_command_parameter_escaping(self):
pass
def _get_base_runner(self):
class Runner(BaseWindowsRunner):
def pre_run(self):
pass
def run(self):
pass
runner = Runner('id')
return runner
def _get_script_runner(self):
runner = WindowsScriptRunner('id')
runner._host = None
runner._username = None
runner._password = None
runner._timeout = None
return runner
| 33.506787
| 93
| 0.521269
|
7f63dc1b3e40c7be10de47ab50ed56e30efd7a12
| 2,782
|
py
|
Python
|
frappe-bench/env/lib/python2.7/site-packages/github/tests/Status.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/github/tests/Status.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/env/lib/python2.7/site-packages/github/tests/Status.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import Framework
import github
import datetime
class Status(Framework.TestCase):
def testGetStatus(self):
status = self.g.get_api_status()
self.assertEqual(status.status, "good")
self.assertEqual(status.last_updated, datetime.datetime(2013, 9, 6, 8, 29, 27))
def testGetLastMessage(self):
message = self.g.get_last_api_status_message()
self.assertEqual(message.status, "good")
self.assertEqual(message.body, "Everything operating normally.")
self.assertEqual(message.created_on, datetime.datetime(2013, 9, 1, 15, 41, 46))
def testGetMessages(self):
self.assertListKeyEqual(self.g.get_api_status_messages(), lambda m: m.status, ["good", "minor", "good", "minor", "good", "minor", "good", "minor", "good", "major", "good", "minor"])
| 57.958333
| 189
| 0.47376
|
3a4ed027220ffee3285fe614f03f49e14216d1c3
| 2,864
|
py
|
Python
|
logger_bot.py
|
mcurasya/dnd-bot
|
4a08ec98df1f1e80f0a00b8d95f7242405cdb7d9
|
[
"MIT"
] | 1
|
2018-07-07T18:19:07.000Z
|
2018-07-07T18:19:07.000Z
|
logger_bot.py
|
mcurasya/dnd-bot
|
4a08ec98df1f1e80f0a00b8d95f7242405cdb7d9
|
[
"MIT"
] | null | null | null |
logger_bot.py
|
mcurasya/dnd-bot
|
4a08ec98df1f1e80f0a00b8d95f7242405cdb7d9
|
[
"MIT"
] | null | null | null |
import logging
import telebot
import constants
import time
import json
log_bot = telebot.TeleBot(constants.log_token)
abilities = {}
def next_step(message):
if message.from_user.id == constants.my_id:
user_markup = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=False, resize_keyboard=True)
user_markup.add('/users', '/names', '/surnames', '/abilities')
log_bot.send_message(constants.my_id, 'логгирование', reply_markup=user_markup)
else:
log_bot.send_message(message.from_user.id, 'извините, этот бот не для вас')
@log_bot.message_handler(commands='start')
def start_handler(message):
if message.from_user.id == constants.my_id:
user_markup = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=False, resize_keyboard=True)
user_markup.add('/users', '/names', '/surnames', '/abilities')
log_bot.send_message(constants.my_id, 'логгирование', reply_markup=user_markup)
else:
log_bot.send_message(message.from_user.id, 'извините, этот бот не для вас')
@log_bot.message_handler(commands='users')
def users_handler(message):
with open('names_and_surnames\\users.txt', 'r', encoding='utf-8') as f:
users = f.read().strip().split(' ')
for user in users:
log_bot.send_message(constants.my_id, user)
@log_bot.message_handler(commands='names')
def names_handler(message):
with open('names_and_surnames\\names.txt', 'r', encoding='utf-8') as f:
names= f.read().strip().split(' ')
for name in names:
log_bot.send_message(constants.my_id, name)
@log_bot.message_handler(commands='surnames')
def names_handler(message):
with open('names_and_surnames\\surnames.txt', 'r', encoding='utf-8') as f:
names = list(map(str.strip, filter(None, f.read().strip().split(','))))
for name in names:
log_bot.send_message(constants.my_id, name)
@log_bot.message_handler(commands='abilities')
def abilities_handler(message):
global abilities
markup = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=False, resize_keyboard=True)
with open('names_and_surnames\\abilities.json', 'r', encoding='utf-8') as f:
s = f.read()
abilities = json.loads(s)
for name in abilities:
markup.add(name)
log_bot.send_message(constants.my_id, name)
msg = log_bot.send_message(constants.my_id, 'выбери способность', reply_markup=markup)
log_bot.register_next_step_handler(msg, process_ability)
def process_ability(message):
log_bot.send_message(constants.my_id, abilities[message.text])
next_step(message)
while True:
try:
log_bot.polling(none_stop=True)
except Exception as e:
print(e)
time.sleep(3)
logging.error(e)
| 36.253165
| 103
| 0.678422
|
92266c0ffb99b98fe3349cc13c25bc2724b9f8e2
| 2,796
|
py
|
Python
|
hydroffice/ssp_manager/refmonitor_ui.py
|
hydroffice/hyo_sspmanager
|
6722cd9ecd5bf7236b65aa394287751302d641e7
|
[
"BSD-3-Clause"
] | null | null | null |
hydroffice/ssp_manager/refmonitor_ui.py
|
hydroffice/hyo_sspmanager
|
6722cd9ecd5bf7236b65aa394287751302d641e7
|
[
"BSD-3-Clause"
] | null | null | null |
hydroffice/ssp_manager/refmonitor_ui.py
|
hydroffice/hyo_sspmanager
|
6722cd9ecd5bf7236b65aa394287751302d641e7
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals

import wx
import os
import logging

log = logging.getLogger(__name__)


class RefMonitorBase(wx.Frame):

    here = os.path.abspath(os.path.dirname(__file__))

    def __init__(self, *args, **kwds):
        # begin wxGlade: RefractionMonitorBase.__init__
        kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)

        # Menu Bar
        self.RefractionMonitorFrame_menubar = wx.MenuBar()
        self.SetMenuBar(self.RefractionMonitorFrame_menubar)
        # Menu Bar end
        self.RefractionMonitorFrame_statusbar = self.CreateStatusBar(2, 0)
        self.SVCorrectorSlider = wx.Slider(self, -1, 0, -100, 100,
                                           style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS)
        self.SVCorrectorSlider.SetTickFreq(10, 1)

        self.__set_properties()
        self.__do_layout()

        self.Bind(wx.EVT_COMMAND_SCROLL, self.on_ssp_scroll, self.SVCorrectorSlider)
        # end wxGlade

    def __set_properties(self):
        favicon = wx.Icon(os.path.join(self.here, 'media', 'favicon.png'),
                          wx.BITMAP_TYPE_PNG, 32, 32)
        wx.Frame.SetIcon(self, favicon)
        if os.name == 'nt':
            try:
                # This is needed to display the app icon on the taskbar on Windows 7
                import ctypes
                app_id = 'SSP Manager - Refraction Monitor'
                ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(app_id)
            except AttributeError as e:
                log.debug("Unable to change app icon: %s" % e)

        # begin wxGlade: RefractionMonitorBase.__set_properties
        self.SetTitle("Refraction Monitor")
        self.SetSize((700, 700))
        self.RefractionMonitorFrame_statusbar.SetStatusWidths([-1, 400])
        # statusbar fields
        RefractionMonitorFrame_statusbar_fields = ["", ""]
        for i in range(len(RefractionMonitorFrame_statusbar_fields)):
            self.RefractionMonitorFrame_statusbar.SetStatusText(RefractionMonitorFrame_statusbar_fields[i], i)
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: RefractionMonitorBase.__do_layout
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(sizer_1)
        self.Layout()
        self.SetSize((700, 700))
        # end wxGlade

    def on_ssp_scroll(self, event):  # wxGlade: RefractionMonitorBase.<event_handler>
        print("Event handler `on_ssp_scroll' not implemented!")
        event.Skip()


if __name__ == "__main__":
    app = wx.App(False)
    RefractionMonitorFrame = RefMonitorBase(None, -1, "")
    app.SetTopWindow(RefractionMonitorFrame)
    RefractionMonitorFrame.Show()
    app.MainLoop()
| 36.311688
| 110
| 0.650572
|
517798d10e5a519a6b43fada0361d028a989bd95
| 1,412
|
py
|
Python
|
packages/amuse-mameclot/setup.py
|
rknop/amuse
|
85d5bdcc29cfc87dc69d91c264101fafd6658aec
|
[
"Apache-2.0"
] | 1
|
2021-02-24T16:47:48.000Z
|
2021-02-24T16:47:48.000Z
|
packages/amuse-mameclot/setup.py
|
rknop/amuse
|
85d5bdcc29cfc87dc69d91c264101fafd6658aec
|
[
"Apache-2.0"
] | 2
|
2018-06-22T13:02:14.000Z
|
2018-09-06T20:08:43.000Z
|
packages/amuse-mameclot/setup.py
|
rknop/amuse
|
85d5bdcc29cfc87dc69d91c264101fafd6658aec
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os

from support.version import version, main_version
from support.classifiers import classifiers

from setuptools import setup

import support
support.use("system")
from support.setup_codes import setup_commands

name = 'amuse-mameclot'
author = 'The AMUSE team'
author_email = 'info@amusecode.org'
license_ = "Apache License 2.0"
url = 'http://www.amusecode.org/'
install_requires = [
    'amuse-framework>=%s' % main_version,
]
description = 'The Astrophysical Multipurpose Software Environment - Mameclot'
with open("README.md", "r") as fh:
    long_description = fh.read()
long_description_content_type = "text/markdown"

extensions = []

all_data_files = []

packages = ['amuse.community.mameclot']

package_data = {
}

mapping_from_command_name_to_command_class = setup_commands()

setup(
    name=name,
    version=version,
    classifiers=classifiers,
    url=url,
    author_email=author_email,
    author=author,
    license=license_,
    description=description,
    long_description=long_description,
    long_description_content_type=long_description_content_type,
    install_requires=install_requires,
    python_requires=">=3.5",
    cmdclass=mapping_from_command_name_to_command_class,
    ext_modules=extensions,
    package_dir={'amuse.community.mameclot': 'src/amuse/community/mameclot'},
    packages=packages,
    package_data=package_data,
    data_files=all_data_files,
)
| 25.214286
| 78
| 0.763456
|
5540fec2e4cecc684c96f23c09b974ddee0839c1
| 13
|
py
|
Python
|
__init__.py
|
BoWarburton/lambdata-bowarburton
|
16ee2cbf11987e73720789246c8350c032403781
|
[
"MIT"
] | null | null | null |
__init__.py
|
BoWarburton/lambdata-bowarburton
|
16ee2cbf11987e73720789246c8350c032403781
|
[
"MIT"
] | null | null | null |
__init__.py
|
BoWarburton/lambdata-bowarburton
|
16ee2cbf11987e73720789246c8350c032403781
|
[
"MIT"
] | null | null | null |
# Empty init
| 6.5
| 12
| 0.692308
|
e9cd30f0e163aa50934d1efcd256e008c31ce2a3
| 89
|
py
|
Python
|
misc/partsdb/webui/partsdb/apps.py
|
LagunaCreek-Makers/general_info
|
fdd26cd3fbbaf2961f35a8bf39ade51bb2ffdeae
|
[
"Unlicense"
] | null | null | null |
misc/partsdb/webui/partsdb/apps.py
|
LagunaCreek-Makers/general_info
|
fdd26cd3fbbaf2961f35a8bf39ade51bb2ffdeae
|
[
"Unlicense"
] | null | null | null |
misc/partsdb/webui/partsdb/apps.py
|
LagunaCreek-Makers/general_info
|
fdd26cd3fbbaf2961f35a8bf39ade51bb2ffdeae
|
[
"Unlicense"
] | null | null | null |
from django.apps import AppConfig
class PartsdbConfig(AppConfig):
    name = 'partsdb'
| 14.833333
| 33
| 0.752809
|
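An AppConfig such as PartsdbConfig only takes effect once Django can find it, typically by listing the app in the project settings. A minimal sketch, assuming a conventional settings.py; the neighbouring apps shown are illustrative, not taken from this project.

# settings.py (illustrative)
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'partsdb.apps.PartsdbConfig',  # register the parts database app via its AppConfig
]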
da99dc386a7c69e3a6d88f3053200a75b14cf248
| 408
|
py
|
Python
|
packages/python/plotly/plotly/validators/layout/xaxis/_uirevision.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/layout/xaxis/_uirevision.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/layout/xaxis/_uirevision.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators


class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
    def __init__(self, plotly_name="uirevision", parent_name="layout.xaxis", **kwargs):
        super(UirevisionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
| 34
| 87
| 0.678922
|
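The validator above accepts any value for layout.xaxis.uirevision, the attribute Plotly uses to decide whether user-driven view state (zoom, pan) is preserved across figure updates. A hedged usage sketch is below; the data and the revision string are made up for illustration.

import plotly.graph_objects as go

fig = go.Figure(data=[go.Scatter(x=[1, 2, 3], y=[4, 1, 2])])
# As long as uirevision keeps the same value across updates, the user's
# zoom/pan state on the x-axis is retained instead of being reset.
fig.update_layout(xaxis=dict(uirevision="keep-view"))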
8b80f0e2016a1e49bd9b7fda71058c3b3b3d40a3
| 69
|
py
|
Python
|
ex17/ex17-short.py
|
python-practice/lpthw
|
ad06dfe6a5d2351ee9216b365ff688db820cc035
|
[
"MIT"
] | 1
|
2015-07-18T15:09:40.000Z
|
2015-07-18T15:09:40.000Z
|
ex17/ex17-short.py
|
python-practice/lpthw
|
ad06dfe6a5d2351ee9216b365ff688db820cc035
|
[
"MIT"
] | null | null | null |
ex17/ex17-short.py
|
python-practice/lpthw
|
ad06dfe6a5d2351ee9216b365ff688db820cc035
|
[
"MIT"
] | null | null | null |
from sys import argv; open(argv[2], 'w').write(open(argv[1]).read())
| 34.5
| 68
| 0.652174
|
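The one-liner above copies a file by reading it entirely into memory and never explicitly closes either handle, which is fine for the exercise but not the conventional approach. A sketch of the standard-library equivalent:

# Same behaviour as ex17-short.py: argv[1] is the source, argv[2] the destination.
import shutil
import sys

shutil.copyfile(sys.argv[1], sys.argv[2])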
0ef5af6d2bb27698763da99d536f736aa785f5bb
| 3,242
|
py
|
Python
|
elasticsearch/client/ingest.py
|
achave11/elasticsearch-py
|
5611445203ebabb1a450b17c2c93cd3546a12071
|
[
"Apache-2.0"
] | 3
|
2020-12-02T21:25:28.000Z
|
2021-12-01T18:21:21.000Z
|
elasticsearch/client/ingest.py
|
achave11/elasticsearch-py
|
5611445203ebabb1a450b17c2c93cd3546a12071
|
[
"Apache-2.0"
] | 1
|
2021-06-02T01:45:42.000Z
|
2021-06-02T01:45:42.000Z
|
env/lib/python2.7/site-packages/elasticsearch/client/ingest.py
|
Eric-Muthemba/qontroverse
|
1f12d0e3bbdee628a88bac77dc53426ded220755
|
[
"MIT"
] | 1
|
2021-12-01T18:21:12.000Z
|
2021-12-01T18:21:12.000Z
|
from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH


class IngestClient(NamespacedClient):
    @query_params("master_timeout")
    def get_pipeline(self, id=None, params=None):
        """
        Returns a pipeline.
        `<https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html>`_

        :arg id: Comma separated list of pipeline ids. Wildcards
            supported
        :arg master_timeout: Explicit operation timeout for connection
            to master node
        """
        return self.transport.perform_request(
            "GET", _make_path("_ingest", "pipeline", id), params=params
        )

    @query_params("master_timeout", "timeout")
    def put_pipeline(self, id, body, params=None):
        """
        Creates or updates a pipeline.
        `<https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.html>`_

        :arg id: Pipeline ID
        :arg body: The ingest definition
        :arg master_timeout: Explicit operation timeout for connection
            to master node
        :arg timeout: Explicit operation timeout
        """
        for param in (id, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")
        return self.transport.perform_request(
            "PUT", _make_path("_ingest", "pipeline", id), params=params, body=body
        )

    @query_params("master_timeout", "timeout")
    def delete_pipeline(self, id, params=None):
        """
        Deletes a pipeline.
        `<https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html>`_

        :arg id: Pipeline ID
        :arg master_timeout: Explicit operation timeout for connection
            to master node
        :arg timeout: Explicit operation timeout
        """
        if id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'id'.")
        return self.transport.perform_request(
            "DELETE", _make_path("_ingest", "pipeline", id), params=params
        )

    @query_params("verbose")
    def simulate(self, body, id=None, params=None):
        """
        Allows to simulate a pipeline with example documents.
        `<https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html>`_

        :arg body: The simulate definition
        :arg id: Pipeline ID
        :arg verbose: Verbose mode. Display data output for each
            processor in executed pipeline
        """
        if body in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'body'.")
        return self.transport.perform_request(
            "GET",
            _make_path("_ingest", "pipeline", id, "_simulate"),
            params=params,
            body=body,
        )

    @query_params()
    def processor_grok(self, params=None):
        """
        Returns a list of the built-in patterns.
        `<https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html#grok-processor-rest-get>`_
        """
        return self.transport.perform_request(
            "GET", "/_ingest/processor/grok", params=params
        )
| 36.426966
| 119
| 0.627082
|
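IngestClient is normally reached through the ingest attribute of an Elasticsearch client object rather than instantiated directly. A minimal sketch of the put_pipeline/simulate round trip described by the docstrings above; the host, pipeline id, and documents are placeholders, not values from the source.

from elasticsearch import Elasticsearch

es = Elasticsearch(["http://localhost:9200"])  # placeholder host

# Create (or overwrite) a pipeline with a single `set` processor.
es.ingest.put_pipeline(
    id="my-pipeline",
    body={
        "description": "add a constant field",
        "processors": [{"set": {"field": "source", "value": "demo"}}],
    },
)

# Dry-run the pipeline against an example document without indexing it.
result = es.ingest.simulate(
    body={"docs": [{"_source": {"message": "hello"}}]},
    id="my-pipeline",
)
print(result)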