hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cee91e0a7cd69bd9e810fde981d454c673db94a7 | 4,746 | py | Python | is_even_nn.py | phschoepf/isEvenNN | 975ab4d936ab669550919a0eb91d9acffd8a4820 | [
"MIT"
] | null | null | null | is_even_nn.py | phschoepf/isEvenNN | 975ab4d936ab669550919a0eb91d9acffd8a4820 | [
"MIT"
] | null | null | null | is_even_nn.py | phschoepf/isEvenNN | 975ab4d936ab669550919a0eb91d9acffd8a4820 | [
"MIT"
] | null | null | null | import torch
import random
import struct
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
from typing import Union
def binary_float(num: float, network=True) -> list[float]:
"""Convert a float to a 32-long list of bits according to IEEE 754.
:param: num number to be converted, must be float
:param: network format in network byte order, i.e. big endian. Default True.
:returns: list of float, either 1.0f or 0.0f (this is because Pytorch uses float tensors)
"""
fmt = '!f' if network else 'f'
bitstring = ''.join(bin(c).replace('0b', '').rjust(8, '0') for c in struct.pack(fmt, num))
return [float(bit) for bit in bitstring]
| 37.666667 | 117 | 0.600295 | import torch
import random
import struct
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
from typing import Union
class IsEvenNN(object):
def __init__(self, optimizer=torch.optim.Adam, criterion=nn.L1Loss()):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.net = nn.Sequential(
nn.Linear(32, 64),
nn.ReLU(),
nn.Linear(64, 64),
nn.ReLU(),
nn.Linear(64, 16),
nn.ReLU(),
nn.Linear(16, 1),
nn.Sigmoid()
)
self.net.to(self.device)
self.optimizer = optimizer(self.net.parameters())
self.criterion = criterion
def train(self, xtrain: list, ytrain: list, n_epochs: int):
assert len(xtrain) == len(ytrain), "data and label list are not same length"
train_set = TensorDataset(torch.tensor(xtrain, device=self.device), torch.tensor(ytrain, device=self.device))
self.net.train()
for epoch in range(n_epochs): # loop over the dataset multiple times
running_loss = 0.0
for i, (inputs, labels) in enumerate(DataLoader(train_set, batch_size=32)):
# zero the parameter gradients
self.optimizer.zero_grad()
# forward + backward + optimize
outputs = self.net(inputs)
loss = self.criterion(outputs, labels.unsqueeze(1))
loss.backward()
self.optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f'[{epoch}, {i + 1:5d}] loss: {running_loss / 2000:.3e}')
running_loss = 0.0
print('Finished Training')
def predict(self, xtest: list[list[float]], extras=False):
self.net.eval()
xtest_tensor = torch.tensor(xtest, device=self.device)
with torch.no_grad():
outputs = self.net(xtest_tensor).squeeze().tolist() # float outputs of the network
if type(outputs) is not list:
outputs = [outputs]
predictions = [y > 0.5 for y in outputs] # boolean predictions
return predictions, outputs if extras else predictions
def accuracy(self, xtest: list[list[float]], ytest: list[Union[float, bool]]) -> float:
preds = self.predict(xtest)
if type(ytest) == list[float]:
ytest = [y > 0.5 for y in ytest] # convert ground truths to boolean
corrects = [x == y for x, y in zip(preds, ytest)]
return sum(corrects)/len(corrects)
def predict_single(self, number) -> tuple[bool, float]:
"""Predict a single number. Any format that can be understood by int() is accepted."""
bits = binary_int(int(number))
if len(bits) != 32:
raise IndexError(f"Could not convert {number} to 32-bit int")
outputs, conf = self.predict([bits], extras=True)
return outputs[0], conf[0]
def __call__(self, *args, **kwargs):
return self.net(*args, **kwargs)
def _random_float(lower, upper) -> float:
return random.random() * (upper - lower) + lower
def binary_float(num: float, network=True) -> list[float]:
"""Convert a float to a 32-long list of bits according to IEEE 754.
:param: num number to be converted, must be float
:param: network format in network byte order, i.e. big endian. Default True.
:returns: list of float, either 1.0f or 0.0f (this is because Pytorch uses float tensors)
"""
fmt = '!f' if network else 'f'
bitstring = ''.join(bin(c).replace('0b', '').rjust(8, '0') for c in struct.pack(fmt, num))
return [float(bit) for bit in bitstring]
def binary_int(num: int) -> list[float]:
bitstring = format(num, 'b').rjust(32, '0')
return [float(bit) for bit in bitstring]
def generate_floats(length: int, lower: float = 0, upper: float = 1e9) -> tuple[
list[float], list[list[float]], list[float]]:
xarray = []
yarray = []
floats = []
for i in range(length):
number = _random_float(lower, upper)
floats.append(number)
xarray.append(binary_float(number))
yarray.append(float(int(number) % 2 == 0))
return floats, xarray, yarray
def generate_ints(length: int, lower: int = 0, upper: int = 0xfffffff) -> tuple[
list[int], list[list[float]], list[float]]:
xarray = []
yarray = []
ints = []
for i in range(length):
number = random.randint(lower, upper)
ints.append(number)
xarray.append(binary_int(number))
yarray.append(float(number % 2 == 0))
return ints, xarray, yarray
| 3,412 | 526 | 115 |
e20ae054adaddd3e168e05dc5a2073da1717e776 | 3,132 | py | Python | data/crop_frames.py | Aggrathon/TrafficSignRecognizer | e8425eb967baa39b4f57f2636eb3566a291e926b | [
"Apache-2.0"
] | null | null | null | data/crop_frames.py | Aggrathon/TrafficSignRecognizer | e8425eb967baa39b4f57f2636eb3566a291e926b | [
"Apache-2.0"
] | null | null | null | data/crop_frames.py | Aggrathon/TrafficSignRecognizer | e8425eb967baa39b4f57f2636eb3566a291e926b | [
"Apache-2.0"
] | null | null | null |
import os
import pygame
from config import DIR_FRAMES_POTENTIAL, DIR_FRAMES_SIGNS, DIR_CROPPED_SIGNS, DIR_FRAMES_NO_SIGNS, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGES_PER_SECOND, CROPPED_SIZE
from window import Window, get_rnd_filename, save_cropped
if __name__ == "__main__":
main()
| 40.153846 | 157 | 0.683589 |
import os
import pygame
from config import DIR_FRAMES_POTENTIAL, DIR_FRAMES_SIGNS, DIR_CROPPED_SIGNS, DIR_FRAMES_NO_SIGNS, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGES_PER_SECOND, CROPPED_SIZE
from window import Window, get_rnd_filename, save_cropped
def no_sign(images, window, index, last):
os.rename(os.path.join(DIR_FRAMES_POTENTIAL, images[index]), os.path.join(DIR_FRAMES_NO_SIGNS, images[index]))
window.show_image(images[index + 1], DIR_FRAMES_POTENTIAL)
return index + 1, index
def has_sign(images, window, index, last):
os.rename(os.path.join(DIR_FRAMES_POTENTIAL, images[index]), os.path.join(DIR_FRAMES_SIGNS, images[index]))
window.show_image(images[index+1], DIR_FRAMES_POTENTIAL)
return index+1, index
def undo(images, window, index, last):
index = max(0, index-IMAGES_PER_SECOND)
for i in range(index, last+1):
try:
os.rename(os.path.join(DIR_FRAMES_NO_SIGNS, images[i]), os.path.join(DIR_FRAMES_POTENTIAL, images[i]))
except:
pass
try:
os.rename(os.path.join(DIR_FRAMES_SIGNS, images[i]), os.path.join(DIR_FRAMES_POTENTIAL, images[i]))
except:
pass
window.show_image(images[index], DIR_FRAMES_POTENTIAL)
return index, index
def on_mouse_move(images, window, index, last):
x, y = pygame.mouse.get_pos()
size = CROPPED_SIZE*IMAGE_WIDTH//window.screen.get_width()
x = min(max(0, x-size//2), window.screen.get_width()-size)
y = min(max(0, y-size//2), window.screen.get_height()-size)
window.draw_rects([
((255, 0, 0), (x, y, size, size), 1),
((128, 128, 128), (x+20, y+20, size-20, size-20), 1),
])
return index, last
def on_mouse_click(images, window, index, last):
#DRAW
x, y = pygame.mouse.get_pos()
size_x = CROPPED_SIZE*window.screen.get_width()//IMAGE_WIDTH
size_y = CROPPED_SIZE*window.screen.get_height()//IMAGE_HEIGHT
x = min(max(0, x-size_x//2), window.screen.get_width()-size_x)
y = min(max(0, y-size_y//2), window.screen.get_height()-size_y)
window.draw_rects([((0, 0, 255), (x, y, size_x, size_y), 1)])
#SAVE
x, y = pygame.mouse.get_pos()
x = max(0, min(x*IMAGE_WIDTH//window.screen.get_width()-CROPPED_SIZE//2, IMAGE_WIDTH-CROPPED_SIZE))
y = max(0, min(y*IMAGE_HEIGHT//window.screen.get_height()-CROPPED_SIZE//2, IMAGE_HEIGHT-CROPPED_SIZE))
img = pygame.image.load(os.path.join(DIR_FRAMES_POTENTIAL, images[index]))
save_cropped(img, (x, y, CROPPED_SIZE, CROPPED_SIZE), DIR_CROPPED_SIGNS)
return index, last
def main():
os.makedirs(DIR_FRAMES_NO_SIGNS, exist_ok=True)
os.makedirs(DIR_FRAMES_SIGNS, exist_ok=True)
os.makedirs(DIR_CROPPED_SIGNS, exist_ok=True)
imgs = os.listdir(DIR_FRAMES_POTENTIAL)
imgs.sort(reverse=True)
window = Window("P: No Sign O: Has Sign <=: Undo M1: Crop Sign")
actions = {
pygame.K_p: no_sign,
pygame.K_o: has_sign,
pygame.K_BACKSPACE: undo,
-12: on_mouse_move,
-10: on_mouse_click
}
window.iterate(imgs, actions, DIR_FRAMES_POTENTIAL)
if __name__ == "__main__":
main()
| 2,712 | 0 | 138 |
7371bd623568cf774e17cfe3b560b41b5586c054 | 4,749 | py | Python | starthinker_ui/account/models.py | arbrown/starthinker | 1a14664fb1a8f2a757b100363ea8958833b7754c | [
"Apache-2.0"
] | 138 | 2018-11-28T21:42:44.000Z | 2022-03-30T17:26:35.000Z | starthinker_ui/account/models.py | arbrown/starthinker | 1a14664fb1a8f2a757b100363ea8958833b7754c | [
"Apache-2.0"
] | 36 | 2019-02-19T18:33:20.000Z | 2022-01-24T18:02:44.000Z | starthinker_ui/account/models.py | arbrown/starthinker | 1a14664fb1a8f2a757b100363ea8958833b7754c | [
"Apache-2.0"
] | 54 | 2018-12-06T05:47:32.000Z | 2022-02-21T22:01:01.000Z | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from random import choice
from googleapiclient import discovery
from django.db import models
from django.conf import settings
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from starthinker.util.auth_wrapper import CredentialsUserWrapper
from starthinker_ui.account.apps import USER_BUCKET
| 30.248408 | 111 | 0.695936 | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from random import choice
from googleapiclient import discovery
from django.db import models
from django.conf import settings
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from starthinker.util.auth_wrapper import CredentialsUserWrapper
from starthinker_ui.account.apps import USER_BUCKET
def token_generate(model_class, model_field, length=8):
token = None
while not token:
token = ''.join([
choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
for i in range(length)
])
try:
if model_class.objects.filter(**{model_field: token}).exists():
token = None
except Exception:
pass
return token
def get_profile(credentials):
service = discovery.build('oauth2', 'v2', credentials=credentials)
return service.userinfo().get().execute()
class AccountManager(BaseUserManager):
def create_user(self, credentials=None, password=None, profile=None):
if profile is None:
profile = get_profile(credentials)
account = self.model(
identifier=profile['id'],
email=self.normalize_email(profile['email']),
name=profile['given_name'],
domain=profile.get('hd', ''),
picture=profile['picture'],
)
account.set_credentials(credentials)
account.set_password(password)
account.save(using=self._db)
return account
def get_or_create_user(self, credentials=None, password=None):
profile = get_profile(credentials)
try:
account = Account.objects.get(identifier=profile['id'])
account.email = self.normalize_email(profile['email'])
account.name = profile['given_name']
domain = profile.get('hd', '')
account.picture = profile['picture']
account.set_credentials(credentials)
account.set_password(password)
account.save(using=self._db)
except:
account = self.create_user(credentials, password, profile)
return account
def create_superuser(self, credentials, password):
account = self.create_user(credentials, password)
account.is_admin = True
account.save(using=self._db)
return account
class Account(AbstractBaseUser):
identifier = models.CharField(max_length=64, unique=True, db_index=True)
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255, blank=True, default='')
domain = models.CharField(max_length=255, blank=True, default='')
picture = models.CharField(max_length=255, blank=True, default='')
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
birthday = models.DateField(auto_now_add=True)
objects = AccountManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['identifier']
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __str__(self):
return self.email
def __unicode__(self):
return self.email
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
@property
def is_staff(self):
return self.is_admin
def get_profile(self):
return get_profile(self.get_credentials())
def get_domain(self):
try:
domain = get_profile(self.get_credentials()).get('hd', '')
if domain != self.domain:
self.domain = domain
self.save(update_fields=['domain'])
except:
pass
return self.domain
def set_credentials(self, credentials):
# check if refresh token exists before saving credentials ( only given when authenticating not refreshing )
if self.identifier and credentials.refresh_token:
buffer = CredentialsUserWrapper()
buffer.from_credentials(credentials)
buffer.save(self.get_credentials_path())
def get_credentials(self):
return CredentialsUserWrapper(
self.get_credentials_path()) if self.identifier else None
def get_credentials_path(self):
return '%s:ui/%s.json' % (USER_BUCKET, self.identifier)
| 2,602 | 916 | 167 |
1cd8854e57629a270ecee576f8e3a1125e739dfe | 543 | py | Python | scripts/HumSensor.py | leardilap/monitoring | f0cf2c49ff1be4c33237d005899a842f0cdd6c8e | [
"MIT"
] | 1 | 2021-07-01T13:32:05.000Z | 2021-07-01T13:32:05.000Z | scripts/HumSensor.py | leardilap/monitoring | f0cf2c49ff1be4c33237d005899a842f0cdd6c8e | [
"MIT"
] | null | null | null | scripts/HumSensor.py | leardilap/monitoring | f0cf2c49ff1be4c33237d005899a842f0cdd6c8e | [
"MIT"
] | 1 | 2021-07-01T13:43:13.000Z | 2021-07-01T13:43:13.000Z | #!/usr/bin/python3.4
import socket
import pickle
import struct
import serial
import time
from datetime import datetime
import sys
import math
import snap7
client = snap7.client.Client()
client.connect('137.138.192.181', 0, 0)
topo = client.db_read(402,36,1)
topo2 = client.db_read(402,44,1)
print(hex(topo[0]), hex(topo2[0]))
print(topo[0]&0b00001, topo2[0]&0b00001)
#for probe in probes:
# byte_index=probes[probe]
# x = topo[byte_index:byte_index + 4]
# temps[probe] = struct.unpack('>f', struct.pack('4B', *x))[0]
| 19.392857 | 67 | 0.694291 | #!/usr/bin/python3.4
import socket
import pickle
import struct
import serial
import time
from datetime import datetime
import sys
import math
import snap7
client = snap7.client.Client()
client.connect('137.138.192.181', 0, 0)
topo = client.db_read(402,36,1)
topo2 = client.db_read(402,44,1)
print(hex(topo[0]), hex(topo2[0]))
print(topo[0]&0b00001, topo2[0]&0b00001)
#for probe in probes:
# byte_index=probes[probe]
# x = topo[byte_index:byte_index + 4]
# temps[probe] = struct.unpack('>f', struct.pack('4B', *x))[0]
| 0 | 0 | 0 |
6d2a659f5d9c0423ed4e59aac625ef2b6e383780 | 2,849 | py | Python | switchboard/tests_mailer.py | Duke-GCB/D4S2 | 47bef4b632967440608f2cc7a3fc31c32b2060fa | [
"MIT"
] | null | null | null | switchboard/tests_mailer.py | Duke-GCB/D4S2 | 47bef4b632967440608f2cc7a3fc31c32b2060fa | [
"MIT"
] | 138 | 2016-09-23T18:09:18.000Z | 2022-03-03T15:50:19.000Z | switchboard/tests_mailer.py | Duke-GCB/D4S2 | 47bef4b632967440608f2cc7a3fc31c32b2060fa | [
"MIT"
] | null | null | null | from django.test import TestCase, override_settings
from switchboard.mailer import generate_message
TEST_EMAIL_FROM_ADDRESS='noreply@domain.com'
@override_settings(EMAIL_FROM_ADDRESS=TEST_EMAIL_FROM_ADDRESS)
| 44.515625 | 135 | 0.67357 | from django.test import TestCase, override_settings
from switchboard.mailer import generate_message
TEST_EMAIL_FROM_ADDRESS='noreply@domain.com'
@override_settings(EMAIL_FROM_ADDRESS=TEST_EMAIL_FROM_ADDRESS)
class MailerTestCase(TestCase):
def setUp(self):
self.reply_to_email = 'sender@domain.com'
self.rcpt_email = 'receiver@school.edu'
self.cc_email = 'core@domain.com'
self.subject = 'Data is ready'
self.template_text = 'order {{ order_number }} draft to {{ recipient_name }} from {{ sender_name }} for {{ project_name }}'
self.context = {
'order_number': 12345,
'project_name': 'Project ABC',
'recipient_name': 'Receiver Name',
'sender_name': 'Sender Name',
}
def test_generate_message(self):
message = generate_message(self.reply_to_email, self.rcpt_email, self.cc_email, self.subject, self.template_text, self.context)
self.assertIn('order 12345', message.body)
self.assertIn('draft to Receiver Name', message.body)
self.assertIn('from Sender Name', message.body)
self.assertEqual(TEST_EMAIL_FROM_ADDRESS, message.from_email)
self.assertIn(self.reply_to_email, message.reply_to)
self.assertEqual(self.subject, message.subject)
self.assertIn(self.rcpt_email, message.to)
self.assertIn(self.cc_email, message.cc)
def test_generate_message_no_cc(self):
message = generate_message(self.reply_to_email, self.rcpt_email, None, self.subject, self.template_text, self.context)
self.assertEqual(message.cc, [])
def test_generate_message_no_escape(self):
template_text = 'message {{ message }}'
context = {
'message': "I don't want this",
}
message = generate_message(self.reply_to_email, self.rcpt_email, self.cc_email, self.subject, template_text, context)
self.assertIn("message I don't want this", message.body)
def test_generate_message_strsplit(self):
template_text = 'message {{ message | strsplit:"_" | listidx:1 }}'
context = {
'message': "This_that_other",
}
message = generate_message(self.reply_to_email, self.rcpt_email, self.cc_email, self.subject, template_text, context)
self.assertEqual("message that", message.body)
context = {
'message': "one_two_three_four",
}
message = generate_message(self.reply_to_email, self.rcpt_email, self.cc_email, self.subject, template_text, context)
self.assertEqual("message two", message.body)
context = {
'message': "one",
}
message = generate_message(self.reply_to_email, self.rcpt_email, self.cc_email, self.subject, template_text, context)
self.assertEqual("message ", message.body)
| 2,471 | 10 | 157 |
0be45e1af6aebf70bf6117504616061c6e554fe6 | 26,842 | py | Python | hio-yocto-bsp/sources/poky/bitbake/lib/bb/utils.py | qiangzai00001/hio-prj | 060ff97fe21093b1369db78109d5b730b2b181c8 | [
"MIT"
] | null | null | null | hio-yocto-bsp/sources/poky/bitbake/lib/bb/utils.py | qiangzai00001/hio-prj | 060ff97fe21093b1369db78109d5b730b2b181c8 | [
"MIT"
] | null | null | null | hio-yocto-bsp/sources/poky/bitbake/lib/bb/utils.py | qiangzai00001/hio-prj | 060ff97fe21093b1369db78109d5b730b2b181c8 | [
"MIT"
] | null | null | null | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake Utility Functions
"""
# Copyright (C) 2004 Michael Lauer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re, fcntl, os, string, stat, shutil, time
import sys
import errno
import logging
import bb
import bb.msg
import multiprocessing
import fcntl
import subprocess
import glob
import traceback
import errno
from commands import getstatusoutput
from contextlib import contextmanager
logger = logging.getLogger("BitBake.Util")
# Context used in better_exec, eval
_context = clean_context()
def split_version(s):
"""Split a version string into its constituent parts (PE, PV, PR)"""
s = s.strip(" <>=")
e = 0
if s.count(':'):
e = int(s.split(":")[0])
s = s.split(":")[1]
r = ""
if s.count('-'):
r = s.rsplit("-", 1)[1]
s = s.rsplit("-", 1)[0]
v = s
return (e, v, r)
def explode_deps(s):
"""
Take an RDEPENDS style string of format:
"DEPEND1 (optional version) DEPEND2 (optional version) ..."
and return a list of dependencies.
Version information is ignored.
"""
r = []
l = s.split()
flag = False
for i in l:
if i[0] == '(':
flag = True
#j = []
if not flag:
r.append(i)
#else:
# j.append(i)
if flag and i.endswith(')'):
flag = False
# Ignore version
#r[-1] += ' ' + ' '.join(j)
return r
def explode_dep_versions2(s):
"""
Take an RDEPENDS style string of format:
"DEPEND1 (optional version) DEPEND2 (optional version) ..."
and return a dictionary of dependencies and versions.
"""
r = {}
l = s.replace(",", "").split()
lastdep = None
lastcmp = ""
lastver = ""
incmp = False
inversion = False
for i in l:
if i[0] == '(':
incmp = True
i = i[1:].strip()
if not i:
continue
if incmp:
incmp = False
inversion = True
# This list is based on behavior and supported comparisons from deb, opkg and rpm.
#
# Even though =<, <<, ==, !=, =>, and >> may not be supported,
# we list each possibly valid item.
# The build system is responsible for validation of what it supports.
if i.startswith(('<=', '=<', '<<', '==', '!=', '>=', '=>', '>>')):
lastcmp = i[0:2]
i = i[2:]
elif i.startswith(('<', '>', '=')):
lastcmp = i[0:1]
i = i[1:]
else:
# This is an unsupported case!
lastcmp = (i or "")
i = ""
i.strip()
if not i:
continue
if inversion:
if i.endswith(')'):
i = i[:-1] or ""
inversion = False
if lastver and i:
lastver += " "
if i:
lastver += i
if lastdep not in r:
r[lastdep] = []
r[lastdep].append(lastcmp + " " + lastver)
continue
#if not inversion:
lastdep = i
lastver = ""
lastcmp = ""
if not (i in r and r[i]):
r[lastdep] = []
return r
def join_deps(deps, commasep=True):
"""
Take the result from explode_dep_versions and generate a dependency string
"""
result = []
for dep in deps:
if deps[dep]:
if isinstance(deps[dep], list):
for v in deps[dep]:
result.append(dep + " (" + v + ")")
else:
result.append(dep + " (" + deps[dep] + ")")
else:
result.append(dep)
if commasep:
return ", ".join(result)
else:
return " ".join(result)
def _print_trace(body, line):
"""
Print the Environment of a Text Body
"""
error = []
# print the environment of the method
min_line = max(1, line-4)
max_line = min(line + 4, len(body))
for i in range(min_line, max_line + 1):
if line == i:
error.append(' *** %.4d:%s' % (i, body[i-1].rstrip()))
else:
error.append(' %.4d:%s' % (i, body[i-1].rstrip()))
return error
def better_compile(text, file, realfile, mode = "exec"):
"""
A better compile method. This method
will print the offending lines.
"""
try:
return compile(text, file, mode)
except Exception as e:
error = []
# split the text into lines again
body = text.split('\n')
error.append("Error in compiling python function in %s:\n" % realfile)
if e.lineno:
error.append("The code lines resulting in this error were:")
error.extend(_print_trace(body, e.lineno))
else:
error.append("The function causing this error was:")
for line in body:
error.append(line)
error.append("%s: %s" % (e.__class__.__name__, str(e)))
logger.error("\n".join(error))
e = bb.BBHandledException(e)
raise e
def better_exec(code, context, text = None, realfile = "<code>"):
"""
Similiar to better_compile, better_exec will
print the lines that are responsible for the
error.
"""
import bb.parse
if not text:
text = code
if not hasattr(code, "co_filename"):
code = better_compile(code, realfile, realfile)
try:
exec(code, get_context(), context)
except bb.BBHandledException:
# Error already shown so passthrough
raise
except Exception as e:
(t, value, tb) = sys.exc_info()
if t in [bb.parse.SkipPackage, bb.build.FuncFailed]:
raise
try:
_print_exception(t, value, tb, realfile, text, context)
except Exception as e:
logger.error("Exception handler error: %s" % str(e))
e = bb.BBHandledException(e)
raise e
@contextmanager
def fileslocked(files):
"""Context manager for locking and unlocking file locks."""
locks = []
if files:
for lockfile in files:
locks.append(bb.utils.lockfile(lockfile))
yield
for lock in locks:
bb.utils.unlockfile(lock)
def lockfile(name, shared=False, retry=True):
"""
Use the file fn as a lock file, return when the lock has been acquired.
Returns a variable to pass to unlockfile().
"""
dirname = os.path.dirname(name)
mkdirhier(dirname)
if not os.access(dirname, os.W_OK):
logger.error("Unable to acquire lock '%s', directory is not writable",
name)
sys.exit(1)
op = fcntl.LOCK_EX
if shared:
op = fcntl.LOCK_SH
if not retry:
op = op | fcntl.LOCK_NB
while True:
# If we leave the lockfiles lying around there is no problem
# but we should clean up after ourselves. This gives potential
# for races though. To work around this, when we acquire the lock
# we check the file we locked was still the lock file on disk.
# by comparing inode numbers. If they don't match or the lockfile
# no longer exists, we start again.
# This implementation is unfair since the last person to request the
# lock is the most likely to win it.
try:
lf = open(name, 'a+')
fileno = lf.fileno()
fcntl.flock(fileno, op)
statinfo = os.fstat(fileno)
if os.path.exists(lf.name):
statinfo2 = os.stat(lf.name)
if statinfo.st_ino == statinfo2.st_ino:
return lf
lf.close()
except Exception:
try:
lf.close()
except Exception:
pass
pass
if not retry:
return None
def unlockfile(lf):
"""
Unlock a file locked using lockfile()
"""
try:
# If we had a shared lock, we need to promote to exclusive before
# removing the lockfile. Attempt this, ignore failures.
fcntl.flock(lf.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
os.unlink(lf.name)
except (IOError, OSError):
pass
fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
lf.close()
def md5_file(filename):
"""
Return the hex string representation of the MD5 checksum of filename.
"""
try:
import hashlib
m = hashlib.md5()
except ImportError:
import md5
m = md5.new()
with open(filename, "rb") as f:
for line in f:
m.update(line)
return m.hexdigest()
def sha256_file(filename):
"""
Return the hex string representation of the 256-bit SHA checksum of
filename. On Python 2.4 this will return None, so callers will need to
handle that by either skipping SHA checks, or running a standalone sha256sum
binary.
"""
try:
import hashlib
except ImportError:
return None
s = hashlib.sha256()
with open(filename, "rb") as f:
for line in f:
s.update(line)
return s.hexdigest()
def preserved_envvars_exported():
"""Variables which are taken from the environment and placed in and exported
from the metadata"""
return [
'BB_TASKHASH',
'HOME',
'LOGNAME',
'PATH',
'PWD',
'SHELL',
'TERM',
'USER',
]
def preserved_envvars():
"""Variables which are taken from the environment and placed in the metadata"""
v = [
'BBPATH',
'BB_PRESERVE_ENV',
'BB_ENV_WHITELIST',
'BB_ENV_EXTRAWHITE',
]
return v + preserved_envvars_exported()
def filter_environment(good_vars):
"""
Create a pristine environment for bitbake. This will remove variables that
are not known and may influence the build in a negative way.
"""
removed_vars = {}
for key in os.environ.keys():
if key in good_vars:
continue
removed_vars[key] = os.environ[key]
os.unsetenv(key)
del os.environ[key]
if len(removed_vars):
logger.debug(1, "Removed the following variables from the environment: %s", ", ".join(removed_vars.keys()))
return removed_vars
def approved_variables():
"""
Determine and return the list of whitelisted variables which are approved
to remain in the envrionment.
"""
if 'BB_PRESERVE_ENV' in os.environ:
return os.environ.keys()
approved = []
if 'BB_ENV_WHITELIST' in os.environ:
approved = os.environ['BB_ENV_WHITELIST'].split()
approved.extend(['BB_ENV_WHITELIST'])
else:
approved = preserved_envvars()
if 'BB_ENV_EXTRAWHITE' in os.environ:
approved.extend(os.environ['BB_ENV_EXTRAWHITE'].split())
if 'BB_ENV_EXTRAWHITE' not in approved:
approved.extend(['BB_ENV_EXTRAWHITE'])
return approved
def clean_environment():
"""
Clean up any spurious environment variables. This will remove any
variables the user hasn't chosen to preserve.
"""
if 'BB_PRESERVE_ENV' not in os.environ:
good_vars = approved_variables()
return filter_environment(good_vars)
return {}
def empty_environment():
"""
Remove all variables from the environment.
"""
for s in os.environ.keys():
os.unsetenv(s)
del os.environ[s]
def build_environment(d):
"""
Build an environment from all exported variables.
"""
import bb.data
for var in bb.data.keys(d):
export = d.getVarFlag(var, "export")
if export:
os.environ[var] = d.getVar(var, True) or ""
def remove(path, recurse=False):
"""Equivalent to rm -f or rm -rf"""
if not path:
return
if recurse:
# shutil.rmtree(name) would be ideal but its too slow
subprocess.call(['rm', '-rf'] + glob.glob(path))
return
for name in glob.glob(path):
try:
os.unlink(name)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
#
# Could also use return re.compile("(%s)" % "|".join(map(re.escape, suffixes))).sub(lambda mo: "", var)
# but thats possibly insane and suffixes is probably going to be small
#
def mkdirhier(directory):
    """Create a directory like 'mkdir -p', but does not complain if
    directory already exists like os.makedirs
    """
    try:
        os.makedirs(directory)
    except OSError as exc:
        # The directory already being present is not an error.
        if exc.errno == errno.EEXIST:
            return
        raise exc
def movefile(src, dest, newmtime = None, sstat = None):
    """Moves a file from src to dest, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems. Move is atomic.

    Returns a true value on success (the resulting mtime, or the lstat
    result when a symlink was moved) and None on failure. Callers may pass
    a pre-computed os.lstat(src) result in sstat to avoid a second stat.
    """
    #print "movefile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        print("movefile: Stating source file failed...", e)
        return None
    destexists = 1
    try:
        dstat = os.lstat(dest)
    except:
        # Destination doesn't exist; stat its parent directory instead so
        # the same-device check below still has something to compare.
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0
    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            # Remove an existing symlink at the destination so we don't
            # write through it.
            try:
                os.unlink(dest)
                destexists = 0
            except Exception as e:
                pass
    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        # Source is a symlink: recreate the link at dest rather than
        # copying the link target's contents, then remove the source.
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            os.unlink(src)
            return os.lstat(dest)
        except Exception as e:
            print("movefile: failed to properly create symlink:", dest, "->", target, e)
            return None
    renamefailed = 1
    if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
        # Same filesystem: a plain rename is atomic and cheap.
        try:
            os.rename(src, dest)
            renamefailed = 0
        except Exception as e:
            # NOTE: e[0] errno indexing is Python 2 EnvironmentError behavior.
            if e[0] != errno.EXDEV:
                # Some random error.
                print("movefile: Failed to move", src, "to", dest, e)
                return None
            # Invalid cross-device-link 'bind' mounted or actually Cross-Device
    if renamefailed:
        # Cross-device (or bind-mount) fallback: copy to a temporary name
        # next to dest, then rename into place so the final step is atomic.
        didcopy = 0
        if stat.S_ISREG(sstat[stat.ST_MODE]):
            try: # For safety copy then move it over.
                shutil.copyfile(src, dest + "#new")
                os.rename(dest + "#new", dest)
                didcopy = 1
            except Exception as e:
                print('movefile: copy', src, '->', dest, 'failed.', e)
                return None
        else:
            #we don't yet handle special, so we need to fall back to /bin/mv
            a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
            if a[0] != 0:
                print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
                return None # failure
        try:
            if didcopy:
                # Restore ownership/permissions lost by copyfile, then
                # remove the source to complete the "move".
                os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
                os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
                os.unlink(src)
        except Exception as e:
            print("movefile: Failed to chown/chmod/unlink", dest, e)
            return None
    if newmtime:
        os.utime(dest, (newmtime, newmtime))
    else:
        # Preserve the source's original atime/mtime on the destination.
        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime
def copyfile(src, dest, newmtime = None, sstat = None):
    """
    Copies a file from src to dest, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems.

    Returns a true value on success (the resulting mtime, or the lstat
    result when a symlink was copied) and False on failure. Callers may
    pass a pre-computed os.lstat(src) result in sstat to avoid a second
    stat.
    """
    #print "copyfile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        logger.warn("copyfile: stat of %s failed (%s)" % (src, e))
        return False
    destexists = 1
    try:
        dstat = os.lstat(dest)
    except:
        # Destination doesn't exist; stat its parent directory instead.
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0
    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            # Remove an existing symlink at the destination so we don't
            # write through it.
            try:
                os.unlink(dest)
                destexists = 0
            except Exception as e:
                pass
    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        # Source is a symlink: recreate the link at dest rather than
        # copying the link target's contents.
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            return os.lstat(dest)
        except Exception as e:
            logger.warn("copyfile: failed to create symlink %s to %s (%s)" % (dest, target, e))
            return False
    if stat.S_ISREG(sstat[stat.ST_MODE]):
        try:
            srcchown = False
            if not os.access(src, os.R_OK):
                # Make sure we can read it
                srcchown = True
                os.chmod(src, sstat[stat.ST_MODE] | stat.S_IRUSR)
            # For safety copy then move it over.
            shutil.copyfile(src, dest + "#new")
            os.rename(dest + "#new", dest)
        except Exception as e:
            logger.warn("copyfile: copy %s to %s failed (%s)" % (src, dest, e))
            return False
        finally:
            # Undo the temporary read-permission chmod and restore the
            # source's timestamps, whether or not the copy succeeded.
            if srcchown:
                os.chmod(src, sstat[stat.ST_MODE])
                os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
    else:
        #we don't yet handle special, so we need to fall back to /bin/mv
        a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'")
        if a[0] != 0:
            logger.warn("copyfile: failed to copy special file %s to %s (%s)" % (src, dest, a))
            return False # failure
    try:
        # Restore ownership/permissions lost by the copy (runs for both the
        # regular-file and the /bin/cp fallback paths).
        os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
        os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
    except Exception as e:
        logger.warn("copyfile: failed to chown/chmod %s (%s)" % (dest, e))
        return False
    if newmtime:
        os.utime(dest, (newmtime, newmtime))
    else:
        # Preserve the source's original atime/mtime on the destination.
        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime
def which(path, item, direction = 0, history = False):
    """
    Locate a file in a PATH

    Searches each colon-separated entry of path for item, back to front
    when direction is non-zero. Returns the first match (made absolute if
    it was relative), or "" when nothing is found; with history=True also
    returns the list of candidate paths that were tried.
    """
    attempted = []
    entries = (path or "").split(':')
    if direction != 0:
        entries.reverse()
    for entry in entries:
        candidate = os.path.join(entry, item)
        attempted.append(candidate)
        if os.path.exists(candidate):
            if not os.path.isabs(candidate):
                candidate = os.path.abspath(candidate)
            return (candidate, attempted) if history else candidate
    return ("", attempted) if history else ""
#
# Was present to work around multiprocessing pool bugs in python < 2.7.3
#
| 30.536974 | 181 | 0.558006 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake Utility Functions
"""
# Copyright (C) 2004 Michael Lauer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re, fcntl, os, string, stat, shutil, time
import sys
import errno
import logging
import bb
import bb.msg
import multiprocessing
import fcntl
import subprocess
import glob
import traceback
import errno
from commands import getstatusoutput
from contextlib import contextmanager
logger = logging.getLogger("BitBake.Util")
def clean_context():
return {
"os": os,
"bb": bb,
"time": time,
}
def get_context():
return _context
def set_context(ctx):
_context = ctx
# Context used in better_exec, eval
_context = clean_context()
def explode_version(s):
r = []
alpha_regexp = re.compile('^([a-zA-Z]+)(.*)$')
numeric_regexp = re.compile('^(\d+)(.*)$')
while (s != ''):
if s[0] in string.digits:
m = numeric_regexp.match(s)
r.append((0, int(m.group(1))))
s = m.group(2)
continue
if s[0] in string.letters:
m = alpha_regexp.match(s)
r.append((1, m.group(1)))
s = m.group(2)
continue
if s[0] == '~':
r.append((-1, s[0]))
else:
r.append((2, s[0]))
s = s[1:]
return r
def split_version(s):
"""Split a version string into its constituent parts (PE, PV, PR)"""
s = s.strip(" <>=")
e = 0
if s.count(':'):
e = int(s.split(":")[0])
s = s.split(":")[1]
r = ""
if s.count('-'):
r = s.rsplit("-", 1)[1]
s = s.rsplit("-", 1)[0]
v = s
return (e, v, r)
def vercmp_part(a, b):
va = explode_version(a)
vb = explode_version(b)
while True:
if va == []:
(oa, ca) = (0, None)
else:
(oa, ca) = va.pop(0)
if vb == []:
(ob, cb) = (0, None)
else:
(ob, cb) = vb.pop(0)
if (oa, ca) == (0, None) and (ob, cb) == (0, None):
return 0
if oa < ob:
return -1
elif oa > ob:
return 1
elif ca < cb:
return -1
elif ca > cb:
return 1
def vercmp(ta, tb):
(ea, va, ra) = ta
(eb, vb, rb) = tb
r = int(ea or 0) - int(eb or 0)
if (r == 0):
r = vercmp_part(va, vb)
if (r == 0):
r = vercmp_part(ra, rb)
return r
def vercmp_string(a, b):
ta = split_version(a)
tb = split_version(b)
return vercmp(ta, tb)
def explode_deps(s):
"""
Take an RDEPENDS style string of format:
"DEPEND1 (optional version) DEPEND2 (optional version) ..."
and return a list of dependencies.
Version information is ignored.
"""
r = []
l = s.split()
flag = False
for i in l:
if i[0] == '(':
flag = True
#j = []
if not flag:
r.append(i)
#else:
# j.append(i)
if flag and i.endswith(')'):
flag = False
# Ignore version
#r[-1] += ' ' + ' '.join(j)
return r
def explode_dep_versions2(s):
"""
Take an RDEPENDS style string of format:
"DEPEND1 (optional version) DEPEND2 (optional version) ..."
and return a dictionary of dependencies and versions.
"""
r = {}
l = s.replace(",", "").split()
lastdep = None
lastcmp = ""
lastver = ""
incmp = False
inversion = False
for i in l:
if i[0] == '(':
incmp = True
i = i[1:].strip()
if not i:
continue
if incmp:
incmp = False
inversion = True
# This list is based on behavior and supported comparisons from deb, opkg and rpm.
#
# Even though =<, <<, ==, !=, =>, and >> may not be supported,
# we list each possibly valid item.
# The build system is responsible for validation of what it supports.
if i.startswith(('<=', '=<', '<<', '==', '!=', '>=', '=>', '>>')):
lastcmp = i[0:2]
i = i[2:]
elif i.startswith(('<', '>', '=')):
lastcmp = i[0:1]
i = i[1:]
else:
# This is an unsupported case!
lastcmp = (i or "")
i = ""
i.strip()
if not i:
continue
if inversion:
if i.endswith(')'):
i = i[:-1] or ""
inversion = False
if lastver and i:
lastver += " "
if i:
lastver += i
if lastdep not in r:
r[lastdep] = []
r[lastdep].append(lastcmp + " " + lastver)
continue
#if not inversion:
lastdep = i
lastver = ""
lastcmp = ""
if not (i in r and r[i]):
r[lastdep] = []
return r
def explode_dep_versions(s):
r = explode_dep_versions2(s)
for d in r:
if not r[d]:
r[d] = None
continue
if len(r[d]) > 1:
bb.warn("explode_dep_versions(): Item %s appeared in dependency string '%s' multiple times with different values. explode_dep_versions cannot cope with this." % (d, s))
r[d] = r[d][0]
return r
def join_deps(deps, commasep=True):
"""
Take the result from explode_dep_versions and generate a dependency string
"""
result = []
for dep in deps:
if deps[dep]:
if isinstance(deps[dep], list):
for v in deps[dep]:
result.append(dep + " (" + v + ")")
else:
result.append(dep + " (" + deps[dep] + ")")
else:
result.append(dep)
if commasep:
return ", ".join(result)
else:
return " ".join(result)
def _print_trace(body, line):
"""
Print the Environment of a Text Body
"""
error = []
# print the environment of the method
min_line = max(1, line-4)
max_line = min(line + 4, len(body))
for i in range(min_line, max_line + 1):
if line == i:
error.append(' *** %.4d:%s' % (i, body[i-1].rstrip()))
else:
error.append(' %.4d:%s' % (i, body[i-1].rstrip()))
return error
def better_compile(text, file, realfile, mode = "exec"):
"""
A better compile method. This method
will print the offending lines.
"""
try:
return compile(text, file, mode)
except Exception as e:
error = []
# split the text into lines again
body = text.split('\n')
error.append("Error in compiling python function in %s:\n" % realfile)
if e.lineno:
error.append("The code lines resulting in this error were:")
error.extend(_print_trace(body, e.lineno))
else:
error.append("The function causing this error was:")
for line in body:
error.append(line)
error.append("%s: %s" % (e.__class__.__name__, str(e)))
logger.error("\n".join(error))
e = bb.BBHandledException(e)
raise e
def _print_exception(t, value, tb, realfile, text, context):
error = []
try:
exception = traceback.format_exception_only(t, value)
error.append('Error executing a python function in %s:\n' % realfile)
# Strip 'us' from the stack (better_exec call)
tb = tb.tb_next
textarray = text.split('\n')
linefailed = tb.tb_lineno
tbextract = traceback.extract_tb(tb)
tbformat = traceback.format_list(tbextract)
error.append("The stack trace of python calls that resulted in this exception/failure was:")
error.append("File: '%s', lineno: %s, function: %s" % (tbextract[0][0], tbextract[0][1], tbextract[0][2]))
error.extend(_print_trace(textarray, linefailed))
# See if this is a function we constructed and has calls back into other functions in
# "text". If so, try and improve the context of the error by diving down the trace
level = 0
nexttb = tb.tb_next
while nexttb is not None and (level+1) < len(tbextract):
error.append("File: '%s', lineno: %s, function: %s" % (tbextract[level+1][0], tbextract[level+1][1], tbextract[level+1][2]))
if tbextract[level][0] == tbextract[level+1][0] and tbextract[level+1][2] == tbextract[level][0]:
# The code was possibly in the string we compiled ourselves
error.extend(_print_trace(textarray, tbextract[level+1][1]))
elif tbextract[level+1][0].startswith("/"):
# The code looks like it might be in a file, try and load it
try:
with open(tbextract[level+1][0], "r") as f:
text = f.readlines()
error.extend(_print_trace(text, tbextract[level+1][1]))
except:
error.append(tbformat[level+1])
elif "d" in context and tbextract[level+1][2]:
# Try and find the code in the datastore based on the functionname
d = context["d"]
functionname = tbextract[level+1][2]
text = d.getVar(functionname, True)
if text:
error.extend(_print_trace(text.split('\n'), tbextract[level+1][1]))
else:
error.append(tbformat[level+1])
else:
error.append(tbformat[level+1])
nexttb = tb.tb_next
level = level + 1
error.append("Exception: %s" % ''.join(exception))
finally:
logger.error("\n".join(error))
def better_exec(code, context, text = None, realfile = "<code>"):
"""
Similiar to better_compile, better_exec will
print the lines that are responsible for the
error.
"""
import bb.parse
if not text:
text = code
if not hasattr(code, "co_filename"):
code = better_compile(code, realfile, realfile)
try:
exec(code, get_context(), context)
except bb.BBHandledException:
# Error already shown so passthrough
raise
except Exception as e:
(t, value, tb) = sys.exc_info()
if t in [bb.parse.SkipPackage, bb.build.FuncFailed]:
raise
try:
_print_exception(t, value, tb, realfile, text, context)
except Exception as e:
logger.error("Exception handler error: %s" % str(e))
e = bb.BBHandledException(e)
raise e
def simple_exec(code, context):
exec(code, get_context(), context)
def better_eval(source, locals):
return eval(source, get_context(), locals)
@contextmanager
def fileslocked(files):
"""Context manager for locking and unlocking file locks."""
locks = []
if files:
for lockfile in files:
locks.append(bb.utils.lockfile(lockfile))
yield
for lock in locks:
bb.utils.unlockfile(lock)
def lockfile(name, shared=False, retry=True):
"""
Use the file fn as a lock file, return when the lock has been acquired.
Returns a variable to pass to unlockfile().
"""
dirname = os.path.dirname(name)
mkdirhier(dirname)
if not os.access(dirname, os.W_OK):
logger.error("Unable to acquire lock '%s', directory is not writable",
name)
sys.exit(1)
op = fcntl.LOCK_EX
if shared:
op = fcntl.LOCK_SH
if not retry:
op = op | fcntl.LOCK_NB
while True:
# If we leave the lockfiles lying around there is no problem
# but we should clean up after ourselves. This gives potential
# for races though. To work around this, when we acquire the lock
# we check the file we locked was still the lock file on disk.
# by comparing inode numbers. If they don't match or the lockfile
# no longer exists, we start again.
# This implementation is unfair since the last person to request the
# lock is the most likely to win it.
try:
lf = open(name, 'a+')
fileno = lf.fileno()
fcntl.flock(fileno, op)
statinfo = os.fstat(fileno)
if os.path.exists(lf.name):
statinfo2 = os.stat(lf.name)
if statinfo.st_ino == statinfo2.st_ino:
return lf
lf.close()
except Exception:
try:
lf.close()
except Exception:
pass
pass
if not retry:
return None
def unlockfile(lf):
"""
Unlock a file locked using lockfile()
"""
try:
# If we had a shared lock, we need to promote to exclusive before
# removing the lockfile. Attempt this, ignore failures.
fcntl.flock(lf.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
os.unlink(lf.name)
except (IOError, OSError):
pass
fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
lf.close()
def md5_file(filename):
"""
Return the hex string representation of the MD5 checksum of filename.
"""
try:
import hashlib
m = hashlib.md5()
except ImportError:
import md5
m = md5.new()
with open(filename, "rb") as f:
for line in f:
m.update(line)
return m.hexdigest()
def sha256_file(filename):
"""
Return the hex string representation of the 256-bit SHA checksum of
filename. On Python 2.4 this will return None, so callers will need to
handle that by either skipping SHA checks, or running a standalone sha256sum
binary.
"""
try:
import hashlib
except ImportError:
return None
s = hashlib.sha256()
with open(filename, "rb") as f:
for line in f:
s.update(line)
return s.hexdigest()
def preserved_envvars_exported():
"""Variables which are taken from the environment and placed in and exported
from the metadata"""
return [
'BB_TASKHASH',
'HOME',
'LOGNAME',
'PATH',
'PWD',
'SHELL',
'TERM',
'USER',
]
def preserved_envvars():
"""Variables which are taken from the environment and placed in the metadata"""
v = [
'BBPATH',
'BB_PRESERVE_ENV',
'BB_ENV_WHITELIST',
'BB_ENV_EXTRAWHITE',
]
return v + preserved_envvars_exported()
def filter_environment(good_vars):
"""
Create a pristine environment for bitbake. This will remove variables that
are not known and may influence the build in a negative way.
"""
removed_vars = {}
for key in os.environ.keys():
if key in good_vars:
continue
removed_vars[key] = os.environ[key]
os.unsetenv(key)
del os.environ[key]
if len(removed_vars):
logger.debug(1, "Removed the following variables from the environment: %s", ", ".join(removed_vars.keys()))
return removed_vars
def approved_variables():
"""
Determine and return the list of whitelisted variables which are approved
to remain in the envrionment.
"""
if 'BB_PRESERVE_ENV' in os.environ:
return os.environ.keys()
approved = []
if 'BB_ENV_WHITELIST' in os.environ:
approved = os.environ['BB_ENV_WHITELIST'].split()
approved.extend(['BB_ENV_WHITELIST'])
else:
approved = preserved_envvars()
if 'BB_ENV_EXTRAWHITE' in os.environ:
approved.extend(os.environ['BB_ENV_EXTRAWHITE'].split())
if 'BB_ENV_EXTRAWHITE' not in approved:
approved.extend(['BB_ENV_EXTRAWHITE'])
return approved
def clean_environment():
"""
Clean up any spurious environment variables. This will remove any
variables the user hasn't chosen to preserve.
"""
if 'BB_PRESERVE_ENV' not in os.environ:
good_vars = approved_variables()
return filter_environment(good_vars)
return {}
def empty_environment():
"""
Remove all variables from the environment.
"""
for s in os.environ.keys():
os.unsetenv(s)
del os.environ[s]
def build_environment(d):
"""
Build an environment from all exported variables.
"""
import bb.data
for var in bb.data.keys(d):
export = d.getVarFlag(var, "export")
if export:
os.environ[var] = d.getVar(var, True) or ""
def remove(path, recurse=False):
"""Equivalent to rm -f or rm -rf"""
if not path:
return
if recurse:
# shutil.rmtree(name) would be ideal but its too slow
subprocess.call(['rm', '-rf'] + glob.glob(path))
return
for name in glob.glob(path):
try:
os.unlink(name)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
def prunedir(topdir):
# Delete everything reachable from the directory named in 'topdir'.
# CAUTION: This is dangerous!
for root, dirs, files in os.walk(topdir, topdown = False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
if os.path.islink(os.path.join(root, name)):
os.remove(os.path.join(root, name))
else:
os.rmdir(os.path.join(root, name))
os.rmdir(topdir)
#
# Could also use return re.compile("(%s)" % "|".join(map(re.escape, suffixes))).sub(lambda mo: "", var)
# but thats possibly insane and suffixes is probably going to be small
#
def prune_suffix(var, suffixes, d):
# See if var ends with any of the suffixes listed and
# remove it if found
for suffix in suffixes:
if var.endswith(suffix):
return var.replace(suffix, "")
return var
def mkdirhier(directory):
"""Create a directory like 'mkdir -p', but does not complain if
directory already exists like os.makedirs
"""
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
def movefile(src, dest, newmtime = None, sstat = None):
"""Moves a file from src to dest, preserving all permissions and
attributes; mtime will be preserved even when moving across
filesystems. Returns true on success and false on failure. Move is
atomic.
"""
#print "movefile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
try:
if not sstat:
sstat = os.lstat(src)
except Exception as e:
print("movefile: Stating source file failed...", e)
return None
destexists = 1
try:
dstat = os.lstat(dest)
except:
dstat = os.lstat(os.path.dirname(dest))
destexists = 0
if destexists:
if stat.S_ISLNK(dstat[stat.ST_MODE]):
try:
os.unlink(dest)
destexists = 0
except Exception as e:
pass
if stat.S_ISLNK(sstat[stat.ST_MODE]):
try:
target = os.readlink(src)
if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
os.unlink(dest)
os.symlink(target, dest)
#os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
os.unlink(src)
return os.lstat(dest)
except Exception as e:
print("movefile: failed to properly create symlink:", dest, "->", target, e)
return None
renamefailed = 1
if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
try:
os.rename(src, dest)
renamefailed = 0
except Exception as e:
if e[0] != errno.EXDEV:
# Some random error.
print("movefile: Failed to move", src, "to", dest, e)
return None
# Invalid cross-device-link 'bind' mounted or actually Cross-Device
if renamefailed:
didcopy = 0
if stat.S_ISREG(sstat[stat.ST_MODE]):
try: # For safety copy then move it over.
shutil.copyfile(src, dest + "#new")
os.rename(dest + "#new", dest)
didcopy = 1
except Exception as e:
print('movefile: copy', src, '->', dest, 'failed.', e)
return None
else:
#we don't yet handle special, so we need to fall back to /bin/mv
a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
if a[0] != 0:
print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
return None # failure
try:
if didcopy:
os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
os.unlink(src)
except Exception as e:
print("movefile: Failed to chown/chmod/unlink", dest, e)
return None
if newmtime:
os.utime(dest, (newmtime, newmtime))
else:
os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
newmtime = sstat[stat.ST_MTIME]
return newmtime
def copyfile(src, dest, newmtime = None, sstat = None):
"""
Copies a file from src to dest, preserving all permissions and
attributes; mtime will be preserved even when moving across
filesystems. Returns true on success and false on failure.
"""
#print "copyfile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
try:
if not sstat:
sstat = os.lstat(src)
except Exception as e:
logger.warn("copyfile: stat of %s failed (%s)" % (src, e))
return False
destexists = 1
try:
dstat = os.lstat(dest)
except:
dstat = os.lstat(os.path.dirname(dest))
destexists = 0
if destexists:
if stat.S_ISLNK(dstat[stat.ST_MODE]):
try:
os.unlink(dest)
destexists = 0
except Exception as e:
pass
if stat.S_ISLNK(sstat[stat.ST_MODE]):
try:
target = os.readlink(src)
if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
os.unlink(dest)
os.symlink(target, dest)
#os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
return os.lstat(dest)
except Exception as e:
logger.warn("copyfile: failed to create symlink %s to %s (%s)" % (dest, target, e))
return False
if stat.S_ISREG(sstat[stat.ST_MODE]):
try:
srcchown = False
if not os.access(src, os.R_OK):
# Make sure we can read it
srcchown = True
os.chmod(src, sstat[stat.ST_MODE] | stat.S_IRUSR)
# For safety copy then move it over.
shutil.copyfile(src, dest + "#new")
os.rename(dest + "#new", dest)
except Exception as e:
logger.warn("copyfile: copy %s to %s failed (%s)" % (src, dest, e))
return False
finally:
if srcchown:
os.chmod(src, sstat[stat.ST_MODE])
os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
else:
#we don't yet handle special, so we need to fall back to /bin/mv
a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'")
if a[0] != 0:
logger.warn("copyfile: failed to copy special file %s to %s (%s)" % (src, dest, a))
return False # failure
try:
os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
except Exception as e:
logger.warn("copyfile: failed to chown/chmod %s (%s)" % (dest, e))
return False
if newmtime:
os.utime(dest, (newmtime, newmtime))
else:
os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
newmtime = sstat[stat.ST_MTIME]
return newmtime
def which(path, item, direction = 0, history = False):
"""
Locate a file in a PATH
"""
hist = []
paths = (path or "").split(':')
if direction != 0:
paths.reverse()
for p in paths:
next = os.path.join(p, item)
hist.append(next)
if os.path.exists(next):
if not os.path.isabs(next):
next = os.path.abspath(next)
if history:
return next, hist
return next
if history:
return "", hist
return ""
def to_boolean(string, default=None):
if not string:
return default
normalized = string.lower()
if normalized in ("y", "yes", "1", "true"):
return True
elif normalized in ("n", "no", "0", "false"):
return False
else:
raise ValueError("Invalid value for to_boolean: %s" % string)
def contains(variable, checkvalues, truevalue, falsevalue, d):
val = d.getVar(variable, True)
if not val:
return falsevalue
val = set(val.split())
if isinstance(checkvalues, basestring):
checkvalues = set(checkvalues.split())
else:
checkvalues = set(checkvalues)
if checkvalues.issubset(val):
return truevalue
return falsevalue
def cpu_count():
return multiprocessing.cpu_count()
def nonblockingfd(fd):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
def process_profilelog(fn):
# Redirect stdout to capture profile information
pout = open(fn + '.processed', 'w')
so = sys.stdout.fileno()
orig_so = os.dup(sys.stdout.fileno())
os.dup2(pout.fileno(), so)
import pstats
p = pstats.Stats(fn)
p.sort_stats('time')
p.print_stats()
p.print_callers()
p.sort_stats('cumulative')
p.print_stats()
os.dup2(orig_so, so)
pout.flush()
pout.close()
#
# Was present to work around multiprocessing pool bugs in python < 2.7.3
#
def multiprocessingpool(*args, **kwargs):
return multiprocessing.Pool(*args, **kwargs)
| 6,501 | 0 | 437 |
c5651ec77827e83298c54cb4e4debd5496125923 | 12,198 | py | Python | processing/process_i3.py | jrbourbeau/cr-composition | e9efb4b713492aaf544b5dd8bb67280d4f108056 | [
"MIT"
] | null | null | null | processing/process_i3.py | jrbourbeau/cr-composition | e9efb4b713492aaf544b5dd8bb67280d4f108056 | [
"MIT"
] | 7 | 2017-08-29T16:20:04.000Z | 2018-06-12T16:58:36.000Z | processing/process_i3.py | jrbourbeau/cr-composition | e9efb4b713492aaf544b5dd8bb67280d4f108056 | [
"MIT"
] | 1 | 2018-04-03T20:56:40.000Z | 2018-04-03T20:56:40.000Z | #!/usr/bin/env python
import time
import argparse
import os
import socket
import math
import numpy as np
from icecube import (dataio, tableio, astro, toprec, dataclasses, icetray,
phys_services, stochastics, millipede, ddddr)
from icecube.frame_object_diff.segments import uncompress
from I3Tray import *
from icecube.tableio import I3TableWriter
from icecube.hdfwriter import I3HDFTableService
from icecube.icetop_Level3_scripts.functions import count_stations
from icecube import icetop_Level3_scripts, stochastics, dataclasses, millipede, photonics_service, ddddr, STTools
from icecube.icetop_Level3_scripts.segments import EnergylossReco
import comptools as comp
import icetray_software
def validate_i3_files(files):
""" Checks that input i3 files aren't corrupted
Parameters
----------
files : array-like
Iterable of i3 file paths to check.
Returns
-------
good_file_list : list
List of i3 files (from input files) that were able to be
succeessfully loaded.
"""
if isinstance(files, str):
files = [files]
good_file_list = []
for i3file in files:
try:
test_tray = I3Tray()
test_tray.Add('I3Reader', FileName=i3file)
test_tray.Add(uncompress, 'uncompress')
test_tray.Execute()
test_tray.Finish()
good_file_list.append(i3file)
except RuntimeError:
icetray.logging.log_warn('File {} is truncated'.format(i3file))
finally:
del test_tray
return good_file_list
def check_keys(frame, *keys):
""" Function to check if all keys are in frame
Parameters
----------
frame : I3Frame
I3Frame
keys:
Series of keys to look for in frame
Returns
-------
boolean
Whether or not all the keys in keys are in frame
"""
return all([key in frame for key in keys])
def delete_keys(frame, keys):
""" Deletes existing keys in an I3Frame
Parameters
----------
frame : I3Frame
I3Frame
keys:
Iterable of keys to delete
"""
if isinstance(keys, str):
keys = [keys]
for key in keys:
if key in frame:
frame.Delete(key)
if __name__ == '__main__':
description='Runs extra modules over a given fileList'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-f', '--files',
dest='files',
nargs='*',
help='Files to run over')
parser.add_argument('--type',
dest='type',
choices=['data', 'sim'],
default='sim',
help='Option to process simulation or data')
parser.add_argument('--sim',
dest='sim',
help='Simulation dataset')
parser.add_argument('--snow_lambda',
dest='snow_lambda',
type=float,
help='Snow lambda to use with Laputop reconstruction')
parser.add_argument('--dom_eff',
dest='dom_eff',
type=float,
help='DOM efficiency to use with Millipede reconstruction')
parser.add_argument('-o', '--outfile',
dest='outfile',
help='Output file')
args = parser.parse_args()
# Starting parameters
IT_pulses, inice_pulses = comp.datafunctions.reco_pulses()
# Keys to write to frame
keys = []
if args.type == 'sim':
keys += ['MCPrimary']
keys += ['FractionContainment_MCPrimary_IceTop',
'FractionContainment_MCPrimary_InIce']
keys += ['tanks_charge_Laputop', 'tanks_dist_Laputop']
# Keys read directly from level3 processed i3 files
keys += ['I3EventHeader']
keys += ['IceTopMaxSignal', 'IceTopMaxSignalString',
'IceTopMaxSignalInEdge', 'IceTopNeighbourMaxSignal',
'StationDensity']
keys += ['Laputop', 'LaputopParams']
keys += ['Stoch_Reco', 'Stoch_Reco2', 'MillipedeFitParams']
keys += ['I3MuonEnergyLaputopParams']
# Keys that are added to the frame
keys += ['NStations']
keys += ['avg_inice_radius', 'std_inice_radius', 'median_inice_radius',
'frac_outside_one_std_inice_radius',
'frac_outside_two_std_inice_radius']
dom_numbers = [1, 15, 30, 45, 60]
for min_DOM, max_DOM in zip(dom_numbers[:-1], dom_numbers[1:]):
key = '{}_{}'.format(min_DOM, max_DOM)
keys += ['NChannels_'+key,
'NHits_'+key,
'InIce_charge_'+key,
'max_qfrac_'+key,
]
key = '1_60'
keys += ['NChannels_'+key,
'NHits_'+key,
'InIce_charge_'+key,
'max_qfrac_'+key,
]
keys += ['FractionContainment_Laputop_IceTop',
'FractionContainment_Laputop_InIce']
keys += ['lap_fitstatus_ok']
keys += ['passed_IceTopQualityCuts', 'passed_InIceQualityCuts']
keys += ['angle_MCPrimary_Laputop']
t0 = time.time()
icetray.set_log_level(icetray.I3LogLevel.LOG_WARN)
comp.check_output_dir(args.outfile)
with comp.localized(inputs=args.files, output=args.outfile) as (inputs, output):
# Construct list of non-truncated files to process
good_file_list = validate_i3_files(inputs)
tray = I3Tray()
tray.Add('I3Reader', FileNameList=good_file_list)
# Uncompress Level3 diff files
tray.Add(uncompress, 'uncompress')
if args.snow_lambda is not None:
# Re-run Laputop reconstruction with specified snow correction lambda value
tray = icetray_software.rerun_reconstructions_snow_lambda(tray,
snow_lambda=args.snow_lambda)
if args.dom_eff is not None:
delete_keys = ['Millipede',
'MillipedeFitParams',
'Stoch_Reco',
'Stoch_Reco2',
'Millipede_dEdX',
'I3MuonEnergyLaputopParams',
'I3MuonEnergyLaputopCascadeParams',
'IT73AnalysisInIceQualityCuts',
]
tray.Add('Delete', keys=delete_keys)
from icecube.icetop_Level3_scripts import icetop_globals
# from icecube.icetop_Level3_scripts.segments import muonReconstructions
from icecube.icetop_Level3_scripts.modules import MakeQualityCuts
name = 'reco'
spline_dir="/data/sim/sim-new/downloads/spline-tables/"
inice_clean_coinc_pulses = icetop_globals.inice_clean_coinc_pulses
tray.AddSegment(EnergylossReco,
name+'_ElossReco',
InIcePulses=inice_clean_coinc_pulses,
dom_eff=args.dom_eff,
splinedir=spline_dir,
IceTopTrack='Laputop',
If=lambda fr: "NCh_"+inice_clean_coinc_pulses in fr and fr['NCh_' + inice_clean_coinc_pulses].value
)
# Collect in IT73AnalysisInIceQualityCuts
CutOrder = ["NCh_"+inice_clean_coinc_pulses,
"MilliRlogl",
"MilliQtot",
"MilliNCasc",
"StochReco"]
CutsToEvaluate={"NCh_"+inice_clean_coinc_pulses:(lambda fr: fr["NCh_"+inice_clean_coinc_pulses].value),
"MilliRlogl":(lambda fr: "MillipedeFitParams" in fr and math.log10(fr["MillipedeFitParams"].rlogl)<2),
"MilliQtot": (lambda fr: "MillipedeFitParams" in fr and math.log10(fr["MillipedeFitParams"].predicted_qtotal/fr["MillipedeFitParams"].qtotal)>-0.03),
"MilliNCasc": (lambda fr: "Millipede_dEdX" in fr and len([part for part in fr["Millipede_dEdX"] if part.energy > 0]) >= 3),
"StochReco": (lambda fr: "Stoch_Reco" in fr and fr["Stoch_Reco"].status == dataclasses.I3Particle.OK)}
CutsNames={"NCh_"+inice_clean_coinc_pulses:"NCh_"+inice_clean_coinc_pulses+"Above7",
"MilliRlogl":"MilliRloglBelow2",
"MilliQtot":"MilliQtotRatio",
"MilliNCasc":"MilliNCascAbove2",
"StochReco":"StochRecoSucceeded"}
tray.AddModule(MakeQualityCuts,
name+'_DoInIceCuts',
RemoveEvents=False,
CutOrder=CutOrder,
CutsToEvaluate=CutsToEvaluate,
CutsNames=CutsNames,
CollectBools="IT73AnalysisInIceQualityCuts"
)
if args.type == 'data':
# Filter out all events that don't pass standard IceTop cuts
tray.Add(lambda frame: all(frame['IT73AnalysisIceTopQualityCuts'].values()))
# Filter out non-coincident P frames
tray.Add(lambda frame: inice_pulses in frame)
tray.Add(icetray_software.add_IceTop_quality_cuts,
If=lambda frame: 'IT73AnalysisIceTopQualityCuts' in frame)
tray.Add(icetray_software.add_InIce_quality_cuts,
If=lambda frame: 'IT73AnalysisInIceQualityCuts' in frame)
tray.Add(icetray_software.add_nstations, pulses=IT_pulses,
If=lambda frame: IT_pulses in frame)
# Add total inice charge to frame
for min_DOM, max_DOM in zip(dom_numbers[:-1], dom_numbers[1:]):
tray.Add(icetray_software.AddInIceCharge,
pulses=inice_pulses,
min_DOM=min_DOM,
max_DOM=max_DOM,
If=lambda frame: 'I3Geometry' in frame and inice_pulses in frame)
tray.Add(icetray_software.AddInIceCharge,
pulses=inice_pulses,
min_DOM=1,
max_DOM=60,
If=lambda frame: 'I3Geometry' in frame and inice_pulses in frame)
# Add InIce muon radius to frame
tray.Add(icetray_software.AddInIceMuonRadius,
track='Laputop',
pulses='CoincLaputopCleanedPulses',
min_DOM=1,
max_DOM=60,
If=lambda frame: check_keys(frame, 'I3Geometry', 'Laputop', 'CoincLaputopCleanedPulses') )
# Add fraction containment to frame
tray.Add(icetray_software.add_fraction_containment, track='Laputop',
If=lambda frame: check_keys(frame, 'I3Geometry', 'Laputop') )
# if args.type == 'sim':
tray.Add(icetray_software.add_fraction_containment, track='MCPrimary',
If=lambda frame: check_keys(frame, 'I3Geometry', 'MCPrimary') )
# Add Laputop fitstatus ok boolean to frame
tray.Add(icetray_software.lap_fitstatus_ok,
If=lambda frame: 'Laputop' in frame)
# Add opening angle between Laputop and MCPrimary for angular resolution calculation
tray.Add(icetray_software.add_opening_angle,
particle1='MCPrimary', particle2='Laputop',
key='angle_MCPrimary_Laputop',
If=lambda frame: 'MCPrimary' in frame and 'Laputop' in frame)
#====================================================================
# Finish
hdf = I3HDFTableService(output)
keys = {key: tableio.default for key in keys}
if args.type == 'data':
keys['Laputop'] = [dataclasses.converters.I3ParticleConverter(),
astro.converters.I3AstroConverter()]
tray.Add(I3TableWriter,
tableservice=hdf,
keys=keys,
SubEventStreams=['ice_top'])
tray.Execute()
tray.Finish()
print('Time taken: {}'.format(time.time() - t0))
| 38.847134 | 177 | 0.573537 | #!/usr/bin/env python
import time
import argparse
import os
import socket
import math
import numpy as np
from icecube import (dataio, tableio, astro, toprec, dataclasses, icetray,
phys_services, stochastics, millipede, ddddr)
from icecube.frame_object_diff.segments import uncompress
from I3Tray import *
from icecube.tableio import I3TableWriter
from icecube.hdfwriter import I3HDFTableService
from icecube.icetop_Level3_scripts.functions import count_stations
from icecube import icetop_Level3_scripts, stochastics, dataclasses, millipede, photonics_service, ddddr, STTools
from icecube.icetop_Level3_scripts.segments import EnergylossReco
import comptools as comp
import icetray_software
def validate_i3_files(files):
    """Check that input i3 files aren't corrupted.

    Each candidate file is read once through a throwaway I3Tray; files that
    raise a RuntimeError (IceTray's signal for a truncated/corrupt file)
    are dropped with a warning.

    Parameters
    ----------
    files : str or iterable of str
        i3 file path(s) to check. A single path is accepted as well.

    Returns
    -------
    good_file_list : list
        List of i3 files (from input files) that were able to be
        successfully loaded.
    """
    if isinstance(files, str):
        files = [files]
    good_file_list = []
    for i3file in files:
        # Bind before the try block so the `del` in `finally` cannot raise
        # UnboundLocalError when I3Tray() itself fails to construct (the
        # original bound test_tray only inside the try).
        test_tray = None
        try:
            test_tray = I3Tray()
            test_tray.Add('I3Reader', FileName=i3file)
            test_tray.Add(uncompress, 'uncompress')
            test_tray.Execute()
            test_tray.Finish()
            good_file_list.append(i3file)
        except RuntimeError:
            icetray.logging.log_warn('File {} is truncated'.format(i3file))
        finally:
            del test_tray
    return good_file_list
def check_keys(frame, *keys):
    """Return True if every key in *keys* is present in *frame*.

    Parameters
    ----------
    frame : I3Frame (or any object supporting ``in``)
        Frame to look the keys up in.
    keys :
        Series of keys to look for in frame.

    Returns
    -------
    bool
        Whether or not all the keys are in frame.
    """
    # Generator (not a list) lets all() short-circuit on the first
    # missing key instead of testing every key up front.
    return all(key in frame for key in keys)
def delete_keys(frame, keys):
    """Remove the given keys from an I3Frame, silently skipping absent ones.

    Parameters
    ----------
    frame : I3Frame
        Frame to delete keys from.
    keys : str or iterable of str
        Key name(s) to remove. A bare string is treated as a single key.
    """
    key_list = [keys] if isinstance(keys, str) else keys
    for name in key_list:
        if name not in frame:
            continue
        frame.Delete(name)
if __name__ == '__main__':
    # Command-line entry point: parse options, assemble the list of frame
    # keys to export, then run the IceTray processing chain and write the
    # selected keys out to an HDF5 table.
    description='Runs extra modules over a given fileList'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-f', '--files',
                        dest='files',
                        nargs='*',
                        help='Files to run over')
    parser.add_argument('--type',
                        dest='type',
                        choices=['data', 'sim'],
                        default='sim',
                        help='Option to process simulation or data')
    parser.add_argument('--sim',
                        dest='sim',
                        help='Simulation dataset')
    parser.add_argument('--snow_lambda',
                        dest='snow_lambda',
                        type=float,
                        help='Snow lambda to use with Laputop reconstruction')
    parser.add_argument('--dom_eff',
                        dest='dom_eff',
                        type=float,
                        help='DOM efficiency to use with Millipede reconstruction')
    parser.add_argument('-o', '--outfile',
                        dest='outfile',
                        help='Output file')
    args = parser.parse_args()
    # Starting parameters
    IT_pulses, inice_pulses = comp.datafunctions.reco_pulses()
    # Keys to write to frame
    keys = []
    if args.type == 'sim':
        # Simulation-only truth/containment keys.
        keys += ['MCPrimary']
        keys += ['FractionContainment_MCPrimary_IceTop',
                 'FractionContainment_MCPrimary_InIce']
        keys += ['tanks_charge_Laputop', 'tanks_dist_Laputop']
    # Keys read directly from level3 processed i3 files
    keys += ['I3EventHeader']
    keys += ['IceTopMaxSignal', 'IceTopMaxSignalString',
             'IceTopMaxSignalInEdge', 'IceTopNeighbourMaxSignal',
             'StationDensity']
    keys += ['Laputop', 'LaputopParams']
    keys += ['Stoch_Reco', 'Stoch_Reco2', 'MillipedeFitParams']
    keys += ['I3MuonEnergyLaputopParams']
    # Keys that are added to the frame
    keys += ['NStations']
    keys += ['avg_inice_radius', 'std_inice_radius', 'median_inice_radius',
             'frac_outside_one_std_inice_radius',
             'frac_outside_two_std_inice_radius']
    # Per-DOM-range charge keys: [1,15), [15,30), [30,45), [45,60), plus 1-60.
    dom_numbers = [1, 15, 30, 45, 60]
    for min_DOM, max_DOM in zip(dom_numbers[:-1], dom_numbers[1:]):
        key = '{}_{}'.format(min_DOM, max_DOM)
        keys += ['NChannels_'+key,
                 'NHits_'+key,
                 'InIce_charge_'+key,
                 'max_qfrac_'+key,
                 ]
    key = '1_60'
    keys += ['NChannels_'+key,
             'NHits_'+key,
             'InIce_charge_'+key,
             'max_qfrac_'+key,
             ]
    keys += ['FractionContainment_Laputop_IceTop',
             'FractionContainment_Laputop_InIce']
    keys += ['lap_fitstatus_ok']
    keys += ['passed_IceTopQualityCuts', 'passed_InIceQualityCuts']
    keys += ['angle_MCPrimary_Laputop']
    t0 = time.time()
    icetray.set_log_level(icetray.I3LogLevel.LOG_WARN)
    comp.check_output_dir(args.outfile)
    # Stage inputs/outputs locally, then build and execute the IceTray chain.
    with comp.localized(inputs=args.files, output=args.outfile) as (inputs, output):
        # Construct list of non-truncated files to process
        good_file_list = validate_i3_files(inputs)
        tray = I3Tray()
        tray.Add('I3Reader', FileNameList=good_file_list)
        # Uncompress Level3 diff files
        tray.Add(uncompress, 'uncompress')
        if args.snow_lambda is not None:
            # Re-run Laputop reconstruction with specified snow correction lambda value
            tray = icetray_software.rerun_reconstructions_snow_lambda(tray,
                                                                      snow_lambda=args.snow_lambda)
        if args.dom_eff is not None:
            # Re-run the Millipede/muon-energy chain with a custom DOM
            # efficiency: first drop the stale reconstruction outputs.
            # NOTE(review): this local list shadows the module-level
            # delete_keys() helper defined above; consider renaming one.
            delete_keys = ['Millipede',
                           'MillipedeFitParams',
                           'Stoch_Reco',
                           'Stoch_Reco2',
                           'Millipede_dEdX',
                           'I3MuonEnergyLaputopParams',
                           'I3MuonEnergyLaputopCascadeParams',
                           'IT73AnalysisInIceQualityCuts',
                           ]
            tray.Add('Delete', keys=delete_keys)
            from icecube.icetop_Level3_scripts import icetop_globals
            # from icecube.icetop_Level3_scripts.segments import muonReconstructions
            from icecube.icetop_Level3_scripts.modules import MakeQualityCuts
            name = 'reco'
            spline_dir="/data/sim/sim-new/downloads/spline-tables/"
            inice_clean_coinc_pulses = icetop_globals.inice_clean_coinc_pulses
            tray.AddSegment(EnergylossReco,
                            name+'_ElossReco',
                            InIcePulses=inice_clean_coinc_pulses,
                            dom_eff=args.dom_eff,
                            splinedir=spline_dir,
                            IceTopTrack='Laputop',
                            If=lambda fr: "NCh_"+inice_clean_coinc_pulses in fr and fr['NCh_' + inice_clean_coinc_pulses].value
                            )
            # Collect in IT73AnalysisInIceQualityCuts
            CutOrder = ["NCh_"+inice_clean_coinc_pulses,
                        "MilliRlogl",
                        "MilliQtot",
                        "MilliNCasc",
                        "StochReco"]
            CutsToEvaluate={"NCh_"+inice_clean_coinc_pulses:(lambda fr: fr["NCh_"+inice_clean_coinc_pulses].value),
                            "MilliRlogl":(lambda fr: "MillipedeFitParams" in fr and math.log10(fr["MillipedeFitParams"].rlogl)<2),
                            "MilliQtot": (lambda fr: "MillipedeFitParams" in fr and math.log10(fr["MillipedeFitParams"].predicted_qtotal/fr["MillipedeFitParams"].qtotal)>-0.03),
                            "MilliNCasc": (lambda fr: "Millipede_dEdX" in fr and len([part for part in fr["Millipede_dEdX"] if part.energy > 0]) >= 3),
                            "StochReco": (lambda fr: "Stoch_Reco" in fr and fr["Stoch_Reco"].status == dataclasses.I3Particle.OK)}
            CutsNames={"NCh_"+inice_clean_coinc_pulses:"NCh_"+inice_clean_coinc_pulses+"Above7",
                       "MilliRlogl":"MilliRloglBelow2",
                       "MilliQtot":"MilliQtotRatio",
                       "MilliNCasc":"MilliNCascAbove2",
                       "StochReco":"StochRecoSucceeded"}
            tray.AddModule(MakeQualityCuts,
                           name+'_DoInIceCuts',
                           RemoveEvents=False,
                           CutOrder=CutOrder,
                           CutsToEvaluate=CutsToEvaluate,
                           CutsNames=CutsNames,
                           CollectBools="IT73AnalysisInIceQualityCuts"
                           )
        if args.type == 'data':
            # Filter out all events that don't pass standard IceTop cuts
            tray.Add(lambda frame: all(frame['IT73AnalysisIceTopQualityCuts'].values()))
            # Filter out non-coincident P frames
            tray.Add(lambda frame: inice_pulses in frame)
        tray.Add(icetray_software.add_IceTop_quality_cuts,
                 If=lambda frame: 'IT73AnalysisIceTopQualityCuts' in frame)
        tray.Add(icetray_software.add_InIce_quality_cuts,
                 If=lambda frame: 'IT73AnalysisInIceQualityCuts' in frame)
        tray.Add(icetray_software.add_nstations, pulses=IT_pulses,
                 If=lambda frame: IT_pulses in frame)
        # Add total inice charge to frame
        for min_DOM, max_DOM in zip(dom_numbers[:-1], dom_numbers[1:]):
            tray.Add(icetray_software.AddInIceCharge,
                     pulses=inice_pulses,
                     min_DOM=min_DOM,
                     max_DOM=max_DOM,
                     If=lambda frame: 'I3Geometry' in frame and inice_pulses in frame)
        tray.Add(icetray_software.AddInIceCharge,
                 pulses=inice_pulses,
                 min_DOM=1,
                 max_DOM=60,
                 If=lambda frame: 'I3Geometry' in frame and inice_pulses in frame)
        # Add InIce muon radius to frame
        tray.Add(icetray_software.AddInIceMuonRadius,
                 track='Laputop',
                 pulses='CoincLaputopCleanedPulses',
                 min_DOM=1,
                 max_DOM=60,
                 If=lambda frame: check_keys(frame, 'I3Geometry', 'Laputop', 'CoincLaputopCleanedPulses') )
        # Add fraction containment to frame
        tray.Add(icetray_software.add_fraction_containment, track='Laputop',
                 If=lambda frame: check_keys(frame, 'I3Geometry', 'Laputop') )
        # if args.type == 'sim':
        tray.Add(icetray_software.add_fraction_containment, track='MCPrimary',
                 If=lambda frame: check_keys(frame, 'I3Geometry', 'MCPrimary') )
        # Add Laputop fitstatus ok boolean to frame
        tray.Add(icetray_software.lap_fitstatus_ok,
                 If=lambda frame: 'Laputop' in frame)
        # Add opening angle between Laputop and MCPrimary for angular resolution calculation
        tray.Add(icetray_software.add_opening_angle,
                 particle1='MCPrimary', particle2='Laputop',
                 key='angle_MCPrimary_Laputop',
                 If=lambda frame: 'MCPrimary' in frame and 'Laputop' in frame)
        #====================================================================
        # Finish
        hdf = I3HDFTableService(output)
        # Map every selected key to its default tableio converter.
        keys = {key: tableio.default for key in keys}
        if args.type == 'data':
            keys['Laputop'] = [dataclasses.converters.I3ParticleConverter(),
                               astro.converters.I3AstroConverter()]
        tray.Add(I3TableWriter,
                 tableservice=hdf,
                 keys=keys,
                 SubEventStreams=['ice_top'])
        tray.Execute()
        tray.Finish()
    print('Time taken: {}'.format(time.time() - t0))
| 0 | 0 | 0 |
e0a8289301e4db8851bea0a9a53eea67e7e71287 | 1,855 | py | Python | tools/datasource-scaffold/sample/driver.py | openstack/vitrage | 95b33dbf39b040e23915882a2879c87aec239ca9 | [
"Apache-2.0"
] | 89 | 2015-09-30T21:42:17.000Z | 2022-03-28T16:31:19.000Z | tools/datasource-scaffold/sample/driver.py | openstack/vitrage | 95b33dbf39b040e23915882a2879c87aec239ca9 | [
"Apache-2.0"
] | 4 | 2015-12-13T13:06:53.000Z | 2016-01-03T19:51:28.000Z | tools/datasource-scaffold/sample/driver.py | openstack/vitrage | 95b33dbf39b040e23915882a2879c87aec239ca9 | [
"Apache-2.0"
] | 43 | 2015-11-04T15:54:27.000Z | 2021-12-10T14:24:03.000Z | # Copyright 2018 - Vitrage team
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from vitrage.datasources.driver_base import DriverBase
from vitrage.datasources.sample import SAMPLE_DATASOURCE
LOG = log.getLogger(__name__)
| 31.440678 | 75 | 0.665768 | # Copyright 2018 - Vitrage team
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from vitrage.datasources.driver_base import DriverBase
from vitrage.datasources.sample import SAMPLE_DATASOURCE
LOG = log.getLogger(__name__)
class SampleDriver(DriverBase):
    """Scaffold datasource driver: queries entities and emits Vitrage events."""

    def __init__(self):
        super(SampleDriver, self).__init__()

    @staticmethod
    def get_event_types():
        # This sample driver handles no notification event types.
        return []

    def enrich_event(self, event, event_type):
        # Nothing to add to incoming events for this sample datasource.
        pass

    def get_all(self, datasource_action):
        """Query all entities and send events to the vitrage events queue.

        When done for the first time, send an "end" event to inform it has
        finished the get_all for the datasource (because it is done
        asynchronously).
        """
        entities = self._get_all_entities()
        return self.make_pickleable(entities,
                                    SAMPLE_DATASOURCE,
                                    datasource_action)

    def get_changes(self, datasource_action):
        """Send an event to the vitrage events queue upon any change."""
        changed = self._get_changes_entities()
        return self.make_pickleable(changed,
                                    SAMPLE_DATASOURCE,
                                    datasource_action)

    def _get_all_entities(self):
        return []

    def _get_changes_entities(self):
        return []
| 150 | 932 | 23 |
f7ee1f24b14f79bec3c0f87115d4358f7cab5e57 | 487 | py | Python | regression.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 253 | 2021-01-08T17:33:30.000Z | 2022-03-21T17:32:36.000Z | regression.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 65 | 2021-01-20T16:43:35.000Z | 2022-03-30T19:07:22.000Z | regression.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 28 | 2021-02-04T14:58:30.000Z | 2022-01-17T04:35:17.000Z | from timemachines.skatertools.testing.allregressiontests import REGRESSION_TESTS
import time
import random
TIMEOUT = 60*5
# Regression tests run occasionally to check various parts of hyper-param spaces, etc.
if __name__=='__main__':
    # Keep running randomly chosen regression tests until the time
    # budget (TIMEOUT seconds) is exhausted.
    deadline = time.time() + TIMEOUT
    while time.time() < deadline:
        a_test = random.choice(REGRESSION_TESTS)
        print('Running '+str(a_test.__name__))
        a_test()
| 30.4375 | 86 | 0.718686 | from timemachines.skatertools.testing.allregressiontests import REGRESSION_TESTS
import time
import random
TIMEOUT = 60*5
# Regression tests run occasionally to check various parts of hyper-param spaces, etc.
if __name__=='__main__':
    # Keep running randomly chosen regression tests until the time
    # budget (TIMEOUT seconds) is exhausted.
    deadline = time.time() + TIMEOUT
    while time.time() < deadline:
        a_test = random.choice(REGRESSION_TESTS)
        print('Running '+str(a_test.__name__))
        a_test()
| 0 | 0 | 0 |
84889cac9a9464c3bad718622ffe3da6e2bd9c35 | 1,644 | py | Python | bootstrap/hack/send_ks_request.py | NunoEdgarGFlowHub/kubeflow | a31dbbf823a0e67299e32596f93556743f851748 | [
"Apache-2.0"
] | 3 | 2018-07-12T08:21:26.000Z | 2019-03-19T07:12:58.000Z | bootstrap/hack/send_ks_request.py | NunoEdgarGFlowHub/kubeflow | a31dbbf823a0e67299e32596f93556743f851748 | [
"Apache-2.0"
] | 12 | 2020-09-26T01:21:07.000Z | 2022-02-26T03:19:38.000Z | bootstrap/hack/send_ks_request.py | NunoEdgarGFlowHub/kubeflow | a31dbbf823a0e67299e32596f93556743f851748 | [
"Apache-2.0"
] | 1 | 2022-02-11T03:20:23.000Z | 2022-02-11T03:20:23.000Z | #!/usr/bin/python
"""A script for manual testing and experimenting with the ks server.
TODO(jlewi): Should we use this as the basis for doing
E2E integration testing? We can run the server in a subprocess.
Send requests to it and then run various checks on the results.
"""
import argparse
import datetime
import logging
import requests
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
main()
| 26.516129 | 73 | 0.596107 | #!/usr/bin/python
"""A script for manual testing and experimenting with the ks server.
TODO(jlewi): Should we use this as the basis for doing
E2E integration testing? We can run the server in a subprocess.
Send requests to it and then run various checks on the results.
"""
import argparse
import datetime
import logging
import requests
def main():
parser = argparse.ArgumentParser(
description="Script to test sending requests to the ksonnet server.")
parser.add_argument(
"--endpoint",
default="http://localhost:8080",
type=str,
help="The endpoint of the server")
args = parser.parse_args()
create_endpoint = args.endpoint + "/apps/create"
now = datetime.datetime.now()
data = {
"Name": "test-app-" + now.strftime("%Y%m%d-%H%M%S"),
"AppConfig": {
"Registries": [
{
"Name": "kubeflow",
"RegUri": "/home/jlewi/git_kubeflow/kubeflow",
},
],
"Packages": [
{
"Name": "core",
"Registry": "kubeflow",
}
],
},
"Namespace": "kubeflow",
"AutoConfigure": False,
}
r = requests.post(create_endpoint, json=data)
if r.status_code != requests.codes.OK:
logging.error("Request failed: status_code: %s", r.status_code)
logging.info("Result Body: %s", r.content)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
main()
| 954 | 0 | 23 |
19591d9b51ef2bf50ec02199353bedf627aa8ee6 | 5,009 | py | Python | dataset.py | Mikubill/GAN-ConvLSTM | 943525f62a3ab462a625c72534b3188cd583d839 | [
"MIT"
] | 16 | 2020-07-12T07:21:40.000Z | 2022-02-18T03:32:15.000Z | dataset.py | Mikubill/GAN-ConvLSTM | 943525f62a3ab462a625c72534b3188cd583d839 | [
"MIT"
] | null | null | null | dataset.py | Mikubill/GAN-ConvLSTM | 943525f62a3ab462a625c72534b3188cd583d839 | [
"MIT"
] | 11 | 2020-08-05T08:42:38.000Z | 2022-03-21T02:16:37.000Z | import glob
import numpy as np
import time
import zarr
import torch
import torch.utils.data as data
| 37.94697 | 119 | 0.577161 | import glob
import numpy as np
import time
import zarr
import torch
import torch.utils.data as data
class RadarDataset(data.Dataset):
    def __init__(self, train=True, threshold=None, n_frames_input=10, n_frames_output=10):
        """Radar echo sequence dataset read from raw float32 mosaic files.

        :param train: use the training epoch length (500) instead of 50.
        :param threshold: stored but not used by the methods in this class.
        :param n_frames_input: number of conditioning frames per sample.
        :param n_frames_output: number of target frames per sample.
        """
        super(RadarDataset, self).__init__()
        # Raw frame files, sorted so neighbouring indices are consecutive in time.
        self.dataset = sorted(glob.glob("/home/mist/data/*"))
        self.train = train
        self.length = 500 if train else 50
        # (row, col) centres of candidate 512x512 crops in the mosaic.
        self.radar = [(583, 1840), (604, 2121), (727, 1835), (993, 1767), (1168, 1831), (1233, 1665), (1427, 1615),
                      (1456, 1756), (1590, 1610), (1539, 1517), (1411, 1451), (1606, 1412), (1494, 1208), (1647, 1167),
                      (1769, 1294), (1747, 988), (2083, 1038), (2352, 924), (2621, 781), (2808, 494)]
        self.n_frames_input = n_frames_input
        self.n_frames_output = n_frames_output
        self.n_frames_total = self.n_frames_input + self.n_frames_output
        self.threshold = threshold
    def correcter(self, ones=True):
        # Resample until the input frames contain any nonzero echo; with
        # ones=True convert both halves to float tensors with a channel dim.
        inputs, output = self.getitem()
        while np.max(inputs) == 0:
            # print(np.mean(np.sum(inputs, (1, 2)), axis=0))
            # print(np.max(d, left, right))
            inputs, output = self.getitem()
        if ones:
            output = torch.from_numpy(output).contiguous().float().unsqueeze(1)
            inputs = torch.from_numpy(inputs).contiguous().float().unsqueeze(1)
        return output, inputs
    def getitem(self):
        # NOTE(review): the RNG is re-seeded from wall-clock milliseconds on
        # every call, so calls within the same millisecond (e.g. parallel
        # workers) yield identical samples — confirm this is intended.
        rand = np.random.RandomState(round((time.time() - 1589500000) * 1000))
        now = rand.choice(self.dataset)
        index = self.dataset.index(now)
        # Clamp the window near either end of the file list so the slice
        # always contains n_frames_total consecutive frames.
        if index < 20:
            ranger = self.dataset[index:index + self.n_frames_total]
        elif index > len(self.dataset) - 20:
            ranger = self.dataset[index - self.n_frames_total:index]
        else:
            ranger = self.dataset[index - self.n_frames_input:index + self.n_frames_output]
        full = self.get_content(ranger)
        inputs = full[:self.n_frames_input, ...]
        output = full[self.n_frames_input:self.n_frames_total, ...]
        return inputs, output
    def get_content(self, files):
        # Load each raw frame as a 3360x2560 float32 mosaic; 9.999e+20 is
        # the missing-data sentinel and is zeroed out.
        dat = []
        for item in files:
            band = np.fromfile(item, dtype='float32', sep='').reshape(3360, 2560)
            band[band == 9.999e+20] = 0
            dat.append(band)
        dataset = np.asarray(dat)
        return self.radar_selector(dataset)
    def radar_selector(self, dataset):
        # Pick one 512x512 crop: the fixed top-left corner plus any radar-
        # centred crop whose mean total echo exceeds 200.
        rand = np.random.RandomState(round((time.time() - 1589500000) * 1000))
        crop1 = dataset[:, ..., :512, :512]
        crop2 = [crop1]
        for radar in self.radar:
            dx, dy = radar
            crop = dataset[:, ..., dx - 256:dx + 256, dy - 256:dy + 256]
            if np.mean(np.sum(np.reshape(crop, (crop.shape[0], -1)), axis=1)) > 200:
                crop2.append(crop)
        if len(crop2) == 1:
            return crop1
        else:
            # NOTE(review): RandomState.randint's upper bound is exclusive,
            # so the last crop appended to crop2 can never be selected —
            # confirm whether randint(0, len(crop2)) was intended.
            return crop2[rand.randint(0, len(crop2) - 1)]
    def __getitem__(self, idx):
        # idx is ignored: samples are drawn randomly inside correcter().
        return self.correcter()
    def __len__(self):
        return self.length
class RadarAndSatelliteDataset(RadarDataset):
    # Two-channel variant (channel 0: radar, channel 1: satellite — inferred
    # from the class name; TODO confirm) backed by zarr stores instead of
    # raw files. June/July 2019 serve as training data, August as eval.
    def __init__(self, **kw):
        super(RadarAndSatelliteDataset, self).__init__(**kw)
        from numcodecs import blosc
        blosc.set_nthreads(64)
        self.dataset_1 = [zarr.open("/home/mist/hmr-data/data-merged-201906.zarr", "r"),
                          zarr.open("/home/mist/hmr-data/data-merged-201907.zarr", "r")]
        self.dataset_2 = zarr.open("/home/mist/hmr-data/data-merged-201908.zarr", "r")
    def getitem(self):
        # Wall-clock-seeded RNG, same caveat as the base class.
        rand = np.random.RandomState(round((time.time() - 1589500000) * 1000))
        dataset = self.dataset_1[rand.choice([0, 1])]
        pos = rand.randint(dataset.shape[0])
        # NOTE(review): `pos` is drawn from a training store even in eval
        # mode, where frames are then read from dataset_2 — confirm the two
        # stores are index-compatible.
        dat = dataset if self.train else self.dataset_2
        if pos < 20:
            full = dat[pos:pos + self.n_frames_total, ...]
        elif pos > dat.shape[0] - 20:
            full = dat[pos - self.n_frames_total:pos, ...]
        else:
            full = dat[pos - self.n_frames_input:pos + self.n_frames_output, ...]
        full = self.radar_selector(full)
        # Zero the missing-data sentinel, then invert channel 1 (stored as
        # 300 - value; presumably a brightness-temperature flip — verify).
        full[full == 9.999e+20] = 0
        full[:, 1, ...] = 300 - full[:, 1, ...]
        inputs = full[:self.n_frames_input, ...]
        output = full[self.n_frames_input:self.n_frames_total, ...]
        return inputs, output
    def correcter(self, ones=True):
        # Resample until channel 0 of the inputs carries enough echo and
        # both halves have the expected (10, 2, 512, 512) shape.
        inputs, output = self.getitem()
        while np.mean(np.sum(inputs[:, 0, ...], (-1, -2)), axis=0) < 100 or inputs.shape != \
                (10, 2, 512, 512) or output.shape != (10, 2, 512, 512):
            # print(inputs.shape, output.shape)
            inputs, output = self.getitem()
        if ones:
            output = torch.from_numpy(output).contiguous().float()
            inputs = torch.from_numpy(inputs).contiguous().float()
        # Only the radar channel of the target is returned, as (10, 1, 512, 512).
        return output[:, 0, ...].reshape(10, 1, 512, 512), inputs
| 3,687 | 1,094 | 126 |
718b0b9db199ec23f0b4a0698e32a31c6e0cfa05 | 3,031 | py | Python | losses/CRFLoss.py | woailaosang/NeuronBlocks | a0f87ff312cce2c0af84ecf24f5c764119846537 | [
"MIT"
] | 1,257 | 2019-05-06T21:25:16.000Z | 2022-03-19T11:06:49.000Z | losses/CRFLoss.py | heavenAsk/NeuronBlocks | 9b08bb8ac7ceca874c8f2541d610bc8d3278fb22 | [
"MIT"
] | 37 | 2019-05-07T00:16:13.000Z | 2021-12-31T11:55:44.000Z | losses/CRFLoss.py | heavenAsk/NeuronBlocks | 9b08bb8ac7ceca874c8f2541d610bc8d3278fb22 | [
"MIT"
] | 186 | 2019-05-07T00:36:40.000Z | 2022-02-28T20:47:19.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch.autograd as autograd
class CRFLoss(nn.Module):
    """CRF loss for a CRF output layer in sequence-tagging tasks.

    forward() returns ``forward_score - gold_score``, i.e. the negative
    log-likelihood of the gold tag path under a linear-chain CRF.
    """
    def _score_sentence(self, scores, mask, tags, transitions, crf_layer_conf):
        """
        input:
            scores: variable (seq_len, batch, tag_size, tag_size)
            mask: (batch, seq_len)
            tags: tensor (batch, seq_len)
        output:
            score: sum of score for gold sequences within whole batch
        """
        # Gives the score of a provided tag sequence
        batch_size = scores.size(1)
        seq_len = scores.size(0)
        tag_size = scores.size(2)
        # convert tag value into a new format, recorded label bigram information to index
        # new_tags[b, t] flattens the (prev_tag, cur_tag) bigram into a single
        # index prev_tag * tag_size + cur_tag over the last two dims of `scores`.
        new_tags = autograd.Variable(torch.LongTensor(batch_size, seq_len))
        if crf_layer_conf.use_gpu:
            new_tags = new_tags.cuda()
        for idx in range(seq_len):
            if idx == 0:
                # start -> first score
                # The START tag occupies index tag_size-2 (STOP is looked up
                # via crf_layer_conf.target_dict below).
                new_tags[:, 0] = (tag_size-2)*tag_size + tags[:, 0]
            else:
                new_tags[:, idx] = tags[:, idx-1]*tag_size + tags[:, idx]
        # transition for label to STOP_TAG
        end_transition = transitions[:, crf_layer_conf.target_dict[crf_layer_conf.STOP_TAG]].contiguous().view(1, tag_size).expand(batch_size, tag_size)
        # length for batch, last word position = length - 1
        length_mask = torch.sum(mask.long(), dim=1).view(batch_size, 1).long()
        # index the label id of last word
        end_ids = torch.gather(tags, 1, length_mask - 1)
        # index the transition score for end_id to STOP_TAG
        end_energy = torch.gather(end_transition, 1, end_ids)
        # convert tag as (seq_len, batch_size, 1)
        new_tags = new_tags.transpose(1, 0).contiguous().view(seq_len, batch_size, 1)
        # need convert tags id to search from positions of scores
        tg_energy = torch.gather(scores.view(seq_len, batch_size, -1), 2, new_tags).view(seq_len, batch_size) # seq_len * batch_size
        # mask transpose to (seq_len, batch_size)
        tg_energy = tg_energy.masked_select(mask.transpose(1, 0))
        # add all score together
        gold_score = tg_energy.sum() + end_energy.sum()
        return gold_score
    def forward(self, forward_score, scores, masks, tags, transitions, crf_layer_conf):
        """
        :param forward_score: Tensor scale
        :param scores: Tensor [seq_len, batch_size, target_size, target_size]
        :param masks: Tensor [batch_size, seq_len]
        :param tags: Tensor [batch_size, seq_len]
        :return: forward_score - gold_score (negative log-likelihood of the gold path)
        """
        gold_score = self._score_sentence(scores, masks, tags, transitions, crf_layer_conf)
        return forward_score - gold_score
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch.autograd as autograd
class CRFLoss(nn.Module):
    """CRF loss for a CRF output layer in sequence-tagging tasks.

    forward() returns ``forward_score - gold_score``, i.e. the negative
    log-likelihood of the gold tag path under a linear-chain CRF.
    """
    def __init__(self):
        super(CRFLoss, self).__init__()
    def _score_sentence(self, scores, mask, tags, transitions, crf_layer_conf):
        """
        input:
            scores: variable (seq_len, batch, tag_size, tag_size)
            mask: (batch, seq_len)
            tags: tensor (batch, seq_len)
        output:
            score: sum of score for gold sequences within whole batch
        """
        # Gives the score of a provided tag sequence
        batch_size = scores.size(1)
        seq_len = scores.size(0)
        tag_size = scores.size(2)
        # convert tag value into a new format, recorded label bigram information to index
        # new_tags[b, t] flattens the (prev_tag, cur_tag) bigram into a single
        # index prev_tag * tag_size + cur_tag over the last two dims of `scores`.
        new_tags = autograd.Variable(torch.LongTensor(batch_size, seq_len))
        if crf_layer_conf.use_gpu:
            new_tags = new_tags.cuda()
        for idx in range(seq_len):
            if idx == 0:
                # start -> first score
                # The START tag occupies index tag_size-2 (STOP is looked up
                # via crf_layer_conf.target_dict below).
                new_tags[:, 0] = (tag_size-2)*tag_size + tags[:, 0]
            else:
                new_tags[:, idx] = tags[:, idx-1]*tag_size + tags[:, idx]
        # transition for label to STOP_TAG
        end_transition = transitions[:, crf_layer_conf.target_dict[crf_layer_conf.STOP_TAG]].contiguous().view(1, tag_size).expand(batch_size, tag_size)
        # length for batch, last word position = length - 1
        length_mask = torch.sum(mask.long(), dim=1).view(batch_size, 1).long()
        # index the label id of last word
        end_ids = torch.gather(tags, 1, length_mask - 1)
        # index the transition score for end_id to STOP_TAG
        end_energy = torch.gather(end_transition, 1, end_ids)
        # convert tag as (seq_len, batch_size, 1)
        new_tags = new_tags.transpose(1, 0).contiguous().view(seq_len, batch_size, 1)
        # need convert tags id to search from positions of scores
        tg_energy = torch.gather(scores.view(seq_len, batch_size, -1), 2, new_tags).view(seq_len, batch_size) # seq_len * batch_size
        # mask transpose to (seq_len, batch_size)
        tg_energy = tg_energy.masked_select(mask.transpose(1, 0))
        # add all score together
        gold_score = tg_energy.sum() + end_energy.sum()
        return gold_score
    def forward(self, forward_score, scores, masks, tags, transitions, crf_layer_conf):
        """
        :param forward_score: Tensor scale
        :param scores: Tensor [seq_len, batch_size, target_size, target_size]
        :param masks: Tensor [batch_size, seq_len]
        :param tags: Tensor [batch_size, seq_len]
        :return: forward_score - gold_score (negative log-likelihood of the gold path)
        """
        gold_score = self._score_sentence(scores, masks, tags, transitions, crf_layer_conf)
        return forward_score - gold_score
86c4550eebbbafd8546a59d0d4e37ec2c2cb639e | 2,442 | py | Python | bin/sha1sum.py | yjqiang/stash | 83dd0367b2a260f69afbe59738ae9ae523f8f1d1 | [
"MIT"
] | 1 | 2019-04-16T14:01:03.000Z | 2019-04-16T14:01:03.000Z | bin/sha1sum.py | yjqiang/stash | 83dd0367b2a260f69afbe59738ae9ae523f8f1d1 | [
"MIT"
] | null | null | null | bin/sha1sum.py | yjqiang/stash | 83dd0367b2a260f69afbe59738ae9ae523f8f1d1 | [
"MIT"
] | null | null | null | '''
Get sha1 hash of a file or string.
usage: sha1sum.py [-h] [-c] [file [file ...]]
positional arguments:
file String or file to hash.
optional arguments:
-h, --help show this help message and exit
-c, --check Check a file with sha1 hashes and file names for a match.
format:
sha1_hash filename
sha1_hash filename
etc.
'''
from __future__ import print_function
import argparse
import os
import re
import sys
import six
from Crypto.Hash import SHA
ap = argparse.ArgumentParser()
ap.add_argument('-c','--check',action='store_true',default=False,
help='''Check a file with sha1 hashes and file names for a match. format: hash filename''')
ap.add_argument('file',action='store',nargs='*',help='String or file to hash.')
args = ap.parse_args(sys.argv[1:])
if args.check:
if args.file:
s = True
for arg in args.file:
if os.path.isfile(arg):
s = s and check_list(open(arg))
else:
s = check_list(make_file(sys.stdin.read()))
if s:
sys.exit(0)
else:
sys.exit(1)
else:
if args.file:
for arg in args.file:
if os.path.isfile(arg):
with open(arg, 'rb') as f:
print(get_hash(f)+' '+arg)
elif arg == "-":
print(get_hash(make_file(sys.stdin.read())))
else:
print(get_hash(make_file(arg)))
else:
print(get_hash(make_file(sys.stdin.read())))
| 24.918367 | 107 | 0.546274 | '''
Get sha1 hash of a file or string.
usage: sha1sum.py [-h] [-c] [file [file ...]]
positional arguments:
file String or file to hash.
optional arguments:
-h, --help show this help message and exit
-c, --check Check a file with sha1 hashes and file names for a match.
format:
sha1_hash filename
sha1_hash filename
etc.
'''
from __future__ import print_function
import argparse
import os
import re
import sys
import six
from Crypto.Hash import SHA
def get_hash(fileobj):
h = SHA.new()
chunk_size = 8192
while True:
chunk = fileobj.read(chunk_size)
if len(chunk) == 0:
break
h.update(chunk)
return h.hexdigest()
def check_list(fileobj):
correct = True
for line in fileobj:
match = re.match(r'(\w+)[ \t]+(.+)',line)
try:
with open(match.group(2),'rb') as f1:
if match.group(1) == get_hash(f1):
print(match.group(2)+': Pass')
else:
print(match.group(2)+': Fail')
correct = False
except Exception:
print('Invalid format.')
correct = False
return correct
def make_file(txt):
f = six.BytesIO()
if isinstance(txt, six.binary_type):
f.write(txt)
else:
f.write(txt.encode("utf-8"))
f.seek(0)
return f
ap = argparse.ArgumentParser()
ap.add_argument('-c','--check',action='store_true',default=False,
help='''Check a file with sha1 hashes and file names for a match. format: hash filename''')
ap.add_argument('file',action='store',nargs='*',help='String or file to hash.')
args = ap.parse_args(sys.argv[1:])
if args.check:
if args.file:
s = True
for arg in args.file:
if os.path.isfile(arg):
s = s and check_list(open(arg))
else:
s = check_list(make_file(sys.stdin.read()))
if s:
sys.exit(0)
else:
sys.exit(1)
else:
if args.file:
for arg in args.file:
if os.path.isfile(arg):
with open(arg, 'rb') as f:
print(get_hash(f)+' '+arg)
elif arg == "-":
print(get_hash(make_file(sys.stdin.read())))
else:
print(get_hash(make_file(arg)))
else:
print(get_hash(make_file(sys.stdin.read())))
| 830 | 0 | 69 |
61fb73cf6d88aa52e0624ce208b48d5549f7053c | 297 | py | Python | rses/src/rses_config.py | iScrE4m/RSES | 88299f105ded8838243eab8b25ab1626c97d1179 | [
"MIT"
] | 1 | 2022-02-16T15:06:22.000Z | 2022-02-16T15:06:22.000Z | rses/src/rses_config.py | djetelina/RSES | 88299f105ded8838243eab8b25ab1626c97d1179 | [
"MIT"
] | null | null | null | rses/src/rses_config.py | djetelina/RSES | 88299f105ded8838243eab8b25ab1626c97d1179 | [
"MIT"
] | null | null | null | # coding=utf-8
"""Configuration"""
import os
SECRET_KEY: str = os.environ.get('SECRET_KEY', 'SUPER_SECRET')
PORT: int = int(os.environ.get('PORT', 5000))
DATABASE_URL: str = os.environ.get('RSES_DB_URL') or os.environ.get('DATABASE_URL')
# Do you want a flask client
RSES_WEB_CLIENT: bool = True
| 29.7 | 83 | 0.727273 | # coding=utf-8
"""Configuration"""
import os
SECRET_KEY: str = os.environ.get('SECRET_KEY', 'SUPER_SECRET')
PORT: int = int(os.environ.get('PORT', 5000))
DATABASE_URL: str = os.environ.get('RSES_DB_URL') or os.environ.get('DATABASE_URL')
# Do you want a flask client
RSES_WEB_CLIENT: bool = True
| 0 | 0 | 0 |
34b4b3db094d6213c2a373133001a45d15786d56 | 965 | py | Python | linear_regression/linear_sample.py | kwoshvick/NSE-Stock-Price-Prediction | 87e16f72db149dc44220f626b009f5ad0df93839 | [
"MIT"
] | 4 | 2018-04-14T13:04:13.000Z | 2021-07-31T10:28:45.000Z | linear_regression/linear_sample.py | kwoshvick/NSE-Stock-Price-Prediction | 87e16f72db149dc44220f626b009f5ad0df93839 | [
"MIT"
] | null | null | null | linear_regression/linear_sample.py | kwoshvick/NSE-Stock-Price-Prediction | 87e16f72db149dc44220f626b009f5ad0df93839 | [
"MIT"
] | 2 | 2019-11-06T16:28:52.000Z | 2021-02-27T15:02:25.000Z | import quandl
import pandas as pd
import numpy as np
import datetime
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing, cross_validation
df = quandl.get("WIKI/AMZN")
df = df[['Adj. Close']]
# print(df)
#
# exit()
forecast_out = int(30) # predicting 30 days into future
df['Prediction'] = df[['Adj. Close']].shift(-forecast_out) # label column with data shifted 30 units up
X = np.array(df.drop(['Prediction'], 1))
X = preprocessing.scale(X)
X_forecast = X[-forecast_out:] # set X_forecast equal to last 30
X = X[:-forecast_out] # remove last 30 from X
y = np.array(df['Prediction'])
y = y[:-forecast_out]
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size = 0.2)
# Training
clf = LinearRegression()
clf.fit(X_train,y_train)
# Testing
confidence = clf.score(X_test, y_test)
print("confidence: ", confidence)
forecast_prediction = clf.predict(X_forecast)
print(forecast_prediction)
| 21.444444 | 104 | 0.734715 | import quandl
import pandas as pd
import numpy as np
import datetime
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing, cross_validation
df = quandl.get("WIKI/AMZN")
df = df[['Adj. Close']]
# print(df)
#
# exit()
forecast_out = int(30) # predicting 30 days into future
df['Prediction'] = df[['Adj. Close']].shift(-forecast_out) # label column with data shifted 30 units up
X = np.array(df.drop(['Prediction'], 1))
X = preprocessing.scale(X)
X_forecast = X[-forecast_out:] # set X_forecast equal to last 30
X = X[:-forecast_out] # remove last 30 from X
y = np.array(df['Prediction'])
y = y[:-forecast_out]
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size = 0.2)
# Training
clf = LinearRegression()
clf.fit(X_train,y_train)
# Testing
confidence = clf.score(X_test, y_test)
print("confidence: ", confidence)
forecast_prediction = clf.predict(X_forecast)
print(forecast_prediction)
| 0 | 0 | 0 |
48370d44d560e01da2f2aaf8cce12b36f338523e | 1,614 | py | Python | surfactant_example/mco/mco.py | force-h2020/force-bdss-plugin-surfactant-example | ba442f2b39919f7d071f4384f8eaba0d99f44b1f | [
"BSD-2-Clause",
"MIT"
] | null | null | null | surfactant_example/mco/mco.py | force-h2020/force-bdss-plugin-surfactant-example | ba442f2b39919f7d071f4384f8eaba0d99f44b1f | [
"BSD-2-Clause",
"MIT"
] | null | null | null | surfactant_example/mco/mco.py | force-h2020/force-bdss-plugin-surfactant-example | ba442f2b39919f7d071f4384f8eaba0d99f44b1f | [
"BSD-2-Clause",
"MIT"
] | null | null | null | import logging
from itertools import product
from force_bdss.api import BaseMCO, DataValue
log = logging.getLogger(__name__)
def parameter_grid_generator(parameters):
"""Function to calculate the number of Gromacs experiments
required and the combinations of each fragment concentrations"""
ranges = [parameter.sample_values for parameter in parameters]
for combo in product(*ranges):
yield combo
def get_labels(parameters):
"""Generates numerical labels for each categorical
MCOParameter"""
label_dict = {}
label = 1
for parameter in parameters:
if hasattr(parameter, "categories"):
for name in parameter.categories:
if name not in label_dict:
label_dict[name] = label
label += 1
return label_dict
| 26.9 | 75 | 0.644981 | import logging
from itertools import product
from force_bdss.api import BaseMCO, DataValue
log = logging.getLogger(__name__)
class MCO(BaseMCO):
def run(self, evaluator):
parameters = evaluator.mco_model.parameters
log.info("Doing MCO run")
for input_parameters in parameter_grid_generator(parameters):
kpis = evaluator.evaluate(input_parameters)
optimal_kpis = [DataValue(value=v) for v in kpis]
# NOTE: This is a workaround for displaying data from different
# ingredients in WfManager. Ultimately we should include
# a DataView object that can handle unicode variables
optimal_points = [
DataValue(value=v)
for v in input_parameters
]
evaluator.mco_model.notify_progress_event(
optimal_points, optimal_kpis
)
def parameter_grid_generator(parameters):
"""Function to calculate the number of Gromacs experiments
required and the combinations of each fragment concentrations"""
ranges = [parameter.sample_values for parameter in parameters]
for combo in product(*ranges):
yield combo
def get_labels(parameters):
"""Generates numerical labels for each categorical
MCOParameter"""
label_dict = {}
label = 1
for parameter in parameters:
if hasattr(parameter, "categories"):
for name in parameter.categories:
if name not in label_dict:
label_dict[name] = label
label += 1
return label_dict
| 731 | -2 | 49 |
d5c2a2ea0392abd06d1c10b0bc3c56f563ffe4fa | 18,142 | py | Python | kadi/events/orbit_funcs.py | jzuhone/kadi | de4885327d256e156cfe42b2b1700775f5b4d6cf | [
"BSD-3-Clause"
] | 1 | 2015-07-30T18:33:14.000Z | 2015-07-30T18:33:14.000Z | kadi/events/orbit_funcs.py | jzuhone/kadi | de4885327d256e156cfe42b2b1700775f5b4d6cf | [
"BSD-3-Clause"
] | 104 | 2015-01-20T18:44:36.000Z | 2022-03-29T18:51:55.000Z | kadi/events/orbit_funcs.py | jzuhone/kadi | de4885327d256e156cfe42b2b1700775f5b4d6cf | [
"BSD-3-Clause"
] | 2 | 2018-08-23T02:36:08.000Z | 2020-03-13T19:24:36.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import division
import re
import os
import logging
from pathlib import Path
import numpy as np
from Chandra.Time import DateTime
ORBIT_POINTS_DTYPE = [('date', 'U21'), ('name', 'U8'),
('orbit_num', 'i4'), ('descr', 'U50')]
ORBITS_DTYPE = [('orbit_num', 'i4'),
('start', 'U21'), ('stop', 'U21'),
('tstart', 'f8'), ('tstop', 'f8'), ('dur', 'f4'),
('perigee', 'U21'), ('t_perigee', 'f8'), ('apogee', 'U21'),
('start_radzone', 'U21'), ('stop_radzone', 'U21'),
('dt_start_radzone', 'f4'), ('dt_stop_radzone', 'f4')]
logger = logging.getLogger('events')
MPLOGS_DIR = Path(os.environ['SKA'], 'data', 'mpcrit1', 'mplogs')
# Just for reference, all name=descr pairs between 2000 to 2013:001
NAMES = {
'EALT0': 'ALTITUDE ZONE ENTRY0',
'EALT1': 'ALTITUDE ZONE ENTRY 1',
'EALT2': 'ALTITUDE ZONE ENTRY2',
'EALT3': 'ALTITUDE ZONE ENTRY3',
'EAPOGEE': 'ORBIT APOGEE',
'EASCNCR': 'ORBIT ASCENDING NODE CROSSING',
'EE1RADZ0': 'ELECTRON1 RADIATION ENTRY0',
'EE2RADZ0': 'ELECTRON2 RADIATION ENTRY0',
'EEF1000': 'ELECTRON 1 RADIATION ENTRY 0',
'EODAY': 'EARTH SHADOW (UMBRA) EXIT',
'EONIGHT': 'EARTH SHADOW (UMBRA) ENTRY',
'EP1RADZ0': 'PROTON1 RADIATION ENTRY0',
'EP2RADZ0': 'PROTON2 RADIATION ENTRY0',
'EPERIGEE': 'ORBIT PERIGEE',
'EPF1000': 'PROTON 1 RADIATION ENTRY 0',
'EQF003M': 'PROTON FLUX ENTRY FOR ENERGY 0 LEVEL 0 KP 3 MEAN',
'EQF013M': 'PROTON FLUX ENTRY FOR ENERGY 0 LEVEL 1 KP 3 MEAN',
'LSDAY': 'LUNAR SHADOW (UMBRA) EXIT',
'LSNIGHT': 'LUNAR SHADOW (UMBRA) ENTRY',
'LSPENTRY': 'LUNAR SHADOW (PENUMBRA) ENTRY',
'LSPEXIT': 'LUNAR SHADOW (PENUMBRA) EXIT',
'OORMPDS': 'RADMON DISABLE',
'OORMPEN': 'RADMON ENABLE',
'PENTRY': 'EARTH SHADOW (PENUMBRA) ENTRY',
'PEXIT': 'EARTH SHADOW (PENUMBRA) EXIT',
'XALT0': 'ALTITUDE ZONE EXIT 0',
'XALT1': 'ALTITUDE ZONE EXIT 1',
'XALT2': 'ALTITUDE ZONE EXIT2',
'XALT3': 'ALTITUDE ZONE EXIT3',
'XE1RADZ0': 'ELECTRON1 RADIATION EXIT0',
'XE2RADZ0': 'ELECTRON2 RADIATION EXIT0',
'XEF1000': 'ELECTRON 1 RADIATION EXIT 0',
'XP1RADZ0': 'PROTON1 RADIATION EXIT0',
'XP2RADZ0': 'PROTON2 RADIATION EXIT0',
'XPF1000': 'PROTON 1 RADIATION EXIT 0',
'XQF003M': 'PROTON FLUX EXIT FOR ENERGY 0 LEVEL 0 KP 3 MEAN',
'XQF013M': 'PROTON FLUX EXIT FOR ENERGY 0 LEVEL 1 KP 3 MEAN'}
def prune_dirs(dirs, regex):
"""
Prune directories (in-place) that do not match ``regex``.
"""
prunes = [x for x in dirs if not re.match(regex, x)]
for prune in prunes:
dirs.remove(prune)
# get_tlr_files is slow, so cache results (mostly for testing)
get_tlr_files_cache = {}
def get_tlr_files(mpdir=''):
"""
Get all timeline report files within the specified SOT MP directory
``mpdir`` relative to the root of /data/mpcrit1/mplogs.
Returns a list of dicts [{name, date},..]
"""
rootdir = (MPLOGS_DIR / mpdir).absolute()
try:
return get_tlr_files_cache[rootdir]
except KeyError:
pass
logger.info('Looking for TLR files in {}'.format(rootdir))
tlrfiles = []
for root, dirs, files in os.walk(rootdir):
root = root.rstrip('/')
depth = len(Path(root).parts) - len(MPLOGS_DIR.parts)
logger.debug(f'get_trl_files: root={root} {depth} {rootdir}')
if depth == 0:
prune_dirs(dirs, r'\d{4}$')
elif depth == 1:
prune_dirs(dirs, r'[A-Z]{3}\d{4}$')
elif depth == 2:
prune_dirs(dirs, r'ofls[a-z]$')
elif depth > 2:
tlrs = [x for x in files if re.match(r'.+\.tlr$', x)]
if len(tlrs) == 0:
logger.info('NO tlr file found in {}'.format(root))
else:
logger.info('Located TLR file {}'.format(os.path.join(root, tlrs[0])))
tlrfiles.append(os.path.join(root, tlrs[0]))
while dirs:
dirs.pop()
files = []
for tlrfile in tlrfiles:
monddyy, oflsv = tlrfile.split('/')[-3:-1]
mon = monddyy[:3].capitalize()
dd = monddyy[3:5]
yy = int(monddyy[5:7])
yyyy = 1900 + yy if yy > 95 else 2000 + yy
caldate = '{}{}{} at 12:00:00.000'.format(yyyy, mon, dd)
files.append((tlrfile, DateTime(caldate).date[:8] + oflsv, DateTime(caldate).date))
files = sorted(files, key=lambda x: x[1])
out = [{'name': x[0], 'date': x[2]} for x in files]
get_tlr_files_cache[rootdir] = out
return out
def prune_a_loads(tlrfiles):
"""
When there are B or later products, take out the A loads. This is where
most mistakes are removed. (CURRENTLY THIS FUNCTION IS NOT USED).
"""
outs = []
last_monddyy = None
for tlrfile in reversed(tlrfiles):
monddyy, oflsv = tlrfile.split('/')[-3:-1]
if monddyy == last_monddyy and oflsv == 'oflsa':
continue
else:
outs.append(tlrfile)
last_monddyy = monddyy
return list(reversed(outs))
def filter_known_bad(orbit_points):
"""
Filter some commands that are known to be incorrect.
"""
ops = orbit_points
bad = np.zeros(len(orbit_points), dtype=bool)
bad |= (ops['name'] == 'OORMPEN') & (ops['date'] == '2002:253:10:08:52.239')
bad |= (ops['name'] == 'OORMPEN') & (ops['date'] == '2004:010:10:00:00.000')
return orbit_points[~bad]
def get_orbit_points(tlrfiles):
"""
Get all orbit points from the timeline reports within the specified mission planning
path '' (all) or 'YYYY' (year) or YYYY/MONDDYY (load).
"""
orbit_points = []
# tlrfiles = prune_a_loads(tlrfiles)
for tlrfile in tlrfiles:
# Parse thing like this:
# 2012:025:21:22:21.732 EQF013M 1722 PROTON FLUX ENTRY FOR ENERGY 0 LEVEL ...
# 012345678901234567890123456789012345678901234567890123456789
logger.info('Getting points from {}'.format(tlrfile))
try:
fh = open(tlrfile, 'r', encoding='ascii', errors='ignore')
except IOError as err:
logger.warn(err)
continue
for line in fh:
if len(line) < 30 or line[:2] != ' 2':
continue
try:
date, name, orbit_num, descr = line.split(None, 3)
except ValueError:
continue
if name.startswith('OORMP'):
orbit_num = -1
descr = 'RADMON {}ABLE'.format('EN' if name.endswith('EN') else 'DIS')
elif line[23] in ' -':
continue
if 'DSS-' in name:
continue
if not re.match(r'\d{4}:\d{3}:\d{2}:\d{2}:\d{2}\.\d{3}', date):
logger.info('Failed for date: "{}"'.format(date))
continue
if not re.match(r'[A-Z]+', name):
logger.info('Failed for name: "{}"'.format(name))
continue
try:
orbit_num = int(orbit_num)
except TypeError:
logger.info('Failed for orbit_num: {}'.format(orbit_num))
continue
descr = descr.strip()
orbit_points.append((date, name, orbit_num, descr))
orbit_points = sorted(set(orbit_points), key=lambda x: x[0])
return orbit_points
def get_nearest_orbit_num(orbit_nums, idx, d_idx):
"""
Get the orbit number nearest to ``orbit_nums[idx]`` in direction ``d_idx``,
skipping values of -1 (from radmon commanding).
"""
while True:
idx += d_idx
if idx < 0 or idx >= len(orbit_nums):
raise NotFoundError('No nearest orbit num found')
if orbit_nums[idx] != -1:
break
return orbit_nums[idx], idx
def interpolate_orbit_points(orbit_points, name):
"""
Linearly interpolate across any gaps for ``name`` orbit_points.
"""
if len(orbit_points) == 0:
return []
ok = orbit_points['name'] == name
ops = orbit_points[ok]
# Get the indexes of missing orbits
idxs = np.flatnonzero(np.diff(ops['orbit_num']) > 1)
new_orbit_points = []
for idx in idxs:
op0 = ops[idx]
op1 = ops[idx + 1]
orb_num0 = op0['orbit_num']
orb_num1 = op1['orbit_num']
time0 = DateTime(op0['date']).secs
time1 = DateTime(op1['date']).secs
for orb_num in range(orb_num0 + 1, orb_num1):
time = time0 + (orb_num - orb_num0) / (orb_num1 - orb_num0) * (time1 - time0)
date = DateTime(time).date
new_orbit_point = (date, name, orb_num, op0['descr'])
logger.info('Adding new orbit point {}'.format(new_orbit_point))
new_orbit_points.append(new_orbit_point)
return new_orbit_points
def process_orbit_points(orbit_points):
"""
Take the raw orbit points (list of tuples) and do some processing:
- Remove duplicate events within 30 seconds of each other
- Fill in orbit number for RADMON enable / disable points
- Convert to a number structured array
Returns a numpy array with processed orbit points::
ORBIT_POINTS_DTYPE = [('date', 'U21'), ('name', 'U8'),
('orbit_num', 'i4'), ('descr', 'U50')]
"""
# Find neighboring pairs of orbit points that are identical except for date.
# If the dates are then within 180 seconds of each other then toss the first
# of the pair.
if len(orbit_points) == 0:
return np.array([], dtype=ORBIT_POINTS_DTYPE)
uniq_orbit_points = []
for op0, op1 in zip(orbit_points[:-1], orbit_points[1:]):
if op0[1:4] == op1[1:4]:
dt = (DateTime(op1[0]) - DateTime(op0[0])) * 86400
if dt < 180:
# logger.info('Removing duplicate orbit points:\n {}\n {}'
# .format(str(op0), str(op1)))
continue
uniq_orbit_points.append(op1)
uniq_orbit_points.append(orbit_points[-1])
orbit_points = uniq_orbit_points
# Convert to a numpy structured array
orbit_points = np.array(orbit_points, dtype=ORBIT_POINTS_DTYPE)
# Filter known bad points
orbit_points = filter_known_bad(orbit_points)
# For key orbit points linearly interpolate across gaps in orbit coverage.
new_ops = []
for name in ('EPERIGEE', 'EAPOGEE', 'EASCNCR'):
new_ops.extend(interpolate_orbit_points(orbit_points, name))
# Add a new orbit point for the ascending node EXIT which is the end of each orbit.
# This simplifies bookkeeping later.
for op in orbit_points[orbit_points['name'] == 'EASCNCR']:
new_ops.append((op['date'], 'XASCNCR', op['orbit_num'] - 1, op['descr'] + ' EXIT'))
# Add corresponding XASCNCR for any new EASCNCR points
for op in new_ops:
if op[1] == 'EASCNCR':
new_ops.append((op[0], 'XASCNCR', op[2] - 1, op[3] + ' EXIT'))
logger.info('Adding {} new orbit points'.format(len(new_ops)))
new_ops = np.array(new_ops, dtype=ORBIT_POINTS_DTYPE)
orbit_points = np.concatenate([orbit_points, new_ops])
orbit_points.sort(order=['date', 'orbit_num'])
# Fill in orbit number for RADMON enable / disable points
radmon_idxs = np.flatnonzero(orbit_points['orbit_num'] == -1)
orbit_nums = orbit_points['orbit_num']
for idx in radmon_idxs:
try:
prev_num, prev_idx = get_nearest_orbit_num(orbit_nums, idx, -1)
next_num, next_idx = get_nearest_orbit_num(orbit_nums, idx, +1)
except NotFoundError:
logger.info('No nearest orbit point for orbit_points[{}] (len={})'
.format(idx, len(orbit_points)))
else:
if prev_num == next_num:
orbit_nums[idx] = next_num
else:
logger.info('Unable to assign orbit num idx={} prev={} next={}'
.format(idx, prev_num, next_num))
logger.info(' {} {}'.format(prev_idx, orbit_points[prev_idx]))
logger.info(' * {} {}'.format(idx, orbit_points[idx]))
logger.info(' {} {}'.format(next_idx, orbit_points[next_idx]))
return orbit_points
def get_orbits(orbit_points):
"""
Collate the orbit points into full orbits, with dates corresponding to start (ORBIT
ASCENDING NODE CROSSING), stop, apogee, perigee, radzone start and radzone stop.
Radzone is defined as the time covering perigee when radmon is disabled by command.
This corresponds to the planned values and may differ from actual in the case of
events that run SCS107 and prematurely disable RADMON.
Returns a numpy structured array::
ORBITS_DTYPE = [('orbit_num', 'i4'),
('start', 'U21'), ('stop', 'U21'),
('tstart', 'f8'), ('tstop', 'f8'), ('dur', 'f4'),
('perigee', 'U21'), ('t_perigee', 'f8'), ('apogee', 'U21'),
('start_radzone', 'U21'), ('stop_radzone', 'U21'),
('dt_start_radzone', 'f4'), ('dt_stop_radzone', 'f4')]
"""
def find_radzone(idx_perigee):
"""
Find the extent of the radiation zone, defined as the last time before perigee
that RADMON is enabled until the first time after perigee that RADMON is enabled.
"""
idx = idx_perigee
start_radzone = None
while True:
idx -= 1
if idx < 0:
raise NotFoundError('Did not find RADMON enable prior to {}'
.format(orbit_points[idx_perigee]))
if orbit_points['name'][idx] == 'OORMPDS':
start_radzone = orbit_points['date'][idx]
if orbit_points['name'][idx] == 'OORMPEN':
if start_radzone is None:
raise NotFoundError('Found radmon enable before first disable at idx {}'
.format(idx))
break
idx = idx_perigee
while True:
idx += 1
if idx >= len(orbit_points):
raise NotFoundError('Did not find RADMON enable after to {}'
.format(str(orbit_points[idx_perigee])))
if orbit_points['name'][idx] == 'OORMPEN':
stop_radzone = orbit_points['date'][idx]
break
return start_radzone, stop_radzone
# Copy orbit points and sort by orbit_num then date. This allows using
# search_sorted to select orbit_points corresponding to each orbit. In
# very rare cases (orbit 1448 I think), there are orbit_points that cross
# orbit boundaries by a few seconds. This is related to the technique of
# reading in every TLR to get maximal coverage of orbit points.
orbit_points = orbit_points.copy()
orbit_points.sort(order=['orbit_num', 'date'])
orbit_nums = orbit_points['orbit_num']
uniq_orbit_nums = sorted(set(orbit_nums[orbit_nums > 0]))
orbits = []
for orbit_num in uniq_orbit_nums:
i0 = np.searchsorted(orbit_nums, orbit_num, side='left')
i1 = np.searchsorted(orbit_nums, orbit_num, side='right')
ops = orbit_points[i0: i1]
try:
if 'EASCNCR' not in ops['name'] or 'XASCNCR' not in ops['name']:
raise NotFoundError('Skipping orbit {} incomplete'.format(orbit_num))
start = get_date(ops, 'EASCNCR')
stop = get_date(ops, 'XASCNCR')
date_apogee = get_date(ops, 'EAPOGEE')
date_perigee = get_date(ops, 'EPERIGEE')
idx_perigee = get_idx(ops, 'EPERIGEE') + i0
start_radzone, stop_radzone = find_radzone(idx_perigee)
except NotFoundError as err:
logger.info(err)
continue
else:
dt_radzones = [(DateTime(date) - DateTime(date_perigee)) * 86400.0
for date in (start_radzone, stop_radzone)]
tstart = DateTime(start).secs
tstop = DateTime(stop).secs
orbit = (orbit_num,
start, stop,
tstart, tstop, tstop - tstart,
date_perigee, DateTime(date_perigee).secs, date_apogee,
start_radzone, stop_radzone,
dt_radzones[0], dt_radzones[1])
logger.info('get_orbits: Adding orbit {} {} {}'.format(orbit_num, start, stop))
orbits.append(orbit)
orbits = np.array(orbits, dtype=ORBITS_DTYPE)
return orbits
def get_radzone_from_orbit(orbit):
"""
Extract the RadZone fields from an orbit descriptor (which is one row
of the orbits structured array).
"""
start_radzone = DateTime(orbit['start_radzone'], format='date')
stop_radzone = DateTime(orbit['stop_radzone'], format='date')
tstart = start_radzone.secs
tstop = stop_radzone.secs
dur = tstop - tstart
radzone = {'start': start_radzone.date,
'stop': stop_radzone.date,
'tstart': tstart,
'tstop': tstop,
'dur': dur,
'orbit_num': orbit['orbit_num'],
'perigee': orbit['perigee']}
return radzone
| 37.483471 | 92 | 0.582461 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import division
import re
import os
import logging
from pathlib import Path
import numpy as np
from Chandra.Time import DateTime
class NotFoundError(Exception):
pass
ORBIT_POINTS_DTYPE = [('date', 'U21'), ('name', 'U8'),
('orbit_num', 'i4'), ('descr', 'U50')]
ORBITS_DTYPE = [('orbit_num', 'i4'),
('start', 'U21'), ('stop', 'U21'),
('tstart', 'f8'), ('tstop', 'f8'), ('dur', 'f4'),
('perigee', 'U21'), ('t_perigee', 'f8'), ('apogee', 'U21'),
('start_radzone', 'U21'), ('stop_radzone', 'U21'),
('dt_start_radzone', 'f4'), ('dt_stop_radzone', 'f4')]
logger = logging.getLogger('events')
MPLOGS_DIR = Path(os.environ['SKA'], 'data', 'mpcrit1', 'mplogs')
# Just for reference, all name=descr pairs between 2000 to 2013:001
NAMES = {
'EALT0': 'ALTITUDE ZONE ENTRY0',
'EALT1': 'ALTITUDE ZONE ENTRY 1',
'EALT2': 'ALTITUDE ZONE ENTRY2',
'EALT3': 'ALTITUDE ZONE ENTRY3',
'EAPOGEE': 'ORBIT APOGEE',
'EASCNCR': 'ORBIT ASCENDING NODE CROSSING',
'EE1RADZ0': 'ELECTRON1 RADIATION ENTRY0',
'EE2RADZ0': 'ELECTRON2 RADIATION ENTRY0',
'EEF1000': 'ELECTRON 1 RADIATION ENTRY 0',
'EODAY': 'EARTH SHADOW (UMBRA) EXIT',
'EONIGHT': 'EARTH SHADOW (UMBRA) ENTRY',
'EP1RADZ0': 'PROTON1 RADIATION ENTRY0',
'EP2RADZ0': 'PROTON2 RADIATION ENTRY0',
'EPERIGEE': 'ORBIT PERIGEE',
'EPF1000': 'PROTON 1 RADIATION ENTRY 0',
'EQF003M': 'PROTON FLUX ENTRY FOR ENERGY 0 LEVEL 0 KP 3 MEAN',
'EQF013M': 'PROTON FLUX ENTRY FOR ENERGY 0 LEVEL 1 KP 3 MEAN',
'LSDAY': 'LUNAR SHADOW (UMBRA) EXIT',
'LSNIGHT': 'LUNAR SHADOW (UMBRA) ENTRY',
'LSPENTRY': 'LUNAR SHADOW (PENUMBRA) ENTRY',
'LSPEXIT': 'LUNAR SHADOW (PENUMBRA) EXIT',
'OORMPDS': 'RADMON DISABLE',
'OORMPEN': 'RADMON ENABLE',
'PENTRY': 'EARTH SHADOW (PENUMBRA) ENTRY',
'PEXIT': 'EARTH SHADOW (PENUMBRA) EXIT',
'XALT0': 'ALTITUDE ZONE EXIT 0',
'XALT1': 'ALTITUDE ZONE EXIT 1',
'XALT2': 'ALTITUDE ZONE EXIT2',
'XALT3': 'ALTITUDE ZONE EXIT3',
'XE1RADZ0': 'ELECTRON1 RADIATION EXIT0',
'XE2RADZ0': 'ELECTRON2 RADIATION EXIT0',
'XEF1000': 'ELECTRON 1 RADIATION EXIT 0',
'XP1RADZ0': 'PROTON1 RADIATION EXIT0',
'XP2RADZ0': 'PROTON2 RADIATION EXIT0',
'XPF1000': 'PROTON 1 RADIATION EXIT 0',
'XQF003M': 'PROTON FLUX EXIT FOR ENERGY 0 LEVEL 0 KP 3 MEAN',
'XQF013M': 'PROTON FLUX EXIT FOR ENERGY 0 LEVEL 1 KP 3 MEAN'}
def prune_dirs(dirs, regex):
"""
Prune directories (in-place) that do not match ``regex``.
"""
prunes = [x for x in dirs if not re.match(regex, x)]
for prune in prunes:
dirs.remove(prune)
# get_tlr_files is slow, so cache results (mostly for testing)
get_tlr_files_cache = {}
def get_tlr_files(mpdir=''):
"""
Get all timeline report files within the specified SOT MP directory
``mpdir`` relative to the root of /data/mpcrit1/mplogs.
Returns a list of dicts [{name, date},..]
"""
rootdir = (MPLOGS_DIR / mpdir).absolute()
try:
return get_tlr_files_cache[rootdir]
except KeyError:
pass
logger.info('Looking for TLR files in {}'.format(rootdir))
tlrfiles = []
for root, dirs, files in os.walk(rootdir):
root = root.rstrip('/')
depth = len(Path(root).parts) - len(MPLOGS_DIR.parts)
logger.debug(f'get_trl_files: root={root} {depth} {rootdir}')
if depth == 0:
prune_dirs(dirs, r'\d{4}$')
elif depth == 1:
prune_dirs(dirs, r'[A-Z]{3}\d{4}$')
elif depth == 2:
prune_dirs(dirs, r'ofls[a-z]$')
elif depth > 2:
tlrs = [x for x in files if re.match(r'.+\.tlr$', x)]
if len(tlrs) == 0:
logger.info('NO tlr file found in {}'.format(root))
else:
logger.info('Located TLR file {}'.format(os.path.join(root, tlrs[0])))
tlrfiles.append(os.path.join(root, tlrs[0]))
while dirs:
dirs.pop()
files = []
for tlrfile in tlrfiles:
monddyy, oflsv = tlrfile.split('/')[-3:-1]
mon = monddyy[:3].capitalize()
dd = monddyy[3:5]
yy = int(monddyy[5:7])
yyyy = 1900 + yy if yy > 95 else 2000 + yy
caldate = '{}{}{} at 12:00:00.000'.format(yyyy, mon, dd)
files.append((tlrfile, DateTime(caldate).date[:8] + oflsv, DateTime(caldate).date))
files = sorted(files, key=lambda x: x[1])
out = [{'name': x[0], 'date': x[2]} for x in files]
get_tlr_files_cache[rootdir] = out
return out
def prune_a_loads(tlrfiles):
"""
When there are B or later products, take out the A loads. This is where
most mistakes are removed. (CURRENTLY THIS FUNCTION IS NOT USED).
"""
outs = []
last_monddyy = None
for tlrfile in reversed(tlrfiles):
monddyy, oflsv = tlrfile.split('/')[-3:-1]
if monddyy == last_monddyy and oflsv == 'oflsa':
continue
else:
outs.append(tlrfile)
last_monddyy = monddyy
return list(reversed(outs))
def filter_known_bad(orbit_points):
"""
Filter some commands that are known to be incorrect.
"""
ops = orbit_points
bad = np.zeros(len(orbit_points), dtype=bool)
bad |= (ops['name'] == 'OORMPEN') & (ops['date'] == '2002:253:10:08:52.239')
bad |= (ops['name'] == 'OORMPEN') & (ops['date'] == '2004:010:10:00:00.000')
return orbit_points[~bad]
def get_orbit_points(tlrfiles):
"""
Get all orbit points from the timeline reports within the specified mission planning
path '' (all) or 'YYYY' (year) or YYYY/MONDDYY (load).
"""
orbit_points = []
# tlrfiles = prune_a_loads(tlrfiles)
for tlrfile in tlrfiles:
# Parse thing like this:
# 2012:025:21:22:21.732 EQF013M 1722 PROTON FLUX ENTRY FOR ENERGY 0 LEVEL ...
# 012345678901234567890123456789012345678901234567890123456789
logger.info('Getting points from {}'.format(tlrfile))
try:
fh = open(tlrfile, 'r', encoding='ascii', errors='ignore')
except IOError as err:
logger.warn(err)
continue
for line in fh:
if len(line) < 30 or line[:2] != ' 2':
continue
try:
date, name, orbit_num, descr = line.split(None, 3)
except ValueError:
continue
if name.startswith('OORMP'):
orbit_num = -1
descr = 'RADMON {}ABLE'.format('EN' if name.endswith('EN') else 'DIS')
elif line[23] in ' -':
continue
if 'DSS-' in name:
continue
if not re.match(r'\d{4}:\d{3}:\d{2}:\d{2}:\d{2}\.\d{3}', date):
logger.info('Failed for date: "{}"'.format(date))
continue
if not re.match(r'[A-Z]+', name):
logger.info('Failed for name: "{}"'.format(name))
continue
try:
orbit_num = int(orbit_num)
except TypeError:
logger.info('Failed for orbit_num: {}'.format(orbit_num))
continue
descr = descr.strip()
orbit_points.append((date, name, orbit_num, descr))
orbit_points = sorted(set(orbit_points), key=lambda x: x[0])
return orbit_points
def get_nearest_orbit_num(orbit_nums, idx, d_idx):
"""
Get the orbit number nearest to ``orbit_nums[idx]`` in direction ``d_idx``,
skipping values of -1 (from radmon commanding).
"""
while True:
idx += d_idx
if idx < 0 or idx >= len(orbit_nums):
raise NotFoundError('No nearest orbit num found')
if orbit_nums[idx] != -1:
break
return orbit_nums[idx], idx
def interpolate_orbit_points(orbit_points, name):
"""
Linearly interpolate across any gaps for ``name`` orbit_points.
"""
if len(orbit_points) == 0:
return []
ok = orbit_points['name'] == name
ops = orbit_points[ok]
# Get the indexes of missing orbits
idxs = np.flatnonzero(np.diff(ops['orbit_num']) > 1)
new_orbit_points = []
for idx in idxs:
op0 = ops[idx]
op1 = ops[idx + 1]
orb_num0 = op0['orbit_num']
orb_num1 = op1['orbit_num']
time0 = DateTime(op0['date']).secs
time1 = DateTime(op1['date']).secs
for orb_num in range(orb_num0 + 1, orb_num1):
time = time0 + (orb_num - orb_num0) / (orb_num1 - orb_num0) * (time1 - time0)
date = DateTime(time).date
new_orbit_point = (date, name, orb_num, op0['descr'])
logger.info('Adding new orbit point {}'.format(new_orbit_point))
new_orbit_points.append(new_orbit_point)
return new_orbit_points
def process_orbit_points(orbit_points):
"""
Take the raw orbit points (list of tuples) and do some processing:
- Remove duplicate events within 30 seconds of each other
- Fill in orbit number for RADMON enable / disable points
- Convert to a number structured array
Returns a numpy array with processed orbit points::
ORBIT_POINTS_DTYPE = [('date', 'U21'), ('name', 'U8'),
('orbit_num', 'i4'), ('descr', 'U50')]
"""
# Find neighboring pairs of orbit points that are identical except for date.
# If the dates are then within 180 seconds of each other then toss the first
# of the pair.
if len(orbit_points) == 0:
return np.array([], dtype=ORBIT_POINTS_DTYPE)
uniq_orbit_points = []
for op0, op1 in zip(orbit_points[:-1], orbit_points[1:]):
if op0[1:4] == op1[1:4]:
dt = (DateTime(op1[0]) - DateTime(op0[0])) * 86400
if dt < 180:
# logger.info('Removing duplicate orbit points:\n {}\n {}'
# .format(str(op0), str(op1)))
continue
uniq_orbit_points.append(op1)
uniq_orbit_points.append(orbit_points[-1])
orbit_points = uniq_orbit_points
# Convert to a numpy structured array
orbit_points = np.array(orbit_points, dtype=ORBIT_POINTS_DTYPE)
# Filter known bad points
orbit_points = filter_known_bad(orbit_points)
# For key orbit points linearly interpolate across gaps in orbit coverage.
new_ops = []
for name in ('EPERIGEE', 'EAPOGEE', 'EASCNCR'):
new_ops.extend(interpolate_orbit_points(orbit_points, name))
# Add a new orbit point for the ascending node EXIT which is the end of each orbit.
# This simplifies bookkeeping later.
for op in orbit_points[orbit_points['name'] == 'EASCNCR']:
new_ops.append((op['date'], 'XASCNCR', op['orbit_num'] - 1, op['descr'] + ' EXIT'))
# Add corresponding XASCNCR for any new EASCNCR points
for op in new_ops:
if op[1] == 'EASCNCR':
new_ops.append((op[0], 'XASCNCR', op[2] - 1, op[3] + ' EXIT'))
logger.info('Adding {} new orbit points'.format(len(new_ops)))
new_ops = np.array(new_ops, dtype=ORBIT_POINTS_DTYPE)
orbit_points = np.concatenate([orbit_points, new_ops])
orbit_points.sort(order=['date', 'orbit_num'])
# Fill in orbit number for RADMON enable / disable points
radmon_idxs = np.flatnonzero(orbit_points['orbit_num'] == -1)
orbit_nums = orbit_points['orbit_num']
for idx in radmon_idxs:
try:
prev_num, prev_idx = get_nearest_orbit_num(orbit_nums, idx, -1)
next_num, next_idx = get_nearest_orbit_num(orbit_nums, idx, +1)
except NotFoundError:
logger.info('No nearest orbit point for orbit_points[{}] (len={})'
.format(idx, len(orbit_points)))
else:
if prev_num == next_num:
orbit_nums[idx] = next_num
else:
logger.info('Unable to assign orbit num idx={} prev={} next={}'
.format(idx, prev_num, next_num))
logger.info(' {} {}'.format(prev_idx, orbit_points[prev_idx]))
logger.info(' * {} {}'.format(idx, orbit_points[idx]))
logger.info(' {} {}'.format(next_idx, orbit_points[next_idx]))
return orbit_points
def get_orbits(orbit_points):
    """
    Collate the orbit points into full orbits, with dates corresponding to start (ORBIT
    ASCENDING NODE CROSSING), stop, apogee, perigee, radzone start and radzone stop.
    Radzone is defined as the time covering perigee when radmon is disabled by command.
    This corresponds to the planned values and may differ from actual in the case of
    events that run SCS107 and prematurely disable RADMON.
    Returns a numpy structured array::
        ORBITS_DTYPE = [('orbit_num', 'i4'),
                        ('start', 'U21'), ('stop', 'U21'),
                        ('tstart', 'f8'), ('tstop', 'f8'), ('dur', 'f4'),
                        ('perigee', 'U21'), ('t_perigee', 'f8'), ('apogee', 'U21'),
                        ('start_radzone', 'U21'), ('stop_radzone', 'U21'),
                        ('dt_start_radzone', 'f4'), ('dt_stop_radzone', 'f4')]
    """
    def get_idx(ops, name):
        # Index (within `ops`) of the single orbit point named `name`;
        # anything other than exactly one match means a malformed orbit.
        ok = ops['name'] == name
        if np.sum(ok) != 1:
            raise NotFoundError('Expected one match for {} but found {} in orbit {}\n{}'
                                .format(name, np.sum(ok), orbit_num, ops))
        return np.flatnonzero(ok)[0]

    def get_date(ops, name):
        # Date string of the single orbit point named `name`.
        idx = get_idx(ops, name)
        return ops['date'][idx]

    def get_nearest_orbit_point(name, idx, d_idx):
        # Walk from `idx` in steps of d_idx (+1 or -1) until a point named
        # `name` is found; raise if we fall off either end of the array.
        # NOTE(review): appears unused within this function -- confirm
        # whether it is dead code before removing.
        while True:
            idx += d_idx
            if idx < 0 or idx >= len(orbit_points):
                raise NotFoundError('Skipping orbit {}: no nearest orbit point {} found'
                                    .format(orbit_num, name))
            if orbit_points['name'][idx] == name:
                break
        return orbit_points[idx]

    def find_radzone(idx_perigee):
        """
        Find the extent of the radiation zone, defined as the last time before perigee
        that RADMON is enabled until the first time after perigee that RADMON is enabled.
        """
        # Scan backward from perigee: remember each RADMON disable (OORMPDS)
        # and stop at the first RADMON enable (OORMPEN); the last disable
        # remembered is the radzone entry.
        idx = idx_perigee
        start_radzone = None
        while True:
            idx -= 1
            if idx < 0:
                raise NotFoundError('Did not find RADMON enable prior to {}'
                                    .format(orbit_points[idx_perigee]))
            if orbit_points['name'][idx] == 'OORMPDS':
                start_radzone = orbit_points['date'][idx]
            if orbit_points['name'][idx] == 'OORMPEN':
                if start_radzone is None:
                    raise NotFoundError('Found radmon enable before first disable at idx {}'
                                        .format(idx))
                break
        # Scan forward from perigee to the first RADMON enable: radzone exit.
        idx = idx_perigee
        while True:
            idx += 1
            if idx >= len(orbit_points):
                raise NotFoundError('Did not find RADMON enable after to {}'
                                    .format(str(orbit_points[idx_perigee])))
            if orbit_points['name'][idx] == 'OORMPEN':
                stop_radzone = orbit_points['date'][idx]
                break
        return start_radzone, stop_radzone

    # Copy orbit points and sort by orbit_num then date. This allows using
    # search_sorted to select orbit_points corresponding to each orbit. In
    # very rare cases (orbit 1448 I think), there are orbit_points that cross
    # orbit boundaries by a few seconds. This is related to the technique of
    # reading in every TLR to get maximal coverage of orbit points.
    orbit_points = orbit_points.copy()
    orbit_points.sort(order=['orbit_num', 'date'])
    orbit_nums = orbit_points['orbit_num']
    uniq_orbit_nums = sorted(set(orbit_nums[orbit_nums > 0]))
    orbits = []
    for orbit_num in uniq_orbit_nums:
        # Contiguous slice of points for this orbit (array is sorted by
        # orbit_num, so searchsorted gives the slice bounds directly).
        i0 = np.searchsorted(orbit_nums, orbit_num, side='left')
        i1 = np.searchsorted(orbit_nums, orbit_num, side='right')
        ops = orbit_points[i0: i1]
        try:
            if 'EASCNCR' not in ops['name'] or 'XASCNCR' not in ops['name']:
                raise NotFoundError('Skipping orbit {} incomplete'.format(orbit_num))
            start = get_date(ops, 'EASCNCR')
            stop = get_date(ops, 'XASCNCR')
            date_apogee = get_date(ops, 'EAPOGEE')
            date_perigee = get_date(ops, 'EPERIGEE')
            # Offset by i0 to index into the full orbit_points array, since
            # find_radzone may need to walk past this orbit's slice bounds.
            idx_perigee = get_idx(ops, 'EPERIGEE') + i0
            start_radzone, stop_radzone = find_radzone(idx_perigee)
        except NotFoundError as err:
            logger.info(err)
            continue
        else:
            # Radzone entry/exit offsets in seconds relative to perigee.
            dt_radzones = [(DateTime(date) - DateTime(date_perigee)) * 86400.0
                           for date in (start_radzone, stop_radzone)]
            tstart = DateTime(start).secs
            tstop = DateTime(stop).secs
            orbit = (orbit_num,
                     start, stop,
                     tstart, tstop, tstop - tstart,
                     date_perigee, DateTime(date_perigee).secs, date_apogee,
                     start_radzone, stop_radzone,
                     dt_radzones[0], dt_radzones[1])
            logger.info('get_orbits: Adding orbit {} {} {}'.format(orbit_num, start, stop))
            orbits.append(orbit)
    orbits = np.array(orbits, dtype=ORBITS_DTYPE)
    return orbits
def get_radzone_from_orbit(orbit):
    """
    Build a RadZone dict from one row of the orbits structured array.

    The dict carries the radzone entry/exit as date strings and as seconds,
    the radzone duration, plus the parent orbit's number and perigee date.
    """
    entry = DateTime(orbit['start_radzone'], format='date')
    exit_ = DateTime(orbit['stop_radzone'], format='date')
    t0 = entry.secs
    t1 = exit_.secs
    return {
        'start': entry.date,
        'stop': exit_.date,
        'tstart': t0,
        'tstop': t1,
        'dur': t1 - t0,
        'orbit_num': orbit['orbit_num'],
        'perigee': orbit['perigee'],
    }
| 710 | 19 | 103 |
5cd9f650b1f362a40ccd5a144d5d9a5ffbad63c2 | 496 | py | Python | proxy_info.py | leonardlinde/timeandtemp | 93e9ad16b2027fd9c261052c22a5977b86326550 | [
"Artistic-2.0"
] | null | null | null | proxy_info.py | leonardlinde/timeandtemp | 93e9ad16b2027fd9c261052c22a5977b86326550 | [
"Artistic-2.0"
] | null | null | null | proxy_info.py | leonardlinde/timeandtemp | 93e9ad16b2027fd9c261052c22a5977b86326550 | [
"Artistic-2.0"
] | null | null | null | #!/usr/bin/env python
"""
ZMQ proxy for info queues.
Publish Queue: tcp:5550
"""
import zmq
# these are the ports we are doing proxy for
proxies = ['5551']
if __name__ == '__main__':
main_proxy_info()
| 19.076923 | 44 | 0.645161 | #!/usr/bin/env python
"""
ZMQ proxy for info queues.
Publish Queue: tcp:5550
"""
import zmq
# these are the ports we are doing proxy for
proxies = ['5551']
def main_proxy_info():
    """Run a ZMQ XSUB/XPUB forwarder.

    Subscribes to every localhost port listed in the module-level
    ``proxies`` and re-publishes everything on tcp://*:5550.
    Blocks forever inside zmq.proxy().
    """
    context = zmq.Context()
    subscriber = context.socket(zmq.XSUB)
    for port in proxies:
        subscriber.connect("tcp://localhost:" + port)
    publisher = context.socket(zmq.XPUB)
    publisher.bind("tcp://*:5550")
    # zmq.proxy never returns; it shuttles messages between the sockets.
    zmq.proxy(subscriber, publisher)
if __name__ == '__main__':
main_proxy_info()
| 262 | 0 | 23 |
cbd374562181bcc96852448acfde95f41dd9a8a0 | 665 | py | Python | pipupgradeall.py | cxu-fork/pipupgradeall | fcca62aa0c334d9f9eca8323c7d17f228d937ee7 | [
"MIT"
] | null | null | null | pipupgradeall.py | cxu-fork/pipupgradeall | fcca62aa0c334d9f9eca8323c7d17f228d937ee7 | [
"MIT"
] | null | null | null | pipupgradeall.py | cxu-fork/pipupgradeall | fcca62aa0c334d9f9eca8323c7d17f228d937ee7 | [
"MIT"
] | null | null | null | import pkg_resources, subprocess
import os
def get_all_pythons():
    """Return every python/python3/python2 executable found on PATH.

    Relies on ``which -a`` (POSIX only).
    Based on https://stackoverflow.com/a/52123490
    """
    proc = subprocess.Popen(
        ['which', '-a', 'python', 'python3', 'python2'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, _stderr = proc.communicate()
    # `which` output ends with a newline, so the final split element is
    # empty; drop it.
    return stdout.decode('utf8').split('\n')[:-1]
| 28.913043 | 70 | 0.601504 | import pkg_resources, subprocess
import os
def get_all_pythons():
    """Return every python/python3/python2 executable found on PATH.

    Relies on ``which -a`` (POSIX only).
    Based on https://stackoverflow.com/a/52123490
    """
    proc = subprocess.Popen(
        ['which', '-a', 'python', 'python3', 'python2'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, _stderr = proc.communicate()
    # `which` output ends with a newline, so the final split element is
    # empty; drop it.
    return stdout.decode('utf8').split('\n')[:-1]
def _main():
    """Upgrade every installed distribution for each python interpreter found.

    On POSIX all interpreters on PATH are targeted; elsewhere (or when the
    lookup finds nothing) the bare ``python`` command is used.  The package
    list comes from the current interpreter's pkg_resources working set.
    """
    interpreters = get_all_pythons() if os.name == 'posix' else []
    if not interpreters:
        interpreters = ['python']
    for interpreter in interpreters:
        subprocess.run([interpreter, '-m', 'pip', 'install', '-U',
                        *[dist.project_name for dist in pkg_resources.working_set]])
| 306 | 0 | 23 |
f9dd03bbc7a06c0128823c1731550097c86554b1 | 3,934 | py | Python | data_augment.py | steven7woo/fair_regression_reduction | 7650cb6cc82a499555a42b9d12b7dde598a0dbeb | [
"MIT"
] | 9 | 2020-06-23T08:02:07.000Z | 2022-03-31T13:02:04.000Z | data_augment.py | steven7woo/fair_regression_reduction | 7650cb6cc82a499555a42b9d12b7dde598a0dbeb | [
"MIT"
] | null | null | null | data_augment.py | steven7woo/fair_regression_reduction | 7650cb6cc82a499555a42b9d12b7dde598a0dbeb | [
"MIT"
] | 4 | 2020-06-23T08:02:15.000Z | 2021-01-29T07:33:16.000Z | """
Augment the dataset according to the loss functions.
Input:
- a regression data set (x, a, y), which may be obtained using the data_parser
- loss function
- Theta, a set of thresholds in between 0 and 1
Output:
a weighted classification dataset (X, A, Y, W)
"""
import functools
import numpy as np
import pandas as pd
import data_parser as parser
from itertools import repeat
import itertools
_LOGISTIC_C = 5
def augment_data_ab(X, A, Y, Theta):
    """
    Cross the dataset with the threshold grid Theta.

    Every data point is duplicated once per threshold; each duplicate gets
    an extra 'theta' feature and a binary label indicating Y >= theta.
    For the absolute loss no reweighting is required, so every augmented
    example carries weight 1.
    TODO: might add the alpha/2 to match with the write-up
    """
    n = np.shape(X)[0]
    num_theta = len(Theta)
    # One stacked copy of the data per threshold value.
    X_aug = pd.concat(repeat(X, num_theta))
    A_aug = pd.concat(repeat(A, num_theta))
    Y_values = pd.concat(repeat(Y, num_theta))
    # n rows of Theta[0], then n rows of Theta[1], ... aligned with copies.
    thetas = [t for t in Theta for _ in range(n)]
    X_aug['theta'] = pd.Series(thetas, index=X_aug.index)
    # Binary label: does the regression target clear the threshold?
    Y_aug = (Y_values >= X_aug['theta']).map({True: 1, False: 0})
    new_index = range(n * num_theta)
    X_aug.index = new_index
    Y_aug.index = new_index
    A_aug.index = new_index
    W_aug = pd.Series(1, Y_aug.index)
    return X_aug, A_aug, Y_aug, W_aug
def augment_data_sq(x, a, y, Theta):
    """
    Cost-sensitive augmentation for the square loss.

    The data is duplicated once per threshold in Theta (assumed a uniform
    grid of width Theta[1]-Theta[0]).  Each duplicate gets a 'theta'
    feature; its weight is |L(theta + w/2, y) - L(theta - w/2, y)| for the
    square loss L, and its label is 1 exactly when that difference is
    negative.
    """
    n = np.shape(x)[0]
    num_theta = len(Theta)
    width = Theta[1] - Theta[0]
    X_aug = pd.concat(repeat(x, num_theta))
    A_aug = pd.concat(repeat(a, num_theta))
    Y_values = pd.concat(repeat(y, num_theta))
    thetas = [t for t in Theta for _ in range(n)]
    X_aug['theta'] = pd.Series(thetas, index=X_aug.index)
    new_index = range(n * num_theta)
    X_aug.index = new_index
    A_aug.index = new_index
    Y_values.index = new_index

    def cell_weight(theta, y_val):
        # Square-loss difference across one grid cell of width `width`.
        return (theta + width / 2 - y_val) ** 2 - (theta - width / 2 - y_val) ** 2

    W = cell_weight(X_aug['theta'], Y_values)
    Y_aug = 1 * (W < 0)
    W = abs(W)
    return X_aug, A_aug, Y_aug, W
def augment_data_logistic(x, a, y, Theta):
    """
    Cost-sensitive augmentation for the rescaled logistic loss.

    Assumes labels y are in {0, 1} and Theta is a uniform grid.  The data
    is duplicated once per threshold; each duplicate gets a 'theta'
    feature, a weight equal to the magnitude of the logistic-loss
    difference across one grid cell, and a label of 1 exactly when that
    difference is negative.
    """
    n = np.shape(x)[0]
    num_theta = len(Theta)
    width = Theta[1] - Theta[0]
    X_aug = pd.concat(repeat(x, num_theta))
    A_aug = pd.concat(repeat(a, num_theta))
    Y_values = pd.concat(repeat(y, num_theta))
    thetas = [t for t in Theta for _ in range(n)]
    X_aug['theta'] = pd.Series(thetas, index=X_aug.index)
    new_index = range(n * num_theta)
    X_aug.index = new_index
    A_aug.index = new_index
    Y_values.index = new_index

    def rescaled_logistic(y_hat, y_val):
        # Logistic loss with margin scaling _LOGISTIC_C, rescaled so the
        # worst-case loss equals 1.
        return np.log(1 + np.exp(-(_LOGISTIC_C) * (2 * y_val - 1) * (2 * y_hat - 1))) / (np.log(1 + np.exp(_LOGISTIC_C)))

    W = (rescaled_logistic(X_aug['theta'] + width / 2, Y_values)
         - rescaled_logistic(X_aug['theta'] - width / 2, Y_values))
    Y_aug = 1 * (W < 0)
    W = abs(W)
    return X_aug, A_aug, Y_aug, W
| 34.508772 | 164 | 0.649212 | """
Augment the dataset according to the loss functions.
Input:
- a regression data set (x, a, y), which may be obtained using the data_parser
- loss function
- Theta, a set of thresholds in between 0 and 1
Output:
a weighted classification dataset (X, A, Y, W)
"""
import functools
import numpy as np
import pandas as pd
import data_parser as parser
from itertools import repeat
import itertools
_LOGISTIC_C = 5
def augment_data_ab(X, A, Y, Theta):
    """
    Cross the dataset with the threshold grid Theta.

    Every data point is duplicated once per threshold; each duplicate gets
    an extra 'theta' feature and a binary label indicating Y >= theta.
    For the absolute loss no reweighting is required, so every augmented
    example carries weight 1.
    TODO: might add the alpha/2 to match with the write-up
    """
    n = np.shape(X)[0]
    num_theta = len(Theta)
    # One stacked copy of the data per threshold value.
    X_aug = pd.concat(repeat(X, num_theta))
    A_aug = pd.concat(repeat(A, num_theta))
    Y_values = pd.concat(repeat(Y, num_theta))
    # n rows of Theta[0], then n rows of Theta[1], ... aligned with copies.
    thetas = [t for t in Theta for _ in range(n)]
    X_aug['theta'] = pd.Series(thetas, index=X_aug.index)
    # Binary label: does the regression target clear the threshold?
    Y_aug = (Y_values >= X_aug['theta']).map({True: 1, False: 0})
    new_index = range(n * num_theta)
    X_aug.index = new_index
    Y_aug.index = new_index
    A_aug.index = new_index
    W_aug = pd.Series(1, Y_aug.index)
    return X_aug, A_aug, Y_aug, W_aug
def augment_data_sq(x, a, y, Theta):
    """
    Cost-sensitive augmentation for the square loss.

    The data is duplicated once per threshold in Theta (assumed a uniform
    grid of width Theta[1]-Theta[0]).  Each duplicate gets a 'theta'
    feature; its weight is |L(theta + w/2, y) - L(theta - w/2, y)| for the
    square loss L, and its label is 1 exactly when that difference is
    negative.
    """
    n = np.shape(x)[0]
    num_theta = len(Theta)
    width = Theta[1] - Theta[0]
    X_aug = pd.concat(repeat(x, num_theta))
    A_aug = pd.concat(repeat(a, num_theta))
    Y_values = pd.concat(repeat(y, num_theta))
    thetas = [t for t in Theta for _ in range(n)]
    X_aug['theta'] = pd.Series(thetas, index=X_aug.index)
    new_index = range(n * num_theta)
    X_aug.index = new_index
    A_aug.index = new_index
    Y_values.index = new_index

    def cell_weight(theta, y_val):
        # Square-loss difference across one grid cell of width `width`.
        return (theta + width / 2 - y_val) ** 2 - (theta - width / 2 - y_val) ** 2

    W = cell_weight(X_aug['theta'], Y_values)
    Y_aug = 1 * (W < 0)
    W = abs(W)
    return X_aug, A_aug, Y_aug, W
def augment_data_logistic(x, a, y, Theta):
    """
    Cost-sensitive augmentation for the rescaled logistic loss.

    Assumes labels y are in {0, 1} and Theta is a uniform grid.  The data
    is duplicated once per threshold; each duplicate gets a 'theta'
    feature, a weight equal to the magnitude of the logistic-loss
    difference across one grid cell, and a label of 1 exactly when that
    difference is negative.
    """
    n = np.shape(x)[0]
    num_theta = len(Theta)
    width = Theta[1] - Theta[0]
    X_aug = pd.concat(repeat(x, num_theta))
    A_aug = pd.concat(repeat(a, num_theta))
    Y_values = pd.concat(repeat(y, num_theta))
    thetas = [t for t in Theta for _ in range(n)]
    X_aug['theta'] = pd.Series(thetas, index=X_aug.index)
    new_index = range(n * num_theta)
    X_aug.index = new_index
    A_aug.index = new_index
    Y_values.index = new_index

    def rescaled_logistic(y_hat, y_val):
        # Logistic loss with margin scaling _LOGISTIC_C, rescaled so the
        # worst-case loss equals 1.
        return np.log(1 + np.exp(-(_LOGISTIC_C) * (2 * y_val - 1) * (2 * y_hat - 1))) / (np.log(1 + np.exp(_LOGISTIC_C)))

    W = (rescaled_logistic(X_aug['theta'] + width / 2, Y_values)
         - rescaled_logistic(X_aug['theta'] - width / 2, Y_values))
    Y_aug = 1 * (W < 0)
    W = abs(W)
    return X_aug, A_aug, Y_aug, W
| 0 | 0 | 0 |
8cda1db85b5e021df45ef0efebcd0d5ea4ba37db | 253 | py | Python | unileaks/task.py | zahessi/unileaks | 3ed2462e11f8e3decc64ed8faceee42438ec06ff | [
"MIT"
] | null | null | null | unileaks/task.py | zahessi/unileaks | 3ed2462e11f8e3decc64ed8faceee42438ec06ff | [
"MIT"
] | null | null | null | unileaks/task.py | zahessi/unileaks | 3ed2462e11f8e3decc64ed8faceee42438ec06ff | [
"MIT"
] | null | null | null |
assert unique('aa') == False
assert unique('abadkjsld') == False
assert unique('aa') == False
assert unique('fsl') == True | 25.3 | 38 | 0.632411 | def unique(st):
if not st: return False
for i, e in enumerate(st):
if e in st[i+1:]: return False
return True
assert unique('aa') == False
assert unique('abadkjsld') == False
assert unique('aa') == False
assert unique('fsl') == True | 108 | 0 | 22 |
35e018437aeddfe7aeac17401bf8b28c29cda12f | 10,304 | py | Python | asciidoxy/model.py | RerrerBuub/asciidoxy | 3402f37d59e30975e9919653465839e396f05513 | [
"Apache-2.0"
] | null | null | null | asciidoxy/model.py | RerrerBuub/asciidoxy | 3402f37d59e30975e9919653465839e396f05513 | [
"Apache-2.0"
] | null | null | null | asciidoxy/model.py | RerrerBuub/asciidoxy | 3402f37d59e30975e9919653465839e396f05513 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019-2021, TomTom (http://tomtom.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models of API reference elements."""
from abc import ABC
from typing import Dict, List, Optional
class ReferableElement(ModelBase):
"""Base class for all objects that can be referenced/linked to.
Attributes:
id: Unique identifier, if available.
name: Short name of the element.
full_name: Fully qualified name.
language: Language the element is written in.
kind: Kind of language element.
"""
id: Optional[str] = None
name: str = ""
full_name: str = ""
language: str
kind: str = ""
class TypeRef(ModelBase):
"""Reference to a type.
Attributes:
id: Unique identifier of the type.
name: Name of the type.
language: Language the type is written in.
namespace: Namespace, or package, from which the type is referenced.
kind: Kind of language element.
prefix: Qualifiers prefixing the type.
suffix: Qualifiers suffixing the type.
nested: List of nested types. None if no arguments, an empty list if zero arguments.
args: Arguments for function like types. None if no arguments, an empty list if zero
arguments.
returns: Return type in case of closure types.
prot: Protection level of the referenced type.
"""
id: Optional[str] = None
name: str
language: str
namespace: Optional[str] = None
kind: Optional[str] = None
prefix: Optional[str] = None
suffix: Optional[str] = None
nested: Optional[List["TypeRef"]] = None
args: Optional[List["Parameter"]] = None
returns: Optional["TypeRef"] = None
prot: Optional[str] = None
class Parameter(ModelBase):
"""Parameter description.
Representation of doxygen type paramType
Attributes:
type: Reference to the type of the parameter.
name: Name used for the parameter.
description: Explanation of the parameter.
default_value: Default value for the parameter.
prefix: Prefix for the parameter declaration.
"""
# doxygen based fields
type: Optional[TypeRef] = None
name: str = ""
description: str = ""
default_value: Optional[str] = None
prefix: Optional[str] = None
class ReturnValueList(ModelBase):
""" discrete return value
Attributes:
name: Value returned .
description: Explanation of the name/value.
"""
# doxygen based fields
name: str = ""
description: str = ""
class ReturnValue(ModelBase):
"""Value returned from a member.
Attributes:
type: Reference to the type of return value.
description: Explanation of the return value.
valuelist: List of possible return values
"""
type: Optional[TypeRef] = None
description: str = ""
valuelist: Optional[ReturnValueList] = None
class ThrowsClause(ModelBase):
"""Potential exception thrown from a member.
Attributes:
type: Reference to the type of the exception.
description: Explanation of when the exception is thrown.
"""
type: TypeRef
description: str = ""
class Compound(ReferableElement):
"""Compound object. E.g. a class or enum.
Representation of the doxygen type compound.
Attributes:
members: List of members in the compound.
params: List of parameters.
exceptions: List of exceptions that can be thrown.
returns: Return value.
include: Name of the include (file) required to use this compound.
namespace: Namespace, or package, the compound is contained in.
prot: Protection or visibility level.
definition: Full definition in source code.
args: All arguments as in source code.
initializer: Initial value assignment.
brief: Brief description of the compound.
description: Full description of the compound.
sections: Extra documentation sections with special meanings.
static: True if this is marked as static.
const: True if this is marked as const.
deleted: True if this is marked as deleted.
default: True if this is marked as default.
constexpr: True if this is marked as constexpr.
"""
members: List["Compound"]
params: List[Parameter]
exceptions: List[ThrowsClause]
returns: Optional[ReturnValue] = None
include: Optional[str] = None
namespace: Optional[str] = None
prot: str = ""
definition: str = ""
args: str = ""
initializer: str = ""
brief: str = ""
description: str = ""
sections: Dict[str, str]
static: bool = False
const: bool = False
deleted: bool = False
default: bool = False
constexpr: bool = False
| 35.047619 | 100 | 0.604911 | # Copyright (C) 2019-2021, TomTom (http://tomtom.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models of API reference elements."""
from abc import ABC
from typing import Dict, List, Optional
def json_repr(obj):
    """Return a JSON-serializable dict for *obj*: its instance attributes
    plus a ``__CLASS__`` entry holding the class name."""
    return {"__CLASS__": type(obj).__name__, **vars(obj)}
class ModelBase(ABC):
    """Base for model classes: keyword-only construction of declared attributes.

    Any keyword that does not match an existing (class-level) attribute
    raises TypeError, so misspelled attribute names fail fast.
    """

    def __init__(self, **kwargs):
        for name in kwargs:
            if not hasattr(self, name):
                raise TypeError(f"{self.__class__} has no attribute {name}.")
            setattr(self, name, kwargs[name])
class ReferableElement(ModelBase):
    """Base class for all objects that can be referenced/linked to.

    Attributes:
        id: Unique identifier, if available.
        name: Short name of the element.
        full_name: Fully qualified name.
        language: Language the element is written in.
        kind: Kind of language element.
    """
    id: Optional[str] = None
    name: str = ""
    full_name: str = ""
    language: str
    kind: str = ""

    def __init__(self, language: str = "", **kwargs):
        # All other attributes are set generically by ModelBase.__init__,
        # which rejects unknown keywords.
        super().__init__(**kwargs)
        self.language = language

    def __str__(self) -> str:
        # Multi-line debug representation listing the identity fields.
        text = (f"ReferableElement [\n id [{self.id}]\n name [{self.name}]\n "
                f"full name[{self.full_name}]\n lang [{self.language}]\n kind [{self.kind}]")
        return text + "]"

    def __eq__(self, other) -> bool:
        # Equality over the identity tuple; duck-typed, so any object with
        # matching attribute values compares equal.
        if other is None:
            return False
        return ((self.id, self.name, self.full_name, self.language,
                 self.kind) == (other.id, other.name, other.full_name, other.language, other.kind))

    def __hash__(self):
        # Keep in sync with __eq__ (same attribute tuple).
        return hash((self.id, self.name, self.full_name, self.language, self.kind))
class TypeRef(ModelBase):
    """Reference to a type.

    Attributes:
        id: Unique identifier of the type.
        name: Name of the type.
        language: Language the type is written in.
        namespace: Namespace, or package, from which the type is referenced.
        kind: Kind of language element.
        prefix: Qualifiers prefixing the type.
        suffix: Qualifiers suffixing the type.
        nested: List of nested types. None if no arguments, an empty list if zero arguments.
        args: Arguments for function like types. None if no arguments, an empty list if zero
            arguments.
        returns: Return type in case of closure types.
        prot: Protection level of the referenced type.
    """
    id: Optional[str] = None
    name: str
    language: str
    namespace: Optional[str] = None
    kind: Optional[str] = None
    prefix: Optional[str] = None
    suffix: Optional[str] = None
    nested: Optional[List["TypeRef"]] = None
    args: Optional[List["Parameter"]] = None
    returns: Optional["TypeRef"] = None
    prot: Optional[str] = None

    def __init__(self, language: str = "", name: str = "", **kwargs):
        super().__init__(**kwargs)
        self.language = language
        self.name = name

    def __str__(self) -> str:
        # Render as source-like text: prefix name<nested, ...>(args)suffix.
        nested_str = ""
        if self.nested:
            nested_str = f"< {', '.join(str(t) for t in self.nested)} >"
        args_str = ""
        if self.args:
            args_str = f"({', '.join(f'{p.type} {p.name}' for p in self.args)})"
        return f"{self.prefix or ''}{self.name}{nested_str}{args_str}{self.suffix or ''}"

    def resolve(self, reference_target: ReferableElement) -> None:
        # Bind this reference to a concrete element by copying its id/kind.
        self.id = reference_target.id
        self.kind = reference_target.kind

    def __eq__(self, other) -> bool:
        # Structural equality over all fields.  Note: no __hash__ is
        # defined, so TypeRef instances are unhashable (Python sets
        # __hash__ to None when __eq__ is overridden).
        if other is None:
            return False
        return ((self.id, self.name, self.language, self.namespace, self.kind, self.prefix,
                 self.suffix, self.nested, self.args, self.returns,
                 self.prot) == (other.id, other.name, other.language, other.namespace, other.kind,
                                other.prefix, other.suffix, other.nested, other.args, other.returns,
                                other.prot))
class Parameter(ModelBase):
    """Description of a single parameter (doxygen type ``paramType``).

    Attributes:
        type: Reference to the type of the parameter.
        name: Name used for the parameter.
        description: Explanation of the parameter.
        default_value: Default value for the parameter.
        prefix: Prefix for the parameter declaration.
    """
    type: Optional[TypeRef] = None
    name: str = ""
    description: str = ""
    default_value: Optional[str] = None
    prefix: Optional[str] = None

    def __eq__(self, other) -> bool:
        if other is None:
            return False
        mine = (self.type, self.name, self.description, self.default_value, self.prefix)
        theirs = (other.type, other.name, other.description, other.default_value, other.prefix)
        return mine == theirs
class ReturnValueList(ModelBase):
    """A single named return value.

    Attributes:
        name: Value returned.
        description: Explanation of the name/value.
    """
    name: str = ""
    description: str = ""

    def __eq__(self, other) -> bool:
        if other is None:
            return False
        return (self.name, self.description) == (other.name, other.description)
class ReturnValue(ModelBase):
    """Value returned from a member.

    Attributes:
        type: Reference to the type of the return value.
        description: Explanation of the return value.
        valuelist: List of possible return values.
    """
    type: Optional[TypeRef] = None
    description: str = ""
    valuelist: Optional[ReturnValueList] = None

    def __eq__(self, other) -> bool:
        # NOTE(review): valuelist is excluded from equality -- confirm this
        # is intentional.
        if other is None:
            return False
        return (self.type, self.description) == (other.type, other.description)
class ThrowsClause(ModelBase):
    """Potential exception thrown from a member.

    Attributes:
        type: Reference to the type of the exception.
        description: Explanation of when the exception is thrown.
    """
    type: TypeRef
    description: str = ""

    def __init__(self, language: str = "", type: Optional[TypeRef] = None, **kwargs):
        super().__init__(**kwargs)
        # Fall back to an empty TypeRef in the given language when no
        # explicit type is supplied.
        if type is None:
            self.type = TypeRef(language)
        else:
            self.type = type

    def __eq__(self, other) -> bool:
        if other is None:
            return False
        return (self.type, self.description) == (other.type, other.description)
class Compound(ReferableElement):
    """Compound object. E.g. a class or enum.

    Representation of the doxygen type compound.

    Attributes:
        members: List of members in the compound.
        params: List of parameters.
        exceptions: List of exceptions that can be thrown.
        returns: Return value.
        include: Name of the include (file) required to use this compound.
        namespace: Namespace, or package, the compound is contained in.
        prot: Protection or visibility level.
        definition: Full definition in source code.
        args: All arguments as in source code.
        initializer: Initial value assignment.
        brief: Brief description of the compound.
        description: Full description of the compound.
        sections: Extra documentation sections with special meanings.
        static: True if this is marked as static.
        const: True if this is marked as const.
        deleted: True if this is marked as deleted.
        default: True if this is marked as default.
        constexpr: True if this is marked as constexpr.
    """
    members: List["Compound"]
    params: List[Parameter]
    exceptions: List[ThrowsClause]
    returns: Optional[ReturnValue] = None
    include: Optional[str] = None
    namespace: Optional[str] = None
    prot: str = ""
    definition: str = ""
    args: str = ""
    initializer: str = ""
    brief: str = ""
    description: str = ""
    sections: Dict[str, str]
    static: bool = False
    const: bool = False
    deleted: bool = False
    default: bool = False
    constexpr: bool = False

    def __init__(self,
                 language: str = "",
                 *,
                 members: Optional[List["Compound"]] = None,
                 params: Optional[List[Parameter]] = None,
                 exceptions: Optional[List[ThrowsClause]] = None,
                 sections: Optional[Dict[str, str]] = None,
                 **kwargs):
        # Mutable containers are created per instance here (never as class
        # defaults) so they are not shared between Compound objects.
        super().__init__(language, **kwargs)
        self.members = members or []
        self.params = params or []
        self.exceptions = exceptions or []
        self.sections = sections or {}

    def __str__(self):
        return f"Compound [{super().__str__()}]"

    def __eq__(self, other) -> bool:
        # Identity-tuple equality from ReferableElement plus all
        # Compound-specific fields.
        if other is None:
            return False
        return (super().__eq__(other)
                and (self.members, self.params, self.exceptions, self.returns, self.include,
                     self.namespace, self.prot, self.definition, self.args, self.initializer,
                     self.brief, self.description, self.sections, self.static, self.const,
                     self.deleted, self.default, self.constexpr)
                == (other.members, other.params, other.exceptions, other.returns, other.include,
                    other.namespace, other.prot, other.definition, other.args, other.initializer,
                    other.brief, other.description, other.sections, other.static, other.const,
                    other.deleted, other.default, other.constexpr))

    def __hash__(self):
        # Overriding __eq__ would otherwise set __hash__ to None; reuse the
        # ReferableElement hash (id/name/full_name/language/kind only).
        return super().__hash__()
| 4,236 | 0 | 531 |
327e26f3a8e2db00df03eb9d007c2805c3966eea | 6,616 | py | Python | webapp/new_jp_webhook.py | motionbug/JAWA | 5b525b02cf3eb123c0e9d0e54286c3c92135b1c5 | [
"MIT"
] | 1 | 2019-11-20T15:22:02.000Z | 2019-11-20T15:22:02.000Z | webapp/new_jp_webhook.py | motionbug/JAWA | 5b525b02cf3eb123c0e9d0e54286c3c92135b1c5 | [
"MIT"
] | null | null | null | webapp/new_jp_webhook.py | motionbug/JAWA | 5b525b02cf3eb123c0e9d0e54286c3c92135b1c5 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# encoding: utf-8
import os
import json
from time import sleep
import signal
import requests
import re
from werkzeug import secure_filename
from flask import (Flask, request, render_template,
session, redirect, url_for, escape,
send_from_directory, Blueprint, abort)
new_jp = Blueprint('webhooks', __name__)
@new_jp.route('/webhooks', methods=['GET','POST'])
| 31.061033 | 105 | 0.659915 | #!/usr/bin/python
# encoding: utf-8
import os
import json
from time import sleep
import signal
import requests
import re
from werkzeug import secure_filename
from flask import (Flask, request, render_template,
session, redirect, url_for, escape,
send_from_directory, Blueprint, abort)
new_jp = Blueprint('webhooks', __name__)
@new_jp.route('/webhooks', methods=['GET','POST'])
def webhooks():
exists = os.path.isfile('/usr/local/jawa/webapp/server.json')
if exists == False:
return render_template('setup.html',
setup="setup",
username=str(escape(session['username'])))
exists = os.path.isfile('/usr/local/jawa/jpwebhooks.json')
if exists == False:
data = []
with open('/usr/local/jawa/jpwebhooks.json', 'w') as outfile:
json.dump(data, outfile)
if 'username' in session:
# response = requests.get(session['url'] + '/JSSResource/computergroups',
# auth=(session['username'], session['password']),
# headers={'Accept': 'application/json'})
# response_json = response.json()
# computer_groups = response_json['computer_groups']
# found_computer_groups = []
# for computer_group in computer_groups:
# if computer_group['is_smart'] is True:
# found_computer_groups.append(computer_group)
# print found_computer_groups
# response = requests.get(session['url'] + '/JSSResource/mobiledevicegroups',
# auth=(session['username'], session['password']),
# headers={'Accept': 'application/json'})
# response_json = response.json()
# mobile_device_groups = response_json['mobile_device_groups']
# found_mobile_device_groups = []
# for mobile_device_group in mobile_device_groups:
# if mobile_device_group['is_smart'] is True:
# found_mobile_device_groups.append(mobile_device_group)
# print found_mobile_device_groups
if request.method == 'POST':
if request.form.get('webhookname') != '':
check = 0
if ' ' in request.form.get('webhookname'):
error_message = "Single-string name only."
return render_template('error.html',
error_message=error_message,
error="error",
username=str(escape(session['username'])))
with open('/etc/webhook.conf') as json_file:
data = json.load(json_file)
x = 0
id_list = []
while True:
try:
id_list.append(data[x]['id'])
x += 1
str_error = None
except Exception as str_error:
pass
if str_error:
sleep(2)
break
else:
continue
for id_name in id_list:
if id_name == request.form.get('webhookname'):
check = 1
else:
check = 0
if check is not 0:
error_message = "Name already exists!"
return render_template('error.html',
error_message=error_message,
error="error",
username=str(escape(session['username'])))
with open('/usr/local/jawa/webapp/server.json') as json_file:
data = json.load(json_file)
server_address = data[0]['jawa_address']
if not os.path.isdir('/usr/local/jawa/'):
os.mkdir('/usr/local/jawa/')
if not os.path.isdir('/usr/local/jawa/scripts'):
os.mkdir('/usr/local/jawa/scripts')
os.chdir('/usr/local/jawa/scripts')
f = request.files['script']
if ' ' in f.filename:
f.filename = f.filename.replace(" ", "-")
f.save(secure_filename(f.filename))
old_script_file = "/usr/local/jawa/scripts/{}".format(f.filename)
new_script_file = "/usr/local/jawa/scripts/{}-{}".format(request.form.get('webhookname'), f.filename)
os.rename(old_script_file, new_script_file)
hooks_file = '/etc/webhook.conf'
jp_hooks = '/usr/local/jawa/jp_webhooks.json'
data = json.load(open(hooks_file))
new_id = request.form.get('new_webhookname')
os.chmod(new_script_file, 0755)
if type(data) is dict:
data = [data]
data.append({"id": request.form.get('webhookname'),
"execute-command": new_script_file,
"command-working-directory": "/",
"pass-arguments-to-command":[{"source": "entire-payload"}]})
with open(hooks_file, 'w') as outfile:
json.dump(data, outfile)
hooks_file = '/etc/webhook.conf'
data = json.load(open(hooks_file))
data[:] = [d for d in data if d.get('id') != 'none' ]
with open(hooks_file, 'w') as outfile:
json.dump(data, outfile)
if (
request.form.get('event') == 'SmartGroupMobileDeviceMembershipChange' or
request.form.get('event') == 'SmartGroupComputerMembershipChange'):
smart_group_notice = "NOTICE! This webhook is not yet enabled."
smart_group_instructions = "Specify desired Smart Group and enable: "
webhook_enablement = 'false'
else:
smart_group_instructions = ""
webhook_enablement = 'true'
data = '<webhook>'
data += '<name>'
data += request.form.get('webhookname')
data += '</name><enabled>' + webhook_enablement + '</enabled><url>'
data += "{}/hooks/{}".format(server_address, request.form.get('webhookname'))
data += '</url><content_type>application/json</content_type>'
data += '<event>{}</event>'.format(request.form.get('event'))
data += '</webhook>'
full_url = session['url'] + '/JSSResource/webhooks/id/0'
response = requests.post(full_url,
auth=(session['username'], session['password']),
headers={'Content-Type': 'application/xml'}, data=data)
result = re.search('<id>(.*)</id>', response.text)
new_link = "{}/webhooks.html?id={}".format(session['url'],result.group(1))
data = json.load(open('/usr/local/jawa/jp_webhooks.json'))
data.append({"url": str(session['url']),
"username": str(session['username']),
"name": request.form.get('webhookname'),
"event": request.form.get('event'),
"script": new_script_file,
"description": request.form.get('description')})
with open('/usr/local/jawa/jp_webhooks.json', 'w') as outfile:
json.dump(data, outfile)
new_here = "Link"
new_webhook = "New webhook created."
return render_template('success.html',
webhooks="success",
smart_group_instructions=smart_group_instructions,
smart_group_notice=smart_group_notice,
new_link=new_link,
new_here=new_here,
new_webhook=new_webhook,
username=str(escape(session['username'])))
else:
return render_template('webhooks.html',
webhooks="webhooks",
url=session['url'],
# found_mobile_device_groups=found_mobile_device_groups,
# found_computer_groups=found_computer_groups,
username=str(escape(session['username'])))
else:
return render_template('home.html', login="false")
| 6,211 | 0 | 22 |
cc7ed11991087ba45f1beeb9889cef1936cabc0b | 7,743 | py | Python | zerovl/core/initial.py | zerovl/ZeroVL | b48794e74fed0f80adf5fa3010481064411c4182 | [
"MIT"
] | 14 | 2022-01-19T08:08:29.000Z | 2022-03-10T05:55:36.000Z | zerovl/core/initial.py | zerovl/ZeroVL | b48794e74fed0f80adf5fa3010481064411c4182 | [
"MIT"
] | 2 | 2022-02-25T14:35:47.000Z | 2022-03-01T03:11:13.000Z | zerovl/core/initial.py | zerovl/ZeroVL | b48794e74fed0f80adf5fa3010481064411c4182 | [
"MIT"
] | 3 | 2022-02-09T01:23:11.000Z | 2022-02-15T11:45:30.000Z | import os
import random
import numpy as np
import torch
import torch.distributed as distributed
from torch.nn import SyncBatchNorm
from zerovl.models import PIPELINE
from zerovl.utils import ENV, build_from_cfg
from zerovl.utils import (
is_list_of,
logger
)
try:
from apex.parallel import convert_syncbn_model
except ImportError:
logger.warning(f'=> ImportError: can not import apex, '
f'distribute training with apex will raise error')
__all__ = ['init_device', 'init_resume', 'init_model']
def _load_checkpoint(src_path: str, raise_exception: bool = True):
r"""
Load checkpoint from local
"""
if not isinstance(src_path, str):
return None
if os.path.exists(src_path):
return torch.load(src_path, map_location=ENV.device)
| 41.18617 | 116 | 0.665375 | import os
import random
import numpy as np
import torch
import torch.distributed as distributed
from torch.nn import SyncBatchNorm
from zerovl.models import PIPELINE
from zerovl.utils import ENV, build_from_cfg
from zerovl.utils import (
is_list_of,
logger
)
try:
from apex.parallel import convert_syncbn_model
except ImportError:
logger.warning(f'=> ImportError: can not import apex, '
f'distribute training with apex will raise error')
__all__ = ['init_device', 'init_resume', 'init_model']
def _load_checkpoint(src_path: str, raise_exception: bool = True):
r"""
Load checkpoint from local
"""
if not isinstance(src_path, str):
return None
if os.path.exists(src_path):
return torch.load(src_path, map_location=ENV.device)
def init_device(cfg):
# Get the Context instance and record the distribution mode
ENV.dist_mode = cfg.dist.name
# Random seed setting
if cfg.seed is not None:
seed = cfg.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(cfg.seed)
torch.backends.cudnn.deterministic = True
# Distributed scheme initialization
torch.backends.cudnn.benchmark = True
if cfg.dist.name in ['apex', 'torch']:
torch.cuda.set_device(ENV.local_rank)
distributed.init_process_group(backend='nccl', init_method='env://')
ENV.rank = distributed.get_rank()
ENV.size = distributed.get_world_size()
ENV.device = torch.device(f'cuda:{ENV.local_rank}')
logger.info(f'=> Device: running distributed training with '
f'{cfg.dist.name} DDP, world size:{ENV.size}')
elif cfg.dist.name is None:
assert ENV.local_rank == 0, '--np must be 1 when cfg.dist.name is None'
torch.cuda.set_device(0)
ENV.device = torch.device(f'cuda:0')
logger.info('=> Device: running on single process GPU, distributed training disabled')
# Legality check for batch_size dividable of ENV.size
if cfg.data.batch_size is not None:
assert cfg.data.batch_size % ENV.size == 0
if cfg.data.batch_size_val is not None:
assert cfg.data.batch_size_val % ENV.size == 0
# PyTorch version record
logger.info(f'=> PyTorch Version: {torch.__version__}\n')
def init_resume(cfg):
checkpoint = None
if cfg.resume is not None:
checkpoint = _load_checkpoint(cfg.resume, raise_exception=not cfg.auto_resume)
if checkpoint is not None:
logger.info(f'=> Model resume: loaded from {cfg.resume}\n')
return checkpoint
def init_model(cfg, resume_checkpoint=None):
# Build model
logger.info(f'=> Model: {cfg.model.name} with params {cfg.model.param}')
if cfg.model.backbone.name is not None:
logger.info(f' - Backbone: {cfg.model.backbone.name} with params {cfg.model.backbone.param}')
if cfg.model.head.name is not None:
logger.info(f' - Head: {cfg.model.head.name} with params {cfg.model.head.param}')
if cfg.model.criterion.name is not None:
logger.info(f' - Criterion: {cfg.model.criterion.name} with params {cfg.model.criterion.param}. '
f'Other settings: prob_type={cfg.model.criterion.prob_type}; '
f'cls_loss_type={cfg.model.criterion.cls_loss_type}; '
f'reg_loss_type={cfg.model.criterion.reg_loss_type}')
model = build_from_cfg(cfg.model.name, cfg, PIPELINE)
# Load pretrained model if resume checkpoint doesn't exist
if resume_checkpoint is None:
pretrained_model_loading(cfg, model)
# Convert BN into SyncBN if necessary
sync_bn = cfg.model.param.get('sync_bn', False)
if sync_bn:
if cfg.dist.name == 'apex':
model = convert_syncbn_model(model)
elif cfg.dist.name == 'torch':
model = SyncBatchNorm.convert_sync_batchnorm(model)
# Resume model if necessary
if resume_checkpoint is not None:
state_dict = resume_checkpoint.get('state_dict', resume_checkpoint)
model.load_state_dict(state_dict)
return model.to(ENV.device)
def pretrained_model_loading(cfg, model):
# load the checkpoint of the pretrained model
checkpoint = _load_checkpoint(cfg.model.pretrained)
if checkpoint is None:
return
# extract state_dict from the checkpoint
src_state_dict = checkpoint.get('state_dict', checkpoint)
# remove the avoid_prefix and avoid_keys from state_dict only when pretrained_strict is False
pretrained_strict = cfg.model.pretrained_strict
if pretrained_strict is False:
avoid_prefix = cfg.model.pretrained_avoid_prefix
if avoid_prefix is not None:
if isinstance(avoid_prefix, str):
avoid_prefix = [avoid_prefix]
assert is_list_of(avoid_prefix, str)
for key in list(src_state_dict.keys()):
if key.startswith(tuple(avoid_prefix)):
src_state_dict.pop(key)
logger.info(f'=> Pretrained: avoid_prefix [{", ".join(avoid_prefix)}] removed from state_dict if exist')
avoid_keys = cfg.model.pretrained_avoid_keys
if avoid_keys is not None:
if isinstance(avoid_keys, str):
avoid_keys = [avoid_keys]
assert is_list_of(avoid_keys, str)
for key in list(src_state_dict.keys()):
if key in avoid_keys:
src_state_dict.pop(key)
logger.info(f'=> Pretrained: avoid_keys [{", ".join(avoid_keys)}] removed from state_dict if exist')
# model mapped loading with target_prefix
target_prefix = cfg.model.pretrained_target_prefix
if target_prefix is None:
keys = model.load_state_dict(src_state_dict, strict=pretrained_strict)
elif target_prefix == 'auto':
# TODO: the 'auto' target_prefix is an risky patch to deal with the compatibility
# between the old `backbone_only` model where backbone and FC heads are
# directly saved without prefix. Collate the early version of pretrained
# model and remove this mode if early zerovl versions are no longer supported.
prefix_mapping = dict()
for key in model.state_dict().keys():
prefix, name = key.split('.', 1)
if name in prefix_mapping:
raise ValueError(f'pretrained loading onto auto prefix failed. Both {prefix} '
f'and {prefix_mapping[name]} prefix has sub-module {name}')
prefix_mapping[name] = prefix
for name in list(src_state_dict.keys()):
if name in prefix_mapping:
src_state_dict[f'{prefix_mapping[name]}.{name}'] = src_state_dict[name]
del src_state_dict[name]
keys = model.load_state_dict(src_state_dict, strict=pretrained_strict)
logger.info(f'=> Pretrained: the prefix is automatically filled if necessary')
else:
sub_model = model
for p in target_prefix.split('.'):
assert hasattr(sub_model, p), f'Illegal pretrained_target_prefix {target_prefix}'
sub_model = getattr(sub_model, p)
keys = sub_model.load_state_dict(src_state_dict, strict=pretrained_strict)
logger.info(f'=> Pretrained: the state_dict is loaded to model.{target_prefix}')
if len(keys.missing_keys) > 0:
logger.info(f"=> Pretrained: missing_keys [{', '.join(keys.missing_keys)}]")
if len(keys.unexpected_keys) > 0:
logger.info(f"=> Pretrained: unexpected_keys [{', '.join(keys.unexpected_keys)}]")
logger.info(f'=> Pretrained: loaded with strict={pretrained_strict} from {cfg.model.pretrained}\n')
| 6,844 | 0 | 92 |
c57bd23194af74cd729e526403ce1a9ad5f1615c | 3,587 | py | Python | data_facility_admin/test_serializers.py | NYU-CI/dfadmin | 071f38c62aea8ef8bf4ae82dbd672694e719b9bf | [
"CC0-1.0"
] | 1 | 2021-04-08T05:22:35.000Z | 2021-04-08T05:22:35.000Z | data_facility_admin/test_serializers.py | NYU-CI/dfadmin | 071f38c62aea8ef8bf4ae82dbd672694e719b9bf | [
"CC0-1.0"
] | 8 | 2019-08-05T18:16:07.000Z | 2019-10-29T18:42:53.000Z | data_facility_admin/test_serializers.py | NYU-CI/dfadmin | 071f38c62aea8ef8bf4ae82dbd672694e719b9bf | [
"CC0-1.0"
] | 2 | 2019-09-11T15:24:32.000Z | 2020-01-08T20:34:05.000Z | ''' tests for the serializers '''
# from django.test import TestCase
from unittest import TestCase, main
from .serializers import _get_attr_value, UserLDAPSerializer
from .models import User
from django.conf import settings
LDAP_USER_EXAMPLE = ('uid=chiahsuanyang,ou=People,dc=adrf,dc=info',
{
'gidNumber': ['502'],
'givenName': ['Chia-Hsuan'],
'homeDirectory': ['/nfshome/chiahsuanyang'],
'loginShell': ['/bin/bash'],
'objectClass': ['inetOrgPerson', 'posixAccount', 'top', 'adrfPerson'],
'uid': ['chiahsuanyang'],
'uidNumber': ['1039'],
'mail': ['cy1138@nyu.edu'],
'sn': ['Yang'],
'cn': ['Chia-Hsuan Yang'],
}
)
LDAP_PROJECT_EXAMPLE = ('cn=project-Food Analysis,ou=Projects,dc=adrf,dc=info',
{
'objectClass': ['posixGroup', 'groupOfMembers', 'adrfProject'],
'summary': ['required field'],
'name': ['Food Analysis'],
'gidNumber': ['7003'],
'creationdate': ['20161130221426Z'],
'cn': ['project-Food Analysis'],
'memberUid': ['rafael', 'will'],
}
)
LDAP_DFROLE_EXAMPLE = ('cn=annotation-reviewers,ou=Groups,dc=adrf,dc=info',
{
'objectClass': ['posixGroup', 'groupOfMembers'],
'gidNumber': ['5004'],
'cn': ['annotation-reviewers'],
'memberUid': ['rafael', 'will'],
}
)
class LdapSerializersTests(TestCase):
''' Tests for ldap serializers '''
if __name__ == '__main__':
main()
| 42.2 | 116 | 0.529969 | ''' tests for the serializers '''
# from django.test import TestCase
from unittest import TestCase, main
from .serializers import _get_attr_value, UserLDAPSerializer
from .models import User
from django.conf import settings
LDAP_USER_EXAMPLE = ('uid=chiahsuanyang,ou=People,dc=adrf,dc=info',
{
'gidNumber': ['502'],
'givenName': ['Chia-Hsuan'],
'homeDirectory': ['/nfshome/chiahsuanyang'],
'loginShell': ['/bin/bash'],
'objectClass': ['inetOrgPerson', 'posixAccount', 'top', 'adrfPerson'],
'uid': ['chiahsuanyang'],
'uidNumber': ['1039'],
'mail': ['cy1138@nyu.edu'],
'sn': ['Yang'],
'cn': ['Chia-Hsuan Yang'],
}
)
LDAP_PROJECT_EXAMPLE = ('cn=project-Food Analysis,ou=Projects,dc=adrf,dc=info',
{
'objectClass': ['posixGroup', 'groupOfMembers', 'adrfProject'],
'summary': ['required field'],
'name': ['Food Analysis'],
'gidNumber': ['7003'],
'creationdate': ['20161130221426Z'],
'cn': ['project-Food Analysis'],
'memberUid': ['rafael', 'will'],
}
)
LDAP_DFROLE_EXAMPLE = ('cn=annotation-reviewers,ou=Groups,dc=adrf,dc=info',
{
'objectClass': ['posixGroup', 'groupOfMembers'],
'gidNumber': ['5004'],
'cn': ['annotation-reviewers'],
'memberUid': ['rafael', 'will'],
}
)
class LdapSerializersTests(TestCase):
''' Tests for ldap serializers '''
def setUp(self):
self.user_dc = User(first_name='Daniel',
last_name='Castellani',
email='daniel.castellani@nyu.edu',
ldap_id=1000,
ldap_name='danielcastellani')
self.ldap_user = UserLDAPSerializer.dumps(self.user_dc)
# print('ldap_user=', self.ldap_user)
def test_user_ldap_serializer_dump_uid(self):
self.assertEqual(self.user_dc.username, self.ldap_user[1]['uid'][0])
def test_user_ldap_serializer_dump_sn(self):
self.assertEqual(self.user_dc.last_name, self.ldap_user[1]['sn'][0])
def test_user_ldap_serializer_dump_cn(self):
self.assertEqual(self.user_dc.full_name(), self.ldap_user[1]['cn'][0])
def test_user_ldap_serializer_dump_mail(self):
self.assertEqual(self.user_dc.email, self.ldap_user[1]['mail'][0])
def test_user_ldap_serializer_dump_given_name(self):
self.assertEqual(self.user_dc.first_name, self.ldap_user[1]['givenName'][0])
def test_user_ldap_serializer_dump_home_dir(self):
self.assertEqual('/nfshome/' + self.user_dc.username,
self.ldap_user[1]['homeDirectory'][0])
def test_user_ldap_serializer_dump_gidNumber(self):
self.assertEqual(self.user_dc.ldap_id, int(self.ldap_user[1]['gidNumber'][0]))
def test_user_ldap_serializer_dump_dn(self):
self.assertEqual('uid=%s,ou=People,%s' % (self.user_dc.ldap_name, settings.LDAP_BASE_DN), self.ldap_user[0])
if __name__ == '__main__':
main()
| 1,300 | 0 | 243 |
52448c51322f77371ccc497045df46eda63d3b7d | 595 | py | Python | listBoxGui.py | sairam1318/GUI | bd1892a2162993129008fccae0bfccfc11a90f2d | [
"Unlicense"
] | null | null | null | listBoxGui.py | sairam1318/GUI | bd1892a2162993129008fccae0bfccfc11a90f2d | [
"Unlicense"
] | null | null | null | listBoxGui.py | sairam1318/GUI | bd1892a2162993129008fccae0bfccfc11a90f2d | [
"Unlicense"
] | null | null | null | from tkinter import *
import tkinter.messagebox as tmsg
root = Tk()
root.title("Place Order")
root.geometry("400x400")
scrollbar = Scrollbar(root)
scrollbar.pack(side= RIGHT, fill = Y)
lbx = Listbox(root, yscrollcommand = scrollbar.set)
# lbx.insert(1, "firstItem")
# lbx.insert(2, "secondItem")
# lbx.insert(3, "thirdItem")
# lbx.insert(4, "fourthItem")
# lbx.insert(ACTIVE, 0)
for i in range(300):
lbx.insert(END, "Item {}".format(str(i)))
i += 1
scrollbar.config(command = lbx.yview)
#to attach the scroll bar to the list, we need to configure it
lbx.pack(fill = BOTH)
root.mainloop() | 24.791667 | 63 | 0.705882 | from tkinter import *
import tkinter.messagebox as tmsg
root = Tk()
root.title("Place Order")
root.geometry("400x400")
scrollbar = Scrollbar(root)
scrollbar.pack(side= RIGHT, fill = Y)
lbx = Listbox(root, yscrollcommand = scrollbar.set)
# lbx.insert(1, "firstItem")
# lbx.insert(2, "secondItem")
# lbx.insert(3, "thirdItem")
# lbx.insert(4, "fourthItem")
# lbx.insert(ACTIVE, 0)
for i in range(300):
lbx.insert(END, "Item {}".format(str(i)))
i += 1
scrollbar.config(command = lbx.yview)
#to attach the scroll bar to the list, we need to configure it
lbx.pack(fill = BOTH)
root.mainloop() | 0 | 0 | 0 |
b186f2bb37c1c02a3541b691a40fae430a1eb611 | 733 | py | Python | src/examples_in_my_book/general_problems/dicts/delete_duplicate_char_str.py | lucidrohit/Over-100-Exercises-Python-and-Algorithms | 62345c7d7c9cc2269f240d134189645fc96c3e80 | [
"MIT"
] | 2 | 2022-01-07T11:46:32.000Z | 2022-02-24T08:44:31.000Z | src/examples_in_my_book/general_problems/dicts/delete_duplicate_char_str.py | lucidrohit/Over-100-Exercises-Python-and-Algorithms | 62345c7d7c9cc2269f240d134189645fc96c3e80 | [
"MIT"
] | null | null | null | src/examples_in_my_book/general_problems/dicts/delete_duplicate_char_str.py | lucidrohit/Over-100-Exercises-Python-and-Algorithms | 62345c7d7c9cc2269f240d134189645fc96c3e80 | [
"MIT"
] | 1 | 2021-10-01T15:35:05.000Z | 2021-10-01T15:35:05.000Z | #!/usr/bin/python3
# mari von steinkirch @2013
# steinkirch at gmail
import string
def delete_unique_word(str1):
''' find and delete all the duplicate characters in a string '''
# create ordered dict
table_c = { key : 0 for key in string.ascii_lowercase}
# fill the table with the chars in the string
for i in str1:
table_c[i] += 1
# scan the table to find times chars > 1
for key, value in table_c.items():
if value > 1:
str1 = str1.replace(key, "")
return str1
if __name__ == '__main__':
test_delete_unique_word()
| 22.212121 | 69 | 0.626194 | #!/usr/bin/python3
# mari von steinkirch @2013
# steinkirch at gmail
import string
def delete_unique_word(str1):
''' find and delete all the duplicate characters in a string '''
# create ordered dict
table_c = { key : 0 for key in string.ascii_lowercase}
# fill the table with the chars in the string
for i in str1:
table_c[i] += 1
# scan the table to find times chars > 1
for key, value in table_c.items():
if value > 1:
str1 = str1.replace(key, "")
return str1
def test_delete_unique_word():
str1 = "google"
assert(delete_unique_word(str1) == 'le')
print('Tests passed!')
if __name__ == '__main__':
test_delete_unique_word()
| 101 | 0 | 23 |
727c4486f448ccb721c96d168d510c9534143afd | 3,034 | py | Python | solve.py | jnobre/lxmls-toolkit-2017 | 528da3377723cb9a048d13ac80786408d16df88d | [
"MIT"
] | null | null | null | solve.py | jnobre/lxmls-toolkit-2017 | 528da3377723cb9a048d13ac80786408d16df88d | [
"MIT"
] | null | null | null | solve.py | jnobre/lxmls-toolkit-2017 | 528da3377723cb9a048d13ac80786408d16df88d | [
"MIT"
] | null | null | null | '''
This script solves the exercises of days that have been completed. Jut in case
the students did not made it by their own.
'''
import sys
import urllib2
def download_and_replace(url, target_file):
'''
Downloads file through http with progress report. Version by PabloG
obtained in stack overflow
http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http
-using-python
'''
# Try to connect to the internet
try:
u = urllib2.urlopen(url)
except Exception, err:
if getattr(err, 'code', None):
print "\nError: %s Could not get file %s\n" % (err.code, url)
else:
# A generic error is most possibly no available internet
print "\nCould not connect to the internet\n"
exit(1)
with open(target_file, 'wb') as f:
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl,
file_size_dl*100./file_size)
status = status + chr(8)*(len(status)+1)
# CONFIGURATION
master_URL = 'https://github.com/LxMLS/lxmls-toolkit/raw/master/'
labs_URL = 'https://github.com/LxMLS/lxmls-toolkit/raw/student/'
# FILES TO BE REPLACED FOR THAT DAY
code_day = {
'day1': ['lxmls/classifiers/multinomial_naive_bayes.py',
'lxmls/classifiers/perceptron.py'],
'day2': ['lxmls/sequences/hmm.py',
'lxmls/sequences/sequence_classification_decoder.py'],
'day3': ['lxmls/sequences/structured_perceptron.py'],
'day4': ['lxmls/parsing/dependency_decoder.py'],
'day5': ['lxmls/deep_learning/mlp.py'],
'day6': ['lxmls/deep_learning/rnn.py']
}
# ARGUMENT PROCESSING
if ((len(sys.argv) == 2) and
(sys.argv[1] in ['day0', 'day1', 'day2', 'day3', 'day4', 'day5', 'day6'])):
undo_flag = 0
day = sys.argv[1]
elif ((len(sys.argv) == 3) and
(sys.argv[1] == '--undo') and
(sys.argv[2] in ['day0', 'day1', 'day2', 'day3', 'day4', 'day5', 'day6'])):
undo_flag = 1
day = sys.argv[2]
else:
print ("\nUsage:\n"
"\n"
"python solve.py day<day number> # To solve exercise \n"
"\n"
"python solve.py --undo day<day number> # To undo solve\n"
"" )
exit(1)
# CHECK THERE ARE FILES TO SAVE
if day in code_day:
print "\nsolving %s" % day
else:
print "\nTheres actually no code to solve on %s!\n" % day
exit()
# OVERWRITE THE FILES TO SOLVE THEM
for pyfile in code_day[day]:
if undo_flag:
download_and_replace(labs_URL + pyfile, pyfile)
print "Unsolving: %s" % pyfile
else:
download_and_replace(master_URL + pyfile, pyfile)
print "Solving: %s" % pyfile
| 31.936842 | 82 | 0.584377 | '''
This script solves the exercises of days that have been completed. Jut in case
the students did not made it by their own.
'''
import sys
import urllib2
def download_and_replace(url, target_file):
'''
Downloads file through http with progress report. Version by PabloG
obtained in stack overflow
http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http
-using-python
'''
# Try to connect to the internet
try:
u = urllib2.urlopen(url)
except Exception, err:
if getattr(err, 'code', None):
print "\nError: %s Could not get file %s\n" % (err.code, url)
else:
# A generic error is most possibly no available internet
print "\nCould not connect to the internet\n"
exit(1)
with open(target_file, 'wb') as f:
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl,
file_size_dl*100./file_size)
status = status + chr(8)*(len(status)+1)
# CONFIGURATION
master_URL = 'https://github.com/LxMLS/lxmls-toolkit/raw/master/'
labs_URL = 'https://github.com/LxMLS/lxmls-toolkit/raw/student/'
# FILES TO BE REPLACED FOR THAT DAY
code_day = {
'day1': ['lxmls/classifiers/multinomial_naive_bayes.py',
'lxmls/classifiers/perceptron.py'],
'day2': ['lxmls/sequences/hmm.py',
'lxmls/sequences/sequence_classification_decoder.py'],
'day3': ['lxmls/sequences/structured_perceptron.py'],
'day4': ['lxmls/parsing/dependency_decoder.py'],
'day5': ['lxmls/deep_learning/mlp.py'],
'day6': ['lxmls/deep_learning/rnn.py']
}
# ARGUMENT PROCESSING
if ((len(sys.argv) == 2) and
(sys.argv[1] in ['day0', 'day1', 'day2', 'day3', 'day4', 'day5', 'day6'])):
undo_flag = 0
day = sys.argv[1]
elif ((len(sys.argv) == 3) and
(sys.argv[1] == '--undo') and
(sys.argv[2] in ['day0', 'day1', 'day2', 'day3', 'day4', 'day5', 'day6'])):
undo_flag = 1
day = sys.argv[2]
else:
print ("\nUsage:\n"
"\n"
"python solve.py day<day number> # To solve exercise \n"
"\n"
"python solve.py --undo day<day number> # To undo solve\n"
"" )
exit(1)
# CHECK THERE ARE FILES TO SAVE
if day in code_day:
print "\nsolving %s" % day
else:
print "\nTheres actually no code to solve on %s!\n" % day
exit()
# OVERWRITE THE FILES TO SOLVE THEM
for pyfile in code_day[day]:
if undo_flag:
download_and_replace(labs_URL + pyfile, pyfile)
print "Unsolving: %s" % pyfile
else:
download_and_replace(master_URL + pyfile, pyfile)
print "Solving: %s" % pyfile
| 0 | 0 | 0 |
2fda3ae9e6226aa99b7882ca2e12b4c4a56e15b4 | 1,928 | py | Python | config.py | SuYehTarn/CS651-Group8-Feedback_Forum | d1163442aea81214c4dfa8de1d353ec719bfa7ab | [
"MIT"
] | null | null | null | config.py | SuYehTarn/CS651-Group8-Feedback_Forum | d1163442aea81214c4dfa8de1d353ec719bfa7ab | [
"MIT"
] | null | null | null | config.py | SuYehTarn/CS651-Group8-Feedback_Forum | d1163442aea81214c4dfa8de1d353ec719bfa7ab | [
"MIT"
] | null | null | null | """Module of app configuration"""
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
"""Class of configuration"""
# Form
SECRET_KEY = os.environ.get('SECRET_KEY') or \
b'\x876\xeb_\xc9<?\xb8r\xcak\r[\xa0\xf4\xfe\xdbP\xae\x17\x15S\xa5^'
# Mail
MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in ['true', 'on', '1']
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FEEDBACK_FORUM_MAIL_SUBJECT_PREFIX = '[Feedback Forum]'
FEEDBACK_FORUM_MAIL_SENDER = 'Feedback Forum'
# DataBase
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Administrator
ADMIN_NAME = os.environ.get('ADMIN_NAME')
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD')
# Review Statuses
REVIEW_STATUSES = [
'PENDING',
'PROCESSING',
'CLOSED',
]
@staticmethod
def init_app(app):
"""Initialize the app with this configuration"""
class DevelopmentConfig(Config):
"""Class of configuration on developing"""
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
"""Class of configuration on testing"""
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite://'
WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
"""Class of configuration on production"""
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| 28.352941 | 88 | 0.664938 | """Module of app configuration"""
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
"""Class of configuration"""
# Form
SECRET_KEY = os.environ.get('SECRET_KEY') or \
b'\x876\xeb_\xc9<?\xb8r\xcak\r[\xa0\xf4\xfe\xdbP\xae\x17\x15S\xa5^'
# Mail
MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in ['true', 'on', '1']
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FEEDBACK_FORUM_MAIL_SUBJECT_PREFIX = '[Feedback Forum]'
FEEDBACK_FORUM_MAIL_SENDER = 'Feedback Forum'
# DataBase
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Administrator
ADMIN_NAME = os.environ.get('ADMIN_NAME')
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD')
# Review Statuses
REVIEW_STATUSES = [
'PENDING',
'PROCESSING',
'CLOSED',
]
@staticmethod
def init_app(app):
"""Initialize the app with this configuration"""
class DevelopmentConfig(Config):
"""Class of configuration on developing"""
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
"""Class of configuration on testing"""
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite://'
WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
"""Class of configuration on production"""
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| 0 | 0 | 0 |
1510b6a1d7776841a261956bbaff5f23788763b2 | 1,356 | py | Python | setup.py | zhu327/doge | 60991418a0cfedc5b65d1e20cb5c11ec741bd021 | [
"Apache-2.0"
] | 163 | 2018-03-19T07:58:07.000Z | 2022-03-25T02:25:20.000Z | setup.py | zhu327/doge | 60991418a0cfedc5b65d1e20cb5c11ec741bd021 | [
"Apache-2.0"
] | 5 | 2018-12-03T03:32:09.000Z | 2021-03-31T08:38:06.000Z | setup.py | zhu327/doge | 60991418a0cfedc5b65d1e20cb5c11ec741bd021 | [
"Apache-2.0"
] | 34 | 2018-03-26T05:30:38.000Z | 2022-03-10T15:49:31.000Z | # coding: utf8
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
setup(
name="dogerpc",
version="0.1.4",
description="A RPC Framework",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
author="Timmy",
author_email="zhu327@gmail.com",
url="http://github.com/zhu327/doge",
packages=["doge"] + [f"{'doge'}.{i}" for i in find_packages("doge")],
license="Apache License 2.0",
keywords=["rpc", "etcd", "messagepack", "gevent", "microservices"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
install_requires=["mprpc", "pyformance", "python-etcd",],
tests_require=["pytest",],
cmdclass={"test": PyTest},
)
| 28.851064 | 73 | 0.631268 | # coding: utf8
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates test running to pytest."""
    def finalize_options(self):
        """Finish option setup and mark the command as having a suite."""
        TestCommand.finalize_options(self)
        self.test_args = []  # extra CLI arguments forwarded to pytest.main
        self.test_suite = True  # tells setuptools a test suite exists
    def run_tests(self):
        """Run pytest and exit the process with its return code."""
        # Imported lazily so setup.py still works without pytest installed
        # for commands other than ``test``.
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)
# Distribution metadata for the doge RPC framework; the ``test`` command
# is overridden by the PyTest shim defined above via ``cmdclass``.
setup(
    name="dogerpc",
    version="0.1.4",
    description="A RPC Framework",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    author="Timmy",
    author_email="zhu327@gmail.com",
    url="http://github.com/zhu327/doge",
    # f"{'doge'}.{i}" is simply "doge." + i: fully qualify sub-packages.
    packages=["doge"] + [f"{'doge'}.{i}" for i in find_packages("doge")],
    license="Apache License 2.0",
    keywords=["rpc", "etcd", "messagepack", "gevent", "microservices"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
    ],
    install_requires=["mprpc", "pyformance", "python-etcd",],
    tests_require=["pytest",],
    cmdclass={"test": PyTest},
)
)
| 198 | 5 | 76 |
60f5a16ffdb91d357d35d22f435ec0fc68cde35e | 965 | py | Python | sleap/io/format/text.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | [
"BSD-3-Clause-Clear"
] | 156 | 2020-05-01T18:43:43.000Z | 2022-03-25T10:31:18.000Z | sleap/io/format/text.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | [
"BSD-3-Clause-Clear"
] | 299 | 2020-04-20T16:37:52.000Z | 2022-03-31T23:54:48.000Z | sleap/io/format/text.py | preeti98/sleap | 203c3a03c0c54f8dab242611d9a8d24595e98081 | [
"BSD-3-Clause-Clear"
] | 41 | 2020-05-14T15:25:21.000Z | 2022-03-25T12:44:54.000Z | """
Adaptor for reading and writing any generic text file.
This is a good example of a very simple adaptor class.
"""
from .adaptor import Adaptor, SleapObjectType
from .filehandle import FileHandle
| 20.978261 | 56 | 0.634197 | """
Adaptor for reading and writing any generic text file.
This is a good example of a very simple adaptor class.
"""
from .adaptor import Adaptor, SleapObjectType
from .filehandle import FileHandle
class TextAdaptor(Adaptor):
    """Adaptor that reads and writes arbitrary plain-text files."""
    @property
    def handles(self):
        """Kind of SLEAP object this adaptor handles (miscellaneous)."""
        return SleapObjectType.misc
    @property
    def default_ext(self) -> str:
        """Default file extension, without the leading dot."""
        return "txt"
    @property
    def all_exts(self) -> list:
        """All file extensions this adaptor accepts."""
        return ["txt", "log"]
    @property
    def name(self) -> str:
        """Human-readable name of the adaptor."""
        return "Text file"
    def can_read_file(self, file: FileHandle) -> bool:
        """Any file can be read as text, so always True."""
        return True
    def can_write_filename(self, filename: str) -> bool:
        """Any filename is writable, so always True."""
        return True
    def does_read(self) -> bool:
        """This adaptor supports reading."""
        return True
    def does_write(self) -> bool:
        """This adaptor supports writing."""
        return True
    def read(self, file: FileHandle, *args, **kwargs):
        """Return the full text content of *file*."""
        return file.text
    def write(self, filename: str, source_object: str):
        """Write *source_object* to *filename* as text."""
        with open(filename, "w") as f:
            f.write(source_object)
| 409 | 331 | 23 |
7f7c346e86720cd9bd906e6e836249ca0cd3f0ca | 10,237 | py | Python | data/extractBench.py | tharrry/sphincsplus | 7f01ec4b24ae38ed386098aa4b68d60252778d83 | [
"CC0-1.0"
] | null | null | null | data/extractBench.py | tharrry/sphincsplus | 7f01ec4b24ae38ed386098aa4b68d60252778d83 | [
"CC0-1.0"
] | null | null | null | data/extractBench.py | tharrry/sphincsplus | 7f01ec4b24ae38ed386098aa4b68d60252778d83 | [
"CC0-1.0"
] | null | null | null |
#rearrangeNumber(kg, s, v)
#
#
#(kg, s, v) = extractNumber('ref.txt')
#(kgs_c, kgr_c, ss_c, sr_c, vs_c, vr_c) = rearrangeNumber(kg, s, v)
#kg_matrix = addToMatrix(kg_matrix, kgs_c, kgr_c, 0)
#s_matrix = addToMatrix(s_matrix, ss_c, sr_c, 0)
#v_matrix = addToMatrix(v_matrix, vs_c, vr_c, 0)
#
#(kg, s, v) = extractNumber('refx4.txt')
#(kgs_c, kgr_c, ss_c, sr_c, vs_c, vr_c) = rearrangeNumber(kg, s, v)
#kg_matrix = addToMatrix(kg_matrix, kgs_c, kgr_c, 1)
#s_matrix = addToMatrix(s_matrix, ss_c, sr_c, 1)
#v_matrix = addToMatrix(v_matrix, vs_c, vr_c, 1)
#
#(kg, s, v) = extractNumber('transpose2.txt')
#(kgs_c, kgr_c, ss_c, sr_c, vs_c, vr_c) = rearrangeNumber(kg, s, v)
#kg_matrix = addToMatrix(kg_matrix, kgs_c, kgr_c, 2)
#s_matrix = addToMatrix(s_matrix, ss_c, sr_c, 2)
#v_matrix = addToMatrix(v_matrix, vs_c, vr_c, 2)
#
#for m in kg_matrix:
# print(m)
#for m in s_matrix:
# print(m)
#for m in v_matrix:
# print(m)
if __name__ == "__main__":
main() | 44.316017 | 117 | 0.489108 | class Results:
def __init__(self, filename):
(self.kg, self.s, self.v) = extractNumber (filename)
self.name = filename.split('.')[0]
def __str__(self):
print("")
print("benchmark results of {}:".format(self.name))
print("")
print("key gen:")
print(' 126f robust {}'.format(self.kg[0]))
print(' 126f simple {}'.format(self.kg[1]))
print(' 192f robust {}'.format(self.kg[2]))
print(' 192f simple {}'.format(self.kg[3]))
print(' 256f robust {}'.format(self.kg[4]))
print(' 265f simple {}'.format(self.kg[5]))
print(' 126s robust {}'.format(self.kg[6]))
print(' 126s simple {}'.format(self.kg[7]))
print(' 192s robust {}'.format(self.kg[8]))
print(' 192s simple {}'.format(self.kg[9]))
print(' 256s robust {}'.format(self.kg[10]))
print(' 265s simple {}'.format(self.kg[11]))
print("signing:")
print(' 126f robust {}'.format(self.s[0]))
print(' 126f simple {}'.format(self.s[1]))
print(' 192f robust {}'.format(self.s[2]))
print(' 192f simple {}'.format(self.s[3]))
print(' 256f robust {}'.format(self.s[4]))
print(' 265f simple {}'.format(self.s[5]))
print(' 126s robust {}'.format(self.s[6]))
print(' 126s simple {}'.format(self.s[7]))
print(' 192s robust {}'.format(self.s[8]))
print(' 192s simple {}'.format(self.s[9]))
print(' 256s robust {}'.format(self.s[10]))
print(' 265s simple {}'.format(self.s[11]))
print("verifying:")
print(' 126f robust {}'.format(self.v[0]))
print(' 126f simple {}'.format(self.v[1]))
print(' 192f robust {}'.format(self.v[2]))
print(' 192f simple {}'.format(self.v[3]))
print(' 256f robust {}'.format(self.v[4]))
print(' 265f simple {}'.format(self.v[5]))
print(' 126s robust {}'.format(self.v[6]))
print(' 126s simple {}'.format(self.v[7]))
print(' 192s robust {}'.format(self.v[8]))
print(' 192s simple {}'.format(self.v[9]))
print(' 256s robust {}'.format(self.v[10]))
print(' 265s simple {}'.format(self.v[11]))
return ""
    def adapt_layout(self):
        # NOTE(review): rearrangeNumber() returns a 6-tuple
        # (kg_simple, kg_robust, s_simple, s_robust, v_simple, v_robust),
        # so unpacking it into three names raises ValueError at runtime.
        # The method is never called in this script; confirm the intended
        # layout (e.g. keep the six lists separately) before fixing.
        (self.kg, self.s, self.v) = rearrangeNumber (self.kg, self.s, self.v)
def compare (self, other):
print("comparing")
print(' {}'.format(self.name))
print("to")
print(' {}'.format(other.name))
print('')
print("key gen:")
print(' 256f simple {}'.format( round((1 - (other.kg[5] / self.kg[5]) ) * 100, 1)))
print(' 256s simple {}'.format( round((1 - (other.kg[11] / self.kg[11]) ) * 100, 1)))
print(' 192f simple {}'.format( round((1 - (other.kg[3] / self.kg[3]) ) * 100, 1)))
print(' 192s simple {}'.format( round((1 - (other.kg[9] / self.kg[9]) ) * 100, 1)))
print(' 128f simple {}'.format( round((1 - (other.kg[1] / self.kg[1]) ) * 100, 1)))
print(' 128s simple {}'.format( round((1 - (other.kg[7] / self.kg[7]) ) * 100, 1)))
print(' 256f robust {}'.format( round((1 - (other.kg[4] / self.kg[4]) ) * 100, 1)))
print(' 256s robust {}'.format( round((1 - (other.kg[10] / self.kg[10]) ) * 100, 1)))
print(' 192f robust {}'.format( round((1 - (other.kg[2] / self.kg[2]) ) * 100, 1)))
print(' 192s robust {}'.format( round((1 - (other.kg[8] / self.kg[8]) ) * 100, 1)))
print(' 128f robust {}'.format( round((1 - (other.kg[0] / self.kg[0]) ) * 100, 1)))
print(' 128s robust {}'.format( round((1 - (other.kg[6] / self.kg[6]) ) * 100, 1)))
print('')
print("signing:")
print(' 256f simple {}'.format( round((1 - (other.s[5] / self.s[5]) ) * 100, 1)))
print(' 256s simple {}'.format( round((1 - (other.s[11] / self.s[11]) ) * 100, 1)))
print(' 192f simple {}'.format( round((1 - (other.s[3] / self.s[3]) ) * 100, 1)))
print(' 192s simple {}'.format( round((1 - (other.s[9] / self.s[9]) ) * 100, 1)))
print(' 128f simple {}'.format( round((1 - (other.s[1] / self.s[1]) ) * 100, 1)))
print(' 128s simple {}'.format( round((1 - (other.s[7] / self.s[7]) ) * 100, 1)))
print(' 256f robust {}'.format( round((1 - (other.s[4] / self.s[4]) ) * 100, 1)))
print(' 256s robust {}'.format( round((1 - (other.s[10] / self.s[10]) ) * 100, 1)))
print(' 192f robust {}'.format( round((1 - (other.s[2] / self.s[2]) ) * 100, 1)))
print(' 192s robust {}'.format( round((1 - (other.s[8] / self.s[8]) ) * 100, 1)))
print(' 128f robust {}'.format( round((1 - (other.s[0] / self.s[0]) ) * 100, 1)))
print(' 128s robust {}'.format( round((1 - (other.s[6] / self.s[6]) ) * 100, 1)))
print('')
print("verifying:")
print(' 256f simple {}'.format( round((1 - (other.v[5] / self.v[5]) ) * 100, 1)))
print(' 256s simple {}'.format( round((1 - (other.v[11] / self.v[11]) ) * 100, 1)))
print(' 192f simple {}'.format( round((1 - (other.v[3] / self.v[3]) ) * 100, 1)))
print(' 192s simple {}'.format( round((1 - (other.v[9] / self.v[9]) ) * 100, 1)))
print(' 128f simple {}'.format( round((1 - (other.v[1] / self.v[1]) ) * 100, 1)))
print(' 128s simple {}'.format( round((1 - (other.v[7] / self.v[7]) ) * 100, 1)))
print(' 256f robust {}'.format( round((1 - (other.v[4] / self.v[4]) ) * 100, 1)))
print(' 256s robust {}'.format( round((1 - (other.v[10] / self.v[10]) ) * 100, 1)))
print(' 192f robust {}'.format( round((1 - (other.v[2] / self.v[2]) ) * 100, 1)))
print(' 192s robust {}'.format( round((1 - (other.v[8] / self.v[8]) ) * 100, 1)))
print(' 128f robust {}'.format( round((1 - (other.v[0] / self.v[0]) ) * 100, 1)))
print(' 128s robust {}'.format( round((1 - (other.v[6] / self.v[6]) ) * 100, 1)))
def extractNumber(filename):
    """Pull cycle counts out of a benchmark log.

    The log is treated as a stream of 15-line records.  Within each
    record, line 4 holds the key-generation count, line 6 the signing
    count and line 9 the verification count; on those lines the
    second-to-last whitespace-separated token is the number (commas are
    thousands separators and are stripped).

    Returns a ``(keygen, signing, verifying)`` tuple of int lists with
    one entry per record, in file order.
    """
    keygen, signing, verifying = [], [], []

    def grab(text):
        # Second-to-last token, with "," thousands separators removed.
        return int(text.split()[-2].replace(',', ''))

    pos = 0  # position within the current 15-line record
    with open(filename) as log:
        for line in log:
            if pos == 4:
                keygen.append(grab(line))
            elif pos == 6:
                signing.append(grab(line))
            elif pos == 9:
                verifying.append(grab(line))
            pos = 0 if pos == 14 else pos + 1
    return (keygen, signing, verifying)
#rearrangeNumber(kg, s, v)
def addToMatrix(matrix, simple, robust, offset):
    """Place *simple* at ``matrix[offset]`` and *robust* three slots later.

    The 6-slot matrix layout is [simple x3 | robust x3], so *offset*
    selects the column (0-2).  Returns the mutated *matrix* for chaining.
    """
    matrix[offset], matrix[offset + 3] = simple, robust
    return matrix
def rearrangeNumber(kg, s, v):
    """Split each 12-entry series into simple/robust halves and reorder.

    Entries alternate robust (even indices) and simple (odd indices).
    Each 6-entry half is then reordered with the fixed permutation
    ``(2, 5, 1, 4, 0, 3)``, taking the values from file order into
    report order.

    Returns ``(kg_simple, kg_robust, s_simple, s_robust,
    v_simple, v_robust)``.
    """
    order = (2, 5, 1, 4, 0, 3)

    def halves(series):
        robust = series[0::2]   # even positions
        simple = series[1::2]   # odd positions
        return ([simple[i] for i in order],
                [robust[i] for i in order])

    result = []
    for series in (kg, s, v):
        simple, robust = halves(series)
        result.append(simple)
        result.append(robust)
    return tuple(result)
def main():
    """Load the three benchmark logs, print each, and compare every pair."""
    loaded = []
    for log_name in ('ref.txt', 'refx4.txt', 'transpose2.txt'):
        result = Results(log_name)
        print(result)  # same load-then-print interleaving as before
        loaded.append(result)
    ref, refx4, neon = loaded
    for baseline, contender in ((ref, refx4), (ref, neon), (refx4, neon)):
        baseline.compare(contender)
#kg_matrix = [None] * 6
#s_matrix = [None] * 6
#v_matrix = [None] * 6
#
#
#(kg, s, v) = extractNumber('ref.txt')
#(kgs_c, kgr_c, ss_c, sr_c, vs_c, vr_c) = rearrangeNumber(kg, s, v)
#kg_matrix = addToMatrix(kg_matrix, kgs_c, kgr_c, 0)
#s_matrix = addToMatrix(s_matrix, ss_c, sr_c, 0)
#v_matrix = addToMatrix(v_matrix, vs_c, vr_c, 0)
#
#(kg, s, v) = extractNumber('refx4.txt')
#(kgs_c, kgr_c, ss_c, sr_c, vs_c, vr_c) = rearrangeNumber(kg, s, v)
#kg_matrix = addToMatrix(kg_matrix, kgs_c, kgr_c, 1)
#s_matrix = addToMatrix(s_matrix, ss_c, sr_c, 1)
#v_matrix = addToMatrix(v_matrix, vs_c, vr_c, 1)
#
#(kg, s, v) = extractNumber('transpose2.txt')
#(kgs_c, kgr_c, ss_c, sr_c, vs_c, vr_c) = rearrangeNumber(kg, s, v)
#kg_matrix = addToMatrix(kg_matrix, kgs_c, kgr_c, 2)
#s_matrix = addToMatrix(s_matrix, ss_c, sr_c, 2)
#v_matrix = addToMatrix(v_matrix, vs_c, vr_c, 2)
#
#for m in kg_matrix:
# print(m)
#for m in s_matrix:
# print(m)
#for m in v_matrix:
# print(m)
if __name__ == "__main__":
main() | 8,926 | -7 | 236 |
2cecc5f76b3bc4957e1b411f13df9e0537dd69d1 | 753 | py | Python | macro/tutorial/bundles/03_bundle_parameters.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | 5 | 2019-10-14T01:06:57.000Z | 2021-02-02T16:33:06.000Z | macro/tutorial/bundles/03_bundle_parameters.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | null | null | null | macro/tutorial/bundles/03_bundle_parameters.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import load
from gna.bundle import execute_bundle
from gna.configurator import NestedDict, uncertaindict, uncertain
from gna.env import env
#
# Bundle configuration
#
cfg = NestedDict(
bundle = dict(
name='parameters',
version='ex01',
),
pars = uncertaindict(
[
( 'par_a', (1.0, 1.0, 'percent') ),
( 'par_b', (2.0, 0.01, 'relative') ),
( 'par_c', (3.0, 0.5, 'absolute') ),
( 'group.a', (1.0, 'free' ) ),
( 'group.b', (1.0, 'fixed', 'Labeled fixed parameter' ) )
],
),
)
#
# Execute bundle configuration
#
b1 = execute_bundle(cfg)
#
# Print the parameters
#
env.globalns.printparameters(labels=True)
| 20.916667 | 69 | 0.549801 | #!/usr/bin/env python
import load
from gna.bundle import execute_bundle
from gna.configurator import NestedDict, uncertaindict, uncertain
from gna.env import env
#
# Bundle configuration
#
# Each ``pars`` entry appears to be (central value, [uncertainty,] mode):
# 'percent'/'relative'/'absolute' describe the uncertainty of the value,
# while 'free'/'fixed' take no uncertainty and may carry an optional
# label string -- confirm against gna.configurator.uncertaindict.
cfg = NestedDict(
    bundle = dict(
        name='parameters',
        version='ex01',
    ),
    pars = uncertaindict(
        [
            ( 'par_a', (1.0, 1.0, 'percent') ),
            ( 'par_b', (2.0, 0.01, 'relative') ),
            ( 'par_c', (3.0, 0.5, 'absolute') ),
            ( 'group.a', (1.0, 'free' ) ),
            ( 'group.b', (1.0, 'fixed', 'Labeled fixed parameter' ) )
        ],
    ),
)
#
# Execute bundle configuration
#
b1 = execute_bundle(cfg)
#
# Print the parameters
#
env.globalns.printparameters(labels=True)
| 0 | 0 | 0 |
4729aae3bcd366dd822d5493901808145392f045 | 361 | py | Python | team10_project/message/urls.py | jhkuang11/UniTrade | 5f68b853926e167936b58c8543b8f95ebd6f5211 | [
"MIT"
] | null | null | null | team10_project/message/urls.py | jhkuang11/UniTrade | 5f68b853926e167936b58c8543b8f95ebd6f5211 | [
"MIT"
] | 10 | 2020-06-05T19:42:26.000Z | 2022-03-11T23:38:35.000Z | team10_project/message/urls.py | Davisoye/Unitrade | 99428f3712221b2b641a58f1e064d8a3126885a5 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.contrib import admin
from message import views
# template tagging for relative url
app_name = 'message'
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^inbox$', views.inbox, name='inbox'),
url(r'^outbox$', views.outbox, name='outbox'),
url(r'^compose$', views.compose, name='compose'),
]
| 24.066667 | 53 | 0.68144 | from django.conf.urls import url
from django.contrib import admin
from message import views
# template tagging for relative url
# URL namespace: these routes reverse as 'message:home', 'message:inbox', etc.
app_name = 'message'
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^inbox$', views.inbox, name='inbox'),
    url(r'^outbox$', views.outbox, name='outbox'),
    url(r'^compose$', views.compose, name='compose'),
]
| 0 | 0 | 0 |
6faa507e7697542efa1659a32cdaf8a2f46e0ffa | 2,701 | py | Python | mpa/modules/datasets/cls_csv_incr_dataset.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | mpa/modules/datasets/cls_csv_incr_dataset.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | mpa/modules/datasets/cls_csv_incr_dataset.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from mmcls.datasets.builder import DATASETS
from .multi_cls_dataset import MultiClsDataset
from .cls_csv_dataset import CSVDatasetCls
from mpa.modules.utils.task_adapt import map_class_names
import numpy as np
@DATASETS.register_module()
@DATASETS.register_module()
| 40.313433 | 92 | 0.617549 | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from mmcls.datasets.builder import DATASETS
from .multi_cls_dataset import MultiClsDataset
from .cls_csv_dataset import CSVDatasetCls
from mpa.modules.utils.task_adapt import map_class_names
import numpy as np
@DATASETS.register_module()
class LwfTaskIncDataset(MultiClsDataset):
    """Multi-task classification dataset for task-incremental training,
    presumably LwF (Learning without Forgetting) style given the name.

    When ``pre_stage_res`` (an ``np.save`` output path) is given, the
    annotations produced by the previous training stage -- which include
    per-sample soft labels -- are loaded instead of being parsed anew,
    and every ``Collect`` step of the pipeline is extended to also carry
    ``soft_label``.

    Args:
        pre_stage_res: Optional path to the previous stage's saved
            annotations (loaded with ``allow_pickle=True``).
        model_tasks: Task -> class-name mapping of the previous model;
            used to remap its label indices onto this dataset's tasks.
    """
    def __init__(self, pre_stage_res=None, model_tasks=None, **kwargs):
        self.pre_stage_res = pre_stage_res
        self.model_tasks = model_tasks
        if self.pre_stage_res is not None:
            # Previous-stage annotations replace normal loading; make the
            # pipeline collect the stored soft labels as well.
            self.pre_stage_data = np.load(self.pre_stage_res, allow_pickle=True)
            for p in kwargs['pipeline']:
                if p['type'] == 'Collect':
                    p['keys'] += ['soft_label']
        super(LwfTaskIncDataset, self).__init__(**kwargs)
    def load_annotations(self):
        """Return previous-stage annotations (labels remapped per task),
        or fall back to the parent class's loading."""
        if self.pre_stage_res is not None:
            data_infos = self.pre_stage_data
            # Per-task index maps: old model's class order -> this dataset's.
            index_map = dict()
            for i, k in enumerate(self.tasks.keys()):
                index_map.update({i: map_class_names(self.tasks[k], self.model_tasks[k])})
            for data in data_infos:
                data['img_prefix'] = self.data_prefix
                # NOTE(review): loop variable ``map`` shadows the builtin.
                for i, map in index_map.items():
                    data['gt_label'][i] = map[data['gt_label'][i]]
        else:
            data_infos = super().load_annotations()
        return data_infos
@DATASETS.register_module()
class ClassIncDataset(CSVDatasetCls):
    """CSV classification dataset for class-incremental training stages.

    With ``pre_stage_res`` set, previous-stage annotations (carrying
    ``soft_label`` and ``center`` entries) are reused; the ``Collect``
    pipeline steps additionally gather those two keys.  Only the first
    ``len(csv)`` entries -- the new-class samples -- get their labels
    remapped into ``dst_classes``.

    Args:
        pre_stage_res: Optional ``np.save`` output path with the previous
            stage's annotations (loaded with ``allow_pickle=True``).
        dst_classes: Full class-name list used for label remapping.
    """
    def __init__(self, pre_stage_res=None, dst_classes=None, **kwargs):
        self.pre_stage_res = pre_stage_res
        self.dst_classes = dst_classes
        if self.pre_stage_res is not None:
            self.pre_stage_data = np.load(self.pre_stage_res, allow_pickle=True)
            for p in kwargs['pipeline']:
                if p['type'] == 'Collect':
                    p['keys'] += ['soft_label']
                    p['keys'] += ['center']
        super(ClassIncDataset, self).__init__(**kwargs)
    def load_annotations(self):
        """Return previous-stage annotations with new-class labels
        remapped, or fall back to CSV loading (with ``dst_classes``
        installed as ``CLASSES`` when provided)."""
        if self.pre_stage_res is not None:
            dataframe = self._read_csvs()
            num_new_class = len(dataframe)
            data_infos = self.pre_stage_data
            index_map = map_class_names(self.CLASSES, self.dst_classes)
            for i, data in enumerate(data_infos):
                data['img_prefix'] = self.data_prefix
                # Entries past the CSV length keep their stored labels --
                # presumably already in dst_classes index space; confirm.
                if i < num_new_class:
                    data['gt_label'] = np.array(index_map[data['gt_label']], dtype=np.int64)
        else:
            if self.dst_classes is not None:
                self.CLASSES = self.dst_classes
            data_infos = super().load_annotations()
        return data_infos
| 2,165 | 36 | 150 |
9d1ac50ab7a8871e2c149a42039149cf44590161 | 132 | py | Python | Python/Unsorted/469a_v2.py | LittleEndu/Codeforces | 82c49b10702c58bc5ce062801d740a2f5f600062 | [
"MIT"
] | null | null | null | Python/Unsorted/469a_v2.py | LittleEndu/Codeforces | 82c49b10702c58bc5ce062801d740a2f5f600062 | [
"MIT"
] | null | null | null | Python/Unsorted/469a_v2.py | LittleEndu/Codeforces | 82c49b10702c58bc5ce062801d740a2f5f600062 | [
"MIT"
] | null | null | null | # Should be smallest now
n=input
print("IO hb,e cmoym ek etyhbeo agrudy!. "[int(n())>len(set(n().split()[1:]+n().split()[1:]))::2]) | 33 | 98 | 0.606061 | # Should be smallest now
n=input
print("IO hb,e cmoym ek etyhbeo agrudy!. "[int(n())>len(set(n().split()[1:]+n().split()[1:]))::2]) | 0 | 0 | 0 |
3d15c619eb5d8ddd230c9b3eb5f22f4ce204f9ef | 3,862 | py | Python | docs/conf.py | hugmyndakassi/hvmi | fa49a34ba32b327c462224db1cf58d96a076a224 | [
"Apache-2.0"
] | 677 | 2020-07-30T13:59:36.000Z | 2022-03-24T11:02:00.000Z | docs/conf.py | hugmyndakassi/hvmi | fa49a34ba32b327c462224db1cf58d96a076a224 | [
"Apache-2.0"
] | 38 | 2020-08-11T13:59:36.000Z | 2022-02-17T15:03:48.000Z | docs/conf.py | fengjixuchui/hvmi | 72488e8432d26547876a052d24ea44c3e18279a7 | [
"Apache-2.0"
] | 55 | 2020-07-30T14:11:03.000Z | 2022-03-09T05:40:44.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
#import sphinx_rtd_theme
import sphinx_bootstrap_theme
import subprocess
import pathlib
from pathlib import Path
generate_doxygen()
# -- Project information -----------------------------------------------------
project = 'Hypervisor Memory Introspection'
copyright = '2020, Bitdefender'
author = 'Bitdefender'
# The major project version, used as the replacement for |version|.
version = "1"
# The full project version, used as the replacement for |release| and e.g. in the HTML templates.
release = '1.132.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.autosectionlabel',
'sphinx_bootstrap_theme'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Tell sphinx what the primary language being documented is.
primary_domain = 'c'
# Tell sphinx what the pygments highlight language should be.
highlight_language = 'c'
todo_include_todos = False
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'chapters/global-options.rst', 'chapters/process-options.rst']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap'
html_logo = 'chapters/images/hvmi-logo-main-color.png'
html_use_index = True
if html_theme == 'bootstrap':
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
'bootstrap_version': "3",
'navbar_site_name': 'Chapters',
'navbar_links': [
("GitHub", "https://github.com/hvmi/hvmi", True),
("Blog", "https://hvmi.github.io/blog/", True),
("Doxygen", "_static/doxygen/html/index"),
],
'source_link_position': "footer",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
master_doc = 'index'
# autosectionlabel settings
# True to prefix each section label with the name of the document it is in, followed by a colon.
autosectionlabel_prefix_document = True
# Uncomment this to use custom.css
| 34.792793 | 118 | 0.685396 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
#import sphinx_rtd_theme
import sphinx_bootstrap_theme
import subprocess
import pathlib
from pathlib import Path
def generate_doxygen():
    """Run Doxygen (config in ../Doxygen/Doxyfile) before the Sphinx build.

    Aborts the documentation build with exit status 1 if Doxygen reports
    an error.  A TimeoutExpired after 120s is deliberately left uncaught
    so an exceptional hang also fails the build; unlike the previous
    ``Popen.wait(timeout=...)``, ``subprocess.run`` kills the child
    process before raising, so no orphaned doxygen is left running.
    """
    # The Doxygen directory is one level up, so go there.
    parent_dir = Path(__file__).absolute().parent.parent
    # A generous timeout.
    completed = subprocess.run(['doxygen', 'Doxygen/Doxyfile'],
                               cwd=parent_dir, timeout=120)
    if completed.returncode != 0:
        print("Doxygen generation failed!")
        sys.exit(1)
generate_doxygen()
# -- Project information -----------------------------------------------------
project = 'Hypervisor Memory Introspection'
copyright = '2020, Bitdefender'
author = 'Bitdefender'
# The major project version, used as the replacement for |version|.
version = "1"
# The full project version, used as the replacement for |release| and e.g. in the HTML templates.
release = '1.132.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.autosectionlabel',
'sphinx_bootstrap_theme'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Tell sphinx what the primary language being documented is.
primary_domain = 'c'
# Tell sphinx what the pygments highlight language should be.
highlight_language = 'c'
todo_include_todos = False
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'chapters/global-options.rst', 'chapters/process-options.rst']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap'
html_logo = 'chapters/images/hvmi-logo-main-color.png'
html_use_index = True
if html_theme == 'bootstrap':
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
'bootstrap_version': "3",
'navbar_site_name': 'Chapters',
'navbar_links': [
("GitHub", "https://github.com/hvmi/hvmi", True),
("Blog", "https://hvmi.github.io/blog/", True),
("Doxygen", "_static/doxygen/html/index"),
],
'source_link_position': "footer",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
master_doc = 'index'
# autosectionlabel settings
# True to prefix each section label with the name of the document it is in, followed by a colon.
autosectionlabel_prefix_document = True
# Uncomment this to use custom.css
def setup(app):
    """Sphinx extension hook: attach the custom stylesheet when the
    bootstrap theme is active (see the commented hint above)."""
    if html_theme == 'bootstrap':
        app.add_css_file('custom.css')
| 542 | 0 | 45 |
33658693dc9a0d200adede00e29553dcde573ab4 | 578 | py | Python | sevenbridges/models/compound/volumes/volume_file.py | sbg/sevenbridges-python | b3e14016066563470d978c9b13e1a236a41abea8 | [
"Apache-2.0"
] | 46 | 2016-04-27T12:51:17.000Z | 2021-11-24T23:43:12.000Z | sevenbridges/models/compound/volumes/volume_file.py | sbg/sevenbridges-python | b3e14016066563470d978c9b13e1a236a41abea8 | [
"Apache-2.0"
] | 111 | 2016-05-25T15:44:31.000Z | 2022-02-05T20:45:37.000Z | sevenbridges/models/compound/volumes/volume_file.py | sbg/sevenbridges-python | b3e14016066563470d978c9b13e1a236a41abea8 | [
"Apache-2.0"
] | 37 | 2016-04-27T12:10:43.000Z | 2021-03-18T11:22:28.000Z | from sevenbridges.meta.fields import StringField
from sevenbridges.meta.resource import Resource
class VolumeFile(Resource):
"""
VolumeFile resource describes the location of the file
on the external volume.
"""
volume = StringField(read_only=True)
location = StringField(read_only=True)
| 28.9 | 78 | 0.692042 | from sevenbridges.meta.fields import StringField
from sevenbridges.meta.resource import Resource
class VolumeFile(Resource):
    """
    VolumeFile resource describes the location of the file
    on the external volume.
    """
    # Volume identifier and path within it; both are server-managed.
    volume = StringField(read_only=True)
    location = StringField(read_only=True)
    def __str__(self):
        """Debug representation showing volume and location."""
        return f'<VolumeFile: volume={self.volume}, location={self.location}>'
    def __eq__(self, other):
        """Equal when *other* is the same object or shares ``location``.

        NOTE(review): ``volume`` is ignored, so files at the same path on
        different volumes compare equal -- confirm this is intended.
        Defining __eq__ also sets __hash__ to None, making instances
        unhashable (cannot be set members or dict keys).
        """
        if type(other) is not type(self):
            return False
        return self is other or self.location == other.location
| 210 | 0 | 54 |
10f9d7dfc533d1074e71035424e95b25f68c15f6 | 340 | py | Python | Module_03/mlb.py | JoseGtz/2021_python_selenium | c7b39479c78839ba2e2e2633a0f673a8b02fb4cb | [
"Unlicense"
] | null | null | null | Module_03/mlb.py | JoseGtz/2021_python_selenium | c7b39479c78839ba2e2e2633a0f673a8b02fb4cb | [
"Unlicense"
] | null | null | null | Module_03/mlb.py | JoseGtz/2021_python_selenium | c7b39479c78839ba2e2e2633a0f673a8b02fb4cb | [
"Unlicense"
] | null | null | null | from common.webdriver_factory import get_driver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
driver = get_driver('chrome')
wait = WebDriverWait(driver, 5)
driver.get('https://www.mlb.com/es/standings')
driver.quit() | 30.909091 | 64 | 0.817647 | from common.webdriver_factory import get_driver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
driver = get_driver('chrome')
wait = WebDriverWait(driver, 5)
driver.get('https://www.mlb.com/es/standings')
driver.quit() | 0 | 0 | 0 |
1e302e794b191fcbf7fce3dd9089541530553362 | 1,552 | py | Python | wpbullet/SQLInjection.py | wdeilim/Audit-Rules | 74889d54bfdca5d0298efb5fdbe33231be2a7e82 | [
"MIT"
] | 9 | 2020-03-15T00:01:42.000Z | 2021-03-10T03:35:09.000Z | wpbullet/SQLInjection.py | wdeilim/Audit-Rules | 74889d54bfdca5d0298efb5fdbe33231be2a7e82 | [
"MIT"
] | null | null | null | wpbullet/SQLInjection.py | wdeilim/Audit-Rules | 74889d54bfdca5d0298efb5fdbe33231be2a7e82 | [
"MIT"
] | 1 | 2021-01-05T21:07:56.000Z | 2021-01-05T21:07:56.000Z | from core.modules import BaseClass
| 23.164179 | 46 | 0.477448 | from core.modules import BaseClass
class SQLInjection(BaseClass):
    """Static-analysis rule flagging calls to SQL-executing APIs.

    ``functions`` lists regex fragments for MySQL(i), PostgreSQL, SQLite,
    PDO-style method calls and WordPress ``$wpdb`` helpers; ``blacklist``
    names escaping/sanitising helpers whose presence suppresses a match.
    How the two lists are combined is defined by ``BaseClass``.
    """
    # Display name and severity reported for a finding.
    name = "SQL Injection"
    severity = "High"
    functions_prefix = ""
    # The lookbehind (?<![^\s+(]) below requires the call to be preceded
    # only by whitespace, '+', '(' or the start of the line, filtering
    # out matches inside longer identifiers.
    functions = [
# Native MySQL(i) Injection
"(?<![^\s+(])mysql_query",
"(?<![^\s+(])mysqli_multi_query",
"(?<![^\s+(])mysqli_send_query",
"(?<![^\s+(])mysqli_master_query",
"(?<![^\s+(])mysql_unbuffered_query",
"(?<![^\s+(])mysql_db_query",
"mysqli::real_query",
"mysqli_real_query",
"mysqli::query",
"mysqli_query",
# PostgreSQL Injection
"(?<![^\s+(])pg_query",
"(?<![^\s+(])pg_send_query",
# SQLite SQL Injection
"(?<![^\s+(])sqlite_array_query",
"(?<![^\s+(])sqlite_exec",
"(?<![^\s+(])sqlite_query",
"(?<![^\s+(])sqlite_single_query",
"(?<![^\s+(])sqlite_unbuffered_query",
# PDO SQL Injection
"->arrayQuery",
"->query",
"->queryExec",
"->singleQuery",
"->querySingle",
"->exec",
"->execute",
"->unbufferedQuery",
"->real_query",
"->multi_query",
"->send_query",
# WordPress SQL Injection
"wpdb->query",
"wpdb->get_var",
"wpdb->get_row",
"wpdb->get_col",
"wpdb->get_results",
"wpdb->replace",
]
blacklist = [
"mysql_real_escape_string",
"pg_escape_string",
"sqlite_escape_string",
"wpdb->prepare",
"intval",
"esc_sql"
]
| 0 | 1,493 | 23 |
ebb89cbff519a1c8164f4889b7bcd0d23b450a02 | 409 | py | Python | Logger/migrations/0010_auto_20181009_0131.py | MenheraMikumo/Nextflow-Kanban | 54333f32cf626a021ca097d1a80b81f0d26029ed | [
"MIT"
] | null | null | null | Logger/migrations/0010_auto_20181009_0131.py | MenheraMikumo/Nextflow-Kanban | 54333f32cf626a021ca097d1a80b81f0d26029ed | [
"MIT"
] | null | null | null | Logger/migrations/0010_auto_20181009_0131.py | MenheraMikumo/Nextflow-Kanban | 54333f32cf626a021ca097d1a80b81f0d26029ed | [
"MIT"
] | null | null | null | # Generated by Django 2.1.1 on 2018-10-09 01:31
from django.db import migrations, models
| 21.526316 | 74 | 0.606357 | # Generated by Django 2.1.1 on 2018-10-09 01:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Logger', '0009_auto_20181009_0128'),
]
operations = [
migrations.AlterField(
model_name='trace',
name='scratch',
field=models.CharField(blank=True, max_length=128, null=True),
),
]
| 0 | 295 | 23 |
78dd0f4f343d910e1055e06008f235f1c97febef | 12 | py | Python | brainstorm/utils/__init__.py | znhv/winsio | 4d4e69961285ea3dcebc5ad6358e2d753d6b4f9d | [
"MIT"
] | null | null | null | brainstorm/utils/__init__.py | znhv/winsio | 4d4e69961285ea3dcebc5ad6358e2d753d6b4f9d | [
"MIT"
] | null | null | null | brainstorm/utils/__init__.py | znhv/winsio | 4d4e69961285ea3dcebc5ad6358e2d753d6b4f9d | [
"MIT"
] | null | null | null | """Pass."""
| 6 | 11 | 0.333333 | """Pass."""
| 0 | 0 | 0 |
dbfabe95346f96e3d740ad1e1b8d58e31d123c44 | 5,873 | py | Python | metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hammer_v2.py | abdulhaim/metaworld | bbf19f5b72c07c11e51def23fd71eca8b6c619e2 | [
"MIT"
] | null | null | null | metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hammer_v2.py | abdulhaim/metaworld | bbf19f5b72c07c11e51def23fd71eca8b6c619e2 | [
"MIT"
] | null | null | null | metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hammer_v2.py | abdulhaim/metaworld | bbf19f5b72c07c11e51def23fd71eca8b6c619e2 | [
"MIT"
] | null | null | null | import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
| 33.752874 | 93 | 0.598331 | import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerHammerEnvV2(SawyerXYZEnv):
HAMMER_HANDLE_LENGTH = 0.14
def __init__(self):
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.4, 0.0)
obj_high = (0.1, 0.5, 0.0)
goal_low = (0.2399, .7399, 0.109)
goal_high = (0.2401, .7401, 0.111)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'hammer_init_pos': np.array([0, 0.5, 0.0]),
'hand_init_pos': np.array([0, 0.4, 0.2]),
}
self.goal = self.init_config['hammer_init_pos']
self.hammer_init_pos = self.init_config['hammer_init_pos']
self.obj_init_pos = self.hammer_init_pos.copy()
self.hand_init_pos = self.init_config['hand_init_pos']
self.nail_init_pos = None
self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
# self.one_hot_encode = [0,1]
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_hammer.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(
reward,
reward_grab,
reward_ready,
reward_success,
success
) = self.compute_reward(action, obs)
info = {
'success': float(success),
'near_object': reward_ready,
'grasp_success': reward_grab >= 0.5,
'grasp_reward': reward_grab,
'in_place_reward': reward_success,
'obj_to_target': 0,
'unscaled_reward': reward,
}
return reward, info
def _get_id_main_object(self):
return self.unwrapped.model.geom_name2id('HammerHandle')
def _get_pos_objects(self):
return np.hstack((
self.get_body_com('hammer').copy(),
self.get_body_com('nail_link').copy()
))
def _get_quat_objects(self):
return np.hstack((
self.sim.data.get_body_xquat('hammer'),
self.sim.data.get_body_xquat('nail_link')
))
def _set_hammer_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
def reset_model(self):
self._reset_hand()
# Set position of box & nail (these are not randomized)
self.sim.model.body_pos[self.model.body_name2id(
'box'
)] = np.array([0.24, 0.85, 0.0])
# Update _target_pos
self._target_pos = self._get_site_pos('goal')
# Randomize hammer position
self.hammer_init_pos = self._get_state_rand_vec() if self.random_init \
else self.init_config['hammer_init_pos']
self.nail_init_pos = self._get_site_pos('nailHead')
self.obj_init_pos = self.hammer_init_pos.copy()
self._set_hammer_xyz(self.hammer_init_pos)
return self._get_obs()
@staticmethod
def _reward_quat(obs):
# Ideal laid-down wrench has quat [1, 0, 0, 0]
# Rather than deal with an angle between quaternions, just approximate:
ideal = np.array([1., 0., 0., 0.])
error = np.linalg.norm(obs[7:11] - ideal)
return max(1.0 - error / 0.4, 0.0)
@staticmethod
def _reward_pos(hammer_head, target_pos):
pos_error = target_pos - hammer_head
a = 0.1 # Relative importance of just *trying* to lift the hammer
b = 0.9 # Relative importance of hitting the nail
lifted = hammer_head[2] > 0.02
in_place = a * float(lifted) + b * reward_utils.tolerance(
np.linalg.norm(pos_error),
bounds=(0, 0.02),
margin=0.2,
sigmoid='long_tail',
)
return in_place
def compute_reward(self, actions, obs):
hand = obs[:3]
hammer = obs[4:7]
hammer_head = hammer + np.array([.16, .06, .0])
# `self._gripper_caging_reward` assumes that the target object can be
# approximated as a sphere. This is not true for the hammer handle, so
# to avoid re-writing the `self._gripper_caging_reward` we pass in a
# modified hammer position.
# This modified position's X value will perfect match the hand's X value
# as long as it's within a certain threshold
hammer_threshed = hammer.copy()
threshold = SawyerHammerEnvV2.HAMMER_HANDLE_LENGTH / 2.0
if abs(hammer[0] - hand[0]) < threshold:
hammer_threshed[0] = hand[0]
reward_quat = SawyerHammerEnvV2._reward_quat(obs)
reward_grab = self._gripper_caging_reward(
actions, hammer_threshed,
object_reach_radius=0.01,
obj_radius=0.015,
pad_success_thresh=0.02,
xz_thresh=0.01,
high_density=True,
)
reward_in_place = SawyerHammerEnvV2._reward_pos(
hammer_head,
self._target_pos
)
reward = (2.0 * reward_grab + 6.0 * reward_in_place) * reward_quat
# Override reward on success. We check that reward is above a threshold
# because this env's success metric could be hacked easily
success = self.data.get_joint_qpos('NailSlideJoint') > 0.09
if success and reward > 5.:
reward = 10.0
return (
reward,
reward_grab,
reward_quat,
reward_in_place,
success,
)
| 5,186 | 421 | 23 |
6f32a6c633192ec49eb5d9989afede526590d90b | 1,284 | py | Python | tests/metarl/np/algos/test_cma_es.py | neurips2020submission11699/metarl | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | [
"MIT"
] | 2 | 2021-02-07T12:14:52.000Z | 2021-07-29T08:07:22.000Z | tests/metarl/np/algos/test_cma_es.py | neurips2020submission11699/metarl | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | [
"MIT"
] | null | null | null | tests/metarl/np/algos/test_cma_es.py | neurips2020submission11699/metarl | ae4825d21478fa1fd0aa6b116941ea40caa152a5 | [
"MIT"
] | null | null | null | from metarl.envs import MetaRLEnv
from metarl.experiment import LocalTFRunner
from metarl.np.algos import CMAES
from metarl.np.baselines import LinearFeatureBaseline
from metarl.sampler import OnPolicyVectorizedSampler
from metarl.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
| 36.685714 | 74 | 0.622274 | from metarl.envs import MetaRLEnv
from metarl.experiment import LocalTFRunner
from metarl.np.algos import CMAES
from metarl.np.baselines import LinearFeatureBaseline
from metarl.sampler import OnPolicyVectorizedSampler
from metarl.tf.policies import CategoricalMLPPolicy
from tests.fixtures import snapshot_config, TfGraphTestCase
class TestCMAES(TfGraphTestCase):
def test_cma_es_cartpole(self):
"""Test CMAES with Cartpole-v1 environment."""
with LocalTFRunner(snapshot_config) as runner:
env = MetaRLEnv(env_name='CartPole-v1')
policy = CategoricalMLPPolicy(name='policy',
env_spec=env.spec,
hidden_sizes=(32, 32))
baseline = LinearFeatureBaseline(env_spec=env.spec)
n_samples = 20
algo = CMAES(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
n_samples=n_samples)
runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
runner.train(n_epochs=1, batch_size=1000)
# No assertion on return because CMAES is not stable.
env.close()
| 0 | 929 | 23 |
c3e71a45dd9fc2cf820b5ef17fa26fc3dedd9920 | 17,011 | py | Python | test/test_advanced_queries.py | ShaneKilkelly/bedquilt | beaee513a015ed0dd633b738517b33eb7c4c42a3 | [
"MIT"
] | 288 | 2015-04-20T18:14:39.000Z | 2021-10-30T01:35:44.000Z | test/test_advanced_queries.py | ShaneKilkelly/bedquilt | beaee513a015ed0dd633b738517b33eb7c4c42a3 | [
"MIT"
] | 21 | 2015-04-13T12:48:40.000Z | 2017-05-27T12:41:10.000Z | test/test_advanced_queries.py | ShaneKilkelly/bedquilt | beaee513a015ed0dd633b738517b33eb7c4c42a3 | [
"MIT"
] | 19 | 2015-11-03T09:25:00.000Z | 2021-05-01T00:28:02.000Z | import testutils
import json
import psycopg2
| 33.095331 | 78 | 0.38675 | import testutils
import json
import psycopg2
def _map_labels(results):
return list(map(lambda row: row[0]['label'], results))
def _map_ids(results):
return list(map(lambda row: row[0]['label'], results))
class TestAdvancedQueries(testutils.BedquiltTestCase):
def test_eq(self):
rows = [
{"_id": "aa", "label": "a", "n": 1, "color": "red"},
{"_id": "bb", "label": "b", "n": 4, "color": "red"},
{"_id": "cc", "label": "c", "n": 8, "color": "red"},
{"_id": "dd", "label": "d", "n": 16, "color": "blue"},
{"_id": "ee", "label": "e", "n": 8, "color": "blue"},
{"_id": "ff", "label": "f", "n": 16, "color": "red"},
{"_id": "dud", "color": "blue"}
]
for row in rows:
self._insert('things', row)
# find_one
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'blue',
'n': {'$eq': 8},
}))
)
self.assertEqual(result[0][0]['label'], 'e')
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$eq': 16},
}))
)
self.assertEqual(result[0][0]['label'], 'f')
# find many
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'n': {'$eq': 8},
}))
)
self.assertEqual(len(result), 2)
self.assertEqual(_map_labels(result), ['c', 'e'])
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'color': {'$eq': 'red'},
}))
)
self.assertEqual(len(result), 4)
self.assertEqual(_map_labels(result), ['a', 'b', 'c', 'f'])
result = self._query(
"select bq_find('things', %s)", (json.dumps({
"o'reilly": {'$eq': "o'really"}
}),)
)
self.assertEqual(len(result), 0)
self.assertEqual(_map_labels(result), [])
def test_noteq(self):
rows = [
{"_id": "wat", "label": "oh", "color": "purple"},
{"_id": "aa", "label": "a", "n": 1, "color": "red"},
{"_id": "bb", "label": "b", "n": 4, "color": "red"},
{"_id": "dud", "label": "dud", "color": "blue"}
]
for row in rows:
self._insert('things', row)
# find_one
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$noteq': 1},
}))
)
self.assertEqual(result[0][0]['label'], 'b')
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$noteq': 4},
}))
)
self.assertEqual(result[0][0]['label'], 'a')
# find many
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'n': {'$noteq': 4},
}))
)
self.assertEqual(len(result), 3)
self.assertEqual(_map_labels(result), ['oh', 'a', 'dud'])
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'n': {'$noteq': 400},
}))
)
self.assertEqual(len(result), 4)
self.assertEqual(_map_labels(result), ['oh', 'a', 'b', 'dud'])
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'color': {'$noteq': 'red'},
}))
)
self.assertEqual(len(result), 2)
self.assertEqual(_map_labels(result), ['oh', 'dud'])
def test_gt_and_gte(self):
rows = [
{"_id": "aa", "label": "a", "n": 1, "color": "red"},
{"_id": "bb", "label": "b", "n": 4, "color": "red"},
{"_id": "cc", "label": "c", "n": 8, "color": "red"},
{"_id": "dd", "label": "d", "n": 16, "color": "blue"},
{"_id": "ee", "label": "e", "n": 8, "color": "blue"},
{"_id": "ff", "label": "f", "n": 16, "color": "red"},
{"_id": "dud", "color": "blue"}
]
for row in rows:
self._insert('things', row)
# find_one
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'blue',
'n': {'$gt': 5},
}))
)
self.assertEqual(result[0][0]['label'], 'd')
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$gt': 5},
}))
)
self.assertEqual(result[0][0]['label'], 'c')
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$gte': 8},
}))
)
self.assertEqual(result[0][0]['label'], 'c')
# find many
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'color': 'blue',
'n': {'$gt': 5},
}))
)
self.assertEqual(len(result), 2)
self.assertEqual(_map_labels(result), ['d', 'e'])
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$gte': 4},
}))
)
self.assertEqual(len(result), 3)
self.assertEqual(_map_labels(result), ['b', 'c', 'f'])
def test_lt(self):
rows = [
{"_id": "aa", "label": "a", "n": 1, "color": "red"},
{"_id": "bb", "label": "b", "n": 4, "color": "red"},
{"_id": "cc", "label": "c", "n": 8, "color": "red"},
{"_id": "dd", "label": "d", "n": 16, "color": "blue"},
{"_id": "ee", "label": "e", "n": 8, "color": "blue"},
{"_id": "ff", "label": "f", "n": 16, "color": "red"},
{"_id": "dud", "color": "blue"}
]
for row in rows:
self._insert('things', row)
# find_one
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'blue',
'n': {'$lt': 10},
}))
)
self.assertEqual(result[0][0]['label'], 'e')
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$lt': 2},
}))
)
self.assertEqual(result[0][0]['label'], 'a')
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'blue',
'n': {'$lte': 8},
}))
)
self.assertEqual(result[0][0]['label'], 'e')
# find many
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$lt': 16},
}))
)
self.assertEqual(len(result), 3)
self.assertEqual(_map_labels(result), ['a', 'b', 'c'])
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$lte': 16},
}))
)
self.assertEqual(len(result), 4)
self.assertEqual(_map_labels(result), ['a', 'b', 'c', 'f'])
def test_in(self):
rows = [
{"_id": "aa", "label": "a", "n": 1, "color": "red"},
{"_id": "bb", "label": "b", "n": 4, "color": "red"},
{"_id": "cc", "label": "c", "n": 8, "color": "red"},
{"_id": "dd", "label": "d", "n": 16, "color": "blue"},
{"_id": "ee", "label": "e", "n": 8, "color": "blue"},
{"_id": "ff", "label": "f", "n": 16, "color": "red"},
{"_id": "dud", "color": "blue"}
]
for row in rows:
self._insert('things', row)
# find_one
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$in': [4, 22, 9]},
}))
)
self.assertEqual(result[0][0]['label'], 'b')
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'blue',
'n': {'$in': [2, 8, 24]},
}))
)
self.assertEqual(result[0][0]['label'], 'e')
# find many
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$in': [4, 2, 16, 9]},
}))
)
self.assertEqual(len(result), 2)
self.assertEqual(_map_labels(result), ['b', 'f'])
def test_notin(self):
rows = [
{"_id": "aa", "label": "a", "n": 1, "color": "red"},
{"_id": "bb", "label": "b", "n": 4, "color": "red"},
{"_id": "cc", "label": "c", "n": 8, "color": "red"},
{"_id": "dd", "label": "d", "n": 16, "color": "blue"},
{"_id": "ee", "label": "e", "n": 8, "color": "blue"},
{"_id": "ff", "label": "f", "n": 16, "color": "red"},
{"_id": "dud", "color": "blue"}
]
for row in rows:
self._insert('things', row)
# find_one
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$notin': [1, 8, 16]},
}))
)
self.assertEqual(result[0][0]['label'], 'b')
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'blue',
'n': {'$notin': [16, 12]},
}))
)
self.assertEqual(result[0][0]['label'], 'e')
# find many
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'color': 'red',
'n': {'$notin': [22, 4, 8]},
}))
)
self.assertEqual(len(result), 2)
self.assertEqual(_map_labels(result), ['a', 'f'])
def test_exists(self):
rows = [
{"_id": "aa", "color": "red", "label": "a", "nested": {"x": 42}},
{"_id": "bb", "color": "blue", "label": "b"},
{"_id": "cc", "color": "blue", "label": "c", "nested": {"x": 44}},
{"_id": "dd", "color": "red", "label": "d", "nested": {"y": 13}},
{"_id": "ee", "color": "blue", "label": "e", "nested": {"x": 46}},
{"_id": "ff", "color": "red", "label": "f"},
{"_id": "gg", "color": "blue", "label": "g"},
]
for row in rows:
self._insert('things', row)
# exists=true, find one
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'blue',
'nested': {
'x': {'$exists': True}
},
}))
)
self.assertEqual(result[0][0]['label'], 'c')
# exists=true, find many
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'color': 'blue',
'nested': {
'x': {'$exists': True}
},
}))
)
self.assertEqual(len(result), 2)
self.assertEqual(_map_labels(result), ['c', 'e'])
# exists=false, find one
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'color': 'blue',
'nested': {
'x': {'$exists': False}
},
}))
)
self.assertEqual(result[0][0]['label'], 'b')
# exists=false, find many
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'color': 'blue',
'nested': {
'x': {'$exists': False}
},
}))
)
self.assertEqual(len(result), 2)
self.assertEqual(_map_labels(result), ['b', 'g'])
def test_type(self):
rows = [
{'label': 'a', 'x': 42},
{'label': 'b', 'x': 'wat'},
{'label': 'c', 'x': None},
{'label': 'd', 'x': 90},
{'label': 'e', 'x': True},
{'label': 'f', 'x': 'wat'},
{'label': 'g', 'x': [1, 2, 3]},
{'label': 'h', 'x': {'foo': 'bar'}},
{'label': 'i', 'x': [4, 5]},
{'label': 'j', 'x': False},
{'label': 'k', 'x': {'foo': 'baz'}},
{'label': 'l', 'x': None},
{'label': 'm', 'x': None}
]
for row in rows:
self._insert('things', row)
# find one
examples = [
('number', 'a'),
('string', 'b'),
('object', 'h'),
('array', 'g'),
('boolean', 'e'),
('null', 'c')
]
for type_string, label in examples:
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'x': {'$type': type_string}
}))
)
self.assertEqual(result[0][0]['label'], label)
# find many
examples = [
('number', ['a', 'd']),
('string', ['b', 'f']),
('object', ['h', 'k']),
('array', ['g', 'i']),
('boolean', ['e', 'j']),
('null', ['c', 'l', 'm'])
]
for type_string, labels in examples:
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'x': {'$type': type_string}
}))
)
self.assertEqual(_map_labels(result), labels)
def test_like(self):
rows = [
{'label': 'a', 'x': 42},
{'label': 'b', 'x': 'one two'},
{'label': 'c', 'x': 'oh no two'},
{'label': 'd', 'x': 90},
{'label': 'e', 'x': True},
{'label': 'f', 'x': 'three four'},
{'label': 'g', 'x': 'nine four'},
]
for row in rows:
self._insert('things', row)
examples = [
('%two', 'b'),
('%one%', 'b'),
('%ree f%', 'f'),
('%four', 'f')
]
# find one
for like_string, label in examples:
result = self._query(
"select bq_find_one('things', %s)",
(format(json.dumps({'x': {'$like': like_string}})),)
)
self.assertEqual(result[0][0]['label'], label)
# find many
examples = [
('%two', ['b', 'c']),
('%o%', ['b','c','f', 'g']),
('%four', ['f', 'g']),
('%ree f%', ['f'])
]
for like_string, labels in examples:
result = self._query(
"select bq_find('things', %s)",
(format(json.dumps({'x': {'$like': like_string}})),)
)
self.assertEqual(_map_labels(result), labels)
def test_regex(self):
rows = [
{'label': 'a', 'x': 42},
{'label': 'b', 'x': 'one two'},
{'label': 'c', 'x': 'oh no two'},
{'label': 'd', 'x': 90},
{'label': 'e', 'x': True},
{'label': 'f', 'x': 'three four'},
{'label': 'g', 'x': 'nine four'},
]
for row in rows:
self._insert('things', row)
examples = [
('^.*two$', 'b'),
('^.*one.*$', 'b'),
('^.*four$', 'f'),
('^.*ree f.*$', 'f')
]
# find one
for regex_string, label in examples:
result = self._query(
"select bq_find_one('things', '{}')".format(json.dumps({
'x': {'$regex': regex_string}
}))
)
self.assertEqual(result[0][0]['label'], label)
# find many
examples = [
('^.*two$', ['b', 'c']),
('^.*o.*$', ['b','c','f', 'g']),
('^.*four$', ['f', 'g']),
('^.*ree f.*$', ['f'])
]
for regex_string, labels in examples:
result = self._query(
"select bq_find('things', '{}')".format(json.dumps({
'x': {'$regex': regex_string}
}))
)
self.assertEqual(_map_labels(result), labels)
| 16,591 | 33 | 339 |
fc8a43d5cbe335fdf7db030ba5d558cc7c3fa531 | 567 | py | Python | flypy/compiler/optimizations/tests/test_inlining.py | filmackay/flypy | d64e70959c5c8af9e914dcc3ce1068fb99859c3a | [
"BSD-2-Clause"
] | null | null | null | flypy/compiler/optimizations/tests/test_inlining.py | filmackay/flypy | d64e70959c5c8af9e914dcc3ce1068fb99859c3a | [
"BSD-2-Clause"
] | null | null | null | flypy/compiler/optimizations/tests/test_inlining.py | filmackay/flypy | d64e70959c5c8af9e914dcc3ce1068fb99859c3a | [
"BSD-2-Clause"
] | 1 | 2020-01-01T00:43:24.000Z | 2020-01-01T00:43:24.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from flypy import jit, ijit
#===------------------------------------------------------------------===
# Tests
#===------------------------------------------------------------------===
if __name__ == '__main__':
unittest.main() | 22.68 | 73 | 0.440917 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from flypy import jit, ijit
#===------------------------------------------------------------------===
# Tests
#===------------------------------------------------------------------===
class TestInlining(unittest.TestCase):
def test_inline_simple(self):
@ijit
def g(x):
return x * 2
@ijit
def f(x):
return g(x) + 2
self.assertEqual(f(8), 18)
if __name__ == '__main__':
unittest.main() | 161 | 17 | 50 |
f8557df426480f37626f0aa7ee66e724dc31671e | 4,664 | py | Python | gazoo_device/tests/functional_tests/switchboard_test_suite.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | null | null | null | gazoo_device/tests/functional_tests/switchboard_test_suite.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | null | null | null | gazoo_device/tests/functional_tests/switchboard_test_suite.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite for Switchboard capability."""
import os.path
import time
from typing import Tuple, Type
from gazoo_device.switchboard import log_process
from gazoo_device.tests.functional_tests.utils import gdm_test_base
class SwitchboardTestSuite(gdm_test_base.GDMTestBase):
"""Test suite for Switchboard capability."""
@classmethod
def is_applicable_to(cls, device_type: str,
device_class: Type[gdm_test_base.DeviceType],
device_name: str) -> bool:
"""Determine if this test suite can run on the given device."""
return device_class.has_capabilities(["switchboard"])
@classmethod
def requires_pairing(cls) -> bool:
"""Returns True if the device must be paired to run this test suite."""
return False
@classmethod
def required_test_config_variables(cls) -> Tuple[str, ...]:
"""Returns keys required to be present in the functional test config."""
return ("shell_cmd", "expect")
def test_send_and_expect(self):
"""Tests send_and_expect() method."""
timeout = 10 # In seconds.
response = self.device.switchboard.send_and_expect(
self.test_config["shell_cmd"],
self.test_config["expect"],
timeout=timeout)
self.assertFalse(
response.timedout,
"{} switchboard.send_and_expect failed for command {!r}. "
"Did not find regex {!r} in {}s. Device output: {!r}"
.format(self.device.name, self.test_config["shell_cmd"],
self.test_config["expect"], timeout, response.before))
def test_do_and_expect(self):
"""Tests switchboard.do_and_expect() method."""
switch = MockPowerSwitch()
expect_result = self.device.switchboard.do_and_expect(
switch.turn_on_power, (), {},
["fake_string, won't match anything"],
timeout=.1)
self.assertTrue(
expect_result.timedout,
"Expected do_and_expect to time out, but timedout was False")
self.assertTrue(switch.power_is_on,
"switch.turn_on_power() did not execute. "
"The power state is still off for switch.")
def test_expect_with_bogus_logline(self):
"""Tests switchboard.expect() method for a log line that doesn't exist."""
phrase = "garblygookand more"
response = self.device.switchboard.expect([phrase], timeout=2)
self.assertTrue(response.timedout,
"Response should have timed out, but it didn't. "
f"Requested log line regex: {phrase!r}. "
f"Device output: {response.before!r}")
def test_rotate_log(self):
"""Tests max_log_size and auto log rotation features."""
old_log_file_name = self.device.log_file_name
expected_log_filename = log_process.get_next_log_filename(old_log_file_name)
expected_message = "Special message to trigger at least one log rotation"
max_log_size = len(expected_message) * 10
self.device.switchboard.set_max_log_size(max_log_size)
time.sleep(.5) # Allow time for set_max_log_size to complete.
try:
for _ in range(20):
self.device.switchboard.add_log_note(expected_message)
end_time = time.time() + 3
while (old_log_file_name == self.device.log_file_name
and time.time() < end_time):
time.sleep(0.1)
self.assertTrue(
os.path.exists(old_log_file_name),
f"Expected old log file name {old_log_file_name} to exist")
self.assertTrue(
os.path.exists(expected_log_filename),
f"Expected new log file name {expected_log_filename} to exist")
self.assertNotEqual(
old_log_file_name, self.device.log_file_name,
f"Expected log file name to change from {old_log_file_name}")
finally:
# Disable log rotation (the default) after the test.
self.device.switchboard.set_max_log_size(0)
if __name__ == "__main__":
gdm_test_base.main()
| 37.015873 | 80 | 0.686106 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite for Switchboard capability."""
import os.path
import time
from typing import Tuple, Type
from gazoo_device.switchboard import log_process
from gazoo_device.tests.functional_tests.utils import gdm_test_base
class MockPowerSwitch:
def __init__(self):
self._is_on = False
def turn_on_power(self):
self._is_on = True
def power_is_on(self):
return self._is_on
class SwitchboardTestSuite(gdm_test_base.GDMTestBase):
"""Test suite for Switchboard capability."""
@classmethod
def is_applicable_to(cls, device_type: str,
device_class: Type[gdm_test_base.DeviceType],
device_name: str) -> bool:
"""Determine if this test suite can run on the given device."""
return device_class.has_capabilities(["switchboard"])
@classmethod
def requires_pairing(cls) -> bool:
"""Returns True if the device must be paired to run this test suite."""
return False
@classmethod
def required_test_config_variables(cls) -> Tuple[str, ...]:
"""Returns keys required to be present in the functional test config."""
return ("shell_cmd", "expect")
def test_send_and_expect(self):
"""Tests send_and_expect() method."""
timeout = 10 # In seconds.
response = self.device.switchboard.send_and_expect(
self.test_config["shell_cmd"],
self.test_config["expect"],
timeout=timeout)
self.assertFalse(
response.timedout,
"{} switchboard.send_and_expect failed for command {!r}. "
"Did not find regex {!r} in {}s. Device output: {!r}"
.format(self.device.name, self.test_config["shell_cmd"],
self.test_config["expect"], timeout, response.before))
def test_do_and_expect(self):
"""Tests switchboard.do_and_expect() method."""
switch = MockPowerSwitch()
expect_result = self.device.switchboard.do_and_expect(
switch.turn_on_power, (), {},
["fake_string, won't match anything"],
timeout=.1)
self.assertTrue(
expect_result.timedout,
"Expected do_and_expect to time out, but timedout was False")
self.assertTrue(switch.power_is_on,
"switch.turn_on_power() did not execute. "
"The power state is still off for switch.")
def test_expect_with_bogus_logline(self):
"""Tests switchboard.expect() method for a log line that doesn't exist."""
phrase = "garblygookand more"
response = self.device.switchboard.expect([phrase], timeout=2)
self.assertTrue(response.timedout,
"Response should have timed out, but it didn't. "
f"Requested log line regex: {phrase!r}. "
f"Device output: {response.before!r}")
def test_rotate_log(self):
"""Tests max_log_size and auto log rotation features."""
old_log_file_name = self.device.log_file_name
expected_log_filename = log_process.get_next_log_filename(old_log_file_name)
expected_message = "Special message to trigger at least one log rotation"
max_log_size = len(expected_message) * 10
self.device.switchboard.set_max_log_size(max_log_size)
time.sleep(.5) # Allow time for set_max_log_size to complete.
try:
for _ in range(20):
self.device.switchboard.add_log_note(expected_message)
end_time = time.time() + 3
while (old_log_file_name == self.device.log_file_name
and time.time() < end_time):
time.sleep(0.1)
self.assertTrue(
os.path.exists(old_log_file_name),
f"Expected old log file name {old_log_file_name} to exist")
self.assertTrue(
os.path.exists(expected_log_filename),
f"Expected new log file name {expected_log_filename} to exist")
self.assertNotEqual(
old_log_file_name, self.device.log_file_name,
f"Expected log file name to change from {old_log_file_name}")
finally:
# Disable log rotation (the default) after the test.
self.device.switchboard.set_max_log_size(0)
if __name__ == "__main__":
gdm_test_base.main()
| 72 | 1 | 98 |
7eb7815265a0d0cf79d4fdb01038f83ca9367e2a | 168 | py | Python | src-tmp/old_stuff/PyRCC/src/basesplit.py | EulerProject/EulerX | 49e63e6a27be97ab30832180a47d214494388e15 | [
"MIT"
] | 15 | 2016-02-17T20:48:29.000Z | 2021-03-05T20:38:05.000Z | src-tmp/old_stuff/PyRCC/src/basesplit.py | eddy7896/EulerX | 49e63e6a27be97ab30832180a47d214494388e15 | [
"MIT"
] | 16 | 2015-02-05T18:38:48.000Z | 2021-06-14T11:38:36.000Z | src-tmp/old_stuff/PyRCC/src/basesplit.py | eddy7896/EulerX | 49e63e6a27be97ab30832180a47d214494388e15 | [
"MIT"
] | 4 | 2016-01-26T03:24:52.000Z | 2020-01-09T07:57:15.000Z | from helpfuncs import bitdecoding
# initialize and fill list for set based on base relations
bsplit = [(len(bitdecoding(i+1)),bitdecoding(i+1)) for i in xrange(255)]
| 28 | 72 | 0.755952 | from helpfuncs import bitdecoding
# initialize and fill list for set based on base relations
bsplit = [(len(bitdecoding(i+1)),bitdecoding(i+1)) for i in xrange(255)]
| 0 | 0 | 0 |
87d6a217d56d8263ab9d4353c7b0143396b66d3f | 31 | py | Python | wrapweb/default_config.py | sarum9in/wrapweb | 0a4aa6e505c587de4f2c4d61719df0c1c016dfa1 | [
"Apache-2.0"
] | null | null | null | wrapweb/default_config.py | sarum9in/wrapweb | 0a4aa6e505c587de4f2c4d61719df0c1c016dfa1 | [
"Apache-2.0"
] | null | null | null | wrapweb/default_config.py | sarum9in/wrapweb | 0a4aa6e505c587de4f2c4d61719df0c1c016dfa1 | [
"Apache-2.0"
] | null | null | null | SECRET_KEY = "changeme please"
| 15.5 | 30 | 0.774194 | SECRET_KEY = "changeme please"
| 0 | 0 | 0 |
c79a10577a87d7d01549c9617fef04931a69d02e | 407 | py | Python | test/test_controller.py | craigfouts/GestureBot | 1672fe402c6079057bc1f4adcfc64353d9195e4a | [
"MIT"
] | null | null | null | test/test_controller.py | craigfouts/GestureBot | 1672fe402c6079057bc1f4adcfc64353d9195e4a | [
"MIT"
] | null | null | null | test/test_controller.py | craigfouts/GestureBot | 1672fe402c6079057bc1f4adcfc64353d9195e4a | [
"MIT"
] | null | null | null | from server.controller.controllers import RobotController, TextRobot
| 29.071429 | 68 | 0.744472 | from server.controller.controllers import RobotController, TextRobot
def test_can_instantiate_robot_controller():
    """Smoke test: constructing a controller over a TextRobot must not raise."""
    _ = RobotController(TextRobot, home_coords=(8.0, -1.0))
def test_drive_forward():
    """drive() must report the action, the distance and the speed it was given."""
    bot = RobotController(TextRobot, home_coords=(8.0, -1.0))
    report = bot.drive(100, 10)
    for fragment in ("Driving", "distance 100", "speed 10"):
        assert fragment in report
| 290 | 0 | 46 |
eb8d5aa03cf2b8203a110b9a9f41e9df66b328b6 | 33 | py | Python | lang/py/cookbook/v2/source/cb2_1_15_sol_1.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_1_15_sol_1.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_1_15_sol_1.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | mystring = mystring.expandtabs()
| 16.5 | 32 | 0.787879 | mystring = mystring.expandtabs()
| 0 | 0 | 0 |
8518df1171ca8bf2d835fb0e1ccc062c074df5b2 | 973 | py | Python | app/routes.py | TomTom4/Pami | 44bfbabef5735737310b7e1da18faa8ca43ed4af | [
"MIT"
] | null | null | null | app/routes.py | TomTom4/Pami | 44bfbabef5735737310b7e1da18faa8ca43ed4af | [
"MIT"
] | null | null | null | app/routes.py | TomTom4/Pami | 44bfbabef5735737310b7e1da18faa8ca43ed4af | [
"MIT"
] | null | null | null | from app import app
from app import controller
from flask import request
control = controller.Controller()
@app.route('/api/login', methods=['POST'])
@app.route('/api/logout')
@app.route('/mailboxes')
@app.route('/search_emails')
@app.route('/emails')
@app.route('/email')
| 20.270833 | 74 | 0.644399 | from app import app
from app import controller
from flask import request
control = controller.Controller()
@app.route('/api/login', methods=['POST'])
def login():
    """Authenticate against the IMAP server using posted form credentials.

    Expects the form fields ``server_url``, ``email`` and ``password``.
    Returns the string "true" on success and "false" on any failure.
    """
    data = request.form
    print(data)
    try:
        response = control.connect_imap(data["server_url"], data["email"],
                                        data["password"])
        if response == "connected":
            return "true"
        # BUG FIX: previously the function fell through and returned None
        # (a 500 error in Flask) whenever the controller reported anything
        # other than "connected"; now it reports failure like logout() does.
        return "false"
    except Exception:
        return "false"
@app.route('/api/logout')
def logout():
    """Close the IMAP session and report success as "true"/"false"."""
    status = control.close_imap_connection()
    return "true" if status == "disconnected" else "false"
@app.route('/mailboxes')
def list_mailboxes():
    """Return the mailbox listing produced by the IMAP controller."""
    mailboxes = control.list_mailboxes()
    return mailboxes
@app.route('/search_emails')
def search(mailbox=None):
    """Search emails, optionally restricted to *mailbox* (None = default scope)."""
    matches = control.search_emails(mailbox)
    return matches
@app.route('/emails')
def retrieve_emails():
    """Return the full email listing from the IMAP controller."""
    emails = control.retrieve_emails()
    return emails
# BUG FIX: the route was registered as '/email' with no URL placeholder while
# the view function required an argument, so every request to this endpoint
# failed with a TypeError before reaching the controller. The placeholder is
# added (and the parameter renamed so it no longer shadows the builtin `id`).
@app.route('/email/<mail_id>')
def retrieve_mail(mail_id):
    """Return a single email, identified by the id in the URL path."""
    return control.retrieve_email(mail_id)
| 554 | 0 | 132 |
3f7306111a0bfea569e4064702ba956954de5f64 | 2,394 | py | Python | core/migrations/0068_auto_20210514_0739.py | Nephrolog-lt/nephrolog-api | ccd2162aff02b2abfab0f285779e5d8457be1788 | [
"Apache-2.0"
] | 2 | 2020-12-17T13:50:42.000Z | 2021-01-09T07:01:07.000Z | core/migrations/0068_auto_20210514_0739.py | Nephrolog-lt/nephrolog-api | ccd2162aff02b2abfab0f285779e5d8457be1788 | [
"Apache-2.0"
] | 2 | 2021-08-25T05:02:56.000Z | 2022-01-16T18:29:49.000Z | core/migrations/0068_auto_20210514_0739.py | Nephrolog-lt/nephrolog-api | ccd2162aff02b2abfab0f285779e5d8457be1788 | [
"Apache-2.0"
] | 1 | 2020-11-16T01:40:15.000Z | 2020-11-16T01:40:15.000Z | # Generated by Django 3.2.3 on 2021-05-14 07:39
from django.db import migrations
| 28.5 | 59 | 0.560568 | # Generated by Django 3.2.3 on 2021-05-14 07:39
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.2.3, 2021-05-14).

    Removes a set of deprecated profile fields (birthday, kidney/diabetes/
    dialysis details, weight_kg, year_of_birth) from both ``UserProfile``
    and its ``HistoricalUserProfile`` counterpart, then deletes the two
    deprecated GeneralRecommendation* models.

    NOTE: operation order is significant in Django migrations, so the body
    is kept exactly as generated.
    """
    dependencies = [
        ('core', '0067_auto_20210408_1304'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='historicaluserprofile',
            name='birthday',
        ),
        migrations.RemoveField(
            model_name='historicaluserprofile',
            name='chronic_kidney_disease_years',
        ),
        migrations.RemoveField(
            model_name='historicaluserprofile',
            name='diabetes_complications',
        ),
        migrations.RemoveField(
            model_name='historicaluserprofile',
            name='diabetes_years',
        ),
        migrations.RemoveField(
            model_name='historicaluserprofile',
            name='dialysis_type',
        ),
        migrations.RemoveField(
            model_name='historicaluserprofile',
            name='periotonic_dialysis_type',
        ),
        migrations.RemoveField(
            model_name='historicaluserprofile',
            name='weight_kg',
        ),
        migrations.RemoveField(
            model_name='historicaluserprofile',
            name='year_of_birth',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='birthday',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='chronic_kidney_disease_years',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='diabetes_complications',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='diabetes_years',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='dialysis_type',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='periotonic_dialysis_type',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='weight_kg',
        ),
        migrations.RemoveField(
            model_name='userprofile',
            name='year_of_birth',
        ),
        migrations.DeleteModel(
            name='GeneralRecommendationDeprecated',
        ),
        migrations.DeleteModel(
            name='GeneralRecommendationDeprecatedCategory',
        ),
    ]
| 0 | 2,288 | 23 |
6ed4711a33490ba853dc7e6416ae43acafc56d85 | 3,566 | py | Python | models/blocks.py | ghokun-thesis/domain-networks | 8f64182a5ef404a0e41eb023812de5efefe4233e | [
"MIT"
] | 1 | 2020-12-19T11:56:10.000Z | 2020-12-19T11:56:10.000Z | models/blocks.py | ghokun-thesis/domain-networks | 8f64182a5ef404a0e41eb023812de5efefe4233e | [
"MIT"
] | null | null | null | models/blocks.py | ghokun-thesis/domain-networks | 8f64182a5ef404a0e41eb023812de5efefe4233e | [
"MIT"
] | 1 | 2021-01-11T13:55:32.000Z | 2021-01-11T13:55:32.000Z | """
gathering of blocks/group of layers used in domainnet.
author: David-Alexandre Beaupre
date: 2020-04-27
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
| 41.952941 | 119 | 0.643298 | """
gathering of blocks/group of layers used in domainnet.
author: David-Alexandre Beaupre
date: 2020-04-27
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class LinearReLU(nn.Module):
    """Fully connected layer whose output is passed through a ReLU.

    :param in_dim: number of input features.
    :param out_dim: number of output features.
    :param bias: whether the linear layer learns an additive bias.
    """
    def __init__(self, in_dim: int, out_dim: int, bias: bool = True):
        super().__init__()
        self.linear = nn.Linear(in_features=in_dim, out_features=out_dim, bias=bias)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the linear projection, then the ReLU non-linearity."""
        projected = self.linear(x)
        return F.relu(projected)
class Conv2dBN(nn.Module):
    """2d convolution followed by batch normalization.

    :param in_dim: number of input channels.
    :param out_dim: number of output channels.
    :param ksize: convolution kernel size.
    :param stride: distance between consecutive convolutions.
    :param padding: number of pixels added on the contour of the tensor.
    :param dilation: spacing between the pixels the kernel looks at.
    :param bias: whether the convolution learns a bias term.
    """
    def __init__(self, in_dim: int, out_dim: int, ksize: (int, int), stride: int = 1,
                 padding: int = 0, dilation: int = 1, bias: bool = True):
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=ksize,
                              stride=stride, padding=padding, dilation=dilation, bias=bias)
        self.bn = nn.BatchNorm2d(num_features=out_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Convolve the input, then batch-normalize the resulting features."""
        features = self.conv(x)
        return self.bn(features)
class Conv2dBNReLU(nn.Module):
    """2d convolution, then batch normalization, then a ReLU.

    :param in_dim: number of input channels.
    :param out_dim: number of output channels.
    :param ksize: convolution kernel size.
    :param stride: distance between consecutive convolutions.
    :param padding: number of pixels added on the contour of the tensor.
    :param dilation: spacing between the pixels the kernel looks at.
    :param bias: whether the convolution learns a bias term.
    """
    def __init__(self, in_dim: int, out_dim: int, ksize: (int, int), stride: int = 1,
                 padding: int = 0, dilation: int = 1, bias: bool = True):
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=ksize,
                              stride=stride, padding=padding, dilation=dilation, bias=bias)
        self.bn = nn.BatchNorm2d(num_features=out_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Convolve, batch-normalize, then apply ReLU to the input."""
        normalized = self.bn(self.conv(x))
        return F.relu(normalized)
| 0 | 3,313 | 69 |
86b69aefd9193e37b6fef5fd841bfbbfe3bd9e7f | 1,341 | py | Python | sabre/libs/helper.py | gc8O5RbU/sabre-scripts | 20f584f8a1ba473bc16d56ee13c57732fcf0f460 | [
"Apache-2.0"
] | null | null | null | sabre/libs/helper.py | gc8O5RbU/sabre-scripts | 20f584f8a1ba473bc16d56ee13c57732fcf0f460 | [
"Apache-2.0"
] | null | null | null | sabre/libs/helper.py | gc8O5RbU/sabre-scripts | 20f584f8a1ba473bc16d56ee13c57732fcf0f460 | [
"Apache-2.0"
] | null | null | null | from os.path import split, join, abspath, exists
from os import environ
class PathHelper:
    """This class provides a set of assisting functions to deal with
    path issues."""
    @classmethod
    def abspath_of_executable(cls, path: str):
        """Given a command that can run in shell (with the current environment
        settings), this function figures out the absolute path to the command.
        e.g.::
            python3 -> /usr/bin/python3
        A bare program name is searched along PATH; a path with a directory
        component is made absolute and checked for existence.
        :raises FileNotFoundError: no executable can be found at `path`
        :rtype: str
        """
        import os  # for the platform's PATH entry separator
        base_path, name = split(path)
        if base_path == '':
            # this is the name of a program
            # search from PATH
            if 'PATH' not in environ:
                raise FileNotFoundError(path)
            # BUG FIX: PATH entries were split on a hard-coded ':', which is
            # wrong on Windows (';'); os.pathsep is correct on every platform.
            for prefix in environ['PATH'].strip().split(os.pathsep):
                prefix = prefix.strip()
                if exists(join(prefix, name)):
                    return join(prefix, name)
            raise FileNotFoundError(path)
        else:
            full_path = abspath(path)
            if not exists(full_path):
                raise FileNotFoundError(path)
            else:
                return full_path
| 31.186047 | 78 | 0.561521 | from os.path import split, join, abspath, exists
from os import environ
class PathHelper:
    """Utility helpers for resolving filesystem paths."""
    @classmethod
    def abspath_of_executable(cls, path: str):
        """Resolve *path* to the absolute location of an executable.

        A bare program name (e.g. ``python3``) is looked up in each entry of
        the ``PATH`` environment variable, in order; anything containing a
        directory component is simply made absolute and checked for existence.

        :raises FileNotFoundError: when no file exists at the resolved path.
        :rtype: str
        """
        directory, name = split(path)
        if directory:
            # Explicit location given: normalise it and verify it exists.
            candidate = abspath(path)
            if exists(candidate):
                return candidate
            raise FileNotFoundError
        # Bare command name: scan the PATH entries in order.
        if 'PATH' not in environ:
            raise FileNotFoundError
        for entry in environ['PATH'].strip().split(':'):
            candidate = join(entry.strip(), name)
            if exists(candidate):
                return candidate
        raise FileNotFoundError
| 0 | 0 | 0 |
a3d459b729b8723ee3b4d96a4ae3b36fdc6adc97 | 1,877 | py | Python | misc/command_line.py | ELS-RD/anonymisation | 0b02b4e3069729673e0397a1dbbc50ae9612d90f | [
"Apache-2.0"
] | 81 | 2019-05-02T17:29:27.000Z | 2021-10-14T07:24:28.000Z | misc/command_line.py | ELS-RD/anonymisation | 0b02b4e3069729673e0397a1dbbc50ae9612d90f | [
"Apache-2.0"
] | 13 | 2020-07-13T13:15:45.000Z | 2021-01-17T18:33:58.000Z | misc/command_line.py | ELS-RD/anonymisation | 0b02b4e3069729673e0397a1dbbc50ae9612d90f | [
"Apache-2.0"
] | 18 | 2019-05-21T10:04:47.000Z | 2021-11-24T21:44:07.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from argparse import ArgumentParser, Namespace
def train_parse_args(train: bool) -> Namespace:
    """Build and evaluate the command-line parser.

    :param train: when True, an ``--epochs`` option is additionally required.
    :returns: a namespace with all the set parameters
    """
    parser = ArgumentParser(
        description="Annotate a sample of the given files in the input directory")
    parser.add_argument("--model-dir", dest="model_dir", action="store",
                        required=False, help="Model directory")
    parser.add_argument("--input-files-dir", dest="input_dir", action="store",
                        required=True, help="Input files directory")
    parser.add_argument("--dev-set-size", dest="dev_size", type=float,
                        action="store", required=False, help="Size of dev set")
    parser.add_argument("--nb_segment", type=int, action="store",
                        required=False, help="Number of segment")
    parser.add_argument("--segment", type=int, action="store",
                        required=False, help="Number of segment")
    if train:
        parser.add_argument("--epochs", dest="epoch", type=int, action="store",
                            required=True, help="Number of epochs")
    return parser.parse_args()
| 44.690476 | 119 | 0.720831 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from argparse import ArgumentParser, Namespace
def train_parse_args(train: bool) -> Namespace:
    """Parse the annotation-script command line.

    :param train: when True, an ``--epochs`` option is additionally required.
    :returns: a namespace with all the set parameters
    """
    parser = ArgumentParser(
        description="Annotate a sample of the given files in the input directory")
    parser.add_argument("--model-dir", dest="model_dir", action="store",
                        required=False, help="Model directory")
    parser.add_argument("--input-files-dir", dest="input_dir", action="store",
                        required=True, help="Input files directory")
    parser.add_argument("--dev-set-size", dest="dev_size", type=float,
                        action="store", required=False, help="Size of dev set")
    parser.add_argument("--nb_segment", type=int, action="store",
                        required=False, help="Number of segment")
    parser.add_argument("--segment", type=int, action="store",
                        required=False, help="Number of segment")
    if train:
        parser.add_argument("--epochs", dest="epoch", type=int, action="store",
                            required=True, help="Number of epochs")
    return parser.parse_args()
| 0 | 0 | 0 |
fa10b7c8627c7e859b8c0b7e281874ed2e93b26d | 2,139 | py | Python | tests/unit/test_units.py | imneonizer/Imageinary | 5b76466290d2021fa1ccdc5db61217fc06b73735 | [
"Apache-2.0"
] | 25 | 2020-11-02T20:05:07.000Z | 2022-03-21T10:44:57.000Z | tests/unit/test_units.py | imneonizer/Imageinary | 5b76466290d2021fa1ccdc5db61217fc06b73735 | [
"Apache-2.0"
] | 21 | 2020-11-03T22:00:08.000Z | 2022-03-02T21:34:11.000Z | tests/unit/test_units.py | imneonizer/Imageinary | 5b76466290d2021fa1ccdc5db61217fc06b73735 | [
"Apache-2.0"
] | 8 | 2021-05-24T08:19:13.000Z | 2022-03-21T11:09:11.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
from imagine import imagine
| 37.526316 | 76 | 0.596073 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
from imagine import imagine
class TestUnits:
    @pytest.fixture(autouse=True)
    def setup(self, tmpdir):
        # Autouse fixture: stash pytest's per-test temporary directory on the
        # instance so every test method can reference it.
        self.tmpdir = tmpdir
    def teardown_method(self):
        """Remove the temp directory if a test created it; ignore if absent."""
        try:
            os.rmdir(str(self.tmpdir))
        except OSError:
            # The directory wasn't created, as expected
            pass
    def test_directory_creation_if_not_exist(self):
        # Smoke test: creating the (not yet existing) directory must not raise.
        imagine._try_create_directory(str(self.tmpdir))
    def test_error_input_directory_doesnt_exist(self):
        # A missing input directory must be reported as a RuntimeError.
        with pytest.raises(RuntimeError):
            imagine._check_directory_exists(os.path.join(str(self.tmpdir),
                                                         'dne'))
def test_record_slice_yields_expected_results(self):
slices = [range(x, x + 100) for x in range(0, 1000, 100)]
results = imagine._record_slice(self.tmpdir,
self.tmpdir,
'test_record_',
range(0, 1000),
100,
10)
for count, result in enumerate(results):
source, dest, name, images, num = result
assert source == self.tmpdir
assert dest == self.tmpdir
assert name == 'test_record_'
assert images == slices[count]
assert num == count
# Enumerate is 0-based, so the final number will be 9 for 10 records
assert count == 10 - 1
| 1,291 | 163 | 23 |
8ade28a0dca24d0f82a8ac85e9cb31c49b9edf12 | 3,369 | py | Python | oneshot/alfassy/setops_funcs.py | nganltp/admicro-LaSO | 857d67a40af437ab57068fb0de35e4ada56c6209 | [
"BSD-3-Clause"
] | 83 | 2019-04-14T06:58:15.000Z | 2022-03-01T01:34:03.000Z | oneshot/alfassy/setops_funcs.py | leokarlin/LaSO | 8941bdc9316361ad03dbc2bcabd4bf9922c0ecc7 | [
"BSD-3-Clause"
] | 17 | 2019-04-28T04:26:24.000Z | 2022-01-19T15:37:42.000Z | oneshot/alfassy/setops_funcs.py | nganltp/admicro-LaSO | 857d67a40af437ab57068fb0de35e4ada56c6209 | [
"BSD-3-Clause"
] | 15 | 2019-09-05T04:22:10.000Z | 2022-01-13T15:31:25.000Z | import numpy as np
import torch
| 33.029412 | 85 | 0.636094 | import numpy as np
import torch
def set_subtraction_operation(labels1, labels2):
    """Element-wise set difference over batched multi-hot label rows.

    A position becomes 1 when labels1 has the class (== 1) and labels2 does
    not (== 0); every other position becomes 0.

    :returns: torch tensor of 0/1 flags, shape (batch, classes).
    """
    num_rows = labels1.shape[0]
    num_classes = labels1.shape[1]
    difference = [
        [1 if (labels1[row][col] == 1) and (labels2[row][col] == 0) else 0
         for col in range(num_classes)]
        for row in range(num_rows)
    ]
    return torch.from_numpy(np.asarray(difference))
def set_union_operation(labels1, labels2):
    """Element-wise set union over batched multi-hot label rows.

    A position becomes 1 when either input has the class (== 1).

    :returns: torch tensor of 0/1 flags, shape (batch, classes).
    """
    num_rows = labels1.shape[0]
    num_classes = labels1.shape[1]
    merged = [
        [1 if (labels1[row][col] == 1) or (labels2[row][col] == 1) else 0
         for col in range(num_classes)]
        for row in range(num_rows)
    ]
    return torch.from_numpy(np.asarray(merged))
def set_intersection_operation(labels1, labels2):
    """Element-wise set intersection over batched multi-hot label rows.

    A position becomes 1 only when both inputs have the class (== 1).

    :returns: torch tensor of 0/1 flags, shape (batch, classes).
    """
    num_rows = labels1.shape[0]
    num_classes = labels1.shape[1]
    common = []
    for row in range(num_rows):
        common.append([1 if (labels1[row][col] == 1) and (labels2[row][col] == 1) else 0
                       for col in range(num_classes)])
    return torch.from_numpy(np.asarray(common))
def set_subtraction_operation_one_sample(labels1, labels2):
    """Single-sample set difference over multi-hot label vectors.

    :returns: numpy array where position i is 1 iff labels1[i] == 1 and
        labels2[i] == 0.
    """
    num_classes = labels1.shape[0]
    flags = []
    for idx in range(num_classes):
        keep = (labels1[idx] == 1) and (labels2[idx] == 0)
        flags.append(1 if keep else 0)
    return np.asarray(flags)
def set_union_operation_one_sample(labels1, labels2):
    """Single-sample set union over multi-hot label vectors.

    :returns: numpy array where position i is 1 iff either input has the
        class (== 1) at i.
    """
    num_classes = labels1.shape[0]
    flags = [1 if (labels1[idx] == 1) or (labels2[idx] == 1) else 0
             for idx in range(num_classes)]
    return np.asarray(flags)
def set_intersection_operation_one_sample(labels1, labels2):
classesNum = labels1.shape[0]
subLabelPerClass = []
for classNum in range(classesNum):
if (labels1[classNum] == 1) and (labels2[classNum] == 1):
subLabelPerClass += [1]
else:
subLabelPerClass += [0]
npSubLabels = np.asarray(subLabelPerClass)
# torSubLabels = torch.from_numpy(npSubLabels)
return npSubLabels | 3,194 | 0 | 138 |
0ba090f52a1367153a25f3f82916433884c9a32d | 3,675 | py | Python | SunGazer/Collectors/GeoVisionCam.py | zimmaz/SunGazer | 66ead266df8a4f9a2568baec0e071f5f9e8ef89a | [
"MIT"
] | null | null | null | SunGazer/Collectors/GeoVisionCam.py | zimmaz/SunGazer | 66ead266df8a4f9a2568baec0e071f5f9e8ef89a | [
"MIT"
] | null | null | null | SunGazer/Collectors/GeoVisionCam.py | zimmaz/SunGazer | 66ead266df8a4f9a2568baec0e071f5f9e8ef89a | [
"MIT"
] | null | null | null | import re
import hashlib
import requests
import cv2
import numpy as np
from bs4 import BeautifulSoup
from SunGazer.Collectors.Camera import Cam
class GeoVisionCam(Cam):
    """
    GeoVision IP camera class.

    Talks to the camera's CGI endpoints: scrapes the login page for its
    salt values, posts salted MD5 credentials, then uses the returned
    session tokens to fetch pictures.
    """
    def __init__(self, cam_address):
        """
        Construct a cam object.
        Parameters
        ----------
        cam_address : str
            url to the IP camera login page.
        """
        super().__init__()
        self.cam_address = cam_address
        # Session tokens; populated by login().
        self.user_token = None
        self.pass_token = None
        self.desc_token = None

    # BUG FIX: login() was wrongly decorated with @staticmethod while taking
    # `self` and calling instance helpers that were missing from this class;
    # the stray decorator is removed and the hashing helpers are restored.
    @staticmethod
    def _gen_md5(string):
        """Return the hexadecimal MD5 digest of *string* (UTF-8 encoded)."""
        return hashlib.md5(string.encode('utf-8')).hexdigest()

    def _get_salt_values(self):
        """Scrape the cc1/cc2 salt values from the camera's login page."""
        # get html and JS code as text
        page = requests.get('{}/ssi.cgi/Login.htm'.format(self.cam_address))
        html_content = BeautifulSoup(page.content, "html.parser").text
        # parse the salt values from the HTML/JS code of login page(cc1 and cc2)
        salt = re.search(r'cc1=\"(.{4})\".*cc2=\"(.{4})\"', html_content)
        return salt.groups()

    def _get_hashed_credentials(self, username, pwd):
        """Salt and hash the credentials the way the camera's own JS does."""
        cc1, cc2 = self._get_salt_values()
        # hash mechanism/formula based on the JS code of camera interface
        umd5 = '{}{}{}'.format(cc1, username.lower(), cc2)
        pmd5 = '{}{}{}'.format(cc2, pwd.lower(), cc1)
        return self._gen_md5(umd5).upper(), self._gen_md5(pmd5).upper()

    def login(self, username, pwd):
        """
        Login to the IP camera.
        Parameters
        ----------
        username : str
            username for the IP camera.
        pwd : str
            password for the IP camera.
        """
        umd5, pmd5 = self._get_hashed_credentials(username, pwd)
        data = {
            'grp': -1,
            'username': '',
            'password': '',
            'Apply': 'Apply',
            'umd5': umd5,
            'pmd5': pmd5,
            'browser': 1,
            'is_check_OCX_OK': 0
        }
        headers = {
            'User-Agent': 'Mozilla'
        }
        c = requests.post('{}/LoginPC.cgi'.format(self.cam_address), data=data, headers=headers)
        self.user_token, self.pass_token, self.desc_token = re.search(
            r'gUserName\s=\s\"(.*)\";\n.*\s\"(.*)\";\n.*\"(.*)\"',
            c.text).groups()

    def cap_pic(self, output='array'):
        """
        Capture a picture.
        Parameters
        ----------
        output : str, default 'array'
            output type of the picture, if a path given, the picture will be saved there.
        Returns
        -------
        numpy.array
            image array.
        """
        if self.user_token and self.pass_token and self.desc_token:
            data = {
                'username': self.user_token,
                'password': self.pass_token,
                'data_type': 0,
                'attachment': 1,
                'channel': 1,
                'secret': 1,
                'key': self.desc_token
            }
            r = requests.post('{}/PictureCatch.cgi'.format(self.cam_address), data=data, stream=True)
            if output.lower() == 'array':
                return cv2.imdecode(np.frombuffer(r.content, np.uint8), -1)
            # write the image in the disk
            with open(output, 'wb') as f:
                for chunk in r.iter_content():
                    f.write(chunk)
        else:
            raise Exception('Authentication failed! Wrong username or password!')

    def cap_video(self, output):
        """
        Capture video.
        """
        raise NotImplementedError('This method is not implemented yet!')
| 30.625 | 101 | 0.530612 | import re
import hashlib
import requests
import cv2
import numpy as np
from bs4 import BeautifulSoup
from SunGazer.Collectors.Camera import Cam
class GeoVisionCam(Cam):
    """
    GeoVision IP camera class.

    Scrapes the camera's login page for salt values, posts salted MD5
    credentials, then uses the returned session tokens to fetch pictures.
    """
    def __init__(self, cam_address):
        """
        Construct a cam object.
        Parameters
        ----------
        cam_address : str
            url to the IP camera login page.
        """
        super().__init__()
        self.cam_address = cam_address
        # Session tokens issued by the camera; None until login() succeeds.
        self.user_token = None
        self.pass_token = None
        self.desc_token = None
    @staticmethod
    def _gen_md5(string):
        # Hexadecimal MD5 digest of the UTF-8 encoded input string.
        return hashlib.md5(string.encode('utf-8')).hexdigest()
    def _get_salt_values(self):
        # get html and JS code as text
        page = requests.get('{}/ssi.cgi/Login.htm'.format(self.cam_address))
        html_content = BeautifulSoup(page.content, "html.parser").text
        # parse the salt values from the HTML/JS code of login page(cc1 and cc2)
        # NOTE(review): re.search() returns None if cc1/cc2 are absent, which
        # makes .groups() raise AttributeError -- confirm desired failure mode.
        salt = re.search(r'cc1=\"(.{4})\".*cc2=\"(.{4})\"', html_content)
        return salt.groups()
    def _get_hashed_credentials(self, username, pwd):
        cc1, cc2 = self._get_salt_values()
        # hash mechanism/formula based on the JS code of camera interface
        umd5 = '{}{}{}'.format(cc1, username.lower(), cc2)
        pmd5 = '{}{}{}'.format(cc2, pwd.lower(), cc1)
        # Uppercased hex digests (the camera apparently expects uppercase --
        # confirm against the camera's JS).
        return self._gen_md5(umd5).upper(), self._gen_md5(pmd5).upper()
    def login(self, username, pwd):
        """
        Login to the IP camera.
        Parameters
        ----------
        username : str
            username for the IP camera.
        pwd : str
            password for the IP camera.
        """
        umd5, pmd5 = self._get_hashed_credentials(username, pwd)
        # Payload for LoginPC.cgi; umd5/pmd5 carry the salted credential hashes.
        data = {
            'grp': -1,
            'username': '',
            'password': '',
            'Apply': 'Apply',
            'umd5': umd5,
            'pmd5': pmd5,
            'browser': 1,
            'is_check_OCX_OK': 0
        }
        headers = {
            'User-Agent': 'Mozilla'
        }
        c = requests.post('{}/LoginPC.cgi'.format(self.cam_address), data=data, headers=headers)
        # Extract the three session tokens from the returned JS.
        # NOTE(review): re.search() returns None on an unexpected response,
        # so .groups() would raise AttributeError -- confirm failure mode.
        self.user_token, self.pass_token, self.desc_token = re.search(
            r'gUserName\s=\s\"(.*)\";\n.*\s\"(.*)\";\n.*\"(.*)\"',
            c.text).groups()
    def cap_pic(self, output='array'):
        """
        Capture a picture.
        Parameters
        ----------
        output : str, default 'array'
            output type of the picture, if a path given, the picture will be saved there.
        Returns
        -------
        numpy.array
            image array.
        """
        if self.user_token and self.pass_token and self.desc_token:
            data = {
                'username': self.user_token,
                'password': self.pass_token,
                'data_type': 0,
                'attachment': 1,
                'channel': 1,
                'secret': 1,
                'key': self.desc_token
            }
            r = requests.post('{}/PictureCatch.cgi'.format(self.cam_address), data=data, stream=True)
            if output.lower() == 'array':
                return cv2.imdecode(np.frombuffer(r.content, np.uint8), -1)
            # write the image in the disk
            # (any non-'array' value is treated as a file path; the method
            # then implicitly returns None)
            with open(output, 'wb') as f:
                for chunk in r.iter_content():
                    f.write(chunk)
        else:
            raise Exception('Authentication failed! Wrong username or password!')
    def cap_video(self, output):
        """
        Capture video.
        """
        raise NotImplementedError('This method is not implemented yet!')
| 770 | 0 | 80 |
5f4fb012fc4dedc7b9bcc04a885e6fc2f827b17e | 215 | py | Python | igrapher.py | j-towns/igrapher | 3ba5b3e1897a9ad8767fdba61772756c71d2aa82 | [
"MIT"
] | null | null | null | igrapher.py | j-towns/igrapher | 3ba5b3e1897a9ad8767fdba61772756c71d2aa82 | [
"MIT"
] | null | null | null | igrapher.py | j-towns/igrapher | 3ba5b3e1897a9ad8767fdba61772756c71d2aa82 | [
"MIT"
] | null | null | null | from ipywidgets import DOMWidget
from traitlets import Unicode, Int
| 30.714286 | 56 | 0.772093 | from ipywidgets import DOMWidget
from traitlets import Unicode, Int
class IGrapherWidget(DOMWidget):
    """ipywidgets DOM widget bound to the 'IGrapherView' defined in igrapher.js."""
    # Synced traitlets telling the front end which JS view class and module to load.
    _view_name = Unicode('IGrapherView').tag(sync=True)
    _view_module = Unicode('igrapher.js').tag(sync=True)
| 0 | 124 | 23 |
9a129b601df40ff4df214bf709dcf1c690cd8c54 | 13,619 | py | Python | appion/bin/pyace2.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | appion/bin/pyace2.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | appion/bin/pyace2.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | 1 | 2019-09-05T20:58:37.000Z | 2019-09-05T20:58:37.000Z | #!/usr/bin/env python
#pythonlib
import os
import re
import sys
import math
import time
import glob
import numpy
import shutil
import subprocess
#appion
from appionlib import apFile
from appionlib import apParam
from appionlib import apImage
from appionlib import apDisplay
from appionlib import apDatabase
from appionlib import appiondata
from appionlib import appionLoop2
from appionlib import apInstrument
from appionlib.apCtf import ctfdb
from appionlib.apCtf import ctfinsert
# other myami
from pyami import mrc, primefactor, imagefun
class Ace2Loop(appionLoop2.AppionLoop):
    """
    appion Loop function that
    runs Craig's ace2 program
    to estimate the CTF in images
    """
    #======================
    #======================
    #======================
    #======================
    #======================
    def reprocessImage(self, imgdata):
        """
        Returns
        True, if an image should be reprocessed
        False, if an image was processed and should NOT be reprocessed
        None, if image has not yet been processed
        e.g. a confidence less than 80%
        """
        # No reprocess threshold configured: always reprocess.
        if self.params['reprocess'] is None:
            return True
        ctfvalue = ctfdb.getBestCtfByResolution(imgdata, msg=False)
        if ctfvalue is None:
            return True
        # NOTE(review): 'conf' is undefined in this scope (NameError when this
        # line is reached); it presumably should be a confidence value derived
        # from ctfvalue -- confirm against the full appion source.
        if conf > self.params['reprocess']:
            # small, unbinned images can give same defocus values for 1 & 2:
            if self.params['bin'] == 1 or ctfvalue['defocus1'] != ctfvalue['defocus2']:
                return False
        return True
    #======================
    #======================
    #======================
    #======================
    #======================
if __name__ == '__main__':
    # Script entry point: instantiate the loop and process all session images.
    imgLoop = Ace2Loop()
    imgLoop.run()
| 37.414835 | 113 | 0.677363 | #!/usr/bin/env python
#pythonlib
import os
import re
import sys
import math
import time
import glob
import numpy
import shutil
import subprocess
#appion
from appionlib import apFile
from appionlib import apParam
from appionlib import apImage
from appionlib import apDisplay
from appionlib import apDatabase
from appionlib import appiondata
from appionlib import appionLoop2
from appionlib import apInstrument
from appionlib.apCtf import ctfdb
from appionlib.apCtf import ctfinsert
# other myami
from pyami import mrc, primefactor, imagefun
class Ace2Loop(appionLoop2.AppionLoop):
"""
appion Loop function that
runs Craig's ace2 program
to estimate the CTF in images
"""
#======================
    def setProcessingDirName(self):
        """Use "ctf" as the name of this run's processing subdirectory."""
        self.processdirname = "ctf"
#======================
    def preLoopFunctions(self):
        """Per-run setup: create the power-spectra output directory and locate ace2."""
        self.powerspecdir = os.path.join(self.params['rundir'], "opimages")
        apParam.createDirectory(self.powerspecdir, warning=False)
        self.ace2exe = self.getACE2Path()
        self.ctfrundata = None
        return
#======================
    def getACE2Path(self):
        """Locate the ace2.exe binary: first via `which`, then under the
        appion directory's bin/; otherwise report the failure through
        apDisplay.printError.
        """
        exename = 'ace2.exe'
        # Shell out to 'which'; exename is a constant, so no injection risk here.
        ace2exe = subprocess.Popen("which "+exename, shell=True, stdout=subprocess.PIPE).stdout.read().strip()
        if not os.path.isfile(ace2exe):
            ace2exe = os.path.join(apParam.getAppionDirectory(), 'bin', exename)
        if not os.path.isfile(ace2exe):
            apDisplay.printError(exename+" was not found at: "+apParam.getAppionDirectory())
        return ace2exe
#======================
    def postLoopFunctions(self):
        """Per-run teardown: delete temporary *.corrected.mrc files, then print a CTF summary."""
        pattern = os.path.join(self.params['rundir'], self.params['sessionname']+'*.corrected.mrc')
        apFile.removeFilePattern(pattern)
        ctfdb.printCtfSummary(self.params, self.imgtree)
#======================
    def reprocessImage(self, imgdata):
        """
        Returns
        True, if an image should be reprocessed
        False, if an image was processed and should NOT be reprocessed
        None, if image has not yet been processed
        e.g. a confidence less than 80%
        """
        # No reprocess threshold configured: always reprocess.
        if self.params['reprocess'] is None:
            return True
        ctfvalue = ctfdb.getBestCtfByResolution(imgdata, msg=False)
        if ctfvalue is None:
            return True
        # NOTE(review): 'conf' is undefined in this scope (NameError when this
        # line is reached); it presumably should be a confidence value derived
        # from ctfvalue -- confirm against the full appion source.
        if conf > self.params['reprocess']:
            # small, unbinned images can give same defocus values for 1 & 2:
            if self.params['bin'] == 1 or ctfvalue['defocus1'] != ctfvalue['defocus2']:
                return False
        return True
#======================
def processImage(self, imgdata):
self.ctfvalues = {}
bestdef = ctfdb.getBestCtfByResolution(imgdata, msg=True)
apix = apDatabase.getPixelSize(imgdata)
if (not (self.params['onepass'] and self.params['zeropass'])):
maskhighpass = False
ace2inputpath = os.path.join(imgdata['session']['image path'],imgdata['filename']+".mrc")
else:
maskhighpass = True
filterimg = apImage.maskHighPassFilter(imgdata['image'],apix,1,self.params['zeropass'],self.params['onepass'])
ace2inputpath = os.path.join(self.params['rundir'],imgdata['filename']+".mrc")
mrc.write(filterimg,ace2inputpath)
# make sure that the image is a square
dimx = imgdata['camera']['dimension']['x']
dimy = imgdata['camera']['dimension']['y']
if dimx != dimy:
dims = [dimx,dimy]
dims.sort()
apDisplay.printMsg("resizing image: %ix%i to %ix%i" % (dimx,dimy,dims[0],dims[0]))
mrcarray = apImage.mrcToArray(ace2inputpath,msg=False)
clippedmrc = apImage.frame_cut(mrcarray,[dims[0],dims[0]])
ace2inputpath = os.path.join(self.params['rundir'],imgdata['filename']+".mrc")
apImage.arrayToMrc(clippedmrc,ace2inputpath,msg=False)
### pad out image to speed up FFT calculations for non-standard image sizes
print "checking prime factor"
if primefactor.isGoodStack(dimx) is False:
goodsize = primefactor.getNextEvenPrime(dimx)
factor = float(goodsize) / float(dimx)
apDisplay.printMsg("padding image: %ix%i to %ix%i" % (dimx,dimy,dimx*factor,dimy*factor))
mrcarray = apImage.mrcToArray(ace2inputpath,msg=False)
# paddedmrc = imagefun.pad(mrcarray, None, factor)
paddedmrc = apImage.frame_constant(mrcarray, (dimx*factor,dimy*factor), cval=mrcarray.mean())
ace2inputpath = os.path.join(self.params['rundir'],imgdata['filename']+".mrc")
apImage.arrayToMrc(paddedmrc,ace2inputpath,msg=False)
inputparams = {
'input': ace2inputpath,
'cs': self.params['cs'],
'kv': imgdata['scope']['high tension']/1000.0,
'apix': apix,
'binby': self.params['bin'],
}
### make standard input for ACE 2
apDisplay.printMsg("Ace2 executable: "+self.ace2exe)
commandline = ( self.ace2exe
+ " -i " + str(inputparams['input'])
+ " -b " + str(inputparams['binby'])
+ " -c " + str(inputparams['cs'])
+ " -k " + str(inputparams['kv'])
+ " -a " + str(inputparams['apix'])
+ " -e " + str(self.params['edge_b'])+","+str(self.params['edge_t'])
+ " -r " + str(self.params['rotblur'])
+ "\n" )
### run ace2
apDisplay.printMsg("running ace2 at "+time.asctime())
apDisplay.printColor(commandline, "purple")
t0 = time.time()
if self.params['verbose'] is True:
ace2proc = subprocess.Popen(commandline, shell=True)
else:
aceoutf = open("ace2.out", "a")
aceerrf = open("ace2.err", "a")
ace2proc = subprocess.Popen(commandline, shell=True, stderr=aceerrf, stdout=aceoutf)
ace2proc.wait()
### check if ace2 worked
basename = os.path.basename(ace2inputpath)
imagelog = basename+".ctf.txt"
if not os.path.isfile(imagelog) and self.stats['count'] <= 1:
### ace2 always crashes on first image??? .fft_wisdom file??
time.sleep(1)
if self.params['verbose'] is True:
ace2proc = subprocess.Popen(commandline, shell=True)
else:
aceoutf = open("ace2.out", "a")
aceerrf = open("ace2.err", "a")
ace2proc = subprocess.Popen(commandline, shell=True, stderr=aceerrf, stdout=aceoutf)
ace2proc.wait()
if self.params['verbose'] is False:
aceoutf.close()
aceerrf.close()
if not os.path.isfile(imagelog):
lddcmd = "ldd "+self.ace2exe
lddproc = subprocess.Popen(lddcmd, shell=True)
lddproc.wait()
apDisplay.printError("ace2 did not run")
apDisplay.printMsg("ace2 completed in " + apDisplay.timeString(time.time()-t0))
### parse log file
self.ctfvalues = {
'cs': self.params['cs'],
'volts': imgdata['scope']['high tension'],
}
logf = open(imagelog, "r")
apDisplay.printMsg("reading log file %s"%(imagelog))
for line in logf:
sline = line.strip()
if re.search("^Final Defocus: ", sline):
### old ACE2
apDisplay.printError("This old version of ACE2 has a bug in the astigmastism, please upgrade ACE2 now")
#parts = sline.split()
#self.ctfvalues['defocus1'] = float(parts[2])
#self.ctfvalues['defocus2'] = float(parts[3])
### convert to degrees
#self.ctfvalues['angle_astigmatism'] = math.degrees(float(parts[4]))
elif re.search("^Final Defocus \(m,m,deg\):", sline):
### new ACE2
apDisplay.printMsg("Reading new ACE2 defocus")
parts = sline.split()
#print parts
self.ctfvalues['defocus1'] = float(parts[3])
self.ctfvalues['defocus2'] = float(parts[4])
# ace2 defines negative angle from +x toward +y
self.ctfvalues['angle_astigmatism'] = -float(parts[5])
elif re.search("^Amplitude Contrast:",sline):
parts = sline.split()
self.ctfvalues['amplitude_contrast'] = float(parts[2])
elif re.search("^Confidence:",sline):
parts = sline.split()
self.ctfvalues['confidence'] = float(parts[1])
self.ctfvalues['confidence_d'] = float(parts[1])
logf.close()
### summary stats
apDisplay.printMsg("============")
avgdf = (self.ctfvalues['defocus1']+self.ctfvalues['defocus2'])/2.0
ampconst = 100.0*self.ctfvalues['amplitude_contrast']
pererror = 100.0 * (self.ctfvalues['defocus1']-self.ctfvalues['defocus2']) / avgdf
apDisplay.printMsg("Defocus: %.3f x %.3f um (%.2f percent astigmatism)"%
(self.ctfvalues['defocus1']*1.0e6, self.ctfvalues['defocus2']*1.0e6, pererror ))
apDisplay.printMsg("Angle astigmatism: %.2f degrees"%(self.ctfvalues['angle_astigmatism']))
apDisplay.printMsg("Amplitude contrast: %.2f percent"%(ampconst))
apDisplay.printColor("Final confidence: %.3f"%(self.ctfvalues['confidence']),'cyan')
### double check that the values are reasonable
if avgdf > self.params['maxdefocus'] or avgdf < self.params['mindefocus']:
apDisplay.printWarning("bad defocus estimate, not committing values to database")
self.badprocess = True
if ampconst < 0.0 or ampconst > 80.0:
apDisplay.printWarning("bad amplitude contrast, not committing values to database")
self.badprocess = True
if self.ctfvalues['confidence'] < 0.2:
apDisplay.printWarning("bad confidence value, not committing values to database")
self.badprocess = True
## create power spectra jpeg
mrcfile = imgdata['filename']+".mrc.edge.mrc"
if os.path.isfile(mrcfile):
jpegfile = os.path.join(self.powerspecdir, apDisplay.short(imgdata['filename'])+".jpg")
ps = apImage.mrcToArray(mrcfile,msg=False)
c = numpy.array(ps.shape)/2.0
ps[c[0]-0,c[1]-0] = ps.mean()
ps[c[0]-1,c[1]-0] = ps.mean()
ps[c[0]-0,c[1]-1] = ps.mean()
ps[c[0]-1,c[1]-1] = ps.mean()
#print "%.3f -- %.3f -- %.3f"%(ps.min(), ps.mean(), ps.max())
ps = numpy.log(ps+1.0)
ps = (ps-ps.mean())/ps.std()
cutoff = -2.0*ps.min()
ps = numpy.where(ps > cutoff, cutoff, ps)
cutoff = ps.mean()
ps = numpy.where(ps < cutoff, cutoff, ps)
#print "%.3f -- %.3f -- %.3f"%(ps.min(), ps.mean(), ps.max())
apImage.arrayToJpeg(ps, jpegfile, msg=False)
apFile.removeFile(mrcfile)
self.ctfvalues['graph3'] = jpegfile
otherfiles = glob.glob(imgdata['filename']+".*.txt")
### remove extra debugging files
for filename in otherfiles:
if filename[-9:] == ".norm.txt":
continue
elif filename[-8:] == ".ctf.txt":
continue
else:
apFile.removeFile(filename)
if maskhighpass and os.path.isfile(ace2inputpath):
apFile.removeFile(ace2inputpath)
return
#======================
def commitToDatabase(self, imgdata):
if self.ctfrundata is None:
self.insertRunData()
ctfinsert.validateAndInsertCTFData(imgdata, self.ctfvalues, self.ctfrundata, self.params['rundir'])
return True
#======================
def insertRunData(self):
paramq = appiondata.ApAce2ParamsData()
paramq['bin'] = self.params['bin']
paramq['reprocess'] = self.params['reprocess']
paramq['cs'] = self.params['cs']
paramq['stig'] = True
paramq['min_defocus'] = self.params['mindefocus']
paramq['max_defocus'] = self.params['maxdefocus']
paramq['edge_thresh'] = self.params['edge_t']
paramq['edge_blur'] = self.params['edge_b']
paramq['rot_blur'] = self.params['rotblur']
paramq['refine2d'] = self.params['refine2d']
paramq['onepass'] = self.params['onepass']
paramq['zeropass'] = self.params['zeropass']
runq=appiondata.ApAceRunData()
runq['name'] = self.params['runname']
runq['session'] = self.getSessionData()
runq['hidden'] = False
runq['path'] = appiondata.ApPathData(path=os.path.abspath(self.params['rundir']))
runq['ace2_params'] = paramq
runq.insert()
self.ctfrundata = runq
#======================
def setupParserOptions(self):
### values
self.parser.add_option("-b", "--bin", dest="bin", type="int", default=1,
help="Binning of the image before FFT", metavar="#")
self.parser.add_option("--mindefocus", dest="mindefocus", type="float", default=0.1e-6,
help="Minimal acceptable defocus (in meters)", metavar="#")
self.parser.add_option("--maxdefocus", dest="maxdefocus", type="float", default=15e-6,
help="Maximal acceptable defocus (in meters)", metavar="#")
self.parser.add_option("--edge1", dest="edge_b", type="float", default=12.0,
help="Canny edge parameters Blur Sigma", metavar="#")
self.parser.add_option("--edge2", dest="edge_t", type="float", default=0.001,
help="Canny edge parameters Edge Treshold(0.0-1.0)", metavar="#")
self.parser.add_option("--rotblur", dest="rotblur", type="float", default=0.0,
help="Rotational blur for low contrast CTF (in degrees), default 0", metavar="#")
### true/false
self.parser.add_option("--refine2d", dest="refine2d", default=False,
action="store_true", help="Refine the defocus after initial ACE with 2d cross-correlation")
self.parser.add_option("--verbose", dest="verbose", default=True,
action="store_true", help="Show all ace2 messages")
self.parser.add_option("--quiet", dest="verbose", default=True,
action="store_false", help="Hide all ace2 messages")
self.parser.add_option("--onepass", dest="onepass", type="float",
help="Mask High pass filter radius for end of gradient mask in Angstroms", metavar="FLOAT")
self.parser.add_option("--zeropass", dest="zeropass", type="float",
help="Mask High pass filter radius for zero mask in Angstroms", metavar="FLOAT")
#self.parser.add_option("--refineapix", dest="refineapix", default=False,
# action="store_true", help="Refine the pixel size")
#======================
def checkConflicts(self):
if self.params['bin'] < 1:
apDisplay.printError("bin must be positive")
if (self.params['mindefocus'] is not None and
(self.params['mindefocus'] > 1e-3 or self.params['mindefocus'] < 1e-9)):
apDisplay.printError("min defocus is not in an acceptable range, e.g. mindefocus=1.5e-6")
if (self.params['maxdefocus'] is not None and
(self.params['maxdefocus'] > 1e-3 or self.params['maxdefocus'] < 1e-9)):
apDisplay.printError("max defocus is not in an acceptable range, e.g. maxdefocus=1.5e-6")
### set cs value
self.params['cs'] = apInstrument.getCsValueFromSession(self.getSessionData())
return
if __name__ == '__main__':
imgLoop = Ace2Loop()
imgLoop.run()
| 11,791 | 0 | 207 |
e5448dbf5258ef53de49771abaf05b549b971db2 | 750 | py | Python | config.py | Fgeorgiou/Barber_Shop_App_Flask | 06ead766a93f1efe50cb91e76a6463f61b42dca6 | [
"MIT"
] | 1 | 2021-06-28T19:31:25.000Z | 2021-06-28T19:31:25.000Z | config.py | adifarhaaan/Barber_Shop_App_Flask | 06ead766a93f1efe50cb91e76a6463f61b42dca6 | [
"MIT"
] | null | null | null | config.py | adifarhaaan/Barber_Shop_App_Flask | 06ead766a93f1efe50cb91e76a6463f61b42dca6 | [
"MIT"
] | 1 | 2021-06-28T19:31:44.000Z | 2021-06-28T19:31:44.000Z | #Importing os module and defining application's path
import os
basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
SQLALCHEMY_ECHO = True
#Cross-site_request_forgery_(CSRF)_protection_provided_by_WTforms
WTF_CSRF_ENABLED = True
#The secret key that scrf uses for authentication
SECRET_KEY = 'This-must-be-changed'
#The path to the database
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'barber_shop.db')
#By disabling this, we decrease the overload
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Reference: https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-vii-unit-testing
# Mail Server Settings
MAIL_SERVER = 'localhost'
MAIL_PORT = 25
MAIL_USERNAME = None
MAIL_PASSWORD = None | 30 | 96 | 0.773333 | #Importing os module and defining application's path
import os
basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
SQLALCHEMY_ECHO = True
#Cross-site_request_forgery_(CSRF)_protection_provided_by_WTforms
WTF_CSRF_ENABLED = True
#The secret key that scrf uses for authentication
SECRET_KEY = 'This-must-be-changed'
#The path to the database
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'barber_shop.db')
#By disabling this, we decrease the overload
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Reference: https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-vii-unit-testing
# Mail Server Settings
MAIL_SERVER = 'localhost'
MAIL_PORT = 25
MAIL_USERNAME = None
MAIL_PASSWORD = None | 0 | 0 | 0 |
a082227134f57b79643b770f0f7a6ad604d437e9 | 11,239 | py | Python | eoncloud_web/biz/account/models.py | eoncloud-dev/eoncloud_web | e671ee49cb8822edad3351d0e9feaf80c6bf5467 | [
"Apache-2.0"
] | 10 | 2015-06-17T11:15:53.000Z | 2021-08-19T22:04:25.000Z | eoncloud_web/biz/account/models.py | eoncloud-dev/eoncloud_web | e671ee49cb8822edad3351d0e9feaf80c6bf5467 | [
"Apache-2.0"
] | 25 | 2015-06-24T03:31:18.000Z | 2015-09-28T02:11:51.000Z | eoncloud_web/biz/account/models.py | eoncloud-dev/eoncloud_web | e671ee49cb8822edad3351d0e9feaf80c6bf5467 | [
"Apache-2.0"
] | 7 | 2015-06-17T10:44:33.000Z | 2018-03-01T15:30:29.000Z | #coding=utf-8
import logging
import hashlib
import urlparse
import random
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.conf import settings
from django.contrib.auth.models import User, Permission
from django.db import models
from django.utils.translation import ugettext_lazy as _
from biz.account.settings import USER_TYPE_CHOICES, QUOTA_ITEM, NotificationLevel, TimeUnit
from biz.account.mixins import LivingDeadModel
from biz.idc.models import UserDataCenter
LOG = logging.getLogger(__name__)
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
NOTIFICATION_KEY_METHODS = ((NotificationLevel.INFO, 'info'),
(NotificationLevel.SUCCESS, 'success'),
(NotificationLevel.ERROR, 'error'),
(NotificationLevel.WARNING, 'warning'),
(NotificationLevel.DANGER, 'danger'))
# This loop will create some is_xxx(eg, is_info, is_success..) property
for value, name in NOTIFICATION_KEY_METHODS:
bind(value)
# This loop will create some action method, user can create notification like this way:
# Notification.info(receiver, title, content)
for value, name in NOTIFICATION_KEY_METHODS:
bind(value)
| 33.449405 | 91 | 0.641783 | #coding=utf-8
import logging
import hashlib
import urlparse
import random
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.conf import settings
from django.contrib.auth.models import User, Permission
from django.db import models
from django.utils.translation import ugettext_lazy as _
from biz.account.settings import USER_TYPE_CHOICES, QUOTA_ITEM, NotificationLevel, TimeUnit
from biz.account.mixins import LivingDeadModel
from biz.idc.models import UserDataCenter
LOG = logging.getLogger(__name__)
class UserProfile(models.Model):
user = models.ForeignKey(User, unique=True)
mobile = models.CharField(_("Mobile"), max_length=26, null=True)
user_type = models.IntegerField(_("User Type"), null=True, default=1, \
choices=USER_TYPE_CHOICES)
balance = models.DecimalField(max_digits=9, decimal_places=2, default=0.00)
def __unicode__(self):
return u'Profile of user: %s' % self.user.username
class Meta:
db_table = "auth_user_profile"
verbose_name = _("UserProfile")
verbose_name_plural = _("UserProfile")
User.profile = property(lambda u: UserProfile.objects.get_or_create(user=u)[0])
class NormalUserManager(models.Manager):
def get_queryset(self):
return super(NormalUserManager, self).get_queryset().filter(
is_superuser=False)
class SuperUserManager(models.Manager):
def get_queryset(self):
return super(SuperUserManager, self).get_queryset().filter(
is_superuser=True)
class UserProxy(User):
class Meta:
proxy = True
normal_users = NormalUserManager()
super_users = SuperUserManager()
@property
def user_data_centers(self):
return self.userdatacenter_set.all()
@property
def has_udc(self):
return UserDataCenter.objects.filter(user=self).exists()
@property
def is_approver(self):
return settings.WORKFLOW_ENABLED and \
self.has_perm('workflow.approve_workflow')
@classmethod
def grant_workflow_approve(cls, user, save=True):
perm = Permission.objects.get(codename="approve_workflow")
user.user_permissions.add(perm)
if save:
user.save()
@classmethod
def revoke_workflow_approve(cls, user, save=True):
perm = Permission.objects.get(codename="approve_workflow")
user.user_permissions.remove(perm)
if save:
user.save()
class LivingManager(models.Manager):
def get_queryset(self):
return super(LivingManager, self).get_queryset().filter(deleted=False)
class DeletedManager(models.Manager):
def get_queryset(self):
return super(DeletedManager, self).get_queryset().filter(deleted=True)
class Contract(LivingDeadModel):
user = models.ForeignKey(User)
udc = models.ForeignKey('idc.UserDataCenter')
name = models.CharField(_("Contract name"), max_length=128, null=False)
customer = models.CharField(_("Customer name"), max_length=128, null=False)
start_date = models.DateTimeField(_("Start Date"), null=False)
end_date = models.DateTimeField(_("End Date"), null=False)
deleted = models.BooleanField(_("Deleted"), default=False)
create_date = models.DateTimeField(_("Create Date"), auto_now_add=True)
update_date = models.DateTimeField(_("Update Date"), auto_now_add=True,
auto_now=True)
def __unicode__(self):
return self.name
def get_quotas(self):
d = settings.QUOTA_ITEMS.copy()
for quota in self.quotas.all():
d[quota.resource] = quota.limit
return d
class Meta:
db_table = "user_contract"
verbose_name = _("Contract")
verbose_name_plural = _("Contract")
class Quota(LivingDeadModel):
contract = models.ForeignKey(Contract, related_name="quotas")
resource = models.CharField(_("Resouce"), max_length=128,
choices=QUOTA_ITEM, null=False)
limit = models.IntegerField(_("Limit"), default=0)
deleted = models.BooleanField(_("Deleted"), default=False)
create_date = models.DateTimeField(_("Create Date"), auto_now_add=True)
update_date = models.DateTimeField(_("Update Date"), auto_now_add=True,
auto_now=True)
class Meta:
db_table = "user_quota"
verbose_name = _("Quota")
verbose_name_plural = _("Quota")
class Operation(models.Model):
user = models.ForeignKey(User)
udc = models.ForeignKey('idc.UserDataCenter')
resource = models.CharField(_("Resource"), max_length=128, null=False)
resource_id = models.IntegerField(_("Resource ID"), null=False)
resource_name = models.CharField(_("Resource Name"), max_length=128)
action = models.CharField(_("Action"), max_length=128, null=False)
result = models.IntegerField(_("Result"), default=0)
create_date = models.DateTimeField(_("Create Date"), auto_now_add=True)
@classmethod
def log(cls, obj, obj_name, action, result=1, udc=None, user=None):
try:
Operation.objects.create(
user=user or obj.user,
udc=udc or obj.user_data_center,
resource=obj.__class__.__name__,
resource_id=obj.id,
resource_name=obj_name,
action=action,
result=result
)
except Exception as e:
pass
def get_resource(self):
return _(self.resource)
def get_desc(self):
desc_format = _(
"%(resource)s:%(resource_name)s execute %(action)s operation")
desc = desc_format % {
"resource": _(self.resource),
"resource_name": self.resource_name,
"action": _(self.action),
}
return desc
@property
def operator(self):
return self.user.username
@property
def data_center_name(self):
return self.udc.data_center.name
class Meta:
db_table = "user_operation"
verbose_name = _("Operation")
verbose_name_plural = _("Operation")
class Notification(models.Model):
level = models.IntegerField(choices=NotificationLevel.OPTIONS,
default=NotificationLevel.INFO)
title = models.CharField(max_length=100)
content = models.TextField()
create_date = models.DateTimeField(auto_now_add=True)
is_announcement = models.BooleanField(default=False)
is_auto = models.BooleanField(default=False)
@property
def time_ago(self):
time_delta = (timezone.now() -
self.create_date).total_seconds() * TimeUnit.SECOND
if time_delta < TimeUnit.MINUTE:
return _("just now")
elif time_delta < TimeUnit.HOUR:
minutes = time_delta / TimeUnit.MINUTE
return _("%(minutes)d minutes ago") % {'minutes': minutes}
elif time_delta < TimeUnit.DAY:
hours = time_delta / TimeUnit.HOUR
return _("%(hours)d hours ago") % {'hours': hours}
elif time_delta < TimeUnit.YEAR:
days = time_delta / TimeUnit.DAY
return _("%(days)d days ago") % {'days': days}
else:
years = time_delta / TimeUnit.YEAR
return _("%(years)d years ago") % {'years': years}
class Meta:
db_table = "notification"
verbose_name = _("Notification")
verbose_name_plural = _("Notifications")
@classmethod
def broadcast(cls, receivers, title, content, level):
notification = cls.objects.create(title=title, content=content,
level=level)
for receiver in receivers:
Feed.objects.create(receiver=receiver, notification=notification)
@classmethod
def pull_announcements(cls, receiver):
try:
for notification in Notification.objects.filter(
is_announcement=True). \
exclude(feed=Feed.objects.filter(receiver=receiver)):
Feed.objects.create(notification=notification,
receiver=receiver)
except:
LOG.exception("Failed to pull announcement for user: %s",
receiver.username)
NOTIFICATION_KEY_METHODS = ((NotificationLevel.INFO, 'info'),
(NotificationLevel.SUCCESS, 'success'),
(NotificationLevel.ERROR, 'error'),
(NotificationLevel.WARNING, 'warning'),
(NotificationLevel.DANGER, 'danger'))
# This loop will create some is_xxx(eg, is_info, is_success..) property
for value, name in NOTIFICATION_KEY_METHODS:
def bind(level):
setattr(Notification, 'is_' + name,
property(lambda self: self.level == level))
bind(value)
# This loop will create some action method, user can create notification like this way:
# Notification.info(receiver, title, content)
for value, name in NOTIFICATION_KEY_METHODS:
def bind(level):
def action(cls, receiver, title, content, is_auto=True):
notification = cls.objects.create(title=title, content=content,
level=level, is_auto=is_auto)
Feed.objects.create(receiver=receiver, notification=notification)
return notification
setattr(Notification, name, classmethod(action))
bind(value)
class Feed(LivingDeadModel):
is_read = models.BooleanField(default=False)
receiver = models.ForeignKey(User, related_name="notifications",
related_query_name='notification')
create_date = models.DateTimeField(auto_now_add=True)
read_date = models.DateTimeField(null=True)
deleted = models.BooleanField(default=False)
notification = models.ForeignKey(Notification, related_name="feeds",
related_query_name="feed")
class Meta:
db_table = "user_feed"
verbose_name = _("Feed")
verbose_name_plural = _("Feeds")
def mark_read(self):
self.is_read = True
self.read_date = timezone.now()
self.save()
def fake_delete(self):
self.deleted = True
self.mark_read()
class ActivateUrl(models.Model):
user = models.ForeignKey('auth.User')
code = models.CharField(max_length=128, unique=True)
expire_date = models.DateTimeField()
create_date = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = "activate_url"
verbose_name = _("Activate Url")
verbose_name_plural = _("Activate Urls")
@classmethod
def generate(cls, user):
content = "%s-%d" % (user.username, random.randint(0, 10000))
code = hashlib.md5(content).hexdigest()
expire_date = timezone.now() + settings.ACTIVATE_EMAIL_EXPIRE_DAYS
return cls.objects.create(user=user, code=code, expire_date=expire_date)
@property
def url(self):
url = reverse('first_activate_user', kwargs={'code': self.code})
return urlparse.urljoin(settings.EXTERNAL_URL, url)
| 4,523 | 4,981 | 432 |
df682920b4e1ed6ff2b5f463d97577f5f5a05326 | 2,477 | py | Python | api/tests/test_restapi.py | ercchy/coding-events | 38db125b351f190e3ff13be7b27d2a4e777cec40 | [
"MIT"
] | null | null | null | api/tests/test_restapi.py | ercchy/coding-events | 38db125b351f190e3ff13be7b27d2a4e777cec40 | [
"MIT"
] | null | null | null | api/tests/test_restapi.py | ercchy/coding-events | 38db125b351f190e3ff13be7b27d2a4e777cec40 | [
"MIT"
] | 1 | 2015-09-22T14:56:49.000Z | 2015-09-22T14:56:49.000Z | # -*- coding: utf-8 -*-
import json
import datetime
from geoposition import Geoposition
from web.processors.event import create_or_update_event
| 29.843373 | 79 | 0.677432 | # -*- coding: utf-8 -*-
import json
import datetime
from geoposition import Geoposition
from web.processors.event import create_or_update_event
class TestRestApi:
def test_event_list_all(self, client, admin_user):
event_data = {
"start_date": datetime.datetime.now() - datetime.timedelta(days=1, hours=3),
"end_date": datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
"organizer": "some organizer",
"creator": admin_user,
"title": "Unique REST API Event",
"pub_date": datetime.datetime.now(),
"country": "SI",
"geoposition": Geoposition(46.05528,14.51444),
"location": "Ljubljana",
"audience": [1],
"theme": [1],
"tags": ["tag1", "tag2"],
}
event = create_or_update_event(**event_data)
event.status = 'APPROVED'
event.save()
response_json = client.get('/api/event/list/?format=json')
response_data = json.loads(response_json.content)
assert isinstance(response_data, list)
assert event_data['title'] in response_json.content
def test_scoreboard_api(self, client, admin_user):
event_data = {
"start_date": datetime.datetime.now() - datetime.timedelta(days=1, hours=3),
"end_date": datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
"organizer": "some organizer",
"creator": admin_user,
"title": "Event in SI",
"pub_date": datetime.datetime.now(),
"country": "SI",
"geoposition": Geoposition(46.05528, 14.51444),
"location": "Ljubljana",
"audience": [1],
"theme": [1],
"tags": ["tag1", "tag2"],
}
event = create_or_update_event(**event_data)
event.status = 'APPROVED'
event.save()
event_data = {
"start_date": datetime.datetime.now() - datetime.timedelta(days=1, hours=3),
"end_date": datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
"organizer": "other organizer",
"creator": admin_user,
"title": "Event in IS",
"pub_date": datetime.datetime.now(),
"country": "IS",
"geoposition": Geoposition(64.13244, -21.85690),
"location": "Reykjavík",
"audience": [1],
"theme": [1],
"tags": ["tag1", "tag2"],
}
event = create_or_update_event(**event_data)
event.status = 'APPROVED'
event.save()
response_json = client.get('/api/scoreboard/?format=json')
response_data = json.loads(response_json.content)
assert isinstance(response_data, list)
assert len(response_data)>1
assert response_data[0]["country_name"] == "Iceland"
assert response_data[1]["country_name"] == "Slovenia"
| 2,264 | -3 | 70 |
8ddd7fe2c9dc87626b6ae7272a75702f54eba691 | 14,573 | py | Python | src/scanmode/subscan.py | flyingfrog81/basie | 0956824f8b8467a6d839957f2a6af4082d95816d | [
"BSD-3-Clause"
] | null | null | null | src/scanmode/subscan.py | flyingfrog81/basie | 0956824f8b8467a6d839957f2a6af4082d95816d | [
"BSD-3-Clause"
] | 35 | 2016-02-12T14:49:15.000Z | 2021-06-22T14:37:04.000Z | src/scanmode/subscan.py | flyingfrog81/basie | 0956824f8b8467a6d839957f2a6af4082d95816d | [
"BSD-3-Clause"
] | 2 | 2016-02-22T16:55:36.000Z | 2021-06-04T12:14:21.000Z | #coding=utf-8
#
#
# Copyright (C) 2013 INAF -IRA Italian institute of radioastronomy, bartolini@ira.inaf.it
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Subscan related classes and functions:
B{Classes}
- SubscanError
- Subscan: a generic subscan
- OTFSubscan: a generic on the fly subscan
- SiderealSubscan: a generic sidereal subscan
B{Functions}
Used to get subscan classes instances. Subscans are often returned in couples
together with their associated Tsys sidereal subscan.
- get_cen_otf_subscan
- get_ss_otf_subscan (not implemented)
- get_sidereal_subscan
- get_tsys_subscan
- get_couple_subscan
- get_sid_couple_subscan
"""
from past.builtins import cmp
from builtins import str
import logging
logger = logging.getLogger(__name__)
import copy
from persistent import Persistent
from ..valid_angles import VAngle
from .. import templates, frame, utils, procedures
from ..errors import ScheduleError, ScanError
from ..frame import NULL_COORD, Coord, EQ, GAL, HOR, NULL
TSYS_SIGMA = 5
"""
Used for calculating TSYS subscans coordinate offsets as TSYS_SIGMA * beamsize
"""
class Subscan(Persistent):
    """
    Base class for every subscan type.

    Holds the attributes shared by all subscans (target, duration,
    tsys/cal flags, pre/post procedures) and hands out a progressive,
    file-wide unique ID; concrete subscan classes extend it.
    """
    ID = 1  # class-level progressive counter shared by every subscan
    def __init__(self, _target, duration=0.0, is_tsys=False,
                 is_cal=False):
        """
        Initialize common attributes and assign the subscan a unique ID.
        @param _target: the subscan target
        @param duration: subscan duration
        @param is_tsys: True if this subscan measures system temperature
        @param is_cal: True if this subscan uses the calibration mark
        @raise ScheduleError: if both is_tsys and is_cal are requested
        """
        # This value will be the same one found in the .lis file
        self.ID = Subscan.ID
        Subscan.ID += 1
        self.target = _target
        self.is_tsys = is_tsys
        self.duration = duration
        self.is_cal = is_cal
        if self.is_tsys and self.is_cal:
            raise ScheduleError("Subscan cannot be tsys and cal at the same time")
        # Select the procedures wrapping this subscan according to its kind
        if self.is_cal:
            pre, post = procedures.CALON, procedures.CALOFF
        elif self.is_tsys:
            pre, post = procedures.NULL, procedures.TSYS
        else:
            pre, post = procedures.NULL, procedures.NULL
        self.pre_procedure = pre
        self.post_procedure = post
class OTFSubscan(Subscan):
    """
    On-the-fly (OTF) subscan: the antenna sweeps across the target while
    acquiring, instead of tracking a fixed position.
    """
    def __init__(self, _target, lon2, lat2, descr, scan_frame,
                 geom, direction, duration, is_tsys=False, is_cal=False):
        """
        Constructor.
        @param _target: the subscan target
        @param lon2: scan extent along longitude (zero when geom == "LON")
        @type lon2: VAngle
        @param lat2: scan extent along latitude (zero when geom == "LAT")
        @type lat2: VAngle
        @param descr: OTF description string, e.g. "CEN"; stored upper-cased
        @param scan_frame: frame the scan movement is defined in
        @param geom: constant axis of the scan, "LON" or "LAT"
        @param direction: scanning direction, "INC" or "DEC"
        @param duration: subscan duration
        @raise ScheduleError: on inconsistent frame specifications
        """
        Subscan.__init__(self, _target, duration, is_tsys, is_cal)
        self.typename = "OTF"
        self.scan_frame = scan_frame
        # A NULL offset frame defaults to the scan frame; otherwise the
        # offset frame must match the scan frame exactly.
        if self.target.offset_coord.frame == frame.NULL:#default behaviour
            self.target.offset_coord.frame = self.scan_frame
        if not self.target.offset_coord.frame == self.scan_frame:
            msg = "offset frame %s different from scan frame %s" % (self.target.offset_coord.frame.name, self.scan_frame)
            logger.debug(msg)
            raise ScheduleError(msg)
        self.lon2 = lon2
        self.lat2 = lat2
        self.descr = descr.upper()
        # Check consistency between the target coordinate frame and the
        # scan frame: the only tolerated mismatch is an EQ target scanned
        # with a "CEN" description in the HOR frame.
        if not self.target.coord.frame == self.scan_frame:#possible mistake!
            logger.warning("SUBSCAN %d : scan_frame and coordinates_frame are different" % (self.ID,))
            if (self.target.coord.frame == frame.EQ and
                self.descr == "CEN" and
                self.scan_frame == frame.HOR):
                pass #OK - only success condition
            else:
                raise ScheduleError("not compatible frame types")#very bad!
        self.geom = geom
        self.direction = direction
def get_cen_otf(_target,
                duration,
                length,
                offset,
                const_axis,
                direction,
                scan_frame):
    """
    Get an I{OTF} subscan with description I{CEN}.
    The *offset* is applied along the constant axis of a deep copy of the
    target, while the scan extent *length* lies along the other axis.
    @param _target: the subscan target (deep-copied, never modified)
    @param duration: subscan duration
    @type length: VAngle
    @type offset: VAngle
    @param const_axis: constant axis of the scan, "LON" or "LAT"
    @param direction: scanning direction, "INC" or "DEC"
    @param scan_frame: frame the scan movement is defined in
    @raise ScheduleError: if const_axis is neither "LON" nor "LAT"
    @return: an L{OTFSubscan} instance
    """
    __target = copy.deepcopy(_target)
    if const_axis == "LON":
        __target.offset_coord.lon = _target.offset_coord.lon + offset
        logger.debug("offset lon: %f" % (__target.offset_coord.lon.deg,))
        lon2 = VAngle(0.0)
        lat2 = length
    elif const_axis == "LAT":
        __target.offset_coord.lat = _target.offset_coord.lat + offset
        logger.debug("offset lat: %f" % (__target.offset_coord.lat.deg,))
        lon2 = length
        lat2 = VAngle(0.0)
    else:
        # Fail early with a clear message instead of the UnboundLocalError
        # that would otherwise be raised on lon2/lat2 below.
        raise ScheduleError("invalid const_axis: %s" % (const_axis,))
    attr = dict(_target = __target,
                descr = 'CEN',
                duration = duration,
                lon2 = lon2,
                lat2 = lat2,
                geom = const_axis,
                direction = direction,
                scan_frame = scan_frame,
               )
    return OTFSubscan(**attr)
def get_ss_otf(*args, **kwargs):
    """
    Placeholder for start/stop OTF subscans.
    @raise NotImplementedError: always; no useful case has been found yet
        for implementing this function
    """
    message = "is there any useful case for implementing this?"
    raise NotImplementedError(message)
def get_sidereal(_target, offset=NULL_COORD, duration=0.0,
is_tsys=False, is_cal=False):
"""
@param _target: the subscan target
@type _target: target.Target
@param offset_lon: additional longitude offset
@type offset_lon: VAngle
@param offset_lat: additional latitude offset
@type offset_lat: VAngle
"""
__target = copy.deepcopy(_target)
#import ipdb;ipdb.set_trace()
__target.offset_coord += offset
return SiderealSubscan(__target, duration, is_tsys, is_cal)
def get_tsys(_target, offset, duration=0.0):
"""
Get a Tsys subscan.
This basically returns a SIDEREAL subscan where source name is I{Tsys} and
duration is I{0.0}
@type offset_lon: VAngle
@type offset_lat: VAngle
"""
__target = copy.deepcopy(_target)
__target.label = "Tsys"
st = get_sidereal(__target, offset, duration=0.0,
is_tsys=True)
st.post_procedure = procedures.TSYS
return st
def get_cen_otf_tsys(_target,
duration,
length,
offset,
const_axis,
direction,
scan_frame,
beamsize):
"""
Get a couple composed of a CEN_OTF subscan and its relative SIDEREAL TSYS
subscan.
@return: (otf_subscan, tsys_subscan)
@type length: VAngle
@type offset: Coord
@type beamsize: VAngle
"""
logger.debug("get couple subscan offset: %s " % (offset,))
negative_offset = VAngle(-1 * (length.deg / 2.0 + beamsize.deg * TSYS_SIGMA))
positive_offset = VAngle(length.deg / 2.0 + beamsize.deg * TSYS_SIGMA)
if const_axis == "LAT":
_offset_lat = offset
if direction == "INC":
_offset_lon = negative_offset
elif direction == "DEC":
_offset_lon = positive_offset
elif const_axis == "LON":
_offset_lon = offset
if direction == "INC":
_offset_lat = negative_offset
elif direction == "DEC":
_offset_lat = positive_offset
_offset = Coord(scan_frame, _offset_lon, _offset_lat)
ss = get_cen_otf(_target, duration, length, offset, const_axis, direction,
scan_frame)
st = get_tsys(_target, _offset)
return ss, st
def get_sid_tsys(_target,
offset,
extremes,
duration,
beamsize):
"""
Get a couple of sidereal subscans, where the first is an actual subscan and the
second is a tsys subscan obtained pointing the antenna out of a rectangular
polygon containing the source.
@param _target: the source to be observed
@type _target: L{target.Target}
@param offset_lon: longitude offset of the subscan
@type offset_lon: VAngle
@param offset_lat: latitude offset of the subscan
@type offset_lat: VAngle
@param extremes: An array containing the offsets of the extremes of the rectangular polygon
containing the source (i.e. the borders of a raster map)
@type extremes: [(x0,y0), (x1,y1), (x2,y2), (x3,y3)]
@param duration: subscan duration (Sec. )
@type duration: float
@param beamsize: beam size used to calculated tsys subscan offsets
@type beamsize: VAngle
"""
ss = get_sidereal(_target, offset, duration)
tsys_offsets = utils.extrude_from_rectangle(offset.lon.deg,
offset.lat.deg,
extremes,
beamsize.deg * TSYS_SIGMA)
_offsets = Coord(offset.frame,
VAngle(tsys_offsets[0]),
VAngle(tsys_offsets[1]))
st = get_tsys(_target, _offsets)
return ss, st
| 37.462725 | 121 | 0.584437 | #coding=utf-8
#
#
# Copyright (C) 2013 INAF -IRA Italian institute of radioastronomy, bartolini@ira.inaf.it
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Subscan related classes and funcions:
B{Classes}
- SubscanError
- Subscan: a generic subscan
- OTFSubscan: a generic on the fly subscan
- SiderealSubscan: a generic sidereal subscan
B{Functions}
Used to get subscan classes instances. Subscans are often returned in couples
together with their associated Tsys sidereal subscan.
- get_cen_otf_subscan
- get_ss_otf_subscan (not implemented)
- get_sidereal_subscan
- get_tsys_subscan
- get_couple_subscan
- get_sid_couple_subscan
"""
from past.builtins import cmp
from builtins import str
import logging
logger = logging.getLogger(__name__)
import copy
from persistent import Persistent
from ..valid_angles import VAngle
from .. import templates, frame, utils, procedures
from ..errors import ScheduleError, ScanError
from ..frame import NULL_COORD, Coord, EQ, GAL, HOR, NULL
TSYS_SIGMA = 5
"""
Used for calculating TSYS subscans coordinate offsets as TSYS_SIGMA * beamsize
"""
class Subscan(Persistent):
"""
Generic subscan. Contains common subscan attributes and is meant to be
override by specific subscan classes
"""
ID = 1 #static counter attribute
def __init__(self, _target, duration=0.0, is_tsys=False,
is_cal=False):
"""
Constructor.
Give the subscan a unique ID.
"""
self.ID = Subscan.ID #This value will be the same found in the lis file
Subscan.ID += 1
self.target = _target
self.is_tsys = is_tsys
self.duration = duration
#self.SEQ_ID = 0 #position in the respective scan, default value 0
self.is_cal = is_cal
if self.is_cal and self.is_tsys:
raise ScheduleError("Subscan cannot be tsys and cal at the same time")
if self.is_cal:
self.pre_procedure = procedures.CALON
self.post_procedure = procedures.CALOFF
elif self.is_tsys:
self.pre_procedure = procedures.NULL
self.post_procedure = procedures.TSYS
else: #Default
self.pre_procedure = procedures.NULL
self.post_procedure = procedures.NULL
def add_post_procedure(self, proc):
if self.post_procedure == procedures.NULL:
self.post_procedure = proc
else:
self.post_procedure = self.post_procedure + proc
def add_pre_procedure(self, proc):
if self.pre_procedure == procedures.NULL:
self.pre_procedure = proc
else:
self.pre_procedure = self.pre_procedure + proc
def __hash__(self):
return self.ID
def __cmp__(self, other):
return cmp(self.ID, other.ID)
def __eq__(self, other):
return self.ID == other.ID
class OTFSubscan(Subscan):
"""
On the flight sunbscan class
"""
def __init__(self, _target, lon2, lat2, descr, scan_frame,
geom, direction, duration, is_tsys=False, is_cal=False):
"""
Constructor.
@type lon2: VAngle
@type lat2: VAngle
"""
Subscan.__init__(self, _target, duration, is_tsys, is_cal)
self.typename = "OTF"
self.scan_frame = scan_frame
#check that offset frame and scan frame are equal
if self.target.offset_coord.frame == frame.NULL:#default behaviour
self.target.offset_coord.frame = self.scan_frame
if not self.target.offset_coord.frame == self.scan_frame:
msg = "offset frame %s different from scan frame %s" % (self.target.offset_coord.frame.name, self.scan_frame)
logger.debug(msg)
raise ScheduleError(msg)
self.lon2 = lon2
self.lat2 = lat2
self.descr = descr.upper()
#check consistnecy of frames specifications
#we already know that offset and scan
if not self.target.coord.frame == self.scan_frame:#possible mistake!
logger.warning("SUBSCAN %d : scan_frame and coordinates_frame are different" % (self.ID,))
if (self.target.coord.frame == frame.EQ and
self.descr == "CEN" and
self.scan_frame == frame.HOR):
pass #OK - only success condition
else:
raise ScheduleError("not compatible frame types")#very bad!
self.geom = geom
self.direction = direction
def __str__(self):
return templates.otf_subscan.substitute(
dict(
ID = self.ID,
target = self.target.label,
lon1 = self.target.coord.lon.fmt(),
lat1 = self.target.coord.lat.fmt(),
lon2 = self.lon2.fmt(),
lat2 = self.lat2.fmt(),
frame = self.target.coord.frame.name,
s_frame = self.scan_frame.name,
geom = self.geom,
descr = self.descr,
direction = self.direction,
duration = str(self.duration),
offset_frame = self.target.offset_coord.frame.offset_name,
offset_lon = self.target.offset_coord.lon.fmt(),
offset_lat = self.target.offset_coord.lat.fmt(),
vel = str(self.target.velocity),
)
)
class SkydipSubscan(Subscan):
def __init__(self, _target, duration=30.0,
start_elevation = VAngle(88),
stop_elevation = VAngle(15),
offset = frame.Coord(frame.HOR,
VAngle(1),
VAngle(0)),
is_tsys=False, is_cal=False):
Subscan.__init__(self, _target, duration, is_tsys, is_cal)
self.typename = "SKYDIP"
self.offset = offset
self.start_elevation = start_elevation
self.stop_elevation = stop_elevation
def __str__(self):
return templates.skydip_subscan.substitute(
dict(ID = self.ID,
target_subscan = self.target,
start_elevation = self.start_elevation.fmt_dec(),
stop_elevation = self.stop_elevation.fmt_dec(),
duration = str(self.duration),
offset_frame = self.offset.frame.offset_name,
offset_lon = self.offset.lon.fmt(),
offset_lat = self.offset.lat.fmt(),
))
class SiderealSubscan(Subscan):
def __init__(self, _target, duration=0.0, is_tsys=False, is_cal=False):
Subscan.__init__(self, _target, duration, is_tsys, is_cal)
self.typename = "SID"
def __str__(self):
if self.target.coord.frame == frame.EQ:
_epoch = str(self.target.coord.epoch) + '\t'
else:
_epoch = ""
return templates.sidereal_subscan.substitute(
dict(
ID = self.ID,
target = self.target.label,
frame = self.target.coord.frame.name,
longitude = self.target.coord.lon.fmt(),
latitude = self.target.coord.lat.fmt(),
epoch = _epoch,
offset_frame = self.target.offset_coord.frame.offset_name,
offset_lon = self.target.offset_coord.lon.fmt(),
offset_lat = self.target.offset_coord.lat.fmt(),
vel = str(self.target.velocity),
)
)
def get_skydip_tsys(target_id,_target, duration=30.0,
start_elevation = VAngle(88),
stop_elevation = VAngle(15),
offset = frame.Coord(frame.HOR,
VAngle(1),
VAngle(0))):
ss = SkydipSubscan(target_id, duration, start_elevation, stop_elevation, offset)
st = get_tsys(_target, offset)
return ss, st
def get_cen_otf(_target,
duration,
length,
offset,
const_axis,
direction,
scan_frame):
"""
Get an I{OTF} subscan with description I{CEN}.
@type length: VAngle
@type offset: VAngle
@return: an L{OTFSubscan} instance
"""
__target = copy.deepcopy(_target)
if const_axis == "LON":
__target.offset_coord.lon = _target.offset_coord.lon + offset
logger.debug("offset lon: %f" % (__target.offset_coord.lon.deg,))
lon2 = VAngle(0.0)
lat2 = length
elif const_axis == "LAT":
__target.offset_coord.lat = _target.offset_coord.lat + offset
logger.debug("offset lat: %f" % (__target.offset_coord.lat.deg,))
lon2 = length
lat2 = VAngle(0.0)
attr = dict(_target = __target,
descr = 'CEN',
duration = duration,
lon2 = lon2,
lat2 = lat2,
geom = const_axis,
direction = direction,
scan_frame = scan_frame,
)
return OTFSubscan(**attr)
def get_ss_otf(*args, **kwargs):
"""
@raise NotImplementedError: we still have no useful case for implemting this
function
"""
raise NotImplementedError("is there any useful case for implementing this?")
def get_sidereal(_target, offset=NULL_COORD, duration=0.0,
is_tsys=False, is_cal=False):
"""
@param _target: the subscan target
@type _target: target.Target
@param offset_lon: additional longitude offset
@type offset_lon: VAngle
@param offset_lat: additional latitude offset
@type offset_lat: VAngle
"""
__target = copy.deepcopy(_target)
#import ipdb;ipdb.set_trace()
__target.offset_coord += offset
return SiderealSubscan(__target, duration, is_tsys, is_cal)
def get_tsys(_target, offset, duration=0.0):
"""
Get a Tsys subscan.
This basically returns a SIDEREAL subscan where source name is I{Tsys} and
duration is I{0.0}
@type offset_lon: VAngle
@type offset_lat: VAngle
"""
__target = copy.deepcopy(_target)
__target.label = "Tsys"
st = get_sidereal(__target, offset, duration=0.0,
is_tsys=True)
st.post_procedure = procedures.TSYS
return st
def get_cen_otf_tsys(_target,
duration,
length,
offset,
const_axis,
direction,
scan_frame,
beamsize):
"""
Get a couple composed of a CEN_OTF subscan and its relative SIDEREAL TSYS
subscan.
@return: (otf_subscan, tsys_subscan)
@type length: VAngle
@type offset: Coord
@type beamsize: VAngle
"""
logger.debug("get couple subscan offset: %s " % (offset,))
negative_offset = VAngle(-1 * (length.deg / 2.0 + beamsize.deg * TSYS_SIGMA))
positive_offset = VAngle(length.deg / 2.0 + beamsize.deg * TSYS_SIGMA)
if const_axis == "LAT":
_offset_lat = offset
if direction == "INC":
_offset_lon = negative_offset
elif direction == "DEC":
_offset_lon = positive_offset
elif const_axis == "LON":
_offset_lon = offset
if direction == "INC":
_offset_lat = negative_offset
elif direction == "DEC":
_offset_lat = positive_offset
_offset = Coord(scan_frame, _offset_lon, _offset_lat)
ss = get_cen_otf(_target, duration, length, offset, const_axis, direction,
scan_frame)
st = get_tsys(_target, _offset)
return ss, st
def get_sid_tsys(_target,
offset,
extremes,
duration,
beamsize):
"""
Get a couple of sidereal subscans, where the first is an actual subscan and the
second is a tsys subscan obtained pointing the antenna out of a rectangular
polygon containing the source.
@param _target: the source to be observed
@type _target: L{target.Target}
@param offset_lon: longitude offset of the subscan
@type offset_lon: VAngle
@param offset_lat: latitude offset of the subscan
@type offset_lat: VAngle
@param extremes: An array containing the offsets of the extremes of the rectangular polygon
containing the source (i.e. the borders of a raster map)
@type extremes: [(x0,y0), (x1,y1), (x2,y2), (x3,y3)]
@param duration: subscan duration (Sec. )
@type duration: float
@param beamsize: beam size used to calculated tsys subscan offsets
@type beamsize: VAngle
"""
ss = get_sidereal(_target, offset, duration)
tsys_offsets = utils.extrude_from_rectangle(offset.lon.deg,
offset.lat.deg,
extremes,
beamsize.deg * TSYS_SIGMA)
_offsets = Coord(offset.frame,
VAngle(tsys_offsets[0]),
VAngle(tsys_offsets[1]))
st = get_tsys(_target, _offsets)
return ss, st
def get_off_tsys(_target,
offset,
extremes,
duration,
beamsize):
extremes_offsets = utils.extrude_from_rectangle(offset.lon.deg,
offset.lat.deg,
extremes,
beamsize.deg * TSYS_SIGMA)
_offsets = Coord(offset.frame,
VAngle(extremes_offsets[0]),
VAngle(extremes_offsets[1]))
ss = get_sidereal(_target, _offsets, duration)
st = get_tsys(_target, _offsets)
return ss, st
| 4,480 | 18 | 360 |
532629a6d6ac3182afad366e479f768c8294bd88 | 4,998 | py | Python | Machine_Learning/Design_Tutorials/04-Keras_GoogleNet_ResNet/files/code/eval_graph.py | mkolod/Vitis-Tutorials | 33d6cf9686398ef1179778dc0da163291c68b465 | [
"Apache-2.0"
] | 1 | 2022-03-15T22:07:18.000Z | 2022-03-15T22:07:18.000Z | Machine_Learning/Design_Tutorials/04-Keras_GoogleNet_ResNet/files/code/eval_graph.py | mkolod/Vitis-Tutorials | 33d6cf9686398ef1179778dc0da163291c68b465 | [
"Apache-2.0"
] | null | null | null | Machine_Learning/Design_Tutorials/04-Keras_GoogleNet_ResNet/files/code/eval_graph.py | mkolod/Vitis-Tutorials | 33d6cf9686398ef1179778dc0da163291c68b465 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
## © Copyright (C) 2016-2020 Xilinx, Inc
##
## Licensed under the Apache License, Version 2.0 (the "License"). You may
## not use this file except in compliance with the License. A copy of the
## License is located at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
## License for the specific language governing permissions and limitations
## under the License.
'''
##################################################################
# Evaluation of frozen/quantized graph
#################################################################
'''
TESTED WITH PYTHON 3.6
Author: Mark Harvey (mark.harvey@xilinx.com)
Date: 28 May 2019
Modified by Daniele Bagni (daniele.bagni@xilinx.com)
Date: 27 Aug 2019
'''
import os
import sys
import glob
import argparse
import shutil
import tensorflow as tf
import numpy as np
import cv2
import gc # memory garbage collector #DB
import tensorflow.contrib.decent_q
from tensorflow.python.platform import gfile
from config import fashion_mnist_config as cfg #DB
#DB
DATAS_DIR = cfg.DATASET_DIR
TEST_DIR = os.path.join(DATAS_DIR, "test")
print("\n eval_graph.py runs from ", DATAS_DIR)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--graph', type=str,
default='./freeze/frozen_graph.pb',
help='graph file (.pb) to be evaluated.')
parser.add_argument('--input_node', type=str,
default='images_in',
help='input node.')
parser.add_argument('--output_node', type=str,
default='dense_1/BiasAdd',
help='output node.')
parser.add_argument('--class_num', type=int,
default=cfg.NUM_CLASSES,
help='number of classes.')
parser.add_argument('--gpu', type=str,
default='0',
help='gpu device id.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 31.632911 | 83 | 0.636255 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
## © Copyright (C) 2016-2020 Xilinx, Inc
##
## Licensed under the Apache License, Version 2.0 (the "License"). You may
## not use this file except in compliance with the License. A copy of the
## License is located at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
## License for the specific language governing permissions and limitations
## under the License.
'''
##################################################################
# Evaluation of frozen/quantized graph
#################################################################
'''
TESTED WITH PYTHON 3.6
Author: Mark Harvey (mark.harvey@xilinx.com)
Date: 28 May 2019
Modified by Daniele Bagni (daniele.bagni@xilinx.com)
Date: 27 Aug 2019
'''
import os
import sys
import glob
import argparse
import shutil
import tensorflow as tf
import numpy as np
import cv2
import gc # memory garbage collector #DB
import tensorflow.contrib.decent_q
from tensorflow.python.platform import gfile
from config import fashion_mnist_config as cfg #DB
#DB
DATAS_DIR = cfg.DATASET_DIR
TEST_DIR = os.path.join(DATAS_DIR, "test")
print("\n eval_graph.py runs from ", DATAS_DIR)
def graph_eval(input_graph_def, input_node, output_node):
#Reading image paths
test_img_paths = [img_path for img_path in glob.glob(TEST_DIR+"/*/*.png")]
NUMEL = len(test_img_paths)
assert (NUMEL > 0 )
y_test= np.zeros((NUMEL,1), dtype="uint8")
x_test= np.zeros((NUMEL,cfg.IMAGE_HEIGHT,cfg.IMAGE_WIDTH,3),dtype="uint8")
i = 0
for img_path in test_img_paths:
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
filename = os.path.basename(img_path)
class_name = filename.split("_")[0]
label = cfg.labelNames_dict[class_name]
#print("filename: ", img_path)
#print("classname: ", class_name)
x_test[i] = img
y_test[i] = int(label)
i = i + 1
'''
#normalize
x_test = x_test.astype(np.float32)
x_test = x_test/cfg.NORM_FACTOR
x_test = x_test -0.5
x_test = x_test *2
'''
x_test = cfg.Normalize(x_test)
#print(x_test[0])
#collect garbage to save memory #DB
#del img
#del test_img_paths
#del img_path
#gc.collect()
x_test = np.reshape(x_test, [-1, cfg.IMAGE_HEIGHT,cfg.IMAGE_WIDTH, 3])
y_test = tf.keras.utils.to_categorical(y_test, num_classes=cfg.NUM_CLASSES)
tf.import_graph_def(input_graph_def,name = '')
# Get input placeholders & tensors
images_in = tf.get_default_graph().get_tensor_by_name(input_node+':0')
labels = tf.placeholder(tf.int32,shape = [None,cfg.NUM_CLASSES])
# get output tensors
logits = tf.get_default_graph().get_tensor_by_name(output_node+':0')
# top 5 and top 1 accuracy
in_top5 = tf.nn.in_top_k(predictions=logits, targets=tf.argmax(labels, 1), k=5)
in_top1 = tf.nn.in_top_k(predictions=logits, targets=tf.argmax(labels, 1), k=1)
top5_acc = tf.reduce_mean(tf.cast(in_top5, tf.float32))
top1_acc = tf.reduce_mean(tf.cast(in_top1, tf.float32))
# Create the Computational graph
with tf.Session() as sess:
sess.run(tf.initializers.global_variables())
feed_dict={images_in: x_test, labels: y_test}
t5_acc,t1_acc = sess.run([top5_acc,top1_acc], feed_dict)
#print(dir(x_test))
#print(max(x_test[0]))
#print(min(x_test[0]))
print (' Top 1 accuracy with test dataset: {:1.4f}'.format(t1_acc))
print (' Top 5 accuracy with test dataset: {:1.4f}'.format(t5_acc))
print ('FINISHED!')
return
def main(unused_argv):
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
input_graph_def = tf.Graph().as_graph_def()
input_graph_def.ParseFromString(tf.gfile.GFile(FLAGS.graph, "rb").read())
graph_eval(input_graph_def, FLAGS.input_node, FLAGS.output_node)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--graph', type=str,
default='./freeze/frozen_graph.pb',
help='graph file (.pb) to be evaluated.')
parser.add_argument('--input_node', type=str,
default='images_in',
help='input node.')
parser.add_argument('--output_node', type=str,
default='dense_1/BiasAdd',
help='output node.')
parser.add_argument('--class_num', type=int,
default=cfg.NUM_CLASSES,
help='number of classes.')
parser.add_argument('--gpu', type=str,
default='0',
help='gpu device id.')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 2,630 | 0 | 46 |
7924c5f1a461d1f211470639400c3de8ae6b4018 | 7,559 | py | Python | libs/plots.py | protivinsky/python-utils | 145fc8e6385df745c7b73fa0dfbb17abf6f58f82 | [
"MIT"
] | 2 | 2020-04-21T12:46:41.000Z | 2021-03-08T00:27:48.000Z | libs/plots.py | protivinsky/python-utils | 145fc8e6385df745c7b73fa0dfbb17abf6f58f82 | [
"MIT"
] | null | null | null | libs/plots.py | protivinsky/python-utils | 145fc8e6385df745c7b73fa0dfbb17abf6f58f82 | [
"MIT"
] | 1 | 2020-04-21T12:46:43.000Z | 2020-04-21T12:46:43.000Z | import os
from yattag import Doc, indent
from libs.utils import create_stamped_temp, slugify
import matplotlib.pyplot as plt
# NOTE - Does not work out of the box, needs a fix:
#
# Annoyingly, the js loading of subpages violates Cross-Origin Requests policy in all browsers
# when files are served locally via file:///. Works fine for http protocol though.
# It is possible to use iframes rather than js loader, but it's ugly and has other issues (multiple nested scrollbars).
#
# Workarounds:
# - Firefox:
# - go to about:config -> search for privacy.file_unique_origin and toggle
# - then set up Firefox as the default for opening .htm files (that's the reason why I do not use .html)
# - Chrome
# - can be started with "--allow-file-access-from-files", then it should just work
# - it would be possible to start the appropriate process in .show, but I have not tried
# - one workaround is enough for me
# - https://stackoverflow.com/a/18137280
# - Edge:
# - until recently, it was the only browser not enforcing the CORS policy for local files, so it just
# worked. The new version of Edge enforces the same, do not know how to get around there.
# - or it is possible to use local webserver and serve the files via it
# - CORS policy is respected with http
# - python webserver works fine, just serving the directory: python -m http.server 8000
# - however seems more hassle than just changing firefox config...
# I am not using it at the end, not sure if it works correctly.
| 40.207447 | 120 | 0.511179 | import os
from yattag import Doc, indent
from libs.utils import create_stamped_temp, slugify
import matplotlib.pyplot as plt
# NOTE - Does not work out of the box, needs a fix:
#
# Annoyingly, the js loading of subpages violates Cross-Origin Requests policy in all browsers
# when files are served locally via file:///. Works fine for http protocol though.
# It is possible to use iframes rather than js loader, but it's ugly and has other issues (multiple nested scrollbars).
#
# Workarounds:
# - Firefox:
# - go to about:config -> search for privacy.file_unique_origin and toggle
# - then set up Firefox as the default for opening .htm files (that's the reason why I do not use .html)
# - Chrome
# - can be started with "--allow-file-access-from-files", then it should just work
# - it would be possible to start the appropriate process in .show, but I have not tried
# - one workaround is enough for me
# - https://stackoverflow.com/a/18137280
# - Edge:
# - until recently, it was the only browser not enforcing the CORS policy for local files, so it just
# worked. The new version of Edge enforces the same, do not know how to get around there.
# - or it is possible to use local webserver and serve the files via it
# - CORS policy is respected with http
# - python webserver works fine, just serving the directory: python -m http.server 8000
# - however seems more hassle than just changing firefox config...
class Chart:
def __init__(self, figs, cols=3, title=None, format='png'):
if not isinstance(figs, list):
figs = [figs]
self.figs = [f if isinstance(f, plt.Figure) else f.get_figure() for f in figs]
self.cols = cols
self.format = format
self.title = title or self.figs[0].axes[0].title._text
def save(self, path, inner=False):
os.makedirs(path, exist_ok=True)
n = len(self.figs)
for i in range(n):
self.figs[i].savefig(f'{path}/fig_{i+1:03d}.{self.format}')
plt.close('all')
doc, tag, text = Doc().tagtext()
doc.asis('<!DOCTYPE html>')
with tag('html'):
with tag('head'):
with tag('title'):
text(self.title or 'Chart')
with tag('body'):
with tag('h1'):
text(self.title or 'Chart')
num_rows = (n + self.cols - 1) // self.cols
for r in range(num_rows):
with tag('div'):
for c in range(min(self.cols, n - self.cols * r)):
doc.stag('img', src=f'fig_{self.cols * r + c + 1:03d}.{self.format}')
file = open('{}/page.htm'.format(path), 'w', encoding='utf-8')
file.write(indent(doc.getvalue()))
file.close()
def show(self):
path = create_stamped_temp('reports')
self.save(path)
os.startfile('{}/page.htm'.format(path))
# I am not using it at the end, not sure if it works correctly.
class Text:
def __init__(self, texts, width=750, title=None):
if not isinstance(texts, list):
texts = [texts]
self.texts = texts
self.width = width
self.title = title
def save(self, path, inner=False):
os.makedirs(path, exist_ok=True)
doc, tag, text = Doc().tagtext()
doc.asis('<!DOCTYPE html>')
with tag('html'):
with tag('head'):
with tag('title'):
text(self.title or 'Text')
with tag('body'):
with tag('h1'):
text(self.title or 'Text')
with tag('div'):
for t in self.texts:
with tag('div', style='width: {}px; float: left'.format(self.width)):
with tag('pre'):
text(t)
file = open('{}/page.htm'.format(path), 'w', encoding='utf-8')
file.write(indent(doc.getvalue()))
file.close()
def show(self):
path = create_stamped_temp('reports')
self.save(path)
os.startfile('{}/page.htm'.format(path))
class Selector:
def __init__(self, charts, title=None):
if not isinstance(charts, list):
charts = [charts]
self.charts = [ch if isinstance(ch, (Text, Chart, Selector)) else Chart(ch) for ch in charts]
self.title = title or 'Selector'
def save(self, path):
os.makedirs(path, exist_ok=True)
n = len(self.charts)
for i in range(n):
ch = self.charts[i]
if ch.title is None:
ch.title = '{}_{:02d}'.format('Chart' if isinstance(ch, Chart) else ('Text' if isinstance(ch, Text)
else 'Selector'), i)
ch.save('{}/{}'.format(path, slugify(ch.title)))
doc, tag, text, line = Doc().ttl()
doc.asis('<!DOCTYPE html>')
with tag('html'):
with tag('head'):
with tag('title'):
text(self.title or 'Selector')
with tag('script'):
doc.asis("""
function loader(target, file) {
var element = document.getElementById(target);
var xmlhttp = new XMLHttpRequest();
xmlhttp.onreadystatechange = function(){
if(xmlhttp.status == 200 && xmlhttp.readyState == 4){
var txt = xmlhttp.responseText;
var next_file = ""
var matches = txt.match(/<script>loader\\('.*', '(.*)'\\)<\\/script>/);
if (matches) {
next_file = matches[1];
};
txt = txt.replace(/^[\s\S]*<body>/, "").replace(/<\/body>[\s\S]*$/, "");
txt = txt.replace(/src=\\"fig_/g, "src=\\"" + file + "/fig_");
txt = txt.replace(/loader\\('/g, "loader('" + file.replace("/", "-") + "-");
txt = txt.replace(/div id=\\"/, "div id=\\"" + file.replace("/", "-") + "-");
txt = txt.replace(/content', '/g, "content', '" + file + "/");
element.innerHTML = txt;
if (next_file) {
loader(file.replace("/", "-") + "-content", file.replace("/", "-") + "/" + next_file);
};
};
};
xmlhttp.open("GET", file + "/page.htm", true);
xmlhttp.send();
}
""")
with tag('body'):
with tag('h1'):
text(self.title or 'Selector')
with tag('div'):
for ch in self.charts:
#line('a', ch.title, href='{}/page.html'.format(slugify(ch.title)), target='iframe')
line('button', ch.title, type='button',
onclick='loader(\'content\', \'{}\')'.format(slugify(ch.title)))
with tag('div', id='content'):
text('')
with tag('script'):
doc.asis('loader(\'content\', \'{}\')'.format(slugify(self.charts[0].title)))
file = open('{}/page.htm'.format(path), 'w', encoding='utf-8')
file.write(indent(doc.getvalue()))
file.close()
def show(self):
path = create_stamped_temp('reports')
self.save(path)
os.startfile('{}/page.htm'.format(path))
| 5,648 | -25 | 334 |
4eefb2ee01165f85bd55b4a148829a1648d185bd | 400 | py | Python | ifs/source/elasticsearch.py | cbednarski/ifs-python | 9629ba857b1c397fc1a1f13eeee46e5427fb2744 | [
"0BSD"
] | 6 | 2016-03-29T21:12:43.000Z | 2021-05-01T18:34:10.000Z | ifs/source/elasticsearch.py | cbednarski/ifs-python | 9629ba857b1c397fc1a1f13eeee46e5427fb2744 | [
"0BSD"
] | 2 | 2015-08-12T01:34:51.000Z | 2015-08-25T19:23:17.000Z | ifs/source/elasticsearch.py | cbednarski/ifs-python | 9629ba857b1c397fc1a1f13eeee46e5427fb2744 | [
"0BSD"
] | null | null | null | version = '2.3.1'
version_cmd = 'elasticsearch -version'
download_url = 'http://packages.elasticsearch.org/GPG-KEY-elasticsearch'
install_script = """
apt-key add GPG-KEY-elasticsearch
echo "deb http://packages.elasticsearch.org/elasticsearch/VERSION/debian stable main" > /etc/apt/sources.list.d/elasticsearch.list
apt-get update -qq
apt-get install -y elasticsearch
service elasticsearch start
"""
| 36.363636 | 130 | 0.785 | version = '2.3.1'
version_cmd = 'elasticsearch -version'
download_url = 'http://packages.elasticsearch.org/GPG-KEY-elasticsearch'
install_script = """
apt-key add GPG-KEY-elasticsearch
echo "deb http://packages.elasticsearch.org/elasticsearch/VERSION/debian stable main" > /etc/apt/sources.list.d/elasticsearch.list
apt-get update -qq
apt-get install -y elasticsearch
service elasticsearch start
"""
| 0 | 0 | 0 |
9ca4012b1629bc9c733cdb8b446f0f5e58ef51b7 | 992 | py | Python | CONVERT ATAT TO POSCAR/convert_ATAT_to_POSCAR.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | CONVERT ATAT TO POSCAR/convert_ATAT_to_POSCAR.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | CONVERT ATAT TO POSCAR/convert_ATAT_to_POSCAR.py | vijindal/cluspand | a3676594354ab59991fe75fccecdc3a400c7b153 | [
"MIT"
] | null | null | null | # Converts a 'str.out' file to the VASP POSCAR format.
#
# Assumes:
# + You have installed the "ase" python package.
# + You have the "str2cif" tool from ATAT in your path.
#
# Author: Jesper Kristensen
import os, sys
from ase import io
#=== USER SETTINGS:
structure_from = 'str.out'
structure_to = 'str.POSCAR'
if not os.path.exists(structure_from):
print 'EEEE ATAT file %s does not exist in the path!'
print 'EEEE You have to specify the ATAT file in this Python script.'
print 'EEEE exiting ...'
sys.exit(1)
#=== Convert str.out to CIF format first:
print
print 'IIII Converting ATAT to CIF format ...'
tmp = 'tmp.cif'
cmd = 'str2cif < %s > %s' % (structure_from, tmp)
os.system(cmd)
#=== Then convert CIF to POSCAR using ASE:
print 'IIII Converting CIF to POSCAR format ...'
atoms = io.read(tmp)
atoms.write(structure_to, format = 'vasp')
#=== Clean up:
os.remove(tmp)
print 'IIII All done, the resulting POSCAR file is in %s' % structure_to
print
| 25.435897 | 73 | 0.68246 | # Converts a 'str.out' file to the VASP POSCAR format.
#
# Assumes:
# + You have installed the "ase" python package.
# + You have the "str2cif" tool from ATAT in your path.
#
# Author: Jesper Kristensen
import os, sys
from ase import io
#=== USER SETTINGS:
structure_from = 'str.out'
structure_to = 'str.POSCAR'
if not os.path.exists(structure_from):
print 'EEEE ATAT file %s does not exist in the path!'
print 'EEEE You have to specify the ATAT file in this Python script.'
print 'EEEE exiting ...'
sys.exit(1)
#=== Convert str.out to CIF format first:
print
print 'IIII Converting ATAT to CIF format ...'
tmp = 'tmp.cif'
cmd = 'str2cif < %s > %s' % (structure_from, tmp)
os.system(cmd)
#=== Then convert CIF to POSCAR using ASE:
print 'IIII Converting CIF to POSCAR format ...'
atoms = io.read(tmp)
atoms.write(structure_to, format = 'vasp')
#=== Clean up:
os.remove(tmp)
print 'IIII All done, the resulting POSCAR file is in %s' % structure_to
print
| 0 | 0 | 0 |
3fcd8a1cb0c2ff5dc3cc391247bdfc8ca998bf5a | 9,600 | py | Python | src/rdf_builder.py | leolani/leolani-datarepresentation | bc2975310fe623f7548db54bf5d691c7bbcf0c1e | [
"MIT"
] | null | null | null | src/rdf_builder.py | leolani/leolani-datarepresentation | bc2975310fe623f7548db54bf5d691c7bbcf0c1e | [
"MIT"
] | null | null | null | src/rdf_builder.py | leolani/leolani-datarepresentation | bc2975310fe623f7548db54bf5d691c7bbcf0c1e | [
"MIT"
] | null | null | null | import logging
from iribaker import to_iri
from rdflib import URIRef, Literal, Namespace
from representation import Predicate, Entity, Triple, Provenance
logger = logging.getLogger(__name__)
| 34.532374 | 105 | 0.6025 | import logging
from iribaker import to_iri
from rdflib import URIRef, Literal, Namespace
from representation import Predicate, Entity, Triple, Provenance
logger = logging.getLogger(__name__)
class RdfBuilder(object):
def __init__(self):
# type: () -> None
self.namespaces = {}
self._log = logger.getChild(self.__class__.__name__)
self._log.debug("Booted")
self._define_namespaces()
########## setting up connection ##########
def _define_namespaces(self):
"""
Define namespaces for different layers (ontology/vocab and resource). Assign them to self
:return:
"""
# Namespaces for the instance layer
instance_vocab = 'http://cltl.nl/leolani/n2mu/'
self.namespaces['N2MU'] = Namespace(instance_vocab)
instance_resource = 'http://cltl.nl/leolani/world/'
self.namespaces['LW'] = Namespace(instance_resource)
# Namespaces for the mention layer
mention_vocab = 'http://groundedannotationframework.org/gaf#'
self.namespaces['GAF'] = Namespace(mention_vocab)
mention_resource = 'http://cltl.nl/leolani/talk/'
self.namespaces['LTa'] = Namespace(mention_resource)
# Namespaces for the attribution layer
attribution_vocab = 'http://groundedannotationframework.org/grasp#'
self.namespaces['GRASP'] = Namespace(attribution_vocab)
factuality_vocab = 'http://groundedannotationframework.org/grasp/factuality#'
self.namespaces['GRASPf'] = Namespace(factuality_vocab)
sentiment_vocab = 'http://groundedannotationframework.org/grasp/sentiment#'
self.namespaces['GRASPs'] = Namespace(sentiment_vocab)
emotion_vocab = 'http://groundedannotationframework.org/grasp/emotion#'
self.namespaces['GRASPe'] = Namespace(emotion_vocab)
attribution_resource_friends = 'http://cltl.nl/leolani/friends/'
self.namespaces['LF'] = Namespace(attribution_resource_friends)
attribution_resource_inputs = 'http://cltl.nl/leolani/inputs/'
self.namespaces['LI'] = Namespace(attribution_resource_inputs)
# Namespaces for the temporal layer-ish
context_vocab = 'http://cltl.nl/episodicawareness/'
self.namespaces['EPS'] = Namespace(context_vocab)
self.namespaces['LC'] = Namespace('http://cltl.nl/leolani/context/')
# The namespaces of external ontologies
skos = 'http://www.w3.org/2004/02/skos/core#'
self.namespaces['SKOS'] = Namespace(skos)
prov = 'http://www.w3.org/ns/prov#'
self.namespaces['PROV'] = Namespace(prov)
sem = 'http://semanticweb.cs.vu.nl/2009/11/sem/'
self.namespaces['SEM'] = Namespace(sem)
time = 'http://www.w3.org/TR/owl-time/#'
self.namespaces['TIME'] = Namespace(time)
xml = 'https://www.w3.org/TR/xmlschema-2/#'
self.namespaces['XML'] = Namespace(xml)
wd = 'http://www.wikidata.org/entity/'
self.namespaces['WD'] = Namespace(wd)
wdt = 'http://www.wikidata.org/prop/direct/'
self.namespaces['WDT'] = Namespace(wdt)
wikibase = 'http://wikiba.se/ontology#'
self.namespaces['wikibase'] = Namespace(wikibase)
########## basic constructors ##########
def create_resource_uri(self, namespace, resource_name):
# type: (str, str) -> str
"""
Create an URI for the given resource (entity, predicate, named graph, etc) in the given namespace
Parameters
----------
namespace: str
Namespace where entity belongs to
resource_name: str
Label of resource
Returns
-------
uri: str
Representing the URI of the resource
"""
if namespace in self.namespaces.keys():
uri = URIRef(to_iri(self.namespaces[namespace] + resource_name))
else:
uri = URIRef(to_iri('{}:{}'.format(namespace, resource_name)))
return uri
def fill_literal(self, value, datatype=None):
# type: (str, str) -> Literal
"""
Create an RDF literal given its value and datatype
Parameters
----------
value: str
Value of the literal resource
datatype: str
Datatype of the literal
Returns
-------
Literal with value and datatype given
"""
return Literal(value, datatype=datatype) if datatype is not None else Literal(value)
def fill_entity(self, label, types, namespace='LW', uri=None):
# type: (str, list, str, str) -> Entity
"""
Create an RDF entity given its label, types and its namespace
Parameters
----------
label: str
Label of entity
types: List[str]
List of types for this entity
uri: str
URI of the entity, is available (i.e. when extracting concepts from wikidata)
namespace: str
Namespace where entity belongs to
Returns
-------
Entity object with given label
"""
if types in [None, ''] and label != '':
self._log.warning('Unknown type: {}'.format(label))
return self.fill_entity_from_label(label, namespace)
else:
entity_id = self.create_resource_uri(namespace, label) if not uri else URIRef(to_iri(uri))
return Entity(entity_id, Literal(label), types)
def fill_predicate(self, label, namespace='N2MU', uri=None):
# type: (str, str, str) -> Predicate
"""
Create an RDF predicate given its label and its namespace
Parameters
----------
label: str
Label of predicate
uri: str
URI of the predicate, is available (i.e. when extracting concepts from wikidata)
namespace:
Namespace where predicate belongs to
Returns
-------
Predicate object with given label
"""
predicate_id = self.create_resource_uri(namespace, label) if not uri else URIRef(to_iri(uri))
return Predicate(predicate_id, Literal(label))
def fill_entity_from_label(self, label, namespace='LW', uri=None):
# type: (str, str, str) -> Entity
"""
Create an RDF entity given its label and its namespace
Parameters
----------
label: str
Label of entity
uri: str
URI of the entity, is available (i.e. when extracting concepts from wikidata)
namespace: str
Namespace where entity belongs to
Returns
-------
Entity object with given label and no type information
"""
entity_id = self.create_resource_uri(namespace, label) if not uri else URIRef(to_iri(uri))
return Entity(entity_id, Literal(label), [''])
def empty_entity(self):
# type: () -> Entity
"""
Create an empty RDF entity
Parameters
----------
Returns
-------
Entity object with no label and no type information
"""
return Entity('', Literal(''), [''])
def fill_provenance(self, author, date):
# type: (str, date) -> Provenance
"""
Structure provenance to pair authors and dates when mentions are created
Parameters
----------
author: str
Actor that generated the knowledge
date: date
Date when knowledge was generated
Returns
-------
Provenance object containing author and date
"""
return Provenance(author, date)
def fill_triple(self, subject_dict, predicate_dict, object_dict, namespace='LW'):
# type: (dict, dict, dict, str) -> Triple
"""
Create an RDF entity given its label and its namespace
Parameters
----------
subject_dict: dict
Information about label and type of subject
predicate_dict: dict
Information about type of predicate
object_dict: dict
Information about label and type of object
namespace: str
Information about which namespace the entities belongs to
Returns
-------
Entity object with given label
"""
subject = self.fill_entity(subject_dict['label'], [subject_dict['type']], namespace=namespace)
predicate = self.fill_predicate(predicate_dict['type'])
object = self.fill_entity(object_dict['label'], [object_dict['type']], namespace=namespace)
return Triple(subject, predicate, object)
def fill_triple_from_label(self, subject_label, predicate, object_label, namespace='LW'):
# type: (str, str, str, str) -> Triple
"""
Create an RDF entity given its label and its namespace
Parameters
----------
subject_label: str
Information about label of subject
predicate: str
Information about predicate
object_label: str
Information about label of object
namespace: str
Information about which namespace the entities belongs to
Returns
-------
Entity object with given label
"""
subject = self.fill_entity_from_label(subject_label, namespace=namespace)
predicate = self.fill_predicate(predicate)
object = self.fill_entity_from_label(object_label, namespace=namespace)
return Triple(subject, predicate, object)
| 186 | 9,196 | 23 |
025e076517441e324caae991ffdeb7b0fe63f7cb | 1,944 | py | Python | wifi-password/__main__.py | Abdelrahman0W/wifi-password | 115f8e9168a8c690d2c7ab8d38fd5c82c65e5e56 | [
"MIT"
] | 1 | 2021-07-26T20:00:56.000Z | 2021-07-26T20:00:56.000Z | wifi-password/__main__.py | Abdelrahman0W/wifi-password | 115f8e9168a8c690d2c7ab8d38fd5c82c65e5e56 | [
"MIT"
] | null | null | null | wifi-password/__main__.py | Abdelrahman0W/wifi-password | 115f8e9168a8c690d2c7ab8d38fd5c82c65e5e56 | [
"MIT"
] | 1 | 2021-07-26T20:04:23.000Z | 2021-07-26T20:04:23.000Z | from .platform import OS
from .windows.win import winPass
from .linUni.linUni import linUniPass
from .manager import Manager
import inquirer
if __name__ == "__main__":
main()
| 30.375 | 81 | 0.432613 | from .platform import OS
from .windows.win import winPass
from .linUni.linUni import linUniPass
from .manager import Manager
import inquirer
class wifiPass:
def __init__(self) -> None:
self.platform = OS().getOS
select = [
inquirer.List(
'auto',
message = "Select your choice >>>",
choices = [
'Generate for the current network',
'Generate for a new network'
],
),
]
if inquirer.prompt(select)['auto'] == 'Generate for the current network':
self.auto = True
else:
self.auto = False
def __getSSID(self) -> str:
if not self.auto:
return input("Enter SSID >>> ")
if self.platform == 'windows':
return winPass().getSSID()
else:
return linUniPass().getSSID()
def __getPW(self) -> str:
if not self.auto:
return input("Enter Password >>> ")
if self.platform == 'windows':
return winPass().getPW()
else:
return linUniPass().getPW()
def generateQR(self) -> None:
Manager().generateQR(ssid=self.__getSSID(), pw=self.__getPW())
def main():
print("""
__ ___ ______ _ _____ _
\ \ / (_) | ____(_) | __ \ | |
\ \ /\ / / _ ______| |__ _ | |__) |_ _ ___ _____ _____ _ __ __| |
\ \/ \/ / | |______| __| | | | ___/ _` / __/ __\ \ /\ / / _ \| '__/ _` |
\ /\ / | | | | | | | | | (_| \__ \__ \\ V V / (_) | | | (_| |
\/ \/ |_| |_| |_| |_| \__,_|___/___/ \_/\_/ \___/|_| \__,_|
Welcome to Wi-Fi Password.
""")
wifiPass().generateQR()
if __name__ == "__main__":
main()
| 1,614 | -6 | 153 |
fc1729841205b1f3d5942d6c7c3e59b48f13a0bf | 3,379 | py | Python | src/app/waveglow/training.py | stefantaubert/tacotron2 | 8475f014391c5066cfe0b92b6c74568639be5e79 | [
"BSD-3-Clause"
] | 3 | 2020-08-04T09:38:22.000Z | 2022-03-26T12:38:30.000Z | src/app/waveglow/training.py | stefantaubert/tacotron2 | 8475f014391c5066cfe0b92b6c74568639be5e79 | [
"BSD-3-Clause"
] | null | null | null | src/app/waveglow/training.py | stefantaubert/tacotron2 | 8475f014391c5066cfe0b92b6c74568639be5e79 | [
"BSD-3-Clause"
] | null | null | null | import os
from logging import Logger
from typing import Dict, Optional
from src.app.io import (get_checkpoints_dir, get_train_log_file,
get_train_logs_dir, load_trainset, load_valset,
save_prep_name, save_testset, save_trainset,
save_valset)
from src.app.pre.prepare import get_prepared_dir, load_filelist
from src.app.utils import prepare_logger
from src.app.waveglow.io import get_train_dir
from src.core.common.train import get_custom_or_last_checkpoint
from src.core.pre.merge_ds import split_prepared_data_train_test_val
from src.core.waveglow.model_checkpoint import CheckpointWaveglow
from src.core.waveglow.train import continue_train, train
if __name__ == "__main__":
mode = 0
if mode == 0:
start_new_training(
base_dir="/datasets/models/taco2pt_v5",
train_name="debug",
prep_name="thchs_ljs",
custom_hparams={
"batch_size": 3,
"iters_per_checkpoint": 5,
"cache_wavs": False
},
validation_size=0.001,
)
elif mode == 1:
continue_training(
base_dir="/datasets/models/taco2pt_v5",
train_name="debug"
)
| 34.479592 | 290 | 0.748742 | import os
from logging import Logger
from typing import Dict, Optional
from src.app.io import (get_checkpoints_dir, get_train_log_file,
get_train_logs_dir, load_trainset, load_valset,
save_prep_name, save_testset, save_trainset,
save_valset)
from src.app.pre.prepare import get_prepared_dir, load_filelist
from src.app.utils import prepare_logger
from src.app.waveglow.io import get_train_dir
from src.core.common.train import get_custom_or_last_checkpoint
from src.core.pre.merge_ds import split_prepared_data_train_test_val
from src.core.waveglow.model_checkpoint import CheckpointWaveglow
from src.core.waveglow.train import continue_train, train
def try_load_checkpoint(base_dir: str, train_name: Optional[str], checkpoint: Optional[int], logger: Logger) -> Optional[CheckpointWaveglow]:
result = None
if train_name:
train_dir = get_train_dir(base_dir, train_name, False)
checkpoint_path, _ = get_custom_or_last_checkpoint(
get_checkpoints_dir(train_dir), checkpoint)
result = CheckpointWaveglow.load(checkpoint_path, logger)
return result
def start_new_training(base_dir: str, train_name: str, prep_name: str, test_size: float = 0.01, validation_size: float = 0.01, custom_hparams: Optional[Dict[str, str]] = None, split_seed: int = 1234, warm_start_train_name: Optional[str] = None, warm_start_checkpoint: Optional[int] = None):
prep_dir = get_prepared_dir(base_dir, prep_name)
wholeset = load_filelist(prep_dir)
trainset, testset, valset = split_prepared_data_train_test_val(
wholeset, test_size=test_size, validation_size=validation_size, seed=split_seed, shuffle=True)
train_dir = get_train_dir(base_dir, train_name, create=True)
save_trainset(train_dir, trainset)
save_testset(train_dir, testset)
save_valset(train_dir, valset)
logs_dir = get_train_logs_dir(train_dir)
logger = prepare_logger(get_train_log_file(logs_dir), reset=True)
warm_model = try_load_checkpoint(
base_dir=base_dir,
train_name=warm_start_train_name,
checkpoint=warm_start_checkpoint,
logger=logger
)
save_prep_name(train_dir, prep_name)
train(
custom_hparams=custom_hparams,
logdir=logs_dir,
trainset=trainset,
valset=valset,
save_checkpoint_dir=get_checkpoints_dir(train_dir),
debug_logger=logger,
warm_model=warm_model,
)
def continue_training(base_dir: str, train_name: str, custom_hparams: Optional[Dict[str, str]] = None):
train_dir = get_train_dir(base_dir, train_name, create=False)
assert os.path.isdir(train_dir)
logs_dir = get_train_logs_dir(train_dir)
logger = prepare_logger(get_train_log_file(logs_dir))
continue_train(
custom_hparams=custom_hparams,
logdir=logs_dir,
trainset=load_trainset(train_dir),
valset=load_valset(train_dir),
save_checkpoint_dir=get_checkpoints_dir(train_dir),
debug_logger=logger
)
if __name__ == "__main__":
mode = 0
if mode == 0:
start_new_training(
base_dir="/datasets/models/taco2pt_v5",
train_name="debug",
prep_name="thchs_ljs",
custom_hparams={
"batch_size": 3,
"iters_per_checkpoint": 5,
"cache_wavs": False
},
validation_size=0.001,
)
elif mode == 1:
continue_training(
base_dir="/datasets/models/taco2pt_v5",
train_name="debug"
)
| 2,129 | 0 | 69 |
6bc894c811b8ef772b2c827b2589b3237eb861bc | 1,139 | py | Python | hardest/template.py | proggga/hardest | 234cb41115c30a756ee11ed7c5fa41c9979d3303 | [
"MIT"
] | 2 | 2018-02-03T13:43:25.000Z | 2021-12-03T16:13:49.000Z | hardest/template.py | proggga/hardest | 234cb41115c30a756ee11ed7c5fa41c9979d3303 | [
"MIT"
] | 8 | 2017-08-16T08:34:59.000Z | 2018-02-05T18:30:44.000Z | hardest/template.py | proggga/hardest | 234cb41115c30a756ee11ed7c5fa41c9979d3303 | [
"MIT"
] | 1 | 2018-02-05T18:26:20.000Z | 2018-02-05T18:26:20.000Z | """Template class."""
import os
# For Mypy typing
from typing import Any # noqa pylint: disable=unused-import
from typing import Dict # noqa pylint: disable=unused-import
import jinja2
class Template(object): # pylint: disable=too-few-public-methods
"""Represents tepmplate which can be rendered."""
def __init__(self, file_path, context):
# type: (str, Dict[str, Any]) -> None
"""Constructor."""
self.file_path = file_path # type: str
self.context = context # type: Dict[str, Any]
def render(self):
# type () -> str
"""Render template."""
if not os.path.exists(self.file_path):
import hardest.exceptions
message = ('Path "{}" not exists.'
.format(self.file_path))
raise hardest.exceptions.TemplateNotFoundException(message)
file_handler = open(self.file_path)
template_content = str(file_handler.read())
file_handler.close()
template = jinja2.Template(template_content)
rendered_content = str(template.render(**self.context))
return rendered_content
| 31.638889 | 71 | 0.632133 | """Template class."""
import os
# For Mypy typing
from typing import Any # noqa pylint: disable=unused-import
from typing import Dict # noqa pylint: disable=unused-import
import jinja2
class Template(object): # pylint: disable=too-few-public-methods
"""Represents tepmplate which can be rendered."""
def __init__(self, file_path, context):
# type: (str, Dict[str, Any]) -> None
"""Constructor."""
self.file_path = file_path # type: str
self.context = context # type: Dict[str, Any]
def render(self):
# type () -> str
"""Render template."""
if not os.path.exists(self.file_path):
import hardest.exceptions
message = ('Path "{}" not exists.'
.format(self.file_path))
raise hardest.exceptions.TemplateNotFoundException(message)
file_handler = open(self.file_path)
template_content = str(file_handler.read())
file_handler.close()
template = jinja2.Template(template_content)
rendered_content = str(template.render(**self.context))
return rendered_content
| 0 | 0 | 0 |
c3a385ffea6f2255f86dd3adabddf41b91e825f1 | 556 | py | Python | docker_demo/stage2/dj_demo/hello/views.py | lbjworld/demo | df937493b51dbdd3ddf10742d9a01d3ac00af6a6 | [
"MIT"
] | 1 | 2016-02-14T07:32:49.000Z | 2016-02-14T07:32:49.000Z | docker_demo/stage3/dj_demo/hello/views.py | lbjworld/demo | df937493b51dbdd3ddf10742d9a01d3ac00af6a6 | [
"MIT"
] | null | null | null | docker_demo/stage3/dj_demo/hello/views.py | lbjworld/demo | df937493b51dbdd3ddf10742d9a01d3ac00af6a6 | [
"MIT"
] | null | null | null | import datetime
from django.shortcuts import render
from django.http import HttpResponse
from django.db.models import F
from django.conf import settings
from models import Counter
| 29.263158 | 78 | 0.683453 | import datetime
from django.shortcuts import render
from django.http import HttpResponse
from django.db.models import F
from django.conf import settings
from models import Counter
def hello(request):
now = datetime.datetime.now()
# update counter
Counter.objects.filter(name=settings.STAT_NAME).update(count=F('count')+1)
c = Counter.objects.get(name=settings.STAT_NAME)
html = "<html><body>\
It is now {now}, count : {c}. <br/>\
</body></html>".format(now=now, c=c.count)
return HttpResponse(html)
| 350 | 0 | 23 |
7317364a9d5df033377400f6e8fedf41a3d1fdda | 2,727 | py | Python | elecsus/libs/polarisation_animation_mpl.py | fsponciano/ElecSus | c79444edb18154906caddf438c7e33b02865fa66 | [
"Apache-2.0"
] | 22 | 2016-07-11T15:25:18.000Z | 2021-10-04T08:16:33.000Z | elecsus/libs/polarisation_animation_mpl.py | Quantum-Light-and-Matter/ElecSus | c79444edb18154906caddf438c7e33b02865fa66 | [
"Apache-2.0"
] | 8 | 2019-08-12T09:46:21.000Z | 2021-07-29T09:01:10.000Z | elecsus/libs/polarisation_animation_mpl.py | Quantum-Light-and-Matter/ElecSus | c79444edb18154906caddf438c7e33b02865fa66 | [
"Apache-2.0"
] | 20 | 2016-06-09T14:35:14.000Z | 2021-09-30T13:43:46.000Z | """
Polarisation animation...
the animate_vectors() method creates an interactive 3D plot, visualising the resultant polarisation for a
given input of Ex, Ey and the phase difference (in radians) between them
Last updated 2018-02-19 JK
"""
# py 2.7 compatibility
from __future__ import (division, print_function, absolute_import)
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
#replace default matplotlib text and color sequence with durham colours
plt.rc('font',**{'family':'Serif','serif':['Times New Roman']})
params={'axes.labelsize':13,'xtick.labelsize':12,'ytick.labelsize':12,'legend.fontsize': 11,'mathtext.fontset':'cm','mathtext.rm':'serif'}
plt.rcParams.update(params)
if __name__ == '__main__':
animate_vectors(1,1.j,0) | 34.961538 | 176 | 0.674367 | """
Polarisation animation...
the animate_vectors() method creates an interactive 3D plot, visualising the resultant polarisation for a
given input of Ex, Ey and the phase difference (in radians) between them
Last updated 2018-02-19 JK
"""
# py 2.7 compatibility
from __future__ import (division, print_function, absolute_import)
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
#replace default matplotlib text and color sequence with durham colours
plt.rc('font',**{'family':'Serif','serif':['Times New Roman']})
params={'axes.labelsize':13,'xtick.labelsize':12,'ytick.labelsize':12,'legend.fontsize': 11,'mathtext.fontset':'cm','mathtext.rm':'serif'}
plt.rcParams.update(params)
def update_lines(num, Ex, Ey, z, time, curve):
# NOTE: there is no .set_data() for 3 dim data...
curve.set_data([Ex*np.exp(-1.j*time[num]*2*np.pi),z])
curve.set_3d_properties(Ey*np.exp(-1.j*time[num]*2*np.pi))
return curve
def animate_vectors(Exi,Eyi,phase):
# Attaching 3D axis to the figure
E = [Exi, Eyi*np.exp(1.j*phase)]
fig = plt.figure("Polarisation animation")
ax = p3.Axes3D(fig)
k = 2*np.pi / 2
# 100 resultant vectors at various z
z_axis_curve = np.linspace(-2.5,2.5,500)
# t = 0 data, all z
Ex = E[0] * np.exp(1.j*(k*z_axis_curve))
Ey = E[1] * np.exp(1.j*(k*z_axis_curve))
nframes = 100
time = np.linspace(0,4,nframes)
# Creating fifty line objects.
# NOTE: Can't pass empty arrays into 3d version of plot()
curve = ax.plot(Ex, z_axis_curve, Ey, color='k', lw=2)[0]
spokes = 6
lines = [ax.plot([0,Ex[::spokes][i]],[z_axis_curve[::spokes][i],z_axis_curve[::spokes][i]],[0,Ey[::spokes][i]],color='k',alpha=0.3, lw=1)[0] for i in range(len(Ex[::spokes]))]
x_quiver = ax.quiver3D([0],[0],[0],[1],[0],[0],length=2,arrow_length_ratio=0.05,pivot='middle',color='k',lw=2)
y_quiver = ax.quiver3D([0],[0],[0],[0],[0],[1],length=2,arrow_length_ratio=0.05,pivot='middle',color='k',lw=2)
#z_quiver = ax.quiver3D([0],[0],[0],[0],[1],[0],length=5,arrow_length_ratio=0.05,pivot='middle',color='k',lw=2)
k_quiver = ax.quiver3D([0],[0],[0],[0],[1],[0],length=5,arrow_length_ratio=0.05,pivot='middle',color='r',lw=3,alpha=0.6)
ax.text(0, 2.2, 0.15, r"$\vec{k}, \vec{z}$", (0,1,0), color='red', size=18)
ax.text(0.9, 0, 0.1, r"$\vec{x}$", (1,0,0), color='k', size=18)
ax.text(0.1, 0, 0.9, r"$\vec{y}$", (1,0,0), color='k', size=18)
ax.set_xlim3d(-1,1)
ax.set_zlim3d(-1,1)
# Creating the Animation object
line_ani = animation.FuncAnimation(fig, update_lines, nframes, fargs=(Ex, Ey, z_axis_curve,time, curve),
interval=50, blit=False)
plt.show()
if __name__ == '__main__':
animate_vectors(1,1.j,0) | 1,856 | 0 | 46 |
f572ec76ebffa74087c45a7676146ac7f45584e8 | 463 | py | Python | users/migrations/0009_auto_20210810_2244.py | Achyut-0705/Django-Blog-App | d9f331e43f805efa4ef65844c055edee57124621 | [
"MIT"
] | null | null | null | users/migrations/0009_auto_20210810_2244.py | Achyut-0705/Django-Blog-App | d9f331e43f805efa4ef65844c055edee57124621 | [
"MIT"
] | 1 | 2021-08-15T16:27:03.000Z | 2021-08-15T16:27:03.000Z | users/migrations/0009_auto_20210810_2244.py | Achyut-0705/Library-Management-System | d9f331e43f805efa4ef65844c055edee57124621 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-08-10 17:14
from django.db import migrations, models
import django.db.models.deletion
| 23.15 | 98 | 0.632829 | # Generated by Django 3.1.7 on 2021-08-10 17:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0008_auto_20210810_2103'),
]
operations = [
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.user'),
),
]
| 0 | 316 | 23 |
2afad78a490520061c4c7c8837aabf8f97c65be2 | 10,415 | py | Python | framenet_tools/frame_identification/frameidnetwork.py | inception-project/framenet-tools | ff0f8c334dc0c2a0733673c33b54b51d098e9d40 | [
"Apache-2.0"
] | 6 | 2020-07-23T09:04:54.000Z | 2022-03-01T10:25:51.000Z | framenet_tools/frame_identification/frameidnetwork.py | inception-project/framenet-tools | ff0f8c334dc0c2a0733673c33b54b51d098e9d40 | [
"Apache-2.0"
] | 22 | 2019-07-08T08:10:56.000Z | 2021-06-02T00:13:01.000Z | framenet_tools/frame_identification/frameidnetwork.py | inception-project/framenet-tools | ff0f8c334dc0c2a0733673c33b54b51d098e9d40 | [
"Apache-2.0"
] | 3 | 2019-08-27T12:46:05.000Z | 2020-08-16T14:54:05.000Z | import logging
import torch
import torch.nn as nn
import torchtext
import os
from torch.autograd import Variable
from tqdm import tqdm
from tensorboardX import SummaryWriter
from typing import List
from framenet_tools.config import ConfigManager
| 30.014409 | 123 | 0.584926 | import logging
import torch
import torch.nn as nn
import torchtext
import os
from torch.autograd import Variable
from tqdm import tqdm
from tensorboardX import SummaryWriter
from typing import List
from framenet_tools.config import ConfigManager
class Net(nn.Module):
def __init__(
self,
embedding_size: int,
hidden_sizes: list,
activation_functions: list,
num_classes: int,
embedding_layer: torch.nn.Embedding,
device: torch.device,
):
super(Net, self).__init__()
self.device = device
self.embedding_layer = embedding_layer
self.hidden_layers = []
last_size = embedding_size * 2
logging.debug(f"Hidden sizes: {hidden_sizes}")
logging.debug(f"Activation functions: {activation_functions}")
# Programmatically add new layers according to the config file
for i in range(len(hidden_sizes)):
if activation_functions[i].lower() == "dropout":
# Add dropout
self.add_module(str(i), nn.Dropout(hidden_sizes[i]))
self.hidden_layers.append(getattr(self, str(i)))
continue
hidden_sizes[i] = int(hidden_sizes[i])
self.add_module(str(i), nn.Linear(last_size, hidden_sizes[i]))
# Saving function ref
self.hidden_layers.append(getattr(self, str(i)))
# Dynamic instantiation of the activation function
act_func = getattr(nn, activation_functions[i])().to(self.device)
self.hidden_layers.append(act_func)
last_size = hidden_sizes[i]
self.out_layer = nn.Linear(last_size, num_classes)
def set_embedding_layer(self, embedding_layer: torch.nn.Embedding):
"""
Setter for the embedding_layer
:param embedding_layer: The new embedding_layer
:return:
"""
self.embedding_layer = embedding_layer
def average_sentence(self, sent: torch.tensor):
"""
Averages a sentence/multiple sentences by taking the mean of its embeddings
:param sent: The given sentence as numbers from the vocab
:return: The averaged sentence/sentences as a tensor (size equals the size of one word embedding for each sentence)
"""
lookup_tensor = sent.to(self.device)
appended_avg = []
for sentence in lookup_tensor:
# Cut off padding from torchtext, as it messes up the averaging process!
sentence = sentence[: (sentence != 1).nonzero()[-1].item() + 1]
embedded_sent = self.embedding_layer(sentence)
averaged_sent = embedded_sent.mean(dim=0)
# Reappend the FEE
inc_FEE = torch.cat((embedded_sent[0], averaged_sent), 0)
appended_avg.append(inc_FEE)
averaged_sent = torch.stack(appended_avg)
return averaged_sent
def forward(self, x: torch.tensor):
"""
The forward function, specifying the processing path
:param x: A input value
:return: The prediction of the network
"""
x = torch.transpose(x, 0, 1)
x = Variable(self.average_sentence(x)).to(self.device)
# Programmatically pass x through all layers
# NOTE: hidden_layers also includes activation functions!
for hidden_layer in self.hidden_layers:
x = hidden_layer(x)
out = self.out_layer(x)
return out
class FrameIDNetwork(object):
def __init__(self, cM: ConfigManager, embedding_layer: torch.nn.Embedding, num_classes: int):
self.cM = cM
# Check for CUDA
use_cuda = self.cM.use_cuda and torch.cuda.is_available()
self.device = torch.device("cuda" if use_cuda else "cpu")
logging.debug(f"Device used: {self.device}")
self.embedding_layer = embedding_layer
self.num_classes = num_classes
self.net = Net(
self.cM.embedding_size,
self.cM.hidden_sizes,
self.cM.activation_functions,
num_classes,
embedding_layer,
self.device,
)
self.net.to(self.device)
# Loss and Optimizer
self.criterion = nn.CrossEntropyLoss()
self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.cM.learning_rate)
def train_model(
self,
dataset_size: int,
train_iter: torchtext.data.Iterator,
dev_iter: torchtext.data.Iterator = None,
):
"""
Trains the model with the given dataset
Uses the model specified in net
:param dev_iter: The dev dataset for performance measuring
:param train_iter: The train dataset iterator including all data for training
:param dataset_size: The size of the dataset
:param batch_size: The batch size to use for training
:return:
"""
highest_acc = 0
auto_stopper = self.cM.autostopper and dev_iter is not None
last_improvement = 0
autostopper_threshold = self.cM.autostopper_threshold
writer = SummaryWriter()
for epoch in range(self.cM.num_epochs):
total_loss = 0
total_hits = 0
count = 0
with tqdm(
train_iter, position=0, desc=f"[Epoch: {epoch}/{self.cM.num_epochs}] Iteration"
) as progress_bar:
for batch in progress_bar:
sent = batch.Sentence
labels = Variable(batch.Frame[0]).to(self.device)
# Forward + Backward + Optimize
self.optimizer.zero_grad() # zero the gradient buffer
outputs = self.net(sent)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
total_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
total_hits += (predicted == labels).sum().item()
count += labels.size(0)
# Just update every 20 iterations
if count % 20 == 0:
train_loss = round((total_loss / count), 4)
train_acc = round((total_hits / count), 4)
progress_bar.set_postfix(
Loss=train_loss, Acc=train_acc, Frames=f"{count}/{dataset_size}"
)
train_loss = total_loss / count
train_acc = total_hits / count
if dev_iter is None:
logging.info(f"Train Acc: {train_acc}, Train Loss: {train_loss}")
writer.add_scalars("data/loss", {"train_loss": train_loss}, epoch)
writer.add_scalars("data/acc", {"train_acc": train_acc}, epoch)
continue
dev_acc, dev_loss = self.eval_model(dev_iter)
last_improvement += 1
if dev_acc > highest_acc:
highest_acc = dev_acc
last_improvement = 0
self.save_model(self.cM.saved_model + ".auto")
logging.info(
f"Train Acc: {train_acc}, Dev Acc: {dev_acc}, Train Loss: {train_loss}, Dev Loss: {dev_loss}"
)
writer.add_scalars("data/loss", {"train_loss": train_loss, "dev_loss": dev_loss}, epoch)
writer.add_scalars("data/acc", {"train_acc": train_acc, "dev_acc": dev_acc}, epoch)
if auto_stopper and (last_improvement > autostopper_threshold):
writer.close()
return
writer.close()
def query(self, x: List[int]):
"""
Query a single sentence
:param x: A list of ints representing words according to the embedding dictionary
:return: The prediction of the frame
"""
x = torch.tensor(x)
output = self.net(x)
# _, predicted = torch.max(output.data, 1)
return output.data.to("cpu")
def predict(self, dataset_iter: torchtext.data.Iterator):
"""
Uses the model to predict all given input data
:param dataset_iter: The dataset to predict
:return: A list of predictions
"""
predictions = []
for batch in iter(dataset_iter):
sent = batch.Sentence
outputs = self.net(sent)
_, predicted = torch.max(outputs.data, 1)
predictions.append(predicted.to("cpu"))
return predictions
def eval_model(self, dev_iter: torchtext.data.Iterator):
""" Evaluates the model on the given dataset
UPDATE: again required and integrated for evaluating the accuracy during training.
Still not recommended for final evaluation purposes.
NOTE: only works on gold FEEs, therefore deprecated
use f1 evaluation instead
:param dev_iter: The dataset to evaluate on
:return: The accuracy reached on the given dataset
"""
eval_criterion = nn.CrossEntropyLoss()
correct = 0.0
total = 0.0
loss = 0.0
for batch in iter(dev_iter):
sent = batch.Sentence
labels = Variable(batch.Frame[0]).to(self.device)
outputs = self.net(sent)
batch_loss = eval_criterion(outputs, labels)
_, predicted = torch.max(outputs.data, 1)
total += self.cM.batch_size
correct += (predicted == labels).sum()
loss += batch_loss.item()
correct = correct.item()
logging.debug(f"Correct predictions: {correct} Total examples: {total}")
accuracy = correct / total
loss = loss / total
return accuracy, loss
def save_model(self, path: str):
"""
Saves the current model at the given path
:param path: The path to save the model at
:return:
"""
upper_path = path[: path.rfind("/")]
if not os.path.isdir(upper_path):
os.makedirs(upper_path)
torch.save(self.net.state_dict(), path)
def load_model(self, path: str):
"""
Loads the model from a given path
:param path: The path from where to load the model
:return:
"""
self.net.load_state_dict(torch.load(path))
| 2,222 | 7,897 | 46 |
36c4c87462bf79cb67dc00f2e8ad299e8de7e43c | 12,354 | py | Python | build/scripts/build_mn.py | SitdikovRustam/CatBoost | 39fb9dfddb24e977ed87efc71063b03cd4bc8f16 | [
"Apache-2.0"
] | 33 | 2016-12-15T21:47:13.000Z | 2020-10-27T23:53:59.000Z | build/scripts/build_mn.py | dsferz/machinelearning_yandex | 8fde8314c5c70299ece8b8f00075ddfcd5e07ddf | [
"Apache-2.0"
] | null | null | null | build/scripts/build_mn.py | dsferz/machinelearning_yandex | 8fde8314c5c70299ece8b8f00075ddfcd5e07ddf | [
"Apache-2.0"
] | 14 | 2016-12-28T17:00:33.000Z | 2022-01-16T20:15:27.000Z | #!/usr/bin/env python
# Ymake MatrixNet support
import sys
import os
import shutil
import re
import subprocess
if __name__ == '__main__':
    # Dispatch: argv[1] names a function defined in this module (e.g. BuildMnF);
    # the remaining arguments are forwarded to it as a single list.
    if len(sys.argv) < 2:
        print >>sys.stderr, "Usage: build_mn.py <funcName> <args...>"
        sys.exit(1)
    if (sys.argv[2:]):
        globals()[sys.argv[1]](sys.argv[2:])
    else:
        # No trailing arguments: call the function without parameters.
        globals()[sys.argv[1]]()
| 37.323263 | 185 | 0.581755 | #!/usr/bin/env python
# Ymake MatrixNet support
import sys
import os
import shutil
import re
import subprocess
def get_value(val):
    """Return the text after the first '=' in *val*, or '' when there is none."""
    _, separator, rest = val.partition('=')
    return rest if separator else ''
class BuildMnBase(object):
    """Embeds a MatrixNet model binary into generated C++ code.

    The model file is archived into a .rodata object and a .cpp stub is
    written that declares the embedded bytes and constructs a global
    TMnSseInfo/TMnMultiCateg (optionally behind a smart pointer) from them.
    """
    def Run(self, mninfo, mnname, mnrankingSuffix, mncppPath, check=False, ptr=False, multi=False):
        """Generate mncppPath (and a sibling .rodata) for the model mninfo."""
        self.mninfo = mninfo
        self.mnname = mnname
        self.mnrankingSuffix = mnrankingSuffix
        self.mncppPath = mncppPath
        self.check = check
        self.ptr = ptr
        self.multi = multi
        dataprefix = "MN_External_"
        mninfoName = os.path.basename(self.mninfo)
        data = dataprefix + mnname
        datasize = data + "Size"
        # Pick the C++ type and constructor call for the four ptr/multi combinations.
        if self.multi:
            if self.ptr:
                mntype = "const NMatrixnet::TMnMultiCategPtr"
                mnload = "(new NMatrixnet::TMnMultiCateg( {1}, {2}, \"{0}\"))".format(mninfoName, data, datasize)
            else:
                mntype = "const NMatrixnet::TMnMultiCateg"
                mnload = "({1}, {2}, \"{0}\")".format(mninfoName, data, datasize)
        else:
            if self.ptr:
                mntype = "const NMatrixnet::TMnSsePtr"
                mnload = "(new NMatrixnet::TMnSseInfo({1}, {2}, \"{0}\"))".format(mninfoName, data, datasize)
            else:
                mntype = "const NMatrixnet::TMnSseInfo"
                mnload = "({1}, {2}, \"{0}\")".format(mninfoName, data, datasize)
        if self.check:
            self.CheckMn()
        # Write to a .tmp file first and move it into place atomically below.
        mncpptmpPath = self.mncppPath + ".tmp"
        mncpptmp = open(mncpptmpPath, 'w')
        if self.multi:
            mncpptmp.write("#include <kernel/matrixnet/mn_multi_categ.h>\n")
        else:
            mncpptmp.write("#include <kernel/matrixnet/mn_sse.h>\n")
        rodatapath = os.path.dirname(self.mncppPath) + "/" + dataprefix + self.mnname + ".rodata"
        mncpptmp.write("namespace{\n")
        mncpptmp.write("    extern \"C\" {\n")
        mncpptmp.write("        extern const unsigned char {1}{0}[];\n".format(self.mnname, dataprefix))
        mncpptmp.write("        extern const ui32 {1}{0}Size;\n".format(self.mnname, dataprefix))
        mncpptmp.write("    }\n")
        mncpptmp.write("}\n")
        # Archive the raw model bytes into the .rodata object the stub references.
        archiverCall = subprocess.Popen([self.archiver, "-q", "-p", "-o", rodatapath, self.mninfo], stdout=None, stderr=subprocess.PIPE)
        archiverCall.wait()
        mncpptmp.write("extern {0} {1};\n".format(mntype, self.mnname))
        mncpptmp.write("{0} {1}{2};".format(mntype, self.mnname, mnload))
        mncpptmp.close()
        shutil.move(mncpptmpPath, self.mncppPath)
    def CheckMn(self):
        """Validate the model with fml_unused_tool; assert on a non-zero exit."""
        if not self.fml_unused_tool:
            print >>sys.stderr, "fml_unused_tool undefined!"
        failed_msg = "fml_unused_tool failed: {0} -A {1} -e -r {2}".format(self.fml_unused_tool, self.SrcRoot, self.mninfo)
        assert not subprocess.call([self.fml_unused_tool, "-A", self.SrcRoot, "-e", "-r", self.mninfo]), failed_msg
class BuildMn(BuildMnBase):
    """Command-line front end that embeds a single MatrixNet model."""
    def Run(self, argv):
        """Parse positional args plus CHECK/PTR/MULTI/fml_tool= flags, then build."""
        if len(argv) < 6:
            print >>sys.stderr, "BuildMn.Run(<ARCADIA_ROOT> <archiver> <mninfo> <mnname> <mnrankingSuffix> <cppOutput> [params...])"
            sys.exit(1)
        self.SrcRoot = argv[0]
        self.archiver = argv[1]
        mninfo = argv[2]
        mnname = argv[3]
        mnrankingSuffix = argv[4]
        mncppPath = argv[5]
        check = False
        ptr = False
        multi = False
        self.fml_unused_tool = ''
        # Optional flags may appear in any order after the six positionals.
        for param in argv[6:]:
            if param == "CHECK":
                check = True
            elif param == "PTR":
                ptr = True
            elif param == "MULTI":
                multi = True
            elif param.startswith('fml_tool='):
                self.fml_unused_tool = get_value(param)
            else:
                print >>sys.stdout, "Unknown param: {0}".format(param)
        super(BuildMn, self).Run(mninfo, mnname, mnrankingSuffix, mncppPath, check=check, ptr=ptr, multi=multi)
class BuildMns(BuildMnBase):
    """Embeds a set of MatrixNet models and generates lookup tables for them.

    Produces a header declaring every embedded model, a cpp file with
    name->model maps (single-category and multi-category), and one cpp
    per model via BuildMnBase.Run. ".info" files are plain models,
    ".mnmc" files are multi-category models.
    """
    def InitBase(self, listname, mnrankingSuffix):
        """Precompute the C++ type/name strings shared by header and cpp output."""
        self.autogen = '// DO NOT EDIT THIS FILE DIRECTLY, AUTOGENERATED!\n'
        self.mnrankingSuffix = mnrankingSuffix
        self.mnlistname = listname + mnrankingSuffix
        self.mnlistelem = "const NMatrixnet::TMnSsePtr*"
        mnlisttype = "ymap< TString, {0} >".format(self.mnlistelem)
        self.mnlist = "const {0} {1}".format(mnlisttype, self.mnlistname)
        self.mnmultilistname = "{0}{1}Multi".format(listname, self.mnrankingSuffix)
        self.mnmultilistelem = "const NMatrixnet::TMnMultiCategPtr*"
        mnmultilisttype = "ymap< TString, {0} >".format(self.mnmultilistelem)
        self.mnmultilist = "const {0} {1}".format(mnmultilisttype, self.mnmultilistname)
    def InitForAll(self, argv):
        """Parse arguments when header, cpp and model files are all generated."""
        if len(argv) < 8:
            print >>sys.stderr, "BuildMns.InitForAll(<ARCADIA_ROOT> <BINDIR> <archiver> <listname> <mnranking_suffix> <hdrfile> <srcfile> <mninfos> [fml_tool=<fml_unused_tool> CHECK])"
            sys.exit(1)
        bmns_args = []
        self.check = False
        self.fml_unused_tool = ''
        # Strip the optional CHECK / fml_tool= flags; the rest are positional.
        for arg in argv:
            if arg == "CHECK":
                self.check = True
            elif arg.startswith('fml_tool='):
                self.fml_unused_tool = get_value(arg)
            else:
                bmns_args.append(arg)
        self.SrcRoot = bmns_args[0]
        self.BINDIR = bmns_args[1]
        self.archiver = bmns_args[2]
        self.listname = bmns_args[3]
        self.mnrankingSuffix = get_value(bmns_args[4])
        self.hdrfile = bmns_args[5]
        self.srcfile = bmns_args[6]
        self.mninfos = bmns_args[7:]
        self.InitBase(self.listname, self.mnrankingSuffix)
    def InitForHeader(self, argv):
        """Parse arguments for header-only generation."""
        if len(argv) < 4:
            print >>sys.stderr, "BuildMns.InitForHeader(<listname> <rankingSuffix> <hdrfile> <mninfos...>)"
            sys.exit(1)
        self.listname = argv[0]
        self.mnrankingSuffix = get_value(argv[1])
        self.hdrfile = argv[2]
        self.mninfos = argv[3:]
        self.InitBase(self.listname, self.mnrankingSuffix)
    def InitForCpp(self, argv):
        """Parse arguments for cpp-only generation."""
        if len(argv) < 5:
            print >>sys.stderr, "BuildMns.InitForCpp(<listname> <rankingSuffix> <hdrfile> <srcfile> <mninfos...>)"
            sys.exit(1)
        self.listname = argv[0]
        self.mnrankingSuffix = get_value(argv[1])
        self.hdrfile = argv[2]
        self.srcfile = argv[3]
        self.mninfos = argv[4:]
        self.InitBase(self.listname, self.mnrankingSuffix)
    def InitForFiles(self, argv):
        """Parse arguments for per-model file generation (no header/cpp list)."""
        if len(argv) < 7:
            print >>sys.stderr, "BuildMns.InitForFiles(<ARCADIA_ROOT> <BINDIR> <archiver> <fml_unused_tool> <listname> <rankingSuffix> <mninfos...> [CHECK])"
            sys.exit(1)
        bmns_args = []
        self.check = False
        self.fml_unused_tool = ''
        for arg in argv:
            if arg == "CHECK":
                self.check = True
            elif arg.startswith('fml_tool='):
                self.fml_unused_tool = get_value(arg)
            else:
                bmns_args.append(arg)
        self.SrcRoot = bmns_args[0]
        self.BINDIR = bmns_args[1]
        self.archiver = bmns_args[2]
        self.listname = bmns_args[3]
        self.mnrankingSuffix = get_value(bmns_args[4])
        self.mninfos = bmns_args[5:]
    def BuildMnsHeader(self):
        """Write the header with extern declarations for every embedded model."""
        if self.mninfos:
            # De-duplicate and order models so output is deterministic.
            self.mninfos = sorted(set(self.mninfos))
        tmpHdrPath = self.hdrfile + ".tmp"
        tmpHdrFile = open(tmpHdrPath, 'w')
        tmpHdrFile.write(self.autogen)
        tmpHdrFile.write("#include <kernel/matrixnet/mn_sse.h>\n")
        tmpHdrFile.write("#include <kernel/matrixnet/mn_multi_categ.h>\n\n")
        tmpHdrFile.write("extern {0};\n".format(self.mnlist))
        tmpHdrFile.write("extern {0};\n".format(self.mnmultilist))
        for item in self.mninfos:
            mnfilename = os.path.basename(item)
            mnfilename, ext = os.path.splitext(mnfilename)
            # Sanitize the file name into a valid C++ identifier.
            mnname = re.sub("[^-a-zA-Z0-9_]", "_", mnfilename)
            if ext == ".info":
                mnname = "staticMn{0}{1}Ptr".format(self.mnrankingSuffix, mnname)
                tmpHdrFile.write("extern const NMatrixnet::TMnSsePtr {0};\n".format(mnname))
            elif ext == ".mnmc":
                mnname = "staticMnMulti{0}{1}Ptr".format(self.mnrankingSuffix, mnname)
                tmpHdrFile.write("extern const NMatrixnet::TMnMultiCategPtr {0};\n".format(mnname))
        tmpHdrFile.close()
        shutil.move(tmpHdrPath, self.hdrfile)
    def BuildMnFiles(self):
        """Generate one embedding cpp (and .rodata) per model via the base class."""
        for item in self.mninfos:
            mnfilename = os.path.basename(item)
            mnfilename, ext = os.path.splitext(mnfilename)
            mnname = re.sub("[^-a-zA-Z0-9_]", "_", mnfilename)
            if ext == ".info":
                mnname = "staticMn{0}{1}Ptr".format(self.mnrankingSuffix, mnname)
                super(BuildMns, self).Run(item, mnname, self.mnrankingSuffix, self.BINDIR + "/mn.{0}.cpp".format(mnname), check=self.check, ptr=True, multi=False)
            elif ext == ".mnmc":
                mnname = "staticMnMulti{0}{1}Ptr".format(self.mnrankingSuffix, mnname)
                # BUILD_MN_PTR_MULTI
                super(BuildMns, self).Run(item, mnname, self.mnrankingSuffix, self.BINDIR + "/mnmulti.{0}.cpp".format(mnname), check=False, ptr=True, multi=True)
    def BuildMnsCpp(self):
        """Write the cpp defining the name->model maps declared in the header."""
        if self.mninfos:
            self.mninfos = sorted(set(self.mninfos))
        tmpSrcPath = self.srcfile + ".tmp"
        tmpSrcFile = open(tmpSrcPath, 'w')
        hdrrel = os.path.basename(self.hdrfile)
        mnnames = []
        mnmultinames = []
        # Split models by kind; each kind gets its own lookup table.
        for item in self.mninfos:
            mnfilename = os.path.basename(item)
            mnfilename, ext = os.path.splitext(mnfilename)
            if ext == ".info":
                mnnames.append(mnfilename)
            elif ext == ".mnmc":
                mnmultinames.append(mnfilename)
        tmpSrcFile.write(self.autogen)
        tmpSrcFile.write("#include \"{0}\"\n\n".format(hdrrel))
        if mnnames:
            mndata = self.mnlistname + "_data"
            tmpSrcFile.write("static const std::pair< TString, {0} > {1}[] = {{\n".format(self.mnlistelem, mndata))
            for item in mnnames:
                mnname = re.sub("[^-a-zA-Z0-9_]", "_", item)
                tmpSrcFile.write("    std::make_pair(TString(\"{0}\"), &staticMn{1}{2}Ptr),\n".format(item, self.mnrankingSuffix, mnname))
            tmpSrcFile.write("};\n")
            tmpSrcFile.write("{0}({1},{1} + sizeof({1}) / sizeof({1}[0]));\n\n".format(self.mnlist, mndata))
        else:
            # No models of this kind: still emit the (empty) map definition.
            tmpSrcFile.write("{0};\n\n".format(self.mnlist))
        if mnmultinames:
            mnmultidata = self.mnmultilistname + "_data"
            tmpSrcFile.write("static const std::pair< TString, {0} > {1}[] = {{\n".format(self.mnmultilistelem, mnmultidata))
            for item in mnmultinames:
                mnname = re.sub("[^-a-zA-Z0-9_]", "_", item)
                tmpSrcFile.write("    std::make_pair(TString(\"{0}\"), &staticMnMulti{1}{2}Ptr),\n".format(item, self.mnrankingSuffix, mnname))
            tmpSrcFile.write("};\n")
            tmpSrcFile.write("{0}({1},{1} + sizeof({1}) / sizeof({1}[0]));\n".format(self.mnmultilist, mnmultidata))
        else:
            tmpSrcFile.write("{0};\n".format(self.mnmultilist))
        tmpSrcFile.close()
        shutil.move(tmpSrcPath, self.srcfile)
def BuildMnsAllF(argv):
    """Generate the header, the cpp lookup tables and all per-model cpp files."""
    builder = BuildMns()
    builder.InitForAll(argv)
    builder.BuildMnsCpp()
    builder.BuildMnsHeader()
    builder.BuildMnFiles()
def BuildMnsCppF(argv):
    """Generate only the cpp file with the model lookup tables."""
    builder = BuildMns()
    builder.InitForCpp(argv)
    builder.BuildMnsCpp()
def BuildMnsHeaderF(argv):
    """Generate only the header with the model declarations."""
    builder = BuildMns()
    builder.InitForHeader(argv)
    builder.BuildMnsHeader()
def BuildMnsFilesF(argv):
    """Generate only the per-model embedding cpp files."""
    builder = BuildMns()
    builder.InitForFiles(argv)
    builder.BuildMnFiles()
def BuildMnF(argv):
    """Embed a single MatrixNet model according to *argv*."""
    BuildMn().Run(argv)
if __name__ == '__main__':
    # Dispatch: argv[1] names a function defined in this module (e.g. BuildMnF);
    # the remaining arguments are forwarded to it as a single list.
    if len(sys.argv) < 2:
        print >>sys.stderr, "Usage: build_mn.py <funcName> <args...>"
        sys.exit(1)
    if (sys.argv[2:]):
        globals()[sys.argv[1]](sys.argv[2:])
    else:
        # No trailing arguments: call the function without parameters.
        globals()[sys.argv[1]]()
| 11,457 | 18 | 501 |
1a013c58525679a82e132d2ef013f1c9bb9e9a4a | 2,084 | py | Python | Teacher/migrations/0001_initial.py | AnonC0DER/C1Academy | 449b35866b703462e4f2dbe20ed34aed9593b3ad | [
"CC0-1.0"
] | 1 | 2022-02-18T19:46:26.000Z | 2022-02-18T19:46:26.000Z | Teacher/migrations/0001_initial.py | AnonC0DER/C1Academy | 449b35866b703462e4f2dbe20ed34aed9593b3ad | [
"CC0-1.0"
] | null | null | null | Teacher/migrations/0001_initial.py | AnonC0DER/C1Academy | 449b35866b703462e4f2dbe20ed34aed9593b3ad | [
"CC0-1.0"
] | null | null | null | # Generated by Django 3.2.9 on 2022-01-20 19:34
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import uuid
| 46.311111 | 241 | 0.62524 | # Generated by Django 3.2.9 on 2022-01-20 19:34
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial schema for the Teacher app (Teacher, Classroom)."""
    initial = True
    dependencies = [
        # Depends on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('email', models.EmailField(max_length=255)),
                ('username', models.CharField(max_length=75)),
                ('first_name', models.CharField(max_length=75)),
                ('last_name', models.CharField(max_length=75)),
                ('phone_number', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator(message='Phone number must be entered in the format: +98xxxxxxxxxx. Up to 15 numbers allowed.', regex='^\\+?1?\\d{9,15}$')])),
                ('image', models.ImageField(upload_to='TeacherImages/')),
                ('last_login', models.DateTimeField(auto_now=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
                ('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Classroom',
            fields=[
                ('name', models.CharField(max_length=120)),
                ('class_hours', models.CharField(help_text='Set class hours -> 6:45PM - 8:00PM', max_length=60)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
                ('teacher', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Teacher.teacher')),
            ],
        ),
]
| 0 | 1,862 | 23 |
72166ba3c2141d8936efe5c2be7c7c7edf2b3d04 | 496 | py | Python | Between_Two_Sets.py | Quarantinex/Hackerrank_Python_Algorithm | 4a5fc532bfbdac02e66e9d0d9ae279c4e33ca017 | [
"MIT"
] | null | null | null | Between_Two_Sets.py | Quarantinex/Hackerrank_Python_Algorithm | 4a5fc532bfbdac02e66e9d0d9ae279c4e33ca017 | [
"MIT"
] | null | null | null | Between_Two_Sets.py | Quarantinex/Hackerrank_Python_Algorithm | 4a5fc532bfbdac02e66e9d0d9ae279c4e33ca017 | [
"MIT"
] | null | null | null | if __name__=='__main__':
    # HackerRank "Between Two Sets": count integers that are multiples of
    # every element of arr AND factors of every element of brr.
    n,m = map(int,input().split())
    arr = list(map(int,input().split()))
    brr = list(map(int,input().split()))
    count = 0
    # Any valid value lies in [max(arr), min(brr)].
    for i in range(max(arr),min(brr)+1):
        # flag stays True only if i is a multiple of every element of arr...
        flag = True
        for j in arr:
            if i%j!=0:
                flag = False
                break
        if flag:
            # ...and a factor of every element of brr.
            for k in brr:
                if k%i!=0:
                    flag = False
                    break
        if flag:
            count+=1
print(count) | 26.105263 | 40 | 0.41129 | if __name__=='__main__':
n,m = map(int,input().split())
arr = list(map(int,input().split()))
brr = list(map(int,input().split()))
count = 0
for i in range(max(arr),min(brr)+1):
flag = True
for j in arr:
if i%j!=0:
flag = False
break
if flag:
for k in brr:
if k%i!=0:
flag = False
break
if flag:
count+=1
print(count) | 0 | 0 | 0 |
9564701ea09724db42281703c4a714b201629f77 | 1,913 | py | Python | jes/jes-v5.020-linux/jes/python/jes/bridge/terpcontrol.py | utv-teaching/foundations-computer-science | 568e19fd83a3355dab2814229f335abf31bfd7e9 | [
"MIT"
] | null | null | null | jes/jes-v5.020-linux/jes/python/jes/bridge/terpcontrol.py | utv-teaching/foundations-computer-science | 568e19fd83a3355dab2814229f335abf31bfd7e9 | [
"MIT"
] | null | null | null | jes/jes-v5.020-linux/jes/python/jes/bridge/terpcontrol.py | utv-teaching/foundations-computer-science | 568e19fd83a3355dab2814229f335abf31bfd7e9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
jes.bridge.terpcontrol
======================
This interacts with the interpreter, to keep the GUI locked down while
the interpreter runs.
(In JES, "terp" is short for "interpreter," not "terrapin.")
:copyright: (C) 2014 Matthew Frazier and Mark Guzdial
:license: GNU GPL v2 or later, see jes/help/JESCopyright.txt for details
"""
import Stoppable
import StoppableInput
import StoppableOutput
from jes.gui.commandwindow.redirect import RedirectStdio
from jes.gui.components.threading import threadsafe
| 28.552239 | 74 | 0.681129 | # -*- coding: utf-8 -*-
"""
jes.bridge.terpcontrol
======================
This interacts with the interpreter, to keep the GUI locked down while
the interpreter runs.
(In JES, "terp" is short for "interpreter," not "terrapin.")
:copyright: (C) 2014 Matthew Frazier and Mark Guzdial
:license: GNU GPL v2 or later, see jes/help/JESCopyright.txt for details
"""
import Stoppable
import StoppableInput
import StoppableOutput
from jes.gui.commandwindow.redirect import RedirectStdio
from jes.gui.components.threading import threadsafe
class InterpreterControl(Stoppable):
    """Locks down the JES GUI while the interpreter thread runs user code."""
    def __init__(self, gui, interpreter):
        self.gui = gui
        self.interpreter = interpreter
        self.redirect = RedirectStdio(gui.commandWindow)
        # Mirror the interpreter's lifecycle events onto the GUI state.
        interpreter.beforeRun.connect(self.afterLock)
        interpreter.afterRun.connect(self.beforeUnlock)
        interpreter.onException.connect(self.showException)
    def stop(self):
        """Stoppable interface: abort the running interpreter thread."""
        self.interpreter.stopThread()
    @threadsafe
    def afterLock(self, terp, mode, **_):
        """Before code runs: lock the editor and capture stdio."""
        self.gui.startWork()
        self.gui.setRunning(True)
        self.gui.editor.editable = False
        self.redirect.install()
        # Route stop requests from blocking input/output back to this object.
        StoppableInput.setThingToStop(self)
        StoppableOutput.setThingToStop(self)
    @threadsafe
    def beforeUnlock(self, terp, mode, **_):
        """After code finishes: undo everything afterLock set up."""
        StoppableInput.setThingToStop(None)
        StoppableOutput.setThingToStop(None)
        self.redirect.uninstall()
        self.gui.editor.document.removeLineHighlighting()
        self.gui.editor.editable = True
        self.gui.setRunning(False)
        self.gui.stopWork()
    @threadsafe
    def showException(self, terp, excRecord, mode, **_):
        """Show a traceback in the command window and highlight the bad line."""
        msg = excRecord.getExceptionMsg()
        lineno = excRecord.getLineNumber()
        if msg:
            self.gui.commandWindow.display(msg, 'python-traceback')
        if lineno:
            self.gui.editor.showErrorLine(lineno)
| 1,158 | 197 | 23 |
424a8d6e18b6abbcf0e57959bb47419d37865b4b | 1,499 | py | Python | icubam/backoffice/handlers/upload.py | rth/icubam | 316a0fba79360189a06e068f4b1d3d17b91f0275 | [
"Apache-2.0"
] | 33 | 2020-03-27T02:01:33.000Z | 2021-09-10T22:32:42.000Z | icubam/backoffice/handlers/upload.py | rth/icubam | 316a0fba79360189a06e068f4b1d3d17b91f0275 | [
"Apache-2.0"
] | 198 | 2020-03-27T08:35:25.000Z | 2020-11-06T15:20:25.000Z | icubam/backoffice/handlers/upload.py | rth/icubam | 316a0fba79360189a06e068f4b1d3d17b91f0275 | [
"Apache-2.0"
] | 18 | 2020-03-26T20:38:50.000Z | 2021-08-30T07:31:26.000Z | """Creating/edition of ICUs."""
from absl import logging
import io
import json
import tornado.web
from icubam.backoffice.handlers import base
from icubam.db import synchronizer
from typing import Dict, Callable
| 29.98 | 79 | 0.688459 | """Creating/edition of ICUs."""
from absl import logging
import io
import json
import tornado.web
from icubam.backoffice.handlers import base
from icubam.db import synchronizer
from typing import Dict, Callable
class UploadHandler(base.BaseHandler):
  """Receives a JSON payload carrying CSV data and syncs it into the DB."""
  ROUTE = "upload"
  def answer(self, msg, error=False) -> None:
    """Log *msg* and send it back to the client as a JSON {msg, error} reply."""
    logging.error(msg)
    self.write(json.dumps({'msg': msg, 'error': error}))
  @tornado.web.authenticated
  def post(self) -> None:
    """Decode the upload, pick the synchronizer for its object type, run it."""
    try:
      data = json.loads(self.request.body.decode())
    except Exception as e:
      return self.answer(f'Could not upload json {e}', error=True)
    content = data.get('data', None)
    if content is None:
      return self.answer('No CSV content', error=True)
    sync = synchronizer.CSVSynchronizer(self.db)
    # One sync routine per uploadable object type.
    sync_fns: Dict[base.ObjType, Callable[..., int]] = {
      base.ObjType.USERS: sync.sync_users_from_csv,
      base.ObjType.ICUS: sync.sync_icus_from_csv,
      base.ObjType.BEDCOUNTS: sync.sync_bedcounts_from_csv
    }
    objtype_name = data.get('objtype', None)
    try:
      # KeyError covers both an unknown objtype name and a missing handler.
      objtype = base.ObjType[objtype_name]
      sync_fn = sync_fns[objtype]
    except KeyError:
      return self.answer(
        'Cannot find proper synchronization method.', error=True
      )
    try:
      # force_update overwrites existing rows with the uploaded values.
      num_updates = sync_fn(io.StringIO(content), force_update=True)
      return self.answer(f'Updated {num_updates} {objtype}')
    except Exception as e:
      return self.answer(f'Failing while syncing csv content: {e}', error=True)
| 1,148 | 115 | 23 |
f4b9a963494be6d889c49dfb0051dd25e357be3b | 237 | py | Python | user_profile/settings.py | alldevic/nav_info | 32681d1cd3ad43472c8f7fb49922094c4045111c | [
"MIT"
] | 1 | 2019-12-25T07:50:09.000Z | 2019-12-25T07:50:09.000Z | user_profile/settings.py | alldevic/nav_info | 32681d1cd3ad43472c8f7fb49922094c4045111c | [
"MIT"
] | 176 | 2019-11-07T07:08:27.000Z | 2022-03-12T00:04:50.000Z | user_profile/settings.py | alldevic/nav_info | 32681d1cd3ad43472c8f7fb49922094c4045111c | [
"MIT"
] | 4 | 2020-07-20T06:48:27.000Z | 2021-06-29T08:04:26.000Z | from django.conf import settings
# Default configuration for the user_profile app.
USERPROFILE_SETTINGS = {
    'app_verbose_name': "Custom User",
    'register_proxy_auth_group_model': True,
}
# Project-level overrides: a USERPROFILE dict in Django settings wins.
if hasattr(settings, 'USERPROFILE'):
    USERPROFILE_SETTINGS.update(settings.USERPROFILE)
| 23.7 | 53 | 0.767932 | from django.conf import settings
# App defaults, optionally overridden by a USERPROFILE dict in Django settings.
USERPROFILE_SETTINGS = {
    'app_verbose_name': "Custom User",
    'register_proxy_auth_group_model': True,
}
USERPROFILE_SETTINGS.update(getattr(settings, 'USERPROFILE', {}))
| 0 | 0 | 0 |
b816aab00b223f4a9c26b1dfce0ca81c9134ebc1 | 155 | py | Python | src/testcases/gen.py | tsw303005/MapReduce | e29778a439210963a7cd8047e55123e0c810b79b | [
"MIT"
] | null | null | null | src/testcases/gen.py | tsw303005/MapReduce | e29778a439210963a7cd8047e55123e0c810b79b | [
"MIT"
] | null | null | null | src/testcases/gen.py | tsw303005/MapReduce | e29778a439210963a7cd8047e55123e0c810b79b | [
"MIT"
] | null | null | null | import random
# Test-case generator: write 1000 lines of "<id> <random value 1..100>".
with open('09.loc', 'w') as f:
    for i in range(1, 1001):
        s = str(i) + ' ' + str(random.randint(1, 100)) + '\n'
f.write(s) | 25.833333 | 61 | 0.503226 | import random
# Emit 1000 "<id> <random value 1..100>" records into the test-case file.
with open('09.loc', 'w') as f:
    for i in range(1, 1001):
        s = '{} {}\n'.format(i, random.randint(1, 100))
f.write(s) | 0 | 0 | 0 |
b4aacb6f6ea6f57d2a23509d9fd4d4ca35240d73 | 3,422 | py | Python | dla_cnn/data_model/Prediction.py | AhmedElshaarany/qso_lya_detection_pipeline | fc365326750f1636fe9cad5a1a80b3156375b193 | [
"MIT"
] | 8 | 2016-12-19T07:29:25.000Z | 2019-05-31T06:43:21.000Z | dla_cnn/data_model/Prediction.py | AhmedElshaarany/qso_lya_detection_pipeline | fc365326750f1636fe9cad5a1a80b3156375b193 | [
"MIT"
] | 10 | 2016-11-01T22:16:56.000Z | 2020-02-16T14:54:16.000Z | dla_cnn/data_model/Prediction.py | AhmedElshaarany/qso_lya_detection_pipeline | fc365326750f1636fe9cad5a1a80b3156375b193 | [
"MIT"
] | 8 | 2018-06-05T10:40:17.000Z | 2019-01-15T22:38:09.000Z | import scipy.signal as signal
import numpy as np
| 46.876712 | 155 | 0.655172 | import scipy.signal as signal
import numpy as np
class Prediction(object):
    """Holds the per-sightline outputs of the DLA classifier.

    Stores the detected peak indices plus the per-pixel localization,
    offset and column-density predictions, and derives smoothed /
    aggregated quantities from them.
    """
    def __init__(self, peaks_ixs=None, offset_hist=None, offset_conv_sum=None,
                 loc_pred=None, loc_conf=None, offsets=None, density_data=None):
        # Peaks data
        self._peaks_ixs = None
        self.peaks_ixs = peaks_ixs          # routed through the sorting setter
        self.offset_hist = offset_hist
        self.offset_conv_sum = offset_conv_sum
        # Prediction data
        self.loc_pred = loc_pred
        self.loc_conf = loc_conf
        self.offsets = offsets
        self.density_data = density_data
    @property
    def peaks_ixs(self):
        """Detected peak indices, kept in ascending order (or None)."""
        return self._peaks_ixs
    @peaks_ixs.setter
    def peaks_ixs(self, peaks_ixs):
        # Sorting up front keeps the neighbour lookups in
        # get_coldensity_for_peak simple.
        self._peaks_ixs = np.sort(peaks_ixs) if peaks_ixs is not None else None
    def smoothed_loc_conf(self, kernel=75):
        """Return a median-filtered copy of loc_conf."""
        # noinspection PyTypeChecker
        return signal.medfilt(self.loc_conf, kernel)
    def smoothed_conv_sum(self, kernel=9):
        """Return a median-filtered copy of offset_conv_sum."""
        return signal.medfilt(self.offset_conv_sum, kernel)
    # Returns the column density estimates for a specific peak and the mean.
    # Handles cases where the column density is too close to another DLA.
    # Takes a bias adjustment polynomial to adjust the column density and
    # returns the adjustment factor. The polynomial is hard coded here (it
    # would more logically live with the model); it was learned from the
    # 5k 96451 test dataset.
    def get_coldensity_for_peak(self, peak_ix,
                                bias_adjust=(0.0028149011281380278276520456870457564946264028549194,
                                             -0.0646188010849933769375041947569116018712520599365234,
                                             -0.004256561717710568779060587019102968042716383934021,
                                             23.555317918478582583929892280139029026031494140625)):
        """Aggregate density_data around *peak_ix*.

        :param peak_ix: index of the peak (must be one of peaks_ixs)
        :param bias_adjust: polynomial coefficients for np.polyval, or a
            falsy value to disable the bias correction
        :return: (adjusted column densities, adjusted mean, std, correction)
        :raises ValueError: if no density samples fall in the window
        """
        normal_range = 30
        peaks = self.peaks_ixs
        is_close_dla_left = np.any((peaks < peak_ix) & (peaks >= peak_ix - normal_range * 2))
        is_close_dla_right = np.any((peaks > peak_ix) & (peaks <= peak_ix + normal_range * 2))
        if is_close_dla_left and is_close_dla_right:
            # Special case: pinned between two close DLAs -- only average
            # halfway to each neighbouring peak. Integer division keeps the
            # slice bounds ints on Python 3 (true division broke slicing).
            range_left = (peak_ix - max(peaks[peaks < peak_ix])) // 2
            range_right = (min(peaks[peaks > peak_ix]) - peak_ix) // 2
        else:
            # Take the left side predictions or right side predictions or both.
            range_left = 0 if is_close_dla_left else normal_range
            range_right = 0 if is_close_dla_right else normal_range
        col_densities = self.density_data[max(0, peak_ix - range_left):peak_ix + range_right]
        if len(col_densities) == 0:
            # Previously dropped into pdb.set_trace(); fail loudly instead.
            raise ValueError("No column density data around peak index %s" % peak_ix)
        mean_col_density = np.mean(col_densities)
        bias_correction = np.polyval(bias_adjust, mean_col_density) - mean_col_density if bias_adjust else 0.0
        return col_densities + bias_correction, \
               mean_col_density + bias_correction, \
               np.std(col_densities), \
               bias_correction
| 2,614 | 729 | 24 |
59d375a815616f74c0678b19727359655056dc12 | 525 | py | Python | boomslang/api/packages/migrations/0002_package_team_owner.py | arnaudblois/liripype | c1b1436310139f7c0765042b89a881f11fa03aa4 | [
"MIT"
] | null | null | null | boomslang/api/packages/migrations/0002_package_team_owner.py | arnaudblois/liripype | c1b1436310139f7c0765042b89a881f11fa03aa4 | [
"MIT"
] | null | null | null | boomslang/api/packages/migrations/0002_package_team_owner.py | arnaudblois/liripype | c1b1436310139f7c0765042b89a881f11fa03aa4 | [
"MIT"
] | null | null | null | # Generated by Django 2.1 on 2018-08-27 07:36
from django.db import migrations, models
import django.db.models.deletion
| 22.826087 | 110 | 0.620952 | # Generated by Django 2.1 on 2018-08-27 07:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('packages', '0001_initial'),
('teams', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='package',
name='team_owner',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='teams.Team'),
),
]
| 0 | 380 | 23 |
571b3648a6fd441c10879674bfec152e0b319312 | 1,576 | py | Python | common-files/python-libs/helm.py | microfocus-idol/idol-containers-toolkit | 0b9b19ab86736fa8c662de34f382df406fbc4952 | [
"MIT"
] | 1 | 2020-10-22T07:44:21.000Z | 2020-10-22T07:44:21.000Z | common-files/python-libs/helm.py | microfocus-idol/idol-containers-toolkit | 0b9b19ab86736fa8c662de34f382df406fbc4952 | [
"MIT"
] | 1 | 2020-11-11T10:04:23.000Z | 2020-11-11T10:04:23.000Z | common-files/python-libs/helm.py | microfocus-idol/idol-containers-toolkit | 0b9b19ab86736fa8c662de34f382df406fbc4952 | [
"MIT"
] | 1 | 2021-02-01T18:31:18.000Z | 2021-02-01T18:31:18.000Z | ###
# Copyright (c) 2019-2020 Micro Focus or one of its affiliates.
#
# Licensed under the MIT License (the "License"); you may not use this file
# except in compliance with the License.
#
# The only warranties for products and services of Micro Focus and its affiliates
# and licensors ("Micro Focus") are as may be set forth in the express warranty
# statements accompanying such products and services. Nothing herein should be
# construed as constituting an additional warranty. Micro Focus shall not be
# liable for technical or editorial errors or omissions contained herein. The
# information contained herein is subject to change without notice.
###
""" base helper functions for helm scripts """
import subprocess
| 36.651163 | 81 | 0.741117 | ###
# Copyright (c) 2019-2020 Micro Focus or one of its affiliates.
#
# Licensed under the MIT License (the "License"); you may not use this file
# except in compliance with the License.
#
# The only warranties for products and services of Micro Focus and its affiliates
# and licensors ("Micro Focus") are as may be set forth in the express warranty
# statements accompanying such products and services. Nothing herein should be
# construed as constituting an additional warranty. Micro Focus shall not be
# liable for technical or editorial errors or omissions contained herein. The
# information contained herein is subject to change without notice.
###
""" base helper functions for helm scripts """
import subprocess
def run_and_check_returncode(cmd):
    """Run *cmd*; if it exits non-zero, report the failure and carry on."""
    completed = subprocess.run(cmd)
    try:
        completed.check_returncode()
    except subprocess.CalledProcessError as err:
        print(err, "Continuing...")
def create_configmaps(directory):
    # Apply all configmap manifests from a kustomization directory.
    run_and_check_returncode(['kubectl','create','-k', directory])
def delete_configmaps(directory):
    # Remove the configmaps previously applied from a kustomization directory.
    run_and_check_returncode(['kubectl','delete','-k', directory])
def launch_kubernetes(name, chart, values, upgrade=False):
    """Install (or upgrade) helm release *name* from *chart* with values files."""
    command = ['helm', 'upgrade' if upgrade else 'install', name, chart]
    for values_file in values:
        command.append('--values={}'.format(values_file))
    run_and_check_returncode(command)
def clear_kubernetes(name, configmaps):
    """Uninstall the helm release *name* and delete its configmap directories."""
    #uninstall chart
    run_and_check_returncode(['helm','delete', name])
    #delete all possible configmaps
    for configmap in configmaps:
        delete_configmaps(configmap)
| 737 | 0 | 115 |
c3411c834b0581456f21cb30e867ac898303ae25 | 823 | py | Python | test_package/conanfile.py | sintef-ocean/conan-mscl | 8ee84b701d5b10daf2d5defee580b711f438fa54 | [
"MIT"
] | null | null | null | test_package/conanfile.py | sintef-ocean/conan-mscl | 8ee84b701d5b10daf2d5defee580b711f438fa54 | [
"MIT"
] | null | null | null | test_package/conanfile.py | sintef-ocean/conan-mscl | 8ee84b701d5b10daf2d5defee580b711f438fa54 | [
"MIT"
] | null | null | null | from conans import ConanFile, CMake, tools
import os
| 30.481481 | 69 | 0.578372 | from conans import ConanFile, CMake, tools
import os
class MSCLTestConan(ConanFile):
    """Conan test package: builds and runs a tiny consumer of the MSCL package."""
    settings = "os", "compiler", "build_type", "arch"
    generators = ("cmake_paths", "cmake_find_package")
    options = {"shared": [True, False]}
    default_options = {"shared": False}
    #requires = "boost/1.78.0"
    def build(self):
        """Configure and build the test project with CMake."""
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
    def test(self):
        """Run the built binary to prove the package links and loads."""
        target_name = "TestTarget"
        if self.settings.os == "Windows":
            # Multi-config generators place the exe in a per-build-type folder.
            tester_exe = target_name + ".exe"
            tester_path = os.path.join(self.build_folder,
                                       str(self.settings.build_type))
        else:
            tester_exe = target_name
            tester_path = "." + os.sep
        self.run(os.path.join(tester_path, tester_exe))
| 462 | 284 | 23 |
c55b3e1851e7d227d7e876a05a40e2d7bdb3def0 | 5,851 | py | Python | riak/tests/test_btypes.py | albeus/riak-python-client | 51bf875f1f5e394d45540a3850a8453db0951c40 | [
"Apache-2.0"
] | null | null | null | riak/tests/test_btypes.py | albeus/riak-python-client | 51bf875f1f5e394d45540a3850a8453db0951c40 | [
"Apache-2.0"
] | null | null | null | riak/tests/test_btypes.py | albeus/riak-python-client | 51bf875f1f5e394d45540a3850a8453db0951c40 | [
"Apache-2.0"
] | null | null | null | import platform
if platform.python_version() < '2.7':
unittest = __import__('unittest2')
else:
import unittest
from . import SKIP_BTYPES
from riak.bucket import RiakBucket, BucketType
from riak import RiakError, RiakObject
| 36.117284 | 79 | 0.63972 | import platform
if platform.python_version() < '2.7':
unittest = __import__('unittest2')
else:
import unittest
from . import SKIP_BTYPES
from riak.bucket import RiakBucket, BucketType
from riak import RiakError, RiakObject
class BucketTypeTests(object):
    """Mixin of integration tests for Riak bucket types.

    Assumes the host test case provides ``self.client``, ``self.bucket_name``
    and ``self.key_name`` (set up elsewhere in the suite).  Tests that need a
    server with bucket types configured are skipped when SKIP_BTYPES == '1'.
    """
    def test_btype_init(self):
        """bucket_type() returns a cached, named BucketType instance."""
        btype = self.client.bucket_type('foo')
        self.assertIsInstance(btype, BucketType)
        self.assertEqual('foo', btype.name)
        self.assertIs(btype, self.client.bucket_type('foo'))
    def test_btype_get_bucket(self):
        """Buckets obtained via a bucket type are distinct from default ones."""
        btype = self.client.bucket_type('foo')
        bucket = btype.bucket(self.bucket_name)
        self.assertIsInstance(bucket, RiakBucket)
        self.assertIs(btype, bucket.bucket_type)
        self.assertIs(bucket,
                      self.client.bucket_type('foo').bucket(self.bucket_name))
        self.assertIsNot(bucket, self.client.bucket(self.bucket_name))
    def test_btype_default(self):
        """Only the 'default' bucket type reports is_default()."""
        defbtype = self.client.bucket_type('default')
        othertype = self.client.bucket_type('foo')
        self.assertTrue(defbtype.is_default())
        self.assertFalse(othertype.is_default())
    def test_btype_repr(self):
        """str() and repr() render as <BucketType 'name'>."""
        defbtype = self.client.bucket_type("default")
        othertype = self.client.bucket_type("foo")
        self.assertEqual("<BucketType 'default'>", str(defbtype))
        self.assertEqual("<BucketType 'foo'>", str(othertype))
        self.assertEqual("<BucketType 'default'>", repr(defbtype))
        self.assertEqual("<BucketType 'foo'>", repr(othertype))
    @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set")
    def test_btype_get_props(self):
        """Properties are readable on custom types but not on 'default'."""
        defbtype = self.client.bucket_type("default")
        btype = self.client.bucket_type("pytest")
        with self.assertRaises(ValueError):
            defbtype.get_properties()
        props = btype.get_properties()
        self.assertIsInstance(props, dict)
        self.assertIn('n_val', props)
        self.assertEqual(3, props['n_val'])
    @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set")
    def test_btype_set_props(self):
        """Properties can be set on custom types; originals are restored."""
        defbtype = self.client.bucket_type("default")
        btype = self.client.bucket_type("pytest")
        with self.assertRaises(ValueError):
            defbtype.set_properties({'allow_mult': True})
        oldprops = btype.get_properties()
        try:
            btype.set_properties({'allow_mult': True})
            newprops = btype.get_properties()
            self.assertIsInstance(newprops, dict)
            self.assertIn('allow_mult', newprops)
            self.assertTrue(newprops['allow_mult'])
            # 'claimant' is reported by the HTTP transport but cannot be
            # written back, so strip it before restoring.
            if 'claimant' in oldprops:  # HTTP hack
                del oldprops['claimant']
        finally:
            btype.set_properties(oldprops)
    @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set")
    def test_btype_set_props_immutable(self):
        """The 'datatype' property of a typed bucket cannot be changed."""
        btype = self.client.bucket_type("pytest-maps")
        with self.assertRaises(RiakError):
            btype.set_property('datatype', 'counter')
    @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set")
    def test_btype_list_buckets(self):
        """get_buckets()/stream_buckets() include a freshly-written bucket."""
        btype = self.client.bucket_type("pytest")
        bucket = btype.bucket(self.bucket_name)
        obj = bucket.new(self.key_name)
        obj.data = [1, 2, 3]
        obj.store()
        self.assertIn(bucket, btype.get_buckets())
        buckets = []
        for nested_buckets in btype.stream_buckets():
            buckets.extend(nested_buckets)
        self.assertIn(bucket, buckets)
    @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set")
    def test_btype_list_keys(self):
        """get_keys()/stream_keys() include a freshly-written key."""
        btype = self.client.bucket_type("pytest")
        bucket = btype.bucket(self.bucket_name)
        obj = bucket.new(self.key_name)
        obj.data = [1, 2, 3]
        obj.store()
        self.assertIn(self.key_name, bucket.get_keys())
        keys = []
        for keylist in bucket.stream_keys():
            keys.extend(keylist)
        self.assertIn(self.key_name, keys)
    @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set")
    def test_default_btype_list_buckets(self):
        """Bucket listing on the 'default' type matches the legacy API."""
        default_btype = self.client.bucket_type("default")
        bucket = default_btype.bucket(self.bucket_name)
        obj = bucket.new(self.key_name)
        obj.data = [1, 2, 3]
        obj.store()
        self.assertIn(bucket, default_btype.get_buckets())
        buckets = []
        for nested_buckets in default_btype.stream_buckets():
            buckets.extend(nested_buckets)
        self.assertIn(bucket, buckets)
        self.assertItemsEqual(buckets, self.client.get_buckets())
    @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set")
    def test_default_btype_list_keys(self):
        """Key listing on the 'default' type matches the legacy API."""
        btype = self.client.bucket_type("default")
        bucket = btype.bucket(self.bucket_name)
        obj = bucket.new(self.key_name)
        obj.data = [1, 2, 3]
        obj.store()
        self.assertIn(self.key_name, bucket.get_keys())
        keys = []
        for keylist in bucket.stream_keys():
            keys.extend(keylist)
        self.assertIn(self.key_name, keys)
        oldapikeys = self.client.get_keys(self.client.bucket(self.bucket_name))
        self.assertItemsEqual(keys, oldapikeys)
    @unittest.skipIf(SKIP_BTYPES == '1', "SKIP_BTYPES is set")
    def test_multiget_bucket_types(self):
        """multiget() returns objects bound to the typed bucket."""
        btype = self.client.bucket_type('pytest')
        bucket = btype.bucket(self.bucket_name)
        for i in range(100):
            obj = bucket.new(self.key_name + str(i))
            obj.data = {'id': i}
            obj.store()
        mget = bucket.multiget([self.key_name + str(i) for i in range(100)])
        for mobj in mget:
            self.assertIsInstance(mobj, RiakObject)
            self.assertEqual(bucket, mobj.bucket)
            self.assertEqual(btype, mobj.bucket.bucket_type)
| 4,758 | 836 | 23 |
097aa9e3afa4b3733fa3df2e955c167e0ad889a7 | 664 | py | Python | replacer.py | keyansheng/krdict-to-anki | 09c8c37b797100855733aaafb4ca4d86eef6d068 | [
"MIT"
] | null | null | null | replacer.py | keyansheng/krdict-to-anki | 09c8c37b797100855733aaafb4ca4d86eef6d068 | [
"MIT"
] | null | null | null | replacer.py | keyansheng/krdict-to-anki | 09c8c37b797100855733aaafb4ca4d86eef6d068 | [
"MIT"
] | null | null | null | import sys
import csv
import scraper
if __name__ == "__main__":
source_filename = sys.argv[1]
destination_filename = sys.argv[2]
word_column = int(sys.argv[3])
definition_column = int(sys.argv[4])
with open(source_filename, "r") as source_file:
with open(destination_filename, "w") as destination_file:
source = csv.reader(source_file, delimiter="\t")
destination = csv.writer(destination_file, delimiter="\t")
for row in source:
row[definition_column] = scraper.generate_definition(row[word_column])
print(row[word_column])
destination.writerow(row)
| 36.888889 | 86 | 0.649096 | import sys
import csv
import scraper
if __name__ == "__main__":
source_filename = sys.argv[1]
destination_filename = sys.argv[2]
word_column = int(sys.argv[3])
definition_column = int(sys.argv[4])
with open(source_filename, "r") as source_file:
with open(destination_filename, "w") as destination_file:
source = csv.reader(source_file, delimiter="\t")
destination = csv.writer(destination_file, delimiter="\t")
for row in source:
row[definition_column] = scraper.generate_definition(row[word_column])
print(row[word_column])
destination.writerow(row)
| 0 | 0 | 0 |
2c75e788b1112b474c9333de664e37bb3addeefc | 530 | py | Python | project/arturo/routes.py | ArturoMorales93/Plataformas_II_Project | 7dd54c8c5159a1eb8c761a3a8e4f4bfb96a078eb | [
"Unlicense"
] | 1 | 2021-01-29T15:16:49.000Z | 2021-01-29T15:16:49.000Z | project/arturo/routes.py | ArturoMorales93/Plataformas_II_Project | 7dd54c8c5159a1eb8c761a3a8e4f4bfb96a078eb | [
"Unlicense"
] | 12 | 2021-02-01T20:31:31.000Z | 2021-04-15T07:34:54.000Z | project/arturo/routes.py | ArturoMorales93/Plataformas_II_Project | 7dd54c8c5159a1eb8c761a3a8e4f4bfb96a078eb | [
"Unlicense"
] | 1 | 2021-03-08T23:34:37.000Z | 2021-03-08T23:34:37.000Z | from flask import render_template
from . import arturo
@arturo.route('/es/machine-learning', methods=['GET'])
@arturo.route('/machine-learning', methods=['GET'])
@arturo.route('/en/machine-learning', methods=['GET']) | 33.125 | 67 | 0.722642 | from flask import render_template
from . import arturo
@arturo.route('/es/machine-learning', methods=['GET'])
@arturo.route('/machine-learning', methods=['GET'])
def es_arturo():
    """Render the Spanish machine-learning page."""
    # Set the page topic/title in this variable
    title = "Machine Learning"
    return render_template('machine-learning.html', title=title)
@arturo.route('/en/machine-learning', methods=['GET'])
def en_arturo():
# Especificar el tema en la variable
title = "Machine Learning"
return render_template('en_machine-learning.html', title=title) | 267 | 0 | 44 |
9f07100cf5d3d44d98a61e18233ef17c73a0b2a5 | 2,263 | py | Python | test/test_torque4.py | DrNeilSmith/cog | 7fc6ee4790ab68f22828dd5550a616ac8a3c3423 | [
"MIT"
] | null | null | null | test/test_torque4.py | DrNeilSmith/cog | 7fc6ee4790ab68f22828dd5550a616ac8a3c3423 | [
"MIT"
] | null | null | null | test/test_torque4.py | DrNeilSmith/cog | 7fc6ee4790ab68f22828dd5550a616ac8a3c3423 | [
"MIT"
] | null | null | null | from cog.torque import Graph
import unittest
import os
import shutil
DIR_NAME = "TorqueTest4"
if __name__ == '__main__':
unittest.main()
| 32.797101 | 97 | 0.601414 | from cog.torque import Graph
import unittest
import os
import shutil
DIR_NAME = "TorqueTest4"
def ordered(obj):
    """Recursively normalize *obj* into an order-independent canonical form.

    Dicts become sorted lists of (key, ordered(value)) pairs and lists are
    sorted element-wise, so two structures that differ only in ordering
    compare equal.  Any other value is returned unchanged.
    """
    if isinstance(obj, dict):
        pairs = [(key, ordered(value)) for key, value in obj.items()]
        return sorted(pairs)
    elif isinstance(obj, list):
        return sorted(ordered(item) for item in obj)
    return obj
class TorqueTest(unittest.TestCase):
    """Integration tests for Torque graph traversals with filter functions.

    Builds a throwaway graph under /tmp/TorqueTest4 from the test_func.nq
    fixture in setUpClass and removes it again in tearDownClass.
    """
    maxDiff = None

    @classmethod
    def setUpClass(cls):
        # Create the scratch directory used by the Cog store.
        if not os.path.exists("/tmp/"+DIR_NAME):
            os.mkdir("/tmp/" + DIR_NAME)
        data_dir = "test/test-data/test_func.nq"
        # choose appropriate path based on where the test is called from.
        if os.path.exists("test-data/test_func.nq"):
            data_dir = "test-data/test_func.nq"
        TorqueTest.g = Graph(graph_name="people", cog_home=DIR_NAME)
        TorqueTest.g.load_triples(data_dir, "people")
        print(">>> test setup complete.\n")

    def test_torque_func_out(self):
        """Filter outgoing 'score' edges, then step back to the sources."""
        expected = {'result': [{'id': 'alice'}, {'id': 'dani'}, {'id': 'greg'}]}
        actual = TorqueTest.g.v().out("score", func=lambda x: int(x) > 5).inc().all()
        self.assertEqual(expected, actual)

    def test_torque_func_out2(self):
        """Filter outgoing 'city' edges by string prefix."""
        expected = {'result': [{'id': 'toronto'}]}
        actual = TorqueTest.g.v().out("city", func=lambda x: x.startswith("to")).all()
        self.assertEqual(expected, actual)

    def test_torque_func(self):
        """Filter the starting vertex set itself."""
        expected = {'result': [{'id': 'vancouver'}]}
        actual = TorqueTest.g.v(func=lambda x: x.startswith("van")).all()
        self.assertEqual(expected, actual)

    def test_torque_func_inc(self):
        """Filter incoming 'city' edges by string prefix."""
        expected = {'result': [{'id': 'dani'}]}
        actual = TorqueTest.g.v().inc("city", func=lambda x: x.startswith("d")).all()
        self.assertEqual(expected, actual)

    def test_torque_func_chain(self):
        """Chain a filtered out() with inc() and a second out().

        NOTE: this method was previously also named ``test_torque_func``,
        which silently shadowed the vertex-filter test above so only one of
        the two ever ran; renamed so both tests execute.
        """
        expected = {'result': [{'id': 'edmonton'}, {'id': 'vancouver'}, {'id': 'montreal'}]}
        actual = TorqueTest.g.v().out("score", func=lambda x: int(x) > 5).inc().out("city").all()
        self.assertEqual(expected, actual)

    @classmethod
    def tearDownClass(cls):
        TorqueTest.g.close()
        shutil.rmtree("/tmp/"+DIR_NAME)
        print("*** deleted test data.")
if __name__ == '__main__':
unittest.main()
| 1,815 | 257 | 46 |
5ebb66836b712be7e32c6ffc4e61dd48294c572c | 32 | py | Python | Classificatioons/ample.py | Hackit-2-0/Team-CodeCrafters | 3536289412555a7f92de12458517bcb073007015 | [
"MIT"
] | 3 | 2020-06-02T01:36:52.000Z | 2020-11-15T07:59:17.000Z | Classificatioons/ample.py | Hackit-2-0/Team-CodeCrafters | 3536289412555a7f92de12458517bcb073007015 | [
"MIT"
] | null | null | null | Classificatioons/ample.py | Hackit-2-0/Team-CodeCrafters | 3536289412555a7f92de12458517bcb073007015 | [
"MIT"
] | 3 | 2020-04-21T12:10:39.000Z | 2020-10-30T19:35:16.000Z | import sys
print(sys.argv[0])
| 6.4 | 18 | 0.6875 | import sys
print(sys.argv[0])
| 0 | 0 | 0 |
503afa019799ba0dbad8d1907b611a477215959b | 1,332 | py | Python | plugins/csv_plugin.py | gr33ndata/rivellino | 1c77d60bd527db6cc55c7844695d3ba7e1212f2d | [
"MIT"
] | null | null | null | plugins/csv_plugin.py | gr33ndata/rivellino | 1c77d60bd527db6cc55c7844695d3ba7e1212f2d | [
"MIT"
] | null | null | null | plugins/csv_plugin.py | gr33ndata/rivellino | 1c77d60bd527db6cc55c7844695d3ba7e1212f2d | [
"MIT"
] | null | null | null | from plugins import BasePlugin
from plugins import PluginsData
from etllib.conf import Conf
from etllib.csv import CSV
import os
| 28.340426 | 66 | 0.532282 | from plugins import BasePlugin
from plugins import PluginsData
from etllib.conf import Conf
from etllib.csv import CSV
import os
class CSVPlugin(BasePlugin):
    """ETL plugin that reads rows from a CSV file (ingress) or writes
    PluginsData rows out to a CSV file (egress)."""
    def field_names(self):
        # Not implemented for this plugin.
        pass
    def file_path(self, rule=None, position='in'):
        """Build the absolute path of the CSV file referenced by *rule*.

        The path is resolved relative to this module's directory using
        rule['source_node']['path'] plus rule['action'] as the filename.
        NOTE(review): when position != 'in' the 'else' branch leaves `path`
        unbound, so os.path.join below would raise NameError — confirm
        whether an 'out' position is ever used.
        """
        this_path = os.path.dirname(os.path.realpath(__file__))
        if position == 'in':
            path = '/'.join([
                this_path,
                '..',
                rule['source_node']['path']
            ])
        else:
            pass
        filename = rule['action']
        return os.path.join(path, filename)
    def run(self, rule, data=None):
        """Execute the plugin for *rule*.

        With *data* supplied the plugin acts as an egress: it serializes
        data.fields / data.values into comma-separated lines and writes them
        to rule['destination_table'].  Without *data* it acts as an ingress:
        it reads the rule's CSV file and returns a PluginsData wrapper
        (egress mode returns None).
        """
        if data:
            # Used as Egress
            lines = []
            header = ', '.join([str(i) for i in data.fields])
            lines.append('{}\n'.format(header))
            for record in data.values:
                line = ', '.join([str(i) for i in record])
                lines.append('{}\n'.format(line))
            CSV(filepath=rule['destination_table']).write(lines)
        else:
            # Used as Ingress
            csv_file = self.file_path(rule=rule, position='in')
            data = CSV(filepath=csv_file).read()
            ret_data = PluginsData(data['fields'], data['values'])
            return ret_data
def init(rule):
    """Plugin entry point: construct a CSVPlugin bound to *rule*."""
    return CSVPlugin(rule)
| 1,059 | 7 | 127 |
c2f9870929753ad3a9c8c17472608b32468ad423 | 1,115 | py | Python | books/urls.py | adilmohak/django_book_sharing | 6d47cb131524dc761becb7d432b7cc75064c4f58 | [
"MIT"
] | 13 | 2021-03-26T05:39:58.000Z | 2021-10-13T22:03:46.000Z | books/urls.py | adilmohak/django_book_sharing | 6d47cb131524dc761becb7d432b7cc75064c4f58 | [
"MIT"
] | 1 | 2021-03-26T05:42:47.000Z | 2021-04-24T17:33:26.000Z | books/urls.py | adilmohak/django_book_sharing | 6d47cb131524dc761becb7d432b7cc75064c4f58 | [
"MIT"
] | 2 | 2021-03-26T05:54:59.000Z | 2021-03-26T09:03:46.000Z | from django.urls import path
from django.conf.urls import url
from django.views import generic
from .views import (
BookListView, BookDetailView, BookUpdateView,
delete_book, user_booklist, user_booklist_update, BookCreateView, ReviewCreateView, review_update_view
)
# Namespace used when reversing these routes (e.g. 'books:detail').
app_name = 'books'
# URL routes for the books app; slugged routes operate on a single book.
urlpatterns = [
    url(r'^$', BookListView.as_view(), name='books'),
    url(r'^create/$', BookCreateView.as_view(), name='create'),
    url(r'^review-create/$', ReviewCreateView.as_view(), name='review_create'),
    url(r'^(?P<slug>[\w-]+)/review-update/$', review_update_view, name='review_update'),
    url(r'^recommendations/$', generic.TemplateView.as_view(template_name='books/recommendation.html'), name='recommendation'),
    url(r'^(?P<slug>[\w-]+)/detail/$', BookDetailView.as_view(), name='detail'),
    url(r'^(?P<slug>[\w-]+)/update/$', BookUpdateView.as_view(), name='update'),
    url(r'^(?P<slug>[\w-]+)/delete/$', delete_book, name='delete'),
    url(r'^user-booklist/$', user_booklist, name='user_booklist'),
    url(r'^user/booklist/update/$', user_booklist_update, name='user_booklist_update'),
]
| 48.478261 | 127 | 0.697758 | from django.urls import path
from django.conf.urls import url
from django.views import generic
from .views import (
BookListView, BookDetailView, BookUpdateView,
delete_book, user_booklist, user_booklist_update, BookCreateView, ReviewCreateView, review_update_view
)
app_name = 'books'
urlpatterns = [
url(r'^$', BookListView.as_view(), name='books'),
url(r'^create/$', BookCreateView.as_view(), name='create'),
url(r'^review-create/$', ReviewCreateView.as_view(), name='review_create'),
url(r'^(?P<slug>[\w-]+)/review-update/$', review_update_view, name='review_update'),
url(r'^recommendations/$', generic.TemplateView.as_view(template_name='books/recommendation.html'), name='recommendation'),
url(r'^(?P<slug>[\w-]+)/detail/$', BookDetailView.as_view(), name='detail'),
url(r'^(?P<slug>[\w-]+)/update/$', BookUpdateView.as_view(), name='update'),
url(r'^(?P<slug>[\w-]+)/delete/$', delete_book, name='delete'),
url(r'^user-booklist/$', user_booklist, name='user_booklist'),
url(r'^user/booklist/update/$', user_booklist_update, name='user_booklist_update'),
]
| 0 | 0 | 0 |
57894e6230ba29f9646ceb6c2dba47efcded853c | 6,152 | py | Python | covsirphy/cleaning/word.py | skelwadkar/COVID-19_project | 61e315e6d1de872f4b6fec27432ae202bbc6f69b | [
"Apache-2.0"
] | null | null | null | covsirphy/cleaning/word.py | skelwadkar/COVID-19_project | 61e315e6d1de872f4b6fec27432ae202bbc6f69b | [
"Apache-2.0"
] | null | null | null | covsirphy/cleaning/word.py | skelwadkar/COVID-19_project | 61e315e6d1de872f4b6fec27432ae202bbc6f69b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
from datetime import datetime
import numpy as np
import pandas as pd
class Word(object):
    """
    Shared vocabulary: column-name/label constants and common validation
    helpers used throughout the package.
    """
    # Variables of SIR-like model
    N = "Population"
    S = "Susceptible"
    C = "Confirmed"
    CI = "Infected"
    F = "Fatal"
    R = "Recovered"
    FR = "Fatal or Recovered"
    V = "Vaccinated"
    E = "Exposed"
    W = "Waiting"
    # Column names
    DATE = "Date"
    START = "Start"
    END = "End"
    T = "Elapsed"
    TS = "t"
    TAU = "tau"
    COUNTRY = "Country"
    ISO3 = "ISO3"
    PROVINCE = "Province"
    STR_COLUMNS = [DATE, COUNTRY, PROVINCE]
    COLUMNS = [*STR_COLUMNS, C, CI, F, R]
    NLOC_COLUMNS = [DATE, C, CI, F, R]
    VALUE_COLUMNS = [C, CI, F, R]
    FIG_COLUMNS = [CI, F, R, FR, V, E, W]
    # Date format: 22Jan2020 etc.
    DATE_FORMAT = "%d%b%Y"
    # Separator of country and province
    SEP = "/"
    # EDA
    RATE_COLUMNS = [
        "Fatal per Confirmed",
        "Recovered per Confirmed",
        "Fatal per (Fatal or Recovered)"
    ]
    # Optimization
    A = "_actual"
    P = "_predicted"
    # Phase name: ordinal suffixes, e.g. 1 -> "st", default "th"
    SUFFIX_DICT = defaultdict(lambda: "th")
    SUFFIX_DICT.update({1: "st", 2: "nd", 3: "rd"})
    TENSE = "Type"
    PAST = "Past"
    FUTURE = "Future"
    INITIAL = "Initial"
    ODE = "ODE"
    RT = "Rt"
    # Scenario analysis
    PHASE = "Phase"
    SERIES = "Scenario"
    MAIN = "Main"
    # Flag
    UNKNOWN = "-"
    @classmethod
    def num2str(cls, num):
        """
        Convert numbers to 1st, 2nd etc.
        @num <int>: number
        @return <str>
        """
        if not isinstance(num, int):
            raise TypeError("@num must be an integer.")
        q, mod = divmod(num, 10)
        # 11-13 take "th" (11th, 12th, 13th), hence the q == 1 check
        suffix = "th" if q == 1 else cls.SUFFIX_DICT[mod]
        return f"{num}{suffix}"
    @staticmethod
    def negative_exp(x, a, b):
        """
        Negative exponential function f(x)=A exp(-Bx).
        @x <float>: x values
        parameters of the function
            - a <float>
            - b <float>
        """
        return a * np.exp(-b * x)
    @classmethod
    def date_obj(cls, date_str):
        """
        Convert a string to a datetime object.
        @date_str <str>: date, like 22Jan2020
        @return <datetime.datetime>
        """
        obj = datetime.strptime(date_str, cls.DATE_FORMAT)
        return obj
    @staticmethod
    def flatten(nested_list, unique=True):
        """
        Flatten the nested list.
        @nested_list <list[list[object]]>: nested list
        @unique <bool>: if True, only unique values will remain
        @return <list[object]>
        """
        flattened = sum(nested_list, list())
        if unique:
            return list(set(flattened))
        return flattened
    @staticmethod
    def validate_dataframe(target, name="df", time_index=False, columns=None):
        """
        Validate the dataframe has the columns.
        @target <pd.DataFrame>: the dataframe to validate
        @name <str>: argument name of the dataframe
        @time_index <bool>: if True, the dataframe must has DatetimeIndex
        @columns <list[str]/None>: the columns the dataframe must have
        @return df <pd.DataFrame>: copy of the target
        """
        df = target.copy()
        if not isinstance(df, pd.DataFrame):
            raise TypeError(f"@{name} must be a instance of <pd.DataFrame>.")
        if time_index and (not isinstance(df.index, pd.DatetimeIndex)):
            raise TypeError(f"Index of @{name} must be <pd.DatetimeIndex>.")
        if columns is None:
            return df
        if not set(columns).issubset(set(df.columns)):
            cols_str = ', '.join(
                [col for col in columns if col not in df.columns]
            )
            raise KeyError(f"@{name} must have {cols_str}, but not included.")
        return df
    @staticmethod
    def validate_natural_int(target, name="number"):
        """
        Validate the natural number (positive integer, i.e. >= 1).
        If the value is natural number and the type was float,
        will be converted to an integer.
        @target <int/float/str>: value to validate
        @name <str>: argument name of the value
        @return <int>: as-is the target
        """
        s = f"@{name} must be a natural number, but {target} was applied"
        try:
            number = int(target)
        except TypeError:
            raise TypeError(f"{s} and not converted to integer.")
        if number != target:
            raise ValueError(f"{s}. |{target} - {number}| > 0")
        if number < 1:
            raise ValueError(f"{s}. This value is under 1")
        return number
    @staticmethod
    def validate_subclass(target, parent, name="target"):
        """
        Validate the target is a subclass of the parent class.
        @target <object>: target to validate
        @parent <object>: parent class
        @name <str>: argument name of the target
        @return <class>: as-is the target
        """
        s = f"@{name} must be an sub class of {type(parent)}, but {type(target)} was applied."
        if not issubclass(target, parent):
            raise TypeError(s)
        return target
    @staticmethod
    def validate_instance(target, class_obj, name="target"):
        """
        Validate the target is a instance of the class object.
        @target <instance>: target to validate
        @class_obj <class>: class object
        @name <str>: argument name of the target
        @return <instance>: as-is target
        """
        s = f"@{name} must be an instance of {type(class_obj)}, but {type(target)} was applied."
        if not isinstance(target, class_obj):
            raise TypeError(s)
        return target
    @classmethod
    def divisors(cls, value):
        """
        Return the list of divisors of the value.
        @value <int>: target value
        @return <list[int]>: the list of divisors
        """
        value = cls.validate_natural_int(value)
        divisors = [
            i for i in range(1, value + 1) if value % i == 0
        ]
        return divisors
| 30.606965 | 96 | 0.563882 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
from datetime import datetime
import numpy as np
import pandas as pd
class Word(object):
"""
Word definition.
"""
# Variables of SIR-like model
N = "Population"
S = "Susceptible"
C = "Confirmed"
CI = "Infected"
F = "Fatal"
R = "Recovered"
FR = "Fatal or Recovered"
V = "Vaccinated"
E = "Exposed"
W = "Waiting"
# Column names
DATE = "Date"
START = "Start"
END = "End"
T = "Elapsed"
TS = "t"
TAU = "tau"
COUNTRY = "Country"
ISO3 = "ISO3"
PROVINCE = "Province"
STR_COLUMNS = [DATE, COUNTRY, PROVINCE]
COLUMNS = [*STR_COLUMNS, C, CI, F, R]
NLOC_COLUMNS = [DATE, C, CI, F, R]
VALUE_COLUMNS = [C, CI, F, R]
FIG_COLUMNS = [CI, F, R, FR, V, E, W]
# Date format: 22Jan2020 etc.
DATE_FORMAT = "%d%b%Y"
# Separator of country and province
SEP = "/"
# EDA
RATE_COLUMNS = [
"Fatal per Confirmed",
"Recovered per Confirmed",
"Fatal per (Fatal or Recovered)"
]
# Optimization
A = "_actual"
P = "_predicted"
# Phase name
SUFFIX_DICT = defaultdict(lambda: "th")
SUFFIX_DICT.update({1: "st", 2: "nd", 3: "rd"})
TENSE = "Type"
PAST = "Past"
FUTURE = "Future"
INITIAL = "Initial"
ODE = "ODE"
RT = "Rt"
# Scenario analysis
PHASE = "Phase"
SERIES = "Scenario"
MAIN = "Main"
# Flag
UNKNOWN = "-"
@classmethod
def num2str(cls, num):
"""
Convert numbers to 1st, 2nd etc.
@num <int>: number
@return <str>
"""
if not isinstance(num, int):
raise TypeError("@num must be an integer.")
q, mod = divmod(num, 10)
suffix = "th" if q == 1 else cls.SUFFIX_DICT[mod]
return f"{num}{suffix}"
@staticmethod
def negative_exp(x, a, b):
"""
Negative exponential function f(x)=A exp(-Bx).
@x <float>: x values
parameters of the function
- a <float>
- b <float>
"""
return a * np.exp(-b * x)
@classmethod
def date_obj(cls, date_str):
"""
Convert a string to a datetime object.
@date_str <str>: date, like 22Jan2020
@return <datetime.datetime>
"""
obj = datetime.strptime(date_str, cls.DATE_FORMAT)
return obj
@staticmethod
def flatten(nested_list, unique=True):
"""
Flatten the nested list.
@nested_list <list[list[object]]>: nested list
@unique <bool>: if True, only unique values will remain
@return <list[object]>
"""
flattened = sum(nested_list, list())
if unique:
return list(set(flattened))
return flattened
@staticmethod
def validate_dataframe(target, name="df", time_index=False, columns=None):
"""
Validate the dataframe has the columns.
@target <pd.DataFrame>: the dataframe to validate
@name <str>: argument name of the dataframe
@time_index <bool>: if True, the dataframe must has DatetimeIndex
@columns <list[str]/None>: the columns the dataframe must have
@df <pd.DataFrame>: as-is the target
"""
df = target.copy()
if not isinstance(df, pd.DataFrame):
raise TypeError(f"@{name} must be a instance of <pd.DataFrame>.")
if time_index and (not isinstance(df.index, pd.DatetimeIndex)):
raise TypeError(f"Index of @{name} must be <pd.DatetimeIndex>.")
if columns is None:
return df
if not set(columns).issubset(set(df.columns)):
cols_str = ', '.join(
[col for col in columns if col not in df.columns]
)
raise KeyError(f"@{name} must have {cols_str}, but not included.")
return df
@staticmethod
def validate_natural_int(target, name="number"):
"""
Validate the natural (non-negative) number.
If the value is natural number and the type was float,
will be converted to an integer.
@target <int/float/str>: value to validate
@name <str>: argument name of the value
@return <int>: as-is the target
"""
s = f"@{name} must be a natural number, but {target} was applied"
try:
number = int(target)
except TypeError:
raise TypeError(f"{s} and not converted to integer.")
if number != target:
raise ValueError(f"{s}. |{target} - {number}| > 0")
if number < 1:
raise ValueError(f"{s}. This value is under 1")
return number
@staticmethod
def validate_subclass(target, parent, name="target"):
"""
Validate the target is a subclass of the parent class.
@target <object>: target to validate
@parent <object>: parent class
@name <str>: argument name of the target
@return <int>: as-is the target
"""
s = f"@{name} must be an sub class of {type(parent)}, but {type(target)} was applied."
if not issubclass(target, parent):
raise TypeError(s)
return target
@staticmethod
def validate_instance(target, class_obj, name="target"):
"""
Validate the target is a instance of the class object.
@target <instance>: target to validate
@parent <class>: class object
@name <str>: argument name of the target
@return <instance>: as-is target
"""
s = f"@{name} must be an instance of {type(class_obj)}, but {type(target)} was applied."
if not isinstance(target, class_obj):
raise TypeError(s)
return target
@classmethod
def divisors(cls, value):
"""
Return the list of divisors of the value.
@value <int>: target value
@return <list[int]>: the list of divisors
"""
value = cls.validate_natural_int(value)
divisors = [
i for i in range(1, value + 1) if value % i == 0
]
return divisors
| 0 | 0 | 0 |
2b1e8a46967b18fc28159c92adb3c0e3a5e58d2d | 10,171 | py | Python | implementation/MSG_GAN/GAN.py | phuocnguyen2008/T2F_MSG_GAN | 16088d17c9a44de0b60563f16abf42320ffd554c | [
"MIT"
] | null | null | null | implementation/MSG_GAN/GAN.py | phuocnguyen2008/T2F_MSG_GAN | 16088d17c9a44de0b60563f16abf42320ffd554c | [
"MIT"
] | 5 | 2021-06-08T22:52:25.000Z | 2022-02-10T03:08:40.000Z | implementation/MSG_GAN/GAN.py | phuocnguyen2008/T2F_MSG_GAN | 16088d17c9a44de0b60563f16abf42320ffd554c | [
"MIT"
] | null | null | null | import datetime
import os
import time
import timeit
import numpy as np
import torch as th
class Generator(th.nn.Module):
    """ Generator of the GAN network.

    Multi-scale generator: each block in ``self.layers`` refines the feature
    map and a matching converter in ``self.rgb_converters`` projects it to an
    RGB image, so forward() yields one output per resolution.
    ``self.spectral_norm_mode`` is tri-state: None (never toggled), True
    (applied) or False (removed).
    """
    def turn_on_spectral_norm(self):
        """
        private helper for turning on the spectral normalization
        :return: None (has side effect)
        """
        from torch.nn.utils import spectral_norm
        # Guard: only assert once the mode has been explicitly toggled before.
        if self.spectral_norm_mode is not None:
            assert self.spectral_norm_mode is False, \
                "can't apply spectral_norm. It is already applied"
        # apply the same to the remaining relevant blocks
        # (each block exposes conv_1 and conv_2 sub-modules)
        for module in self.layers:
            module.conv_1 = spectral_norm(module.conv_1)
            module.conv_2 = spectral_norm(module.conv_2)
        # toggle the state variable:
        self.spectral_norm_mode = True
    def turn_off_spectral_norm(self):
        """
        private helper for turning off the spectral normalization
        :return: None (has side effect)
        """
        from torch.nn.utils import remove_spectral_norm
        if self.spectral_norm_mode is not None:
            assert self.spectral_norm_mode is True, \
                "can't remove spectral_norm. It is not applied"
        # remove the applied spectral norm
        for module in self.layers:
            remove_spectral_norm(module.conv_1)
            remove_spectral_norm(module.conv_2)
        # toggle the state variable:
        self.spectral_norm_mode = False
    def forward(self, x):
        """
        forward pass of the Generator
        :param x: input noise
        :return: *y => list of generator outputs, one per scale (tanh-bounded)
        """
        from torch import tanh
        outputs = []  # initialize to empty list
        y = x  # start the computational pipeline
        for block, converter in zip(self.layers, self.rgb_converters):
            y = block(y)
            outputs.append(tanh(converter(y)))
        return outputs
class Discriminator(th.nn.Module):
    """ Discriminator of the GAN.

    ``self.spectral_norm_mode`` is tri-state: None (never toggled), True
    (applied) or False (removed); the toggle helpers mirror the Generator's.
    """
    def turn_on_spectral_norm(self):
        """
        private helper for turning on the spectral normalization
        :return: None (has side effect)
        """
        from torch.nn.utils import spectral_norm
        # Guard: only assert once the mode has been explicitly toggled before.
        if self.spectral_norm_mode is not None:
            assert self.spectral_norm_mode is False, \
                "can't apply spectral_norm. It is already applied"
        # apply the same to the remaining relevant blocks
        # (each block exposes conv_1 and conv_2 sub-modules)
        for module in self.layers:
            module.conv_1 = spectral_norm(module.conv_1)
            module.conv_2 = spectral_norm(module.conv_2)
        # toggle the state variable:
        self.spectral_norm_mode = True
    def turn_off_spectral_norm(self):
        """
        private helper for turning off the spectral normalization
        :return: None (has side effect)
        """
        from torch.nn.utils import remove_spectral_norm
        if self.spectral_norm_mode is not None:
            assert self.spectral_norm_mode is True, \
                "can't remove spectral_norm. It is not applied"
        # remove the applied spectral norm
        for module in self.layers:
            remove_spectral_norm(module.conv_1)
            remove_spectral_norm(module.conv_2)
        # toggle the state variable:
        self.spectral_norm_mode = False
| 35.315972 | 93 | 0.599351 | import datetime
import os
import time
import timeit
import numpy as np
import torch as th
class Generator(th.nn.Module):
""" Generator of the GAN network """
def __init__(self, depth=7, latent_size=512, dilation=1, use_spectral_norm=True):
from torch.nn import ModuleList, Conv2d
from MSG_GAN.CustomLayers import GenGeneralConvBlock, GenInitialBlock
super().__init__()
assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \
"latent size not a power of 2"
if depth >= 4:
assert latent_size >= np.power(2, depth - 4), "latent size will diminish to zero"
# state of the generator:
self.depth = depth
self.latent_size = latent_size
self.spectral_norm_mode = None
self.dilation = dilation
# register the modules required for the GAN Below ...
# create the ToRGB layers for various outputs:
def to_rgb(in_channels):
return Conv2d(in_channels, 3, (1, 1), bias=True)
# create a module list of the other required general convolution blocks
self.layers = ModuleList([GenInitialBlock(self.latent_size)])
self.rgb_converters = ModuleList([to_rgb(self.latent_size)])
# create the remaining layers
for i in range(self.depth - 1):
if i <= 2:
layer = GenGeneralConvBlock(self.latent_size, self.latent_size,
dilation=dilation)
rgb = to_rgb(self.latent_size)
else:
layer = GenGeneralConvBlock(
int(self.latent_size // np.power(2, i - 3)),
int(self.latent_size // np.power(2, i - 2)),
dilation=dilation
)
rgb = to_rgb(int(self.latent_size // np.power(2, i - 2)))
self.layers.append(layer)
self.rgb_converters.append(rgb)
# if spectral normalization is on:
if use_spectral_norm:
self.turn_on_spectral_norm()
def turn_on_spectral_norm(self):
"""
private helper for turning on the spectral normalization
:return: None (has side effect)
"""
from torch.nn.utils import spectral_norm
if self.spectral_norm_mode is not None:
assert self.spectral_norm_mode is False, \
"can't apply spectral_norm. It is already applied"
# apply the same to the remaining relevant blocks
for module in self.layers:
module.conv_1 = spectral_norm(module.conv_1)
module.conv_2 = spectral_norm(module.conv_2)
# toggle the state variable:
self.spectral_norm_mode = True
def turn_off_spectral_norm(self):
"""
private helper for turning off the spectral normalization
:return: None (has side effect)
"""
from torch.nn.utils import remove_spectral_norm
if self.spectral_norm_mode is not None:
assert self.spectral_norm_mode is True, \
"can't remove spectral_norm. It is not applied"
# remove the applied spectral norm
for module in self.layers:
remove_spectral_norm(module.conv_1)
remove_spectral_norm(module.conv_2)
# toggle the state variable:
self.spectral_norm_mode = False
def forward(self, x):
"""
forward pass of the Generator
:param x: input noise
:return: *y => output of the generator at various scales
"""
from torch import tanh
outputs = [] # initialize to empty list
y = x # start the computational pipeline
for block, converter in zip(self.layers, self.rgb_converters):
y = block(y)
outputs.append(tanh(converter(y)))
return outputs
class Discriminator(th.nn.Module):
    """ Discriminator of the GAN

    Multi-scale discriminator: consumes one RGB image per scale and fuses
    each scale into the running feature stream via a 1x1 "fromRGB" conv.
    """
    def __init__(self, depth=7, feature_size=512, dilation=1, use_spectral_norm=True):
        """
        constructor of the Discriminator

        :param depth: number of scales expected by forward() (one fromRGB
                      converter and one conv block per scale)
        :param feature_size: channel width at the deepest block; must be a
                             power of 2
        :param dilation: dilation forwarded to the convolution blocks
        :param use_spectral_norm: if True, wrap conv weights in spectral norm
        """
        from torch.nn import ModuleList
        from MSG_GAN.CustomLayers import DisGeneralConvBlock, DisFinalBlock
        from torch.nn import Conv2d
        super().__init__()
        # bit trick: x & (x - 1) == 0 iff x is a power of two
        assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \
            "latent size not a power of 2"
        if depth >= 4:
            # the shallowest blocks divide feature_size by up to 2 ** (depth - 4)
            assert feature_size >= np.power(2, depth - 4), \
                "feature size cannot be produced"
        # create state of the object
        self.depth = depth
        self.feature_size = feature_size
        self.spectral_norm_mode = None  # None means "never toggled yet"
        self.dilation = dilation
        # create the fromRGB layers for various inputs:
        def from_rgb(out_channels):
            # 1x1 conv lifting a 3-channel RGB image into feature space
            return Conv2d(3, out_channels, (1, 1), bias=True)
        self.rgb_to_features = ModuleList([from_rgb(self.feature_size // 2)])
        # create a module list of the other required general convolution blocks
        self.layers = ModuleList([DisFinalBlock(self.feature_size)])
        # create the remaining layers
        for i in range(self.depth - 1):
            if i > 2:
                layer = DisGeneralConvBlock(
                    int(self.feature_size // np.power(2, i - 2)),
                    int(self.feature_size // np.power(2, i - 2)),
                    dilation=dilation
                )
                rgb = from_rgb(int(self.feature_size // np.power(2, i - 1)))
            else:
                layer = DisGeneralConvBlock(self.feature_size, self.feature_size // 2,
                                            dilation=dilation)
                rgb = from_rgb(self.feature_size // 2)
            self.layers.append(layer)
            self.rgb_to_features.append(rgb)
        # just replace the last converter
        # NOTE(review): this reads the loop variable ``i`` after the loop, so
        # it assumes depth >= 2 (with depth == 1, ``i`` is unbound here).
        self.rgb_to_features[self.depth - 1] = \
            from_rgb(self.feature_size // np.power(2, i - 2))
        # if spectral normalization is on:
        if use_spectral_norm:
            self.turn_on_spectral_norm()
    def turn_on_spectral_norm(self):
        """
        private helper for turning on the spectral normalization
        :return: None (has side effect)
        """
        from torch.nn.utils import spectral_norm
        if self.spectral_norm_mode is not None:
            assert self.spectral_norm_mode is False, \
                "can't apply spectral_norm. It is already applied"
        # apply the same to the remaining relevant blocks
        # NOTE(review): assumes every block exposes conv_1 and conv_2 --
        # confirm this holds for DisFinalBlock as well.
        for module in self.layers:
            module.conv_1 = spectral_norm(module.conv_1)
            module.conv_2 = spectral_norm(module.conv_2)
        # toggle the state variable:
        self.spectral_norm_mode = True
    def turn_off_spectral_norm(self):
        """
        private helper for turning off the spectral normalization
        :return: None (has side effect)
        """
        from torch.nn.utils import remove_spectral_norm
        if self.spectral_norm_mode is not None:
            assert self.spectral_norm_mode is True, \
                "can't remove spectral_norm. It is not applied"
        # remove the applied spectral norm
        for module in self.layers:
            remove_spectral_norm(module.conv_1)
            remove_spectral_norm(module.conv_2)
        # toggle the state variable:
        self.spectral_norm_mode = False
    def forward(self, inputs):
        """
        forward pass of the Discriminator

        :param inputs: list of images, one per scale; length must equal depth.
                       Per the inline shape notes, inputs[-1] appears to be the
                       largest resolution -- TODO confirm ordering at callers.
        :return: output of the deepest block for the fused feature stream
        """
        from torch.nn import AvgPool2d, LeakyReLU  # only used by the commented-out blend below
        assert len(inputs) == self.depth, \
            "Mismatch between input and Network scales"
        y = self.rgb_to_features[self.depth - 1](inputs[self.depth - 1]) #32x256x256
        y = self.layers[self.depth - 1](y)
        #y = th.cat((inputs[self.depth - 1], y), dim=1)
        #y = self.layers[self.depth - 1](y) #alpha 64x128x128
        #y_ = AvgPool2d(2)(inputs[self.depth - 1]) #3x128x128
        #y_ = self.rgb_to_features[self.depth - 2](y_) #64x128x128
        #y_ = LeakyReLU(0.2)(y_)
        #y = y * 0.95 + y_ * 0.05
        # walk the remaining scales in reverse, fusing each converted input
        # into the feature stream before applying the next block
        for x, block, converter in \
                zip(reversed(inputs[:-1]),
                    reversed(self.layers[:-1]),
                    reversed(self.rgb_to_features[:-1])):
            input_part = converter(x)  # convert the input:
            y = th.cat((input_part, y), dim=1)  # concatenate the inputs:
            y = block(y)  # apply the block
        return y
class MSG_GAN:
def __init__(self, depth=7, latent_size=512, gen_dilation=1,
dis_dilation=1, use_spectral_norm=True, device=th.device("cpu")):
""" constructor for the class """
from torch.nn import DataParallel
self.gen = Generator(depth, latent_size, dilation=gen_dilation,
use_spectral_norm=use_spectral_norm).to(device)
self.dis = Discriminator(depth, latent_size, dilation=dis_dilation,
use_spectral_norm=use_spectral_norm).to(device)
# Create the Generator and the Discriminator
if device == th.device("cuda"):
self.gen = DataParallel(self.gen)
self.dis = DataParallel(self.dis)
# state of the object
self.latent_size = latent_size
self.depth = depth
self.device = device
# by default the generator and discriminator are in eval mode
self.gen.eval()
self.dis.eval()
def optimize_discriminator(self, dis_optim, noise, real_batch, loss_fn):
# generate a batch of samples
fake_samples = self.gen(noise)
fake_samples = list(map(lambda x: x.detach(), fake_samples))
loss = loss_fn.dis_loss(real_batch, fake_samples)
# optimize discriminator
dis_optim.zero_grad()
loss.backward(retain_graph=True)
dis_optim.step()
return loss.item()
def optimize_generator(self, gen_optim, noise, real_batch, loss_fn):
# generate a batch of samples
fake_samples = self.gen(noise)
loss = loss_fn.gen_loss(real_batch, fake_samples)
# optimize discriminator
gen_optim.zero_grad()
loss.backward(retain_graph=True)
gen_optim.step()
return loss.item() | 5,695 | 1,018 | 104 |
a590d346a433b2746f8e174030cd02a2220f6fe0 | 10,771 | py | Python | gsj_2020/figure_2_bulks_yeast.py | asistradition/inferelator_run_scripts | 5f122e8f4ff565f8ccf1b3224bc1408839969097 | [
"MIT"
] | 1 | 2020-04-20T14:53:10.000Z | 2020-04-20T14:53:10.000Z | gsj_2020/figure_2_bulks_yeast.py | asistradition/inferelator_run_scripts | 5f122e8f4ff565f8ccf1b3224bc1408839969097 | [
"MIT"
] | null | null | null | gsj_2020/figure_2_bulks_yeast.py | asistradition/inferelator_run_scripts | 5f122e8f4ff565f8ccf1b3224bc1408839969097 | [
"MIT"
] | null | null | null | # Load modules
from inferelator import inferelator_workflow, inferelator_verbose_level, MPControl, crossvalidation_workflow
from inferelator.benchmarking.scenic import SCENICWorkflow, SCENICRegression
from inferelator.distributed.inferelator_mp import MPControl
# Set verbosity level to "Talky"
inferelator_verbose_level(1)
# Set the location of the input data and the desired location of the output files
DATA_DIR = '~/repos/inferelator/data/yeast'
OUTPUT_DIR = '/scratch/cj59/yeast_inference'
PRIORS_FILE_NAME = 'YEASTRACT_20190713_BOTH.tsv'
GOLD_STANDARD_FILE_NAME = 'gold_standard.tsv.gz'
TF_LIST_FILE_NAME = 'tf_names.tsv'
# Multiprocessing needs to be protected with the if __name__ == 'main' pragma
if __name__ == '__main__':
    # Spin up the dask-cluster engine with the "greene" site profile (2 jobs)
    # and activate the "scenic" conda env on every worker before connecting.
    MPControl.set_multiprocess_engine("dask-cluster")
    MPControl.client.use_default_configuration("greene", n_jobs=2)
    MPControl.client.add_worker_conda("source /scratch/cgsb/gresham/no_backup/Chris/.conda/bin/activate scenic")
    MPControl.connect()
    # Define the general run parameters
# Data Set 1
# NOTE(review): set_up_workflow and set_up_cv_seeds are called throughout this
# block but are not defined anywhere in this file as shown -- they appear to
# have been dropped and must be restored before this script can run.
if __name__ == '__main__':
    # Create a worker
    # GRNBoost2 (via the SCENIC benchmarking workflow) on the raw Calico data
    worker = inferelator_workflow(regression=SCENICRegression, workflow=SCENICWorkflow)
    worker = set_up_workflow(worker)
    worker.set_expression_file(tsv="calico_expression_matrix_raw_microarray.tsv.gz")
    worker.set_file_properties(extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism', 'time'],
                               metadata_handler="nonbranching")
    worker.adjacency_method = "grnboost2"
    worker.set_output_file_names(curve_data_file_name="metric_curve.tsv.gz")
    worker._do_preprocessing = False
    worker.do_scenic = False
    worker.append_to_path("output_dir", "set1_raw_grnboost")
    worker.run()
    # BBSR
    # Single-task BBSR with a 10-seed CV sweep and a held-out gold standard
    worker = inferelator_workflow(regression="bbsr", workflow="tfa")
    worker = set_up_workflow(worker)
    worker.set_expression_file(tsv="calico_expression_matrix_raw_microarray.tsv.gz")
    worker.set_file_properties(extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism', 'time'],
                               metadata_handler="nonbranching")
    worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True,
                                          cv_split_ratio=0.2)
    worker.set_run_parameters(num_bootstraps=5)
    worker.append_to_path("output_dir", "set1_raw_bbsr")
    worker.set_output_file_names(curve_data_file_name="metric_curve.tsv.gz")
    cv_wrap = set_up_cv_seeds(worker)
    cv_wrap.run()
    del cv_wrap
    del worker
    # STARS-LASSO
    worker = inferelator_workflow(regression="stars", workflow="tfa")
    worker = set_up_workflow(worker)
    worker.set_expression_file(tsv="calico_expression_matrix_raw_microarray.tsv.gz")
    worker.set_file_properties(extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism', 'time'],
                               metadata_handler="nonbranching")
    worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True,
                                          cv_split_ratio=0.2)
    worker.set_run_parameters(num_bootstraps=5)
    worker.append_to_path("output_dir", "set1_raw_stars")
    worker.set_output_file_names(curve_data_file_name="metric_curve.tsv.gz")
    cv_wrap = set_up_cv_seeds(worker)
    cv_wrap.run()
    del cv_wrap
    del worker
    # BBSR-BY-TASK
    # Multitask runs: Calico and Kostya data sets as two separate tasks
    worker = inferelator_workflow(regression="bbsr", workflow="multitask")
    worker = set_up_workflow(worker)
    # Calico data task
    task1 = worker.create_task(task_name="Calico_2019",
                               expression_matrix_file="calico_expression_matrix_raw.tsv.gz",
                               expression_matrix_columns_are_genes=True,
                               extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism',
                                                           'time'],
                               workflow_type="tfa",
                               metadata_handler="nonbranching")
    # Kostya data task
    task2 = worker.create_task(task_name="Kostya_2019",
                               expression_matrix_file="kostya_microarray_yeast.tsv.gz",
                               expression_matrix_columns_are_genes=True,
                               extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['isTs', 'is1stLast', 'prevCol', 'del.t', 'condName'],
                               workflow_type="tfa",
                               metadata_handler="branching")
    worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True,
                                          cv_split_ratio=0.2)
    worker.set_run_parameters(num_bootstraps=5)
    worker.append_to_path("output_dir", "set1_raw_joint_bbsr")
    cv_wrap = set_up_cv_seeds(worker)
    cv_wrap.run()
    del cv_wrap
    del worker
    # STARS-BY-TASK
    worker = inferelator_workflow(regression="stars", workflow="multitask")
    worker = set_up_workflow(worker)
    # Calico data task
    task1 = worker.create_task(task_name="Calico_2019",
                               expression_matrix_file="calico_expression_matrix_raw.tsv.gz",
                               expression_matrix_columns_are_genes=True,
                               extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism',
                                                           'time'],
                               workflow_type="tfa",
                               metadata_handler="nonbranching")
    # Kostya data task
    task2 = worker.create_task(task_name="Kostya_2019",
                               expression_matrix_file="kostya_microarray_yeast.tsv.gz",
                               expression_matrix_columns_are_genes=True,
                               extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['isTs', 'is1stLast', 'prevCol', 'del.t', 'condName'],
                               workflow_type="tfa",
                               metadata_handler="branching")
    worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True,
                                          cv_split_ratio=0.2)
    worker.set_run_parameters(num_bootstraps=5)
    worker.append_to_path("output_dir", "set1_raw_joint_stars")
    cv_wrap = set_up_cv_seeds(worker)
    cv_wrap.run()
    del cv_wrap
    del worker
    # AMUSR
    worker = inferelator_workflow(regression="amusr", workflow="multitask")
    worker = set_up_workflow(worker)
    # Calico data task
    task1 = worker.create_task(task_name="Calico_2019",
                               expression_matrix_file="calico_expression_matrix_raw.tsv.gz",
                               extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism',
                                                           'time'],
                               workflow_type="tfa",
                               metadata_handler="nonbranching")
    # Kostya data task
    task2 = worker.create_task(task_name="Kostya_2019",
                               expression_matrix_file="kostya_microarray_yeast.tsv.gz",
                               extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['isTs', 'is1stLast', 'prevCol', 'del.t', 'condName'],
                               workflow_type="tfa",
                               metadata_handler="branching")
    worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True,
                                          cv_split_ratio=0.2)
    worker.set_run_parameters(num_bootstraps=5, use_numba=True)
    worker.append_to_path("output_dir", "set1_raw_joint_amusr")
    cv_wrap = set_up_cv_seeds(worker)
    cv_wrap.run()
    del cv_wrap
    del worker
    # Disabled runs kept for reference (genie3 variants and Data Set 2)
    """
    # Create a worker
    worker = inferelator_workflow(regression=SCENICRegression, workflow=SCENICWorkflow)
    worker = set_up_workflow(worker)
    worker.set_expression_file(tsv="calico_expression_matrix_raw_microarray.tsv.gz")
    worker.set_file_properties(extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism', 'time'],
                               metadata_handler="nonbranching")
    worker.adjacency_method = "genie3"
    worker.append_to_path("output_dir", "set1_genie3")
    worker.run()
    # Data Set 2
    # Create a worker
    worker = inferelator_workflow(regression=SCENICRegression, workflow=SCENICWorkflow)
    worker = set_up_workflow(worker)
    worker.set_expression_file(tsv="kostya_microarray_yeast.tsv.gz")
    worker.set_file_properties(extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['isTs', 'is1stLast', 'prevCol', 'del.t', 'condName'],
                               metadata_handler="branching")
    worker.adjacency_method = "grnboost2"
    worker.append_to_path("output_dir", "set2_grnboost")
    worker.run()
    # Create a worker
    worker = inferelator_workflow(regression=SCENICRegression, workflow=SCENICWorkflow)
    worker = set_up_workflow(worker)
    worker.set_expression_file(tsv="kostya_microarray_yeast.tsv.gz")
    worker.set_file_properties(extract_metadata_from_expression_matrix=True,
                               expression_matrix_metadata=['isTs', 'is1stLast', 'prevCol', 'del.t', 'condName'],
                               metadata_handler="branching")
    worker.adjacency_method = "genie3"
    worker.append_to_path("output_dir", "set2_genie3")
    worker.run()
    """
"""
| 45.447257 | 116 | 0.644044 | # Load modules
from inferelator import inferelator_workflow, inferelator_verbose_level, MPControl, crossvalidation_workflow
from inferelator.benchmarking.scenic import SCENICWorkflow, SCENICRegression
from inferelator.distributed.inferelator_mp import MPControl
# Set verbosity level to "Talky"
inferelator_verbose_level(1)
# Set the location of the input data and the desired location of the output files
DATA_DIR = '~/repos/inferelator/data/yeast'
OUTPUT_DIR = '/scratch/cj59/yeast_inference'
PRIORS_FILE_NAME = 'YEASTRACT_20190713_BOTH.tsv'
GOLD_STANDARD_FILE_NAME = 'gold_standard.tsv.gz'
TF_LIST_FILE_NAME = 'tf_names.tsv'
# Multiprocessing needs to be protected with the if __name__ == 'main' pragma
if __name__ == '__main__':
MPControl.set_multiprocess_engine("dask-cluster")
MPControl.client.use_default_configuration("greene", n_jobs=2)
MPControl.client.add_worker_conda("source /scratch/cgsb/gresham/no_backup/Chris/.conda/bin/activate scenic")
MPControl.connect()
# Define the general run parameters
def set_up_workflow(wkf):
    """Apply the shared file-location settings to a workflow and return it.

    :param wkf: an inferelator workflow object
    :return: the same workflow, configured in place
    """
    wkf.set_file_paths(
        input_dir=DATA_DIR,
        output_dir=OUTPUT_DIR,
        tf_names_file=TF_LIST_FILE_NAME,
        priors_file=PRIORS_FILE_NAME,
        gold_standard_file=GOLD_STANDARD_FILE_NAME,
    )
    wkf.set_output_file_names(curve_data_file_name="metric_curve.tsv.gz")
    return wkf
def set_up_cv_seeds(wkf):
    """Wrap a workflow in a CV manager that grid-searches ten random seeds.

    :param wkf: a configured inferelator workflow
    :return: a CrossValidationManager sweeping random_seed over 42..51
    """
    manager = crossvalidation_workflow.CrossValidationManager(wkf)
    manager.add_gridsearch_parameter('random_seed', list(range(42, 52)))
    return manager
# Data Set 1
if __name__ == '__main__':
# Create a worker
worker = inferelator_workflow(regression=SCENICRegression, workflow=SCENICWorkflow)
worker = set_up_workflow(worker)
worker.set_expression_file(tsv="calico_expression_matrix_raw_microarray.tsv.gz")
worker.set_file_properties(extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism', 'time'],
metadata_handler="nonbranching")
worker.adjacency_method = "grnboost2"
worker.set_output_file_names(curve_data_file_name="metric_curve.tsv.gz")
worker._do_preprocessing = False
worker.do_scenic = False
worker.append_to_path("output_dir", "set1_raw_grnboost")
worker.run()
# BBSR
worker = inferelator_workflow(regression="bbsr", workflow="tfa")
worker = set_up_workflow(worker)
worker.set_expression_file(tsv="calico_expression_matrix_raw_microarray.tsv.gz")
worker.set_file_properties(extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism', 'time'],
metadata_handler="nonbranching")
worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True,
cv_split_ratio=0.2)
worker.set_run_parameters(num_bootstraps=5)
worker.append_to_path("output_dir", "set1_raw_bbsr")
worker.set_output_file_names(curve_data_file_name="metric_curve.tsv.gz")
cv_wrap = set_up_cv_seeds(worker)
cv_wrap.run()
del cv_wrap
del worker
# STARS-LASSO
worker = inferelator_workflow(regression="stars", workflow="tfa")
worker = set_up_workflow(worker)
worker.set_expression_file(tsv="calico_expression_matrix_raw_microarray.tsv.gz")
worker.set_file_properties(extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism', 'time'],
metadata_handler="nonbranching")
worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True,
cv_split_ratio=0.2)
worker.set_run_parameters(num_bootstraps=5)
worker.append_to_path("output_dir", "set1_raw_stars")
worker.set_output_file_names(curve_data_file_name="metric_curve.tsv.gz")
cv_wrap = set_up_cv_seeds(worker)
cv_wrap.run()
del cv_wrap
del worker
# BBSR-BY-TASK
worker = inferelator_workflow(regression="bbsr", workflow="multitask")
worker = set_up_workflow(worker)
# Calico data task
task1 = worker.create_task(task_name="Calico_2019",
expression_matrix_file="calico_expression_matrix_raw.tsv.gz",
expression_matrix_columns_are_genes=True,
extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism',
'time'],
workflow_type="tfa",
metadata_handler="nonbranching")
# Kostya data task
task2 = worker.create_task(task_name="Kostya_2019",
expression_matrix_file="kostya_microarray_yeast.tsv.gz",
expression_matrix_columns_are_genes=True,
extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['isTs', 'is1stLast', 'prevCol', 'del.t', 'condName'],
workflow_type="tfa",
metadata_handler="branching")
worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True,
cv_split_ratio=0.2)
worker.set_run_parameters(num_bootstraps=5)
worker.append_to_path("output_dir", "set1_raw_joint_bbsr")
cv_wrap = set_up_cv_seeds(worker)
cv_wrap.run()
del cv_wrap
del worker
# STARS-BY-TASK
worker = inferelator_workflow(regression="stars", workflow="multitask")
worker = set_up_workflow(worker)
# Calico data task
task1 = worker.create_task(task_name="Calico_2019",
expression_matrix_file="calico_expression_matrix_raw.tsv.gz",
expression_matrix_columns_are_genes=True,
extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism',
'time'],
workflow_type="tfa",
metadata_handler="nonbranching")
# Kostya data task
task2 = worker.create_task(task_name="Kostya_2019",
expression_matrix_file="kostya_microarray_yeast.tsv.gz",
expression_matrix_columns_are_genes=True,
extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['isTs', 'is1stLast', 'prevCol', 'del.t', 'condName'],
workflow_type="tfa",
metadata_handler="branching")
worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True,
cv_split_ratio=0.2)
worker.set_run_parameters(num_bootstraps=5)
worker.append_to_path("output_dir", "set1_raw_joint_stars")
cv_wrap = set_up_cv_seeds(worker)
cv_wrap.run()
del cv_wrap
del worker
# AMUSR
worker = inferelator_workflow(regression="amusr", workflow="multitask")
worker = set_up_workflow(worker)
# Calico data task
task1 = worker.create_task(task_name="Calico_2019",
expression_matrix_file="calico_expression_matrix_raw.tsv.gz",
extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism',
'time'],
workflow_type="tfa",
metadata_handler="nonbranching")
# Kostya data task
task2 = worker.create_task(task_name="Kostya_2019",
expression_matrix_file="kostya_microarray_yeast.tsv.gz",
extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['isTs', 'is1stLast', 'prevCol', 'del.t', 'condName'],
workflow_type="tfa",
metadata_handler="branching")
worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True,
cv_split_ratio=0.2)
worker.set_run_parameters(num_bootstraps=5, use_numba=True)
worker.append_to_path("output_dir", "set1_raw_joint_amusr")
cv_wrap = set_up_cv_seeds(worker)
cv_wrap.run()
del cv_wrap
del worker
"""
# Create a worker
worker = inferelator_workflow(regression=SCENICRegression, workflow=SCENICWorkflow)
worker = set_up_workflow(worker)
worker.set_expression_file(tsv="calico_expression_matrix_raw_microarray.tsv.gz")
worker.set_file_properties(extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['TF', 'strain', 'date', 'restriction', 'mechanism', 'time'],
metadata_handler="nonbranching")
worker.adjacency_method = "genie3"
worker.append_to_path("output_dir", "set1_genie3")
worker.run()
# Data Set 2
# Create a worker
worker = inferelator_workflow(regression=SCENICRegression, workflow=SCENICWorkflow)
worker = set_up_workflow(worker)
worker.set_expression_file(tsv="kostya_microarray_yeast.tsv.gz")
worker.set_file_properties(extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['isTs', 'is1stLast', 'prevCol', 'del.t', 'condName'],
metadata_handler="branching")
worker.adjacency_method = "grnboost2"
worker.append_to_path("output_dir", "set2_grnboost")
worker.run()
# Create a worker
worker = inferelator_workflow(regression=SCENICRegression, workflow=SCENICWorkflow)
worker = set_up_workflow(worker)
worker.set_expression_file(tsv="kostya_microarray_yeast.tsv.gz")
worker.set_file_properties(extract_metadata_from_expression_matrix=True,
expression_matrix_metadata=['isTs', 'is1stLast', 'prevCol', 'del.t', 'condName'],
metadata_handler="branching")
worker.adjacency_method = "genie3"
worker.append_to_path("output_dir", "set2_genie3")
worker.run()
"""
| 511 | 0 | 45 |
4820f58ee724581a19340bf41c1d6b6e2b8c5698 | 709 | py | Python | manage.py | aarjitpaudel/Nepali-news-portal-kbd | ff42b905361fbbacb617510c0a5bd26adf7f7272 | [
"MIT"
] | 5 | 2019-12-01T14:23:36.000Z | 2021-05-10T13:13:16.000Z | manage.py | aarjitpaudel/Nepali-news-portal-kbd | ff42b905361fbbacb617510c0a5bd26adf7f7272 | [
"MIT"
] | 29 | 2019-11-25T23:21:10.000Z | 2021-03-19T23:17:37.000Z | manage.py | hemanta212/Khabar-board | 37d079c9ae3897e0100bab1396be36e7f6508a08 | [
"MIT"
] | 2 | 2019-12-23T01:01:45.000Z | 2021-07-22T04:45:02.000Z | import os
from flask_final.config import Debug, Secrets
from flask_final import db, create_app
is_env_var_set = os.getenv("SQLALCHEMY_DATABASE_URI")
if not is_env_var_set:
config = Secrets()
else:
config = Debug
# Support for relative sqlite URIs
if config.SQLALCHEMY_DATABASE_URI == "sqlite:///site.db":
temp_app = create_app(config)
config.SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(
temp_app.root_path, "site.db"
)
app = create_app(config)
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command("db", MigrateCommand)
if __name__ == "__main__":
manager.run()
| 23.633333 | 65 | 0.74048 | import os
from flask_final.config import Debug, Secrets
from flask_final import db, create_app
is_env_var_set = os.getenv("SQLALCHEMY_DATABASE_URI")
if not is_env_var_set:
config = Secrets()
else:
config = Debug
# Support for relative sqlite URIs
if config.SQLALCHEMY_DATABASE_URI == "sqlite:///site.db":
temp_app = create_app(config)
config.SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(
temp_app.root_path, "site.db"
)
app = create_app(config)
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command("db", MigrateCommand)
if __name__ == "__main__":
manager.run()
| 0 | 0 | 0 |
304ef5cef3e0a6f6f6da377089a2f9f447bd40ce | 1,991 | py | Python | tests/unit/test_lidar.py | tukiains/actris-cloudnet | 26f2607b890630146469cfa410fce99438ceee3f | [
"MIT"
] | 13 | 2020-02-16T06:52:51.000Z | 2022-03-10T09:43:19.000Z | tests/unit/test_lidar.py | tukiains/actris-cloudnet | 26f2607b890630146469cfa410fce99438ceee3f | [
"MIT"
] | 17 | 2020-01-15T10:47:08.000Z | 2022-03-28T13:08:23.000Z | tests/unit/test_lidar.py | tukiains/actris-cloudnet | 26f2607b890630146469cfa410fce99438ceee3f | [
"MIT"
] | 12 | 2020-03-03T16:45:13.000Z | 2022-03-23T08:02:43.000Z | import numpy as np
import numpy.ma as ma
from numpy.testing import assert_array_equal
import pytest
import netCDF4
from cloudnetpy.categorize.lidar import Lidar
WAVELENGTH = 900.0  # value written into the fake file's "wavelength" variable
@pytest.fixture(scope='session')
def fake_lidar_file(tmpdir_factory):
    """Creates a simple lidar file for testing.

    Builds a temporary netCDF file with 4 time steps, 4 height bins, scalar
    site metadata, and a fully unmasked 4x4 "beta" matrix whose rows are all
    [1, 2, 3, 4]. Session-scoped, so the file is created once per test run.
    """
    file_name = tmpdir_factory.mktemp("data").join("radar_file.nc")
    root_grp = netCDF4.Dataset(file_name, "w", format="NETCDF4_CLASSIC")
    n_time, n_height = 4, 4
    root_grp.createDimension('time', n_time)
    root_grp.createDimension('height', n_height)
    root_grp.createVariable('time', 'f8', 'time')[:] = np.arange(n_time)
    var = root_grp.createVariable('height', 'f8', 'height')
    var[:] = np.arange(n_height)
    var.units = 'km'
    # scalar site / instrument metadata
    root_grp.createVariable('wavelength', 'f8')[:] = WAVELENGTH
    root_grp.createVariable('latitude', 'f8')[:] = 60.43
    root_grp.createVariable('longitude', 'f8')[:] = 25.4
    var = root_grp.createVariable('altitude', 'f8')
    var[:] = 120.3
    var.units = 'm'
    # backscatter coefficient: every row [1, 2, 3, 4], mask all-False
    var = root_grp.createVariable('beta', 'f8', ('time', 'height'))
    var[:] = ma.array([[1, 2, 3, 4],
                       [1, 2, 3, 4],
                       [1, 2, 3, 4],
                       [1, 2, 3, 4]], dtype=float,
                      mask=[[0, 0, 0, 0],
                            [0, 0, 0, 0],
                            [0, 0, 0, 0],
                            [0, 0, 0, 0]])
    root_grp.close()
    return file_name
| 34.929825 | 72 | 0.588649 | import numpy as np
import numpy.ma as ma
from numpy.testing import assert_array_equal
import pytest
import netCDF4
from cloudnetpy.categorize.lidar import Lidar
WAVELENGTH = 900.0
@pytest.fixture(scope='session')
def fake_lidar_file(tmpdir_factory):
"""Creates a simple lidar file for testing."""
file_name = tmpdir_factory.mktemp("data").join("radar_file.nc")
root_grp = netCDF4.Dataset(file_name, "w", format="NETCDF4_CLASSIC")
n_time, n_height = 4, 4
root_grp.createDimension('time', n_time)
root_grp.createDimension('height', n_height)
root_grp.createVariable('time', 'f8', 'time')[:] = np.arange(n_time)
var = root_grp.createVariable('height', 'f8', 'height')
var[:] = np.arange(n_height)
var.units = 'km'
root_grp.createVariable('wavelength', 'f8')[:] = WAVELENGTH
root_grp.createVariable('latitude', 'f8')[:] = 60.43
root_grp.createVariable('longitude', 'f8')[:] = 25.4
var = root_grp.createVariable('altitude', 'f8')
var[:] = 120.3
var.units = 'm'
var = root_grp.createVariable('beta', 'f8', ('time', 'height'))
var[:] = ma.array([[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]], dtype=float,
mask=[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
root_grp.close()
return file_name
def test_init(fake_lidar_file):
    """Lidar metadata fields are populated from the file on construction."""
    lidar = Lidar(fake_lidar_file)
    expected = {'lidar_wavelength': WAVELENGTH, 'beta_bias': 3, 'beta_error': 0.5}
    for key, value in expected.items():
        assert lidar.data[key].data == value
def test_rebin(fake_lidar_file):
    """Rebinning beta onto a coarser grid averages neighbouring values."""
    lidar = Lidar(fake_lidar_file)
    lidar.rebin_to_grid(np.array([1.1, 2.1]), np.array([500, 1500]))
    expected = np.array([[1.5, 2.5]] * 2)
    assert_array_equal(lidar.data['beta'].data, expected)
| 479 | 0 | 46 |