max_stars_repo_path
stringlengths
3
269
max_stars_repo_name
stringlengths
4
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.05M
score
float64
0.23
5.13
int_score
int64
0
5
Libraries/topological_sort.py
tonko2/AtCoder
2
12773251
<reponame>tonko2/AtCoder<filename>Libraries/topological_sort.py def topological_sort(inc, out): # ・ノードIDは、0~N-1とする # ・以下のデータは既に作られているとする # inc[n] = nに流入するリンク数(int) # out[n] = nからの流出先のノード集合(list or set) # N = 7 # inc = [0] * N # out = [[] for _ in range(N)] # inc[0] = 3 # inc[1] = 0 # inc[2] = 0 # inc[3] = 0 # inc[4] = 1 # inc[5] = 1 # inc[6] = 1 # # out[0] = [4, 5, 6] # out[1] = [0] # out[2] = [0] # out[3]= [0] # out[4] = [] # out[5] = [] # out[6] = [] # 入次数が0のノード S = {i for i, c in enumerate(inc) if c == 0} L = [] while S: n = S.pop() L.append(n) for m in out[n]: inc[m] -= 1 if inc[m] == 0: S.add(m) return L
3.328125
3
japonicus/interface.py
mczero80/japonicus
229
12773252
<reponame>mczero80/japonicus #!/bin/python import evaluation def showTitleDisclaimer(backtestsettings, VERSION): TITLE = """ ██╗ █████╗ ██████╗ ██████╗ ███╗ ██╗██╗ ██████╗██╗ ██╗███████╗ ██║██╔══██╗██╔══██╗██╔═══██╗████╗ ██║██║██╔════╝██║ ██║██╔════╝ ██║███████║██████╔╝██║ ██║██╔██╗ ██║██║██║ ██║ ██║███████╗ ██ ██║██╔══██║██╔═══╝ ██║ ██║██║╚██╗██║██║██║ ██║ ██║╚════██║ ╚█████╔╝██║ ██║██║ ╚██████╔╝██║ ╚████║██║╚██████╗╚██████╔╝███████║ ╚════╝ ╚═╝ ╚═╝╚═╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═════╝ ╚══════╝ """ TITLE += "\t EVOLUTIONARY GENETIC ALGORITHMS" try: print(TITLE, end="") except UnicodeEncodeError or SyntaxError: print("\nJAPONICUS\n") print('\t' * 4 + 'v%.2f' % VERSION) print() profitDisclaimer = "The profits reported here depends on backtest interpreter function;" interpreterFuncName = backtestsettings['interpreteBacktestProfit'] interpreterInfo = evaluation.gekko.backtest.getInterpreterBacktestInfo( interpreterFuncName) print("%s \n\t%s\n" % (profitDisclaimer, interpreterInfo))
2.265625
2
AutonomousPkg/build/lib/Autonomous/Modules/Obstacle_Avoidance.py
GabeCasciano/Capstone20
1
12773253
<reponame>GabeCasciano/Capstone20<filename>AutonomousPkg/build/lib/Autonomous/Modules/Obstacle_Avoidance.py from Autonomous.Sensors.LIDAR import LIDAR_Interface, Utils from Autonomous.Modules.Sensor_Fusion import Sensor_Fusion from Autonomous.Modules.Obstacle_Detection import Obstacle_Detection from Autonomous.Modules.Path_Planning import Path import numpy as np import math class Obstacle_Avoidance: SF = Sensor_Fusion() Obs_Det = Obstacle_Detection() PP = Path() def __init__(self): self.__running = False self.x_ref = self.SF.gps_latitude self.y_ref = self.SF.gps_longitude self.__obstacle = self.Obs_Det.detected_obstacle self.x_dest = self.PP._dest_lat self.y_dest = self.PP.destination_long self.__obstacle_flag = False self.__orient_angle = np.asarray[0] # self.__orient_angle_det = np.asarray[0] def avoid_obstacle(self, __obstacle:tuple, __obstacle_flag, __orient_angle): # obstacle is the location of the obstacle relative to the robot # direction represents going to the left around the obstacle if false # uses the self.__running var to indicate that it is still running and to control the main while loop __obstacle = np.asarray(__obstacle) x_obs = __obstacle[:,0] y_obs = __obstacle[:,1] diff_x = abs(x_obs - self.x_ref) diff_y = abs(y_obs - self.y_ref) diff_angle = self.x_ref - self.theta max_dist = 200 for j in range(0, len(y_obs)): if (diff_x > 20) and (diff_y > 20): # obst not detected close by for i in range(j, j + 1): if y_obs[j] > self.dist_dest: if diff_angle == 0: __orient_angle = self.theta elif diff_angle > 0: __orient_angle = self.x_ref + diff_angle elif diff_angle < 0: __orient_angle = self.x_ref - diff_angle __obstacle_flag = True else: __obstacle_flag= False # print("orient angle 1", orient_angle) elif (diff_x < 20) and (diff_y < 20): # obs detected close by # set temp path alpha_temp = 90 alpha_temp = np.radians(alpha_temp) # __orient_angle_det = self.theta + alpha_temp might not need this? 
diff_2 = alpha_temp - self.theta if y_obs[j] > self.dist_dest: if diff_2 > 0: __orient_angle = alpha_temp + diff_2 elif diff_2 < 0: __orient_angle = alpha_temp - diff_2 __obstacle_flag = True else: __obstacle_flag = False # print("orient angle 2", orient_angle2) pass def dist_dest(self): return math.sqrt(math.pow((self.x_dest - self.x_ref), 2) + math.pow(self.y_dest - self.y_ref), 2) def theta(self): return abs(math.radians(math.atan((self.y_dest - self.y_ref) / (self.x_dest - self.x_ref)))) # optimal travelling angle @property def orientation_angle(self): return self.__orient_angle #@property #def orientation_angle_det(self): # return self.__orient_angle_det @property #this needs to be changed def obstacle_detected_flag(self): if not self.__obstacle_flag: return self.__obstacle_flag @property def running(self): while self.__running: # need to be fixed avoid_obs = self.avoid_obstacle(self, self.__obstacle, self.__obstacle_flag, __orient_angle) return @property def duration(self): return
2.859375
3
torrentscraper/webscrapers/utils/uri_builder.py
AsiganTheSunk/python-torrent-scrapper
5
12773254
#!/usr/bin/env python3 # Import System Libraries import urllib.parse # Import Custom Constant from lib.fileflags import FileFlags as fflags # Constants SEASON_WRAP = -1 EPISODE_WRAP = 0 TEMPORADA_WRAP = 1 SPECIAL_WEBSCRAPER = 'MejorTorrentScraper' class UriBuilder(object): def __init__(self, logger): self.name = self.__class__.__name__ # Custom Logger self.logger = logger def build_request_url(self, websearch, webscraper): ''' This function performs the construction of a custom uri, for the WebScraper that is being used in the call. :param websearch: this value, represents the items used in the construction of the custom uri :type websearch: websearch :param webscraper: this value, represents the WebScraper you're using :type webscraper: WebScraper :return: this function, returns the custom uri created for the request :rtype: str ''' search_query = {} search_params = {} search_uri_list = [] if webscraper.name == SPECIAL_WEBSCRAPER: search_query = self._get_mjrt_url(websearch) else: search_query = self._get_general_url(websearch) if webscraper.default_params != {}: search_params = {**search_query, **webscraper.default_params} else: search_params = search_query self.logger.debug('{0} Query Params:'.format(self.name)) for item in search_params: self.logger.debug('[ {0} : {1} ]'.format(item, search_params[item])) if webscraper.query_type: search_uri = '{0}{1}?{2}'.format(webscraper.main_page, webscraper.default_search, ( urllib.parse.urlencode(search_params))) else: search_uri = '{0}{1}{2}{3}'.format(webscraper.main_page, webscraper.default_search, ( search_params['q']).replace(' ','%20').replace('&','and'), webscraper.default_tail) self.logger.debug0('{0} Generated Uri from Query Params: [ {1} ]'.format(self.name, search_uri)) return search_uri def _get_mjrt_url(self, websearch): ''' :param websearch: :return: ''' if websearch.search_type == fflags.FILM_DIRECTORY_FLAG: return {'valor': '{0}'.format(websearch.title)} else: if websearch.season != '': return {'valor': 
'{0} - {1}'.format(websearch.title, websearch.season[-1:])} else: return {'valor': '{0}'.format(websearch.title)} def _get_general_url(self, websearch): ''' :param websearch: :return: ''' if websearch.search_type == fflags.FILM_DIRECTORY_FLAG: return {'q': '{0} {1} {2}'.format( websearch.title, websearch.year, websearch.quality.strip())} elif websearch.search_type == fflags.ANIME_DIRECTORY_FLAG: return ({'q': '{0} {1} {2} {3}'.format( websearch.source, websearch.title, websearch.episode, websearch.quality).strip()}) elif websearch.search_type == fflags.SHOW_DIRECTORY_FLAG: if websearch.season != '' and websearch.episode != '': return {'q': '{0} S{1}E{2} {3}'.format( websearch.title, websearch.season, websearch.episode, websearch.quality).strip()} else: return {'q': '{0} Season {1} {2}'.format( websearch.title, websearch.season[1:], websearch.quality).strip()}
2.984375
3
build/lib/IP_Connector/PrintRecord.py
manthanchauhan/IP_Connector
1
12773255
""" Overloading 'print()' """ import sys def print_record(string): with open('logout_details.txt', 'a') as record: stdout = sys.stdout sys.stdout = record print(string, end='') sys.stdout = stdout return
3.6875
4
flake8_intsights/checkers/indents.py
Intsights/flake8-intsights
12
12773256
<reponame>Intsights/flake8-intsights import tokenize from . import _checker class Checker( _checker.BaseChecker, ): @classmethod def check( cls, filename, lines, tokens, start_position_to_token, ast_tree, astroid_tree, all_astroid_nodes, ): yield from cls.check_only_spaces_indents( tokens=tokens, ) yield from cls.check_indentations_gradually( lines=lines, tokens=tokens, ) @classmethod def check_only_spaces_indents( cls, tokens, ): for token in tokens: token_type_is_indent = token.type == tokenize.INDENT if not token_type_is_indent: continue number_of_tabs = token.string.count('\t') if number_of_tabs > 0: yield from cls.error_yielder.yield_error( error_id='I003', line_number=token.start[0], column_offset=0, ) number_of_spaces = token.string.count(' ') spaces_are_not_in_four = number_of_spaces % 4 != 0 if spaces_are_not_in_four: yield from cls.error_yielder.yield_error( error_id='I004', line_number=token.start[0], column_offset=0, ) @classmethod def check_indentations_gradually( cls, lines, tokens, ): previous_indenation_level = 0 for line_number, line in enumerate(lines, 1): if line.strip() == '': continue current_indenation_level = cls.get_line_indentation_level( line=line, ) tokens_indented_ungradually = (current_indenation_level - previous_indenation_level) > 1 if tokens_indented_ungradually: col_offset = len(line) - len(line.lstrip()) current_token = cls.get_token_by_position( lineno=line_number, col_offset=col_offset, tokens=tokens, ) current_token_is_string = current_token.type == tokenize.STRING if not current_token_is_string: yield from cls.error_yielder.yield_error( error_id='I006', line_number=line_number, column_offset=0, ) previous_indenation_level = current_indenation_level
2.4375
2
mpf/devices/shot_group.py
enteryourinitials/mpf
0
12773257
"""Contains the ShotGroup base class.""" from collections import deque from mpf.core.device_monitor import DeviceMonitor from mpf.core.events import event_handler from mpf.core.mode import Mode from mpf.core.mode_device import ModeDevice from mpf.core.player import Player @DeviceMonitor("common_state", "rotation_enabled") class ShotGroup(ModeDevice): """Represents a group of shots in a pinball machine by grouping together multiple `Shot` class devices. This is used so you get get "group-level" functionality, like shot rotation, shot group completion, etc. This would be used for a group of rollover lanes, a bank of standups, etc. """ config_section = 'shot_groups' collection = 'shot_groups' class_label = 'shot_group' __slots__ = ["rotation_enabled", "profile", "rotation_pattern"] def __init__(self, machine, name): """Initialise shot group.""" super().__init__(machine, name) self.rotation_enabled = None self.profile = None self.rotation_pattern = None def add_control_events_in_mode(self, mode) -> None: """Remove enable here.""" def device_loaded_in_mode(self, mode: Mode, player: Player): """Add device in mode.""" super().device_loaded_in_mode(mode, player) self._check_for_complete() self.profile = self.config['shots'][0].profile self.rotation_pattern = deque(self.profile.config['rotation_pattern']) self.rotation_enabled = not self.config['enable_rotation_events'] for shot in self.config['shots']: self.machine.events.add_handler("{}_hit".format(shot.name), self._hit) def device_removed_from_mode(self, mode): """Disable device when mode stops.""" super().device_removed_from_mode(mode) self.machine.events.remove_handler(self._hit) @property def common_state(self): """Return common state if all shots in this group are in the same state. Will return None otherwise. 
""" state = self.config['shots'][0].state_name for shot in self.config['shots']: if state != shot.state_name: # shots do not have a common state return None return state def _check_for_complete(self): """Check if all shots in this group are in the same state.""" state = self.common_state if not state: # shots do not have a common state return # if we reached this point we got a common state self.debug_log( "Shot group is complete with state: %s", state) self.machine.events.post('{}_complete'.format(self.name), state=state) '''event: (name)_complete desc: All the member shots in the shot group called (name) are in the same state. args: state: name of the common state of all shots. ''' self.machine.events.post('{}_{}_complete'.format(self.name, state)) '''event: (name)_(state)_complete desc: All the member shots in the shot group called (name) are in the same state named (state). ''' @event_handler(2) def event_enable(self, **kwargs): """Handle enable control event.""" del kwargs self.enable() def enable(self): """Enable all member shots.""" for shot in self.config['shots']: shot.enable() @event_handler(3) def event_disable(self, **kwargs): """Handle disable control event.""" del kwargs self.disable() def disable(self): """Disable all member shots.""" for shot in self.config['shots']: shot.disable() @event_handler(1) def event_reset(self, **kwargs): """Handle reset control event.""" del kwargs self.reset() def reset(self): """Reset all member shots.""" for shot in self.config['shots']: shot.reset() @event_handler(4) def event_restart(self, **kwargs): """Handle restart control event.""" del kwargs self.restart() def restart(self): """Restart all member shots.""" for shot in self.config['shots']: shot.restart() def _hit(self, advancing, **kwargs): """One of the member shots in this shot group was hit. 
Args: kwarg: { profile: the current profile of the member shot that was hit state: the current state of the member shot that was hit advancing: boolean of whether the state is advancing } """ if advancing: self._check_for_complete() self.machine.events.post(self.name + '_hit') '''event: (name)_hit desc: A member shots in the shot group called (name) has been hit. ''' self.machine.events.post("{}_{}_hit".format(self.name, kwargs['state'])) '''event: (name)_(state)_hit desc: A member shot with state (state) in the shot group (name) has been hit. ''' @event_handler(9) def event_enable_rotation(self, **kwargs): """Handle enable_rotation control event.""" del kwargs self.enable_rotation() def enable_rotation(self): """Enable shot rotation. If disabled, rotation events do not actually rotate the shots. """ self.debug_log('Enabling rotation') self.rotation_enabled = True @event_handler(2) def event_disable_rotation(self, **kwargs): """Handle disable rotation control event.""" del kwargs self.disable_rotation() def disable_rotation(self): """Disable shot rotation. If disabled, rotation events do not actually rotate the shots. """ self.debug_log('Disabling rotation') self.rotation_enabled = False @event_handler(4) def event_rotate(self, direction=None, **kwargs): """Handle rotate control event.""" del kwargs self.rotate(direction) def rotate(self, direction=None): """Rotate (or "shift") the state of all the shots in this group. This is used for things like lane change, where hitting the flipper button shifts all the states of the shots in the group to the left or right. This method actually transfers the current state of each shot profile to the left or the right, and the shot on the end rolls over to the taret on the other end. Args: direction: String that specifies whether the rotation direction is to the left or right. Values are 'right' or 'left'. Default of None will cause the shot group to rotate in the direction as specified by the rotation_pattern. 
Note that this shot group must, and rotation_events for this shot group, must both be enabled for the rotation events to work. """ if not self.rotation_enabled: self.debug_log("Received rotation request. " "Rotation Enabled: %s. Will NOT rotate", self.rotation_enabled) return # shot_state_list is deque of tuples (state num, show step num) shot_state_list = deque() shots_to_rotate = [] for shot in self.config['shots']: if shot.can_rotate: shots_to_rotate.append(shot) shot_state_list.append(shot.state) # figure out which direction we're going to rotate if not direction: direction = self.rotation_pattern[0] self.rotation_pattern.rotate(-1) self.debug_log("Since no direction was specified, pulling from" " rotation pattern: '%s'", direction) # rotate that list if direction.lower() in ('right', 'r'): shot_state_list.rotate(1) else: shot_state_list.rotate(-1) # step through all our shots and update their states for i, shot in enumerate(shots_to_rotate): shot.jump(state=shot_state_list[i], force=True) @event_handler(8) def event_rotate_right(self, **kwargs): """Handle rotate right control event.""" del kwargs self.rotate_right() def rotate_right(self): """Rotate the state of the shots to the right. This method is the same as calling rotate('right') """ self.rotate(direction='right') @event_handler(7) def event_rotate_left(self, **kwargs): """Handle rotate left control event.""" del kwargs self.rotate_left() def rotate_left(self): """Rotate the state of the shots to the left. This method is the same as calling rotate('left') """ self.rotate(direction='left')
2.5
2
sysbd/module.py
chen-charles/sysbd
0
12773258
<reponame>chen-charles/sysbd<gh_stars>0 __author__ = 'Charles' import os import copy from . import macro class Module(object): def __init__(self, name, target): self.name = name; self.target = target def override(self, subcobj=None): if issubclass(subcobj.__class__, self): subcobj.compilers = copy.deepcopy(self.compilers) subcobj.dependencies = copy.deepcopy(self.dependencies) # Details name = "" target = "" path = "" # Needed compilers = dict() dependencies = set() #module objects def solve_dependencies(self, func): self.dependencies = set(map(func, self.dependencies)) def build(self, link): self.make(set(link)) def make(self, link): pass def collect(self, ext, parse_inc_sep=os.sep, parse_inc_stat="#include"): all_name = list() inc_data = dict() for name, fullname, root in macro.walk(self.path, ext): inc = ["%s "%fullname] all_name.append(fullname+".o") #[:-1-len(ext)] for i in self.parse_include(root, name, os.sep): inc.append(i) inc_data[fullname] = (" ".join(inc)) return all_name, inc_data macrosdefined = set() def parse_include(self, path, name, sep=os.sep, include_stat="#include"): skipping = False with open(path+sep+name, "r") as f: for i in f.readlines(): if skipping: if "#endif" in i: skipping = False continue if "#define" in i: self.macrosdefined.add(i.split()[1]) continue if "#ifndef" in i: if i.split()[1] in self.macrosdefined: skipping = True continue if "#ifdef" in i: if i.split()[1] not in self.macrosdefined: skipping = True continue if "#undef" in i: if i.split()[1] in self.macrosdefined: self.macrosdefined.remove(i.split()[1]) if include_stat in i: if '<' in i or '>' in i: continue elif '\'' in i: t = '\'' elif '\"' in i: t = '\"' else:continue i = i[i.index(t)+1:] i = i[:i.index(t)] li = i.split(sep) p = path.split(sep) for j in li: if j == "..": p.pop() elif j == ".": pass else: p.append(j) yield sep.join(p) for j in self.parse_include(sep.join(p[:-1]), p[-1], sep, include_stat): yield j
2.328125
2
event/migrations/0003_event_require_message.py
RevolutionTech/carrier-owl
0
12773259
<reponame>RevolutionTech/carrier-owl # Generated by Django 2.2.9 on 2020-01-13 05:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("event", "0002_event_remove_location_and_rename_fields"), ] operations = [ migrations.AlterField( model_name="event", name="message", field=models.TextField(), ), ]
1.328125
1
experiments/segmentation/unet_mcdropout_pascal.py
ElementAI/baal
575
12773260
import argparse from copy import deepcopy from pprint import pprint import torch.backends from PIL import Image from torch import optim from torchvision.transforms import transforms from tqdm import tqdm from baal import get_heuristic, ActiveLearningLoop from baal.bayesian.dropout import MCDropoutModule from baal import ModelWrapper from baal import ClassificationReport from baal import PILToLongTensor from utils import pascal_voc_ids, active_pascal, add_dropout, FocalLoss try: import segmentation_models_pytorch as smp except ImportError: raise Exception('This example requires `smp`.\n pip install segmentation_models_pytorch') import torch import torch.nn.functional as F import numpy as np def mean_regions(n, grid_size=16): # Compute the mean uncertainty per regions. # [batch_size, W, H] n = torch.from_numpy(n[:, None, ...]) # [Batch_size, 1, grid, grid] out = F.adaptive_avg_pool2d(n, grid_size) return np.mean(out.view([-1, grid_size ** 2]).numpy(), -1) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--al_step", default=200, type=int) parser.add_argument("--batch_size", default=8, type=int) parser.add_argument("--initial_pool", default=40, type=int) parser.add_argument("--n_data_to_label", default=20, type=int) parser.add_argument("--lr", default=0.001) parser.add_argument("--heuristic", default="random", type=str) parser.add_argument("--reduce", default="sum", type=str) parser.add_argument("--data_path", default="/data", type=str) parser.add_argument("--iterations", default=20, type=int) parser.add_argument("--learning_epoch", default=50, type=int) return parser.parse_args() def get_datasets(initial_pool, path): IM_SIZE = 224 # TODO add better data augmentation scheme. 
transform = transforms.Compose( [transforms.Resize(512), transforms.CenterCrop(IM_SIZE), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ]) test_transform = transforms.Compose( [transforms.Resize(512), transforms.CenterCrop(IM_SIZE), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ]) target_transform = transforms.Compose( [transforms.Resize(512, interpolation=Image.NEAREST), transforms.CenterCrop(IM_SIZE), PILToLongTensor(pascal_voc_ids)]) active_set, test_set = active_pascal(path=path, transform=transform, test_transform=test_transform, target_transform=target_transform) active_set.label_randomly(initial_pool) return active_set, test_set def main(): args = parse_args() batch_size = args.batch_size use_cuda = torch.cuda.is_available() hyperparams = vars(args) pprint(hyperparams) active_set, test_set = get_datasets(hyperparams['initial_pool'], hyperparams['data_path']) # We will use the FocalLoss criterion = FocalLoss(gamma=2, alpha=0.25) # Our model is a simple Unet model = smp.Unet( encoder_name='resnext50_32x4d', encoder_depth=5, encoder_weights='imagenet', decoder_use_batchnorm=False, classes=len(pascal_voc_ids) ) # Add a Dropout layerto use MC-Dropout add_dropout(model, classes=len(pascal_voc_ids), activation=None) # This will enable Dropout at test time. model = MCDropoutModule(model) # Put everything on GPU. if use_cuda: model.cuda() # Make an optimizer optimizer = optim.SGD(model.parameters(), lr=hyperparams["lr"], momentum=0.9, weight_decay=5e-4) # Keep a copy of the original weights initial_weights = deepcopy(model.state_dict()) # Add metrics model = ModelWrapper(model, criterion) model.add_metric('cls_report', lambda: ClassificationReport(len(pascal_voc_ids))) # Which heuristic you want to use? # We will use our custom reduction function. 
heuristic = get_heuristic(hyperparams['heuristic'], reduction=mean_regions) # The ALLoop is in charge of predicting the uncertainty and loop = ActiveLearningLoop(active_set, model.predict_on_dataset_generator, heuristic=heuristic, ndata_to_label=hyperparams['n_data_to_label'], # Instead of predicting on the entire pool, only a subset is used max_sample=1000, batch_size=batch_size, iterations=hyperparams["iterations"], use_cuda=use_cuda ) acc = [] for epoch in tqdm(range(args.al_step)): # Following Gal et al. 2016, we reset the weights. model.load_state_dict(initial_weights) # Train 50 epochs before sampling. model.train_on_dataset(active_set, optimizer, batch_size, hyperparams['learning_epoch'], use_cuda) # Validation! model.test_on_dataset(test_set, batch_size, use_cuda) should_continue = loop.step() metrics = model.metrics val_loss = metrics['test_loss'].value logs = { "val": val_loss, "epoch": epoch, "train": metrics['train_loss'].value, "labeled_data": active_set.labelled, "Next Training set size": len(active_set), 'cls_report': metrics['test_cls_report'].value, } pprint(logs) acc.append(logs) if not should_continue: break if __name__ == "__main__": main()
2.046875
2
test_murls.py
cyberbikepunk/murls
0
12773261
""" Test suite for the murls module. """ from murls import http, https def test_init(): assert http('site.com') == 'http://site.com' assert https('site.com') == 'https://site.com' def test_path(): url = http('site.com') assert url.path('foo', 'bar') == 'http://site.com/foo/bar' assert url.path('foo') == 'http://site.com/foo' def test_query(): url = http('site.com') assert url.query(foo='bar', bar='foo') == 'http://site.com?foo=bar&bar=foo' or 'http://site.com?bar=foo&foo=bar' assert url.query(foo='foo') == 'http://site.com?foo=foo&bar=foo' or 'http://site.com?bar=foo&foo=foo'
2.953125
3
ally/Order/utils.py
jpwatt/PyAlly
2
12773262
<filename>ally/Order/utils.py # MIT License # # Copyright (c) 2020 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import xml.etree.cElementTree as ET def shouldIgnore ( s ): """Should we ignore the key s? 
(Right now, returns true if s takes the form "__xxxxx...", prepended by two '_' chars """ return len(s) > 1 and s[:2] == '__' def woodChipper ( tree ): """Given a tree structure, sort out the leaves from the subtrees (and also aggregate all the other junk) """ leaves = [] trees = [] for k,v in tree.items(): if not shouldIgnore(k): # Pull out the subtrees if isinstance(v, dict): trees.append ( (k,v) ) # Pull out the leaves else: leaves.append ( (k,str(v)) ) return leaves, trees def _transposeTreeHelper ( tree, name="unnamed_tree", work=None ): """Recursive steps of vvvv """ # Sort all this stuff out leaves, trees = woodChipper ( tree ) # Create this current object root = ET.SubElement( work, name, attrib = dict(leaves) ) # Now add all of our branches for name, subtree in trees: # Now pass this root object off to the helper _transposeTreeHelper ( subtree, name=name, work=root ) def transposeTree ( tree, name="unnamed_tree", stringify=True, ): """Give me a nested dictionary structure, where at each level: - Leaves should become XML attributes - Subtrees become subtags """ # Sort all this stuff out leaves, trees = woodChipper ( tree ) # Create the root element, and add any needed leaves root = ET.Element( name, attrib = dict(leaves) ) # Now add all of our branches for name, subtree in trees: # Now pass this root object off to the helper _transposeTreeHelper ( subtree, name=name, work=root ) # And return the new tree if stringify: return ET.tostring(root) # Return raw tree else: return root def fixTag ( tag ): return tag.split('}',1)[-1] def parseTree ( tree ): if isinstance(tree, str): tree = ET.fromstring(tree) if tree is None: return {} x = tree.attrib for child in tree: x = { **x, **parseTree(child) } return { fixTag(tree.tag) : x }
2.21875
2
alembic/env.py
yiunsr/suerp
0
12773263
# Alembic migration environment for an async-SQLAlchemy FastAPI app.
# Runs either as a script (python env.py -> online migration) or under
# `alembic` (offline/online decided by context.is_offline_mode()).
import os
import sys
import asyncio
import debugpy  # NOTE(review): only used by the commented-out attach block below
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.ext.asyncio import create_async_engine
from alembic import context
from alembic.config import Config

# Remote-debugger attach hooks, kept for development convenience:
# debugpy.listen(5678)
# print("Waiting for debugger attach")
# debugpy.wait_for_client()
# debugpy.breakpoint()
# print('break on this line')

# print("==== start alembic ==")

# On Windows the proactor loop breaks some async DB drivers; force the
# selector policy. https://stackoverflow.com/a/66772223/6652082
if sys.platform.startswith("win"):
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

# Make the project packages (app/, one and two levels up) importable.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(
    os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    )
)

from app.models.base import Base
from app import create_app

# Under `alembic`, context.config is populated; when run directly as a
# script it raises, so fall back to loading alembic.ini by path.
try:
    config = context.config  # noqa
except Exception:
    ini_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "../alembic.ini"
    )
    ini_path = os.path.abspath(ini_path)
    config = Config(ini_path)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None  # reassigned to Base.metadata after create_app below

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.

# Exclusion lists let autogenerate ignore selected tables/columns.
# https://gist.github.com/utek/6163250
def exclude_tables_from_config(config_):
    """Parse the comma-separated `tables` option into a stripped list."""
    tables_ = config_.get("tables", None)
    if tables_ is not None:
        tables = tables_.split(",")
    else:
        tables = []
    new_tables = []
    for table in tables:
        table = table.strip()
        new_tables.append(table)
    return new_tables


# Example ini section this parses:
# [alembic:exclude]
# table_col = products.create_at, users.updated_at
def exclude_colum_from_config(config_):
    """Parse the comma-separated `table_col` option into a stripped list."""
    columns_ = config_.get("table_col")
    if columns_ is not None:
        columns = columns_.split(",")
    else:
        columns = []
    new_columns = []
    for column in columns:
        column = column.strip()
        new_columns.append(column)
    return new_columns


exclude_tables = exclude_tables_from_config(
    config.get_section("alembic:exclude")
)
exclude_table_cols = exclude_colum_from_config(
    config.get_section("alembic:exclude")
)


def include_object(object, name, type_, *args, **kwargs):
    """Alembic callback: include an object unless it is an excluded
    table or an excluded column."""
    ret_table = not (type_ == "table" and name in exclude_tables)
    ret_col = not (type_ == "column" and name in exclude_table_cols)
    return ret_table and ret_col


# Module-level side effect: builds the FastAPI app to read its DB config.
application = create_app(os.getenv("FASTAPI_CONFIG") or "default")
target_metadata = Base.metadata


def get_url():
    """Return the database URL from the application's config."""
    global application
    url = application.config["DATABASE_URI"]
    return url


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well.  By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    print("offline mode")
    global configure  # NOTE(review): `configure` is never assigned anywhere;
    # this statement is dead — presumably a leftover, confirm before removing.
    url = get_url()
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        include_object=include_object,
        dialect_opts={"paramstyle": "named"},
    )

    with context.begin_transaction():
        context.run_migrations()


def do_run_migrations(connection):
    """Sync body executed on the async connection via run_sync()."""
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        include_object=include_object,
    )

    with context.begin_transaction():
        context.run_migrations()


async def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    print("online mode")
    url = get_url()
    # Earlier engine_from_config variant, kept for reference:
    # connectable = AsyncEngine(
    #     engine_from_config(
    #         config,
    #         url=url,
    #         prefix="sqlalchemy.",
    #         poolclass=pool.NullPool,
    #         future=True,
    #     )
    # )
    connectable = create_async_engine(url, future=True, echo=True)

    async with connectable.connect() as connection:
        await connection.run_sync(do_run_migrations)


if __name__ == "__main__":
    # Direct script execution always runs online migrations.
    # run_migrations_offline()
    asyncio.run(run_migrations_online())
else:
    # Imported by the `alembic` CLI: pick mode from the command line flags.
    if context.is_offline_mode():
        run_migrations_offline()
    else:
        asyncio.run(run_migrations_online())

print("==== end alembic ==")
2.03125
2
Step3_Aggregation/aggregation_MachineLearning.py
Gaskell-1206/MSI_vs_MSS_Classification
0
12773264
# Run aggregation by machine learning based methods: combine per-tile
# MSI-probability predictions into one per-slide (WSI) prediction using
# PALHI (patch likelihood histogram + XGBoost) and BoW (bag of words +
# naive Bayes), then ensemble the two slide scores.
#
# Reference: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>J, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
# Development and interpretation of a pathomics-based model for the prediction of microsatellite instability in Colorectal Cancer. Theranostics 2020; 10(24):11080-11091.
# doi:10.7150/thno.49864. Available from http://www.thno.org/v10p11080.htm
# The source codes of the referenced paper available at https://github.com/yfzon/EPLA.
# This code was modified by <NAME> for our work.

# if run at google colab
# from google.colab import drive
# drive.mount('/content/drive')

import os
import gc
import time
import logging
import numpy as np
import pandas as pd
import argparse
from xgboost.sklearn import XGBClassifier
import joblib
import pandas as pd
import csv
from numpy import save
import joblib
import pickle as pkl
from sklearn.naive_bayes import MultinomialNB
import sklearn.feature_extraction.text as ft


# PALHI: patch likelihood histogram
def genPatientIdxDict(patient_ID):
    '''
    Map each unique slide/patient name to the array indices of its tiles.

    Returns (patient_idx_dict, unique_patient_idx) where
    patient_idx_dict[p] is the index array of p's tiles and
    unique_patient_idx holds the first-occurrence index of each patient.
    '''
    patient_idx_dict = {}
    unique_patient, unique_patient_idx = np.unique(patient_ID, return_index=True)
    for p in unique_patient:
        patient_idx_dict[p] = np.where(patient_ID == p)[0]
    return patient_idx_dict, unique_patient_idx


def loadLikelihood_test(llh_file):
    '''
    Read the per-tile likelihood CSV; expects columns
    slides, tiles, probability, target.
    llh_file: likelihood file
    '''
    llh_tbl = pd.read_csv(llh_file, header=0, index_col=None)
    test_llh_tbl = llh_tbl.sort_values(by=['slides'])
    logging.info('We have {:} patients'.format(len(np.unique(test_llh_tbl['slides']))))
    te_data = {'patient_ID': test_llh_tbl['slides'].values,
               'patch_name': test_llh_tbl['tiles'].values,
               'likelihood': test_llh_tbl['probability'].values,
               'true_label': test_llh_tbl['target'].values}
    return te_data


def genLikelihoodHist(likelihood, patient_ID, num_bin, norm_hist=False):
    '''
    Build one likelihood histogram per patient.

    likelihood: (num_patch, )
    patient_ID: (num_patch, )
    num_bin: equal-size bins over [0, 1] (outer edges are +/-inf so
             out-of-range values still land in the first/last bin)
    norm_hist: whether to normalize each hist
    return
    patient_hist: (num_unique_patient, num_bin)
    unique_patient_idx: (num_unique_patient, )
    '''
    bins = [-float('Inf')]
    bins.extend([i / num_bin for i in range(1, num_bin)])
    bins.append(float('Inf'))
    patient_idx_dict, unique_patient_idx = genPatientIdxDict(patient_ID)
    patient_hist = np.zeros((len(unique_patient_idx), num_bin))
    for i in range(len(unique_patient_idx)):
        idx = patient_idx_dict[patient_ID[unique_patient_idx[i]]]
        patient_hist[i, :] = np.histogram(likelihood[idx], bins=bins)[0]
        if norm_hist:
            patient_hist[i, :] = patient_hist[i, :] / np.sum(patient_hist[i, :])
    return patient_hist, unique_patient_idx


def genWsiDf_test(te_data, te_unique_patient_idx, te_pred_prob, te_pred_label):
    '''
    Assemble the per-slide result table.
    columns: Sample.ID, Patch.Num, WSI.Score, WSI.pred, TrueLabel
    '''
    wsi_score = np.array(te_pred_prob)
    wsi_pred = np.array(te_pred_label)
    sample_ID = np.array(te_data['patient_ID'][te_unique_patient_idx])
    true_label = np.array(te_data['true_label'][te_unique_patient_idx])
    te_patch_num = np.zeros(len(te_unique_patient_idx), dtype=int)
    for i in range(len(te_unique_patient_idx)):
        # count the tiles belonging to this slide
        idx = te_data['patient_ID'] == te_data['patient_ID'][te_unique_patient_idx[i]]
        te_patch_num[i] = np.sum(idx)
    patch_num = np.array(te_patch_num)
    wsi_pred_df = pd.DataFrame({'Sample.ID': sample_ID,
                                'Patch.Num': patch_num,
                                'WSI.Score': wsi_score,
                                'WSI.pred': wsi_pred,
                                'TrueLabel': true_label})
    return wsi_pred_df


def PALHI_inference(te_data, clf, num_bin=200, norm_hist=False):
    '''
    PAtch Likelihood HIstogram pipeline: histogram the tile likelihoods
    per slide, then classify each histogram with `clf`.
    te_data: dict with 'patient_ID', 'patch_name', 'likelihood', 'true_label'
    clf: fitted classifier with predict / predict_proba
    num_bin: histogram bins
    norm_hist: normalize each histogram to sum to 1
    '''
    te_patient_hist, te_unique_patient_idx = genLikelihoodHist(te_data['likelihood'], te_data['patient_ID'], num_bin, norm_hist)
    te_pred_label = clf.predict(te_patient_hist)
    te_pred_prob = clf.predict_proba(te_patient_hist)[:, 1]
    gc.collect()
    wsi_pred_df = genWsiDf_test(te_data, te_unique_patient_idx, te_pred_prob, te_pred_label)
    return wsi_pred_df


# BoW: bag of words
def genBoW(data, precision):
    '''
    Turn each slide's tile likelihoods into a "document" of rounded
    likelihood tokens.
    data is a dict with 'patient_ID', 'patch_name', 'likelihood'
    precision: number of decimals kept per likelihood token
    '''
    corpus_list = []
    sample_id_list = []
    patch_no_list = []
    patient_idx_dict, unique_patient_idx = genPatientIdxDict(data['patient_ID'])
    for i in range(len(unique_patient_idx)):
        pid = data['patient_ID'][unique_patient_idx[i]]
        llh = data['likelihood'][patient_idx_dict[pid]]
        llh = llh.tolist()
        words = ' '.join(["{0:.{1}f}".format(x, precision) for x in llh])
        wsi_patches = len(patient_idx_dict[pid])
        corpus_list.append(words)
        sample_id_list.append(pid)
        patch_no_list.append(wsi_patches)
    return unique_patient_idx, corpus_list, sample_id_list, patch_no_list


def BOW(te_data, cv, tf, precision, model):
    '''Bag-of-words pipeline: vectorize (cv), tf-idf (tf), classify (model).'''
    # generate corpus
    te_unique_patient_idx, corpus_te, sample_id_te, patch_no_te = genBoW(te_data, precision)
    test_tfmat = cv.transform(corpus_te)
    test_x = tf.transform(test_tfmat)
    logging.info("Results on testing set")
    te_pred_label = model.predict(test_x)
    te_pred_prob = model.predict_proba(test_x)[:, 1]
    wsi_pred_df = genWsiDf_test(te_data, te_unique_patient_idx, te_pred_prob, te_pred_label)
    return wsi_pred_df


# Visualization
import itertools
from sklearn.metrics import confusion_matrix, roc_curve, auc
import matplotlib.pyplot as plt


def cm(y_true, y_pred, name):
    '''
    calculate and draw confusion matrix
    '''
    cnf_matrix = confusion_matrix(y_true, y_pred)
    np.set_printoptions(precision=2)
    plt.figure()
    class_names = ['MSS', 'MSI']
    plt.imshow(cnf_matrix, interpolation='nearest', cmap=plt.cm.Blues)
    plt.colorbar()
    plt.title(name, fontsize=16)
    tick_marks = np.arange(len(class_names))
    plt.xticks(tick_marks, class_names, rotation=45)
    plt.yticks(tick_marks, class_names)
    fmt = 'd'
    thresh = cnf_matrix.max() / 2.
    # annotate each cell with its count; white text on dark cells
    for i, j in itertools.product(range(cnf_matrix.shape[0]), range(cnf_matrix.shape[1])):
        plt.text(j, i, format(cnf_matrix[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cnf_matrix[i, j] > thresh else "black",
                 fontsize=16)
    plt.tight_layout()
    plt.ylabel('True label', fontsize=16)
    plt.xlabel('Predicted label', fontsize=16)


def plot_roccurve(y_true, y_pred, name):
    '''
    calculate and plot ROC curve; returns (fpr, tpr, roc_auc)
    '''
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    roc_auc = auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, label="ROC curve (area = {0:0.2f})".format(roc_auc))
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.title(name)
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.legend(loc="lower right", fontsize=16)
    plt.show()
    return fpr, tpr, roc_auc


def accuracy(y_true, y_pred, name):
    '''
    calculate and print accuracy
    '''
    print('Accuracy', (name), ': ', len(np.where(y_true == y_pred)[0]) / len(y_true))


if __name__ == "__main__":
    # main: load tile predictions, run both aggregators, ensemble 50/50.
    filename = 'prediction'
    form = filename + '.csv'
    te_data = loadLikelihood_test(form)
    # PALHI
    clf = XGBClassifier()  # NOTE(review): immediately overwritten by the load below
    clf = joblib.load(os.path.join('palhi.model'))
    wsi_pred_df_palhi = PALHI_inference(te_data, clf)
    # BoW
    model = joblib.load(os.path.join('bow.model'))
    cv = ft.CountVectorizer(decode_error="replace", vocabulary=pkl.load(open('bow_feature.pkl', "rb")))
    tf = pkl.load(open('bow_tfidftransformer.pkl', "rb"))
    wsi_pred_df_bow = BOW(te_data, cv, tf, 3, model)
    # ensemble
    weights = [0.5, 0.5]
    youden_criterion = 0.5  # could be custom
    probability = weights[0] * wsi_pred_df_palhi['WSI.Score'] + weights[1] * wsi_pred_df_bow['WSI.Score']
    prediction = probability.apply(lambda x: 1 if x >= youden_criterion else 0)
    modelPredDFs = pd.DataFrame({'slides': wsi_pred_df_palhi['Sample.ID'],
                                 'target': wsi_pred_df_palhi['TrueLabel'],
                                 'prediction': prediction,
                                 'probability': probability})
    accuracy(modelPredDFs['target'], modelPredDFs['prediction'], filename + ' ensemble')
    cm(modelPredDFs['target'], modelPredDFs['prediction'], filename + ' ensemble')
    fpr, tpr, roc_auc = plot_roccurve(wsi_pred_df_palhi['TrueLabel'], wsi_pred_df_palhi['WSI.Score'], filename + ' PALHI')
2.515625
3
bevodevo/policies/params.py
riveSunder/bevodevo
4
12773265
from abc import ABC, abstractmethod
from collections import OrderedDict
from functools import reduce

import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F

import gym
import matplotlib.pyplot as plt


class Params():
    """
    Identity "policy" for direct parameter optimization.

    The parameter vector *is* the policy output: `forward` ignores the
    observation and simply returns the current parameters, so an
    evolutionary loop can optimize them directly.
    """

    def __init__(self, dim_in=7, dim_act=6):
        # dim_in is accepted for interface parity with other policies,
        # but this direct-parameter policy has no use for it.
        self.dim_act = dim_act
        self.init_params()

    def init_params(self):
        """Draw a fresh Gaussian parameter vector (scaled, shifted)."""
        self.num_params = self.dim_act
        self.params = np.random.randn(self.dim_act) / 3 - 1.75

    def forward(self, obs):
        """Return the current parameters; `obs` is deliberately ignored."""
        return self.get_params()

    def get_params(self):
        """Accessor for the raw parameter vector."""
        return self.params

    def set_params(self, params):
        """Replace the parameter vector; shape must be unchanged."""
        assert params.shape == self.params.shape
        self.params = params

    def reset(self):
        """No episodic state to clear for this policy."""
        pass


if __name__ == "__main__":
    # run tests
    print("OK")
2.84375
3
src/module/MF.py
for-review-56/jsai-201911
0
12773266
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Matrix Factorization which does not learns only UI-matrix but also attributes.
"""
import numpy as np
import sys
from . import util
'''
import numpy as np
import sys
from src.module import util
'''


class MF:
    def __init__(self, n_latent_factor=200, learning_rate=0.005,
                 regularization_weight=0.02, n_epochs=20,
                 global_bias=True, id_bias=True,
                 verbose=False, random_seed=None):
        """
        Collabolative Filtering so called Matrix Factorization.
        Arguments:
            - n_latent_factor [int]: number of latent dimensions
            - learning_rate [float]: learning rate
            - regularization_weight [float]: regularization parameter
            - global_bias [True/False]: set bias of global.
            - id_bias [True/False]: set bias of user_id, item_id.
            - n_epochs [int]: number of epoch of train(SGD)
            - random_seed [int]: random seed to set in np.random.seed()
        """
        # set random_seed
        # NOTE(review): truthiness check means random_seed=0 is silently
        # ignored — confirm whether `if random_seed is not None` was intended.
        if random_seed:
            np.random.seed(random_seed)

        self.n_latent_factor = n_latent_factor
        self.learning_rate = learning_rate
        self.regularization_weight = regularization_weight
        self.global_bias = global_bias
        self.id_bias = id_bias
        self.n_epochs = n_epochs
        self.verbose = verbose

    def fit(self, user_ids, item_ids, ratings,
            user_attributes=None, item_attributes=None):
        """
        Arguments:
            - user_ids [array-like-object]:
                the array of user id.
            - item_ids [array-like-object]:
                the array of item id.
            - ratings [array-like-object]:
                the array of rating.
            - user_attributes [dictinary]:
                dictinary which key is user_id and value is vector of user attributes.
                if None, doesn't train on the attributes.
                ex) {'user00' : [0,1,0], 'user01': [.5,0,.5]]}
            - item_attributes [dictinary]:
                dictinary which key is item_id and value is vector of item attributes.
                if None, doesn't train on the attributes.
                ex) {'item00' : [0,1,0], 'item01': [.5,0,.5]]}
        """
        # Set up before fit
        self._fit_setup(
            user_ids, item_ids, ratings,
            user_attributes, item_attributes
        )

        # Initialize coefficents of attributes.
        if user_attributes:
            self.a_u = np.zeros(self.n_dim_user_attributes, np.double)
        if item_attributes:
            self.a_i = np.zeros(self.n_dim_item_attributes, np.double)

        # Initialize the biases
        if self.global_bias:
            self.b = np.mean([r[2] for r in self.R])
        if self.id_bias:
            self.b_u = np.zeros(self.num_users, np.double)
            self.b_i = np.zeros(self.num_items, np.double)

        # Initialize user and item latent feature matrice
        if self.n_latent_factor:
            self.P = np.random.normal(0, scale=.1,
                                      size=(self.num_users, self.n_latent_factor))
            self.Q = np.random.normal(0, scale=.1,
                                      size=(self.num_items, self.n_latent_factor))

        # Perform stochastic gradient descent for number of iterations
        before_mse = sys.maxsize
        stop_cnt = 0
        for i in range(self.n_epochs):
            # update parametors
            self.sgd()
            mse = self.mse()
            if ((i+1) % 10 == 0) and self.verbose:
                print("Iteration: %d ; error(MAE) = %.4f ; learn_rate = %.4f ;"
                      % (i+1, mse, self.learning_rate))
            # if error improve rate is not enough, update self.learning_rate lower.
            mse_improve_rate = (before_mse-mse)/before_mse if before_mse>0 else 0
            if mse_improve_rate < 1e-8 :
                self.learning_rate *= 0.5
                stop_cnt += 1
            # if stop_cnt is more than a threshold, stop training.
            if stop_cnt > 10:
                break
            before_mse = mse

        return self

    def _fit_setup(self, user_ids, item_ids, ratings,
                   user_attributes, item_attributes):
        """
        transform user_ids and item_ids to the incremental index.
        Arguments example:
            user_ids = [1,1,2]
            item_ids = [0,5,0]
            ratings = [3,3,4]
            user_attributes = {1:[0,1,1], 2:[0,0,1]}
            item_attributes = {0:[0,1], 5:[1,1]}
        """
        # set id transformer (maps raw ids to 0..N-1 indices)
        user_ids_transformer = util.id_transformer()
        item_ids_transformer = util.id_transformer()
        transformed_user_ids = user_ids_transformer.fit_transform(user_ids)
        transformed_item_ids = item_ids_transformer.fit_transform(item_ids)
        self.user_ids_transformer = user_ids_transformer
        self.item_ids_transformer = item_ids_transformer

        # put parameters pn self
        self.R = [(u,i,r) for u,i,r in zip(transformed_user_ids, transformed_item_ids, ratings)]
        self.num_users, self.num_items = len(set(transformed_user_ids)), len(set(transformed_item_ids))

        # change attributes to numpy.array as UserAttribute, ItemAttribute
        if user_attributes:
            self.n_dim_user_attributes = len(list(user_attributes.values())[0])
            self.UserAttr = np.zeros(shape=[self.num_users, self.n_dim_user_attributes])
            for _id,attr in user_attributes.items():
                transformed_id = self.user_ids_transformer.transform([_id], unknown=None)[0]
                if transformed_id is not None:
                    self.UserAttr[transformed_id, :] = attr
            self.fit_user_attributes = True
        else:
            self.fit_user_attributes = False
        if item_attributes:
            self.n_dim_item_attributes = len(list(item_attributes.values())[0])
            self.ItemAttr = np.zeros(shape=[self.num_items, self.n_dim_item_attributes])
            for _id,attr in item_attributes.items():
                transformed_id = self.item_ids_transformer.transform([_id], unknown=None)[0]
                if transformed_id is not None:
                    self.ItemAttr[transformed_id, :] = attr
            self.fit_item_attributes = True
        else:
            self.fit_item_attributes = False

    def predict(self, user_ids, item_ids,
                user_attributes=dict(), item_attributes=dict()):
        """
        Arguments:
            user_ids [array-like object]:
                pass
            item_ids [array-like object]:
                pass
            user_attributes [dict]:
                pass
            item_attributes [dict]:
                pass
        """
        # check argument.
        # NOTE(review): `raise <str>` is a TypeError in Python 3 — these were
        # presumably meant to raise e.g. ValueError(...); confirm and fix.
        if (self.fit_user_attributes) and (user_attributes==dict()):
            raise 'This instance has be fitted using user_attributes, but no attributes in the arguments.'
        if (self.fit_item_attributes) and (item_attributes==dict()):
            raise 'This instance has be fitted using item_attributes, but no attributes in the arguments.'

        # predict (ids unseen at fit time transform to None and only
        # contribute the global bias inside _predict)
        results = []
        for u,i in zip(user_ids, item_ids):
            tf_u = self.user_ids_transformer.transform([u], unknown=None)[0]
            tf_i = self.item_ids_transformer.transform([i], unknown=None)[0]
            user_attr = user_attributes.get(u, None)
            if (user_attr is None) and (self.fit_user_attributes):
                user_attr = self.UserAttr[tf_u]
            item_attr = item_attributes.get(i, None)
            if (item_attr is None) and (self.fit_item_attributes):
                item_attr = self.ItemAttr[tf_i]
            results.append(self._predict(tf_u, tf_i, user_attr, item_attr))
        return np.array(results)

    def predict_high_speed_but_no_preprocess(self, user_ids, item_ids,
                                             user_attributes=None, item_attributes=None):
        """
        Don't Use it. This is for a specific use.
        Vectorized prediction without per-row preprocessing/validation.
            user_ids = [0,0,1,2,]
            item_ids = [2,4,4,4,]
        """
        tf_us = self.user_ids_transformer.transform(user_ids, unknown=None)
        tf_is = self.item_ids_transformer.transform(item_ids, unknown=None)

        a_u, a_i, pq = 0, 0, 0
        b = self.b
        # unknown ids index as 0 via the null-indexing helper
        b_u = util.numpy_null_indexing(self.b_u, tf_us, {None:0, np.nan:0})
        b_i = util.numpy_null_indexing(self.b_i, tf_is, {None:0, np.nan:0})
        if (self.fit_user_attributes) and (user_attributes is not None):
            attributes_array = np.array([user_attributes.get(u, self.UserAttr[tu]) for u,tu in zip(user_ids, tf_us)])
            a_u = (self.a_u * attributes_array).mean(axis=1)
        if (self.fit_item_attributes) and (item_attributes is not None):
            attributes_array = np.array([item_attributes.get(i, self.ItemAttr[ti]) for i,ti in zip(item_ids, tf_is)])
            a_i = (self.a_i * attributes_array).mean(axis=1)
        if self.n_latent_factor:
            pq = (self.P[tf_us, :] * self.Q[tf_is, :]).sum(axis=1)

        return b + b_u + b_i + a_u + a_i + pq

    def mse(self):
        """
        A function to compute the total mean square error
        NOTE(review): despite the name this returns sqrt(sum of squared
        errors) — an un-averaged RMSE-like quantity; confirm intent.
        """
        # NOTE(review): these three lists are built but never used.
        user_ids, item_ids, ratings = [], [], []
        for u,i,r in self.R:
            user_ids.append(u)
            item_ids.append(i)
            ratings.append(r)
        error = 0
        for u,i,r in self.R:
            user_attr = self.UserAttr[u] if self.fit_user_attributes else None
            item_attr = self.ItemAttr[i] if self.fit_item_attributes else None
            predicted = self._predict(u, i, user_attr, item_attr)
            error += pow(r - predicted, 2)
        return np.sqrt(error)

    def sgd(self):
        """
        Perform stochastic graident descent over all (u, i, r) triples.
        """
        for u,i,r in self.R:
            # Computer prediction and error
            user_attr = self.UserAttr[u] if self.fit_user_attributes else None
            item_attr = self.ItemAttr[i] if self.fit_item_attributes else None
            e = r - self._predict(u, i, user_attr, item_attr)

            # Update attribute coefficient
            # NOTE(review): `self.UserAttr[i]` indexes user attributes with
            # the ITEM index i (asymmetric with the item update below) and can
            # raise IndexError when i >= num_users — looks like a typo for
            # `self.UserAttr[u]`; confirm against the intended gradient.
            if self.fit_user_attributes:
                self.a_u += self.learning_rate * self.UserAttr[u] * self.UserAttr[i].mean() * (e - self.regularization_weight * self.a_u)
            if self.fit_item_attributes:
                self.a_i += self.learning_rate * self.ItemAttr[i] * self.ItemAttr[i].mean() * (e - self.regularization_weight * self.a_i)

            # Update biases
            if self.id_bias:
                self.b_u[u] += self.learning_rate * (e - self.regularization_weight * self.b_u[u])
                self.b_i[i] += self.learning_rate * (e - self.regularization_weight * self.b_i[i])

            # Update user and item latent feature matrices
            if self.n_latent_factor:
                self.P[u, :] += self.learning_rate * (e * self.Q[i, :] - self.regularization_weight * self.P[u,:])
                self.Q[i, :] += self.learning_rate * (e * self.P[u, :] - self.regularization_weight * self.Q[i,:])

    def _predict(self, u, i, user_attr=None, item_attr=None):
        """
        Get the predicted rating of user u and item i.
        user_attr [np.array] is vector of user attributes.
        item_attr [np.array] in vector of item attributes.
        A None index contributes no id-bias / latent-factor term.
        """
        prediction = 0
        # global bias
        if self.global_bias:
            prediction += self.b
        # user_id bias, item_id bias
        if self.id_bias:
            if u is not None:
                prediction += self.b_u[u]
            if i is not None:
                prediction += self.b_i[i]
        # attributes
        if (self.fit_user_attributes) and (user_attr is not None):
            prediction += (self.a_u * user_attr).mean()
        if (self.fit_item_attributes) and (item_attr is not None):
            prediction += (self.a_i * item_attr).mean()
        # latent factor
        if self.n_latent_factor:
            if (u is not None) and (i is not None):
                prediction += np.dot(self.P[u, :], self.Q[i, :].T)
        return prediction


# NOTE(review): this guard compares __name__ to 'some tests', which is never
# true — the block below is deliberately dead, kept as runnable-by-hand notes.
if __name__ == 'some tests':
    # Usage
    user_ids = [1,1,1,1,5,5,8,8]
    item_ids = [1,2,3,4,2,4,8,9]
    ratings = [5,5,4,4,3,3,2,2]
    out_sample_user_ids = [1,5,8,10,10]
    out_sample_item_ids = [8,1,1,1,10]

    #######################################
    # library Suprise と比較して同様の結果を出力できるかの確認。
    # (Check that we can reproduce the results of the Surprise library.)
    from src.module.MF import MF
    n_epochs = 10000
    mf = MF(n_latent_factor=1, learning_rate=0.005, regularization_weight=0.02, n_epochs=n_epochs, verbose=True)
    mf.fit(user_ids, item_ids, ratings, )
    # 新しいIDの推定確認 (check prediction for unseen ids)
    mf.predict([99,1,99], [1,99,99])

    # compair the result with the libraray 'Suprise'
    from surprise import SVD # SVD algorithm
    from src.Suprise_algo_wrapper import algo_wrapper
    svd = algo_wrapper(SVD(n_factors=1, lr_all=0.005, reg_all=0.02, n_epochs=n_epochs))
    svd.fit(user_ids, item_ids, ratings, )

    # 目視確認 (visual check)
    for me, sur in zip(mf.predict(user_ids, item_ids), svd.predict(user_ids, item_ids)):
        print(me, sur, '%.5f'%((me-sur)/sur))
    # 目視確認 (visual check)
    for me, sur in zip(mf.predict(out_sample_user_ids, out_sample_item_ids), svd.predict(out_sample_user_ids, out_sample_item_ids)):
        print(me, sur, '%.5f'%((me-sur)/sur))

    #######################################
    # 明らかにattributeの影響を受けているデータに対して、適切に学習ができるか?
    # (Can the model learn properly from data clearly driven by attributes?)
    import numpy as np
    n_sample = 1000
    user_ids = np.random.choice(range(10), size=n_sample)
    item_ids = np.random.choice(range(5), size=n_sample)
    user_attribute = {i:np.random.choice([0,1], size=2, replace=True) for i in range(10)}
    item_attribute = {i:np.random.choice([0,1], size=3, replace=True) for i in range(10)}
    answer_user_attr_coef = np.array([10, -5])
    answer_item_attr_coef = np.array([-3, 0, 8])
    rating_on_only_attribute = lambda u,i: (user_attribute[u]*answer_user_attr_coef).sum() + (item_attribute[i]*answer_item_attr_coef).sum()
    ratings = [rating_on_only_attribute(u,i) for u,i in zip(user_ids, item_ids)]

    from src.module.MF import MF
    mf = MF(n_latent_factor=0, learning_rate=0.010, regularization_weight=0.0, n_epochs=100,
            global_bias=False, id_bias=False, verbose=True)
    mf.fit(user_ids, item_ids, ratings, user_attribute, item_attribute)
    print(mf.a_u, answer_user_attr_coef)
    print(mf.a_i, answer_item_attr_coef)
    for p,a in zip(mf.predict(user_ids, item_ids, user_attribute, item_attribute), ratings):
        print(p,a,(p-a)/(abs(a)+0.00001))

    # predict_high_speed_but_no_preprocess のテスト
    # (test of predict_high_speed_but_no_preprocess)
    from src.module.MF import MF
    mf = MF(n_latent_factor=0, learning_rate=0.010, regularization_weight=0.0, n_epochs=30,
            global_bias=True, id_bias=True, verbose=True)
    mf.fit(user_ids, item_ids, ratings, user_attribute, item_attribute)
    high_speed = mf.predict_high_speed_but_no_preprocess(user_ids, item_ids, user_attribute, item_attribute)
    normal_speed = mf.predict(user_ids, item_ids, user_attribute, item_attribute)
    assert all(high_speed==normal_speed)
2.84375
3
seisflows3/tools/err.py
bch0w/seisflows3
0
12773267
#!/usr/bin/env python3
"""
Custom errors for Seisflows
"""


class ParameterError(ValueError):
    """
    A new ValueError class which explains the Parameter's that threw the error

    Accepted call signatures:
        ParameterError()          -> generic "Bad parameter."
        ParameterError(par)       -> names the offending parameter
        ParameterError(obj, par)  -> checks `par` against container `obj`;
                                     reports "not defined" or "bad value"
    """
    def __init__(self, *args):
        if len(args) == 0:
            msg = "Bad parameter."
        elif len(args) == 1:
            msg = f"Bad parameter: {args[0]}"
        elif args[1] not in args[0]:
            msg = f"{args[1]} is not defined."
        else:
            # Parameter exists but holds an unacceptable value.
            # Fix: the original branch tested `key in obj` with undefined
            # names (NameError) and swapped the two arguments in the message;
            # getattr() replaces the direct __getattr__ call.
            msg = f"{args[1]} has bad value: {getattr(args[0], args[1])}"
        super().__init__(msg)


class CheckError(ValueError):
    """
    An error called by the Check functions within each module, that returns
    the name of the class that raised the error, as well as the parameter
    in question.
    """
    def __init__(self, cls, par):
        """
        CheckError simply returns a print message
        """
        msg = f"{cls.__class__.__name__} requires parameter {par}"
        super().__init__(msg)
3.359375
3
CrossValidation.py
gamzeakkurt/NLP-DisasterTweets
1
12773268
from sklearn.model_selection import StratifiedKFold
import pandas as pd

# 10-fold stratified splitter; fixed random_state keeps splits reproducible.
skf = StratifiedKFold(n_splits=10, random_state=48, shuffle=True)


def CV(predictors, target):
    """Return (x_train, x_valid, y_train, y_valid) from a stratified split.

    predictors: feature DataFrame; target: label Series aligned with it.
    """
    # NOTE(review): the return inside the loop means only the FIRST of the
    # 10 folds is ever produced, and `fold` is unused — this degenerates to a
    # single stratified train/validation split. Confirm whether iterating all
    # folds (yielding each) was intended.
    for fold, (train_index, test_index) in enumerate(skf.split(predictors, target)):
        x_train, x_valid = pd.DataFrame(predictors.iloc[train_index]), pd.DataFrame(predictors.iloc[test_index])
        y_train, y_valid = target.iloc[train_index], target.iloc[test_index]
        return x_train, x_valid, y_train, y_valid
2.65625
3
workflows/api/tests.py
xflows/clowdflows-backend
4
12773269
from django.contrib.auth.models import User from rest_framework.reverse import reverse from rest_framework import status from rest_framework.test import APITestCase from workflows.models import Workflow, Widget, Input TEST_USERNAME = 'testuser' TEST_PASSWORD = '<PASSWORD>' # Test workflow ids TEST_WORKFLOW_USERS_PK = 2 TEST_WORKFLOW_OTHER_USER_PRIVATE_PK = 4 TEST_WORKFLOW_OTHER_USER_PUBLIC_PK = 6 TEST_OUTPUT_PK = 9 # Test widget ids TEST_WIDGET_USERS_PK = 6 TEST_WIDGET_OTHER_USER_PRIVATE_PK = 33 TEST_WIDGET_OTHER_USER_PUBLIC_PK = 34 # Test widget parameters TEST_PARAMETER_USERS_PK = 10 TEST_PARAMETER_OTHER_USER_PRIVATE_PK = 98 TEST_PARAMETER_OTHER_USER_PUBLIC_PK = 99 class BaseAPITestCase(APITestCase): fixtures = ['test_data_api', ] def _login(self): self.client.login(username=TEST_USERNAME, password=<PASSWORD>) def _logout(self): self.client.logout() def _test_multiple_response_codes(self, verb, urls, codes, data=None): for url, code in zip(urls, codes): response = verb(url, data) if data else verb(url) self.assertEqual(response.status_code, code) class SupportingAPITests(BaseAPITestCase): def test_register(self): url = reverse('user-create') response = self.client.post(url, { 'username': 'testuser3', 'password': '<PASSWORD>', 'email': '<EMAIL>' }) self.assertEqual(response.status_code, 200) self.assertEqual(User.objects.filter(username='testuser3').count(), 1) def test_login(self): url = reverse('token-create') response = self.client.post(url, { 'username': 'testuser', 'password': '123' }) self.assertEqual(response.status_code, 200) def test_logout(self): url = reverse('token-destroy') self._login() response = self.client.post(url) # HTTP_AUTHORIZATION="Token %s" % auth_token) self.assertEqual(response.status_code, 204) def test_widget_library(self): url = reverse('widget-library-list') # Test without authentication - this should fail response = self.client.get(url) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() response = 
self.client.get(url) self.assertEqual(response.status_code, status.HTTP_200_OK) self._logout() class WorkflowAPITests(BaseAPITestCase): def test_create(self): url = reverse('workflow-list') workflow_data = { 'name': 'Untitled workflow', 'is_public': False, 'description': '', 'widget': None, 'template_parent': None } # Test without authentication - this should not be allowed response = self.client.post(url, workflow_data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() response = self.client.post(url, workflow_data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self._logout() def test_patch(self): url = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_USERS_PK}) url_other_user_private = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK}) url_other_user_public = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK}) workflowData = { 'name': 'Test workflow', 'is_public': True, 'description': 'Test description' } # Test without authentication - this should not be allowed response = self.client.patch(url, workflowData) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() response = self.client.patch(url, workflowData) updated_workflow = response.data self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(updated_workflow['name'], 'Test workflow') self.assertEqual(updated_workflow['is_public'], True) self.assertEqual(updated_workflow['description'], 'Test description') # Try to patch self._test_multiple_response_codes( self.client.patch, [url_other_user_private, url_other_user_public], [status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN], data=workflowData ) self._logout() def test_delete(self): url = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_USERS_PK}) url_other_user_private = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK}) url_other_user_public = 
reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK}) # Test without authentication - this should not be allowed response = self.client.delete(url) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() self._test_multiple_response_codes( self.client.delete, [url, url_other_user_private, url_other_user_public], [status.HTTP_204_NO_CONTENT, status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN] ) self._logout() def test_reset(self): url = reverse('workflow-reset', kwargs={'pk': TEST_WORKFLOW_USERS_PK}) url_other_user_private = reverse('workflow-reset', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK}) url_other_user_public = reverse('workflow-reset', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK}) # Test without authentication - this should not be allowed response = self.client.post(url) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() response = self.client.post(url, format="json") data = response.json() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(data['status'], 'ok') workflow = Workflow.objects.get(pk=TEST_WORKFLOW_USERS_PK) for widget in workflow.widgets.all(): self.assertEqual(widget.finished, False) self.assertEqual(widget.error, False) self.assertEqual(widget.running, False) self._test_multiple_response_codes( self.client.post, [url_other_user_private, url_other_user_public], [status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN] ) self._logout() def test_run(self): url = reverse('workflow-run', kwargs={'pk': TEST_WORKFLOW_USERS_PK}) url_other_user_private = reverse('workflow-run', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK}) url_other_user_public = reverse('workflow-run', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK}) # Test without authentication - this should not be allowed response = self.client.post(url) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() response = self.client.post(url, 
format="json") # data = response.json() self.assertEqual(response.status_code, status.HTTP_200_OK) # self.assertEqual(data['status'], 'ok') workflow = Workflow.objects.get(pk=TEST_WORKFLOW_USERS_PK) for widget in workflow.widgets.all(): self.assertEqual(widget.finished, True) self._test_multiple_response_codes( self.client.post, [url_other_user_private, url_other_user_public], [status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN] ) self._logout() def test_subprocess(self): url = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_USERS_PK}) url_other_user_private = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK}) url_other_user_public = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK}) # Test without authentication - this should not be allowed response = self.client.post(url) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() response = self.client.post(url, format="json") widget = response.json() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(widget['type'], 'subprocess') self._test_multiple_response_codes( self.client.post, [url_other_user_private, url_other_user_public], [status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN] ) # Get subprocess workflow object subprocess_workflow = Widget.objects.get(pk=widget['id']).workflow_link # Test adding input url = reverse('workflow-subprocess-input', kwargs={'pk': subprocess_workflow.pk}) response = self.client.post(url) widget = response.json() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(widget['type'], 'input') # Test adding output url = reverse('workflow-subprocess-output', kwargs={'pk': subprocess_workflow.pk}) response = self.client.post(url) widget = response.json() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(widget['type'], 'output') self._logout() def test_subprocess_forloop(self): url = reverse('workflow-subprocess', 
kwargs={'pk': TEST_WORKFLOW_USERS_PK}) self._login() # First add a subprocess response = self.client.post(url) widget = response.json() subprocess_workflow = Widget.objects.get(pk=widget['id']).workflow_link # Test adding for loop widgets url = reverse('workflow-subprocess-forloop', kwargs={'pk': subprocess_workflow.pk}) response = self.client.post(url) data = response.json() self.assertNotIn('status', data) widget_types = {w['type'] for w in data} self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertSetEqual(widget_types, {'for_input', 'for_output'}) self._logout() def test_subprocess_xvalidation(self): url = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_USERS_PK}) self._login() # First add a subprocess response = self.client.post(url) data = response.json() self.assertNotIn('status', data) subprocess_workflow = Widget.objects.get(pk=data['id']).workflow_link # Test adding cross validation widgets url = reverse('workflow-subprocess-xvalidation', kwargs={'pk': subprocess_workflow.pk}) response = self.client.post(url) widgets = response.json() widget_types = {w['type'] for w in widgets} self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertSetEqual(widget_types, {'cv_input', 'cv_output'}) self._logout() class WidgetAPITests(BaseAPITestCase): def test_fetch_value(self): url = reverse('output-value', kwargs={'pk': TEST_OUTPUT_PK}) self._login() response = self.client.get(url) data = response.json() self.assertEqual(data['value'], '5') def test_create(self): url = reverse('widget-list') workflow_url = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_USERS_PK}) workflow_url_private = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK}) workflow_url_public = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK}) widget_data = { 'workflow': workflow_url, 'x': 50, 'y': 50, 'name': '<NAME>', 'abstract_widget': 3, # Multiply integers abstract widget 'finished': False, 
'error': False, 'running': False, 'interaction_waiting': False, 'type': 'regular', 'progress': 0 } # Test without authentication - this should not be allowed response = self.client.post(url, widget_data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() response = self.client.post(url, widget_data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) # Test on other user's workflows widget_data['workflow'] = workflow_url_private response = self.client.post(url, widget_data) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) widget_data['workflow'] = workflow_url_public response = self.client.post(url, widget_data) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self._logout() def test_patch(self): widget_url = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_USERS_PK}) widget_url_private = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK}) widget_url_public = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK}) widget_data = { 'x': 12, 'y': 34, 'name': '<NAME>' } # Test without authentication - this should not be allowed response = self.client.patch(widget_url, widget_data) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() response = self.client.patch(widget_url, widget_data) self.assertEqual(response.status_code, status.HTTP_200_OK) widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK) self.assertEqual(widget.x, widget_data['x']) self.assertEqual(widget.y, widget_data['y']) self.assertEqual(widget.name, widget_data['name']) # Test on other user's widgets response = self.client.patch(widget_url_private, widget_data) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) response = self.client.patch(widget_url_public, widget_data) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self._logout() def test_reset(self): widget_url = reverse('widget-reset', kwargs={'pk': 
TEST_WIDGET_USERS_PK}) widget_url_private = reverse('widget-reset', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK}) widget_url_public = reverse('widget-reset', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK}) # Test without authentication - this should not be allowed response = self.client.post(widget_url) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() response = self.client.post(widget_url) self.assertEqual(response.status_code, status.HTTP_200_OK) widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK) self.assertEqual(widget.finished, False) # Test on other user's widgets response = self.client.post(widget_url_private) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) response = self.client.post(widget_url_public) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self._logout() def test_run(self): widget_url = reverse('widget-run', kwargs={'pk': TEST_WIDGET_USERS_PK}) widget_reset_url = reverse('widget-reset', kwargs={'pk': TEST_WIDGET_USERS_PK}) widget_url_private = reverse('widget-run', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK}) widget_url_public = reverse('widget-run', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK}) # Test without authentication - this should not be allowed response = self.client.post(widget_url) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() # First reset the widget response = self.client.post(widget_reset_url) self.assertEqual(response.status_code, status.HTTP_200_OK) widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK) self.assertEqual(widget.finished, False) # .. 
then run response = self.client.post(widget_url) self.assertEqual(response.status_code, status.HTTP_200_OK) widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK) self.assertEqual(widget.finished, True) # Test on other user's widgets response = self.client.post(widget_url_private) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) response = self.client.post(widget_url_public) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self._logout() def test_delete(self): widget_url = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_USERS_PK}) widget_url_private = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK}) widget_url_public = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK}) # Test without authentication - this should not be allowed response = self.client.delete(widget_url) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) self._login() response = self.client.delete(widget_url) self.assertEqual(response.status_code, status.HTTP_200_OK) widget_count = Widget.objects.filter(pk=TEST_WIDGET_USERS_PK).count() self.assertEqual(widget_count, 0) # Test on other user's widgets response = self.client.delete(widget_url_private) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) response = self.client.delete(widget_url_public) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self._logout() def test_save_parameters(self): widget_url = reverse('widget-save-parameters', kwargs={'pk': TEST_WIDGET_USERS_PK}) widget_url_private = reverse('widget-save-parameters', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK}) widget_url_public = reverse('widget-save-parameters', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK}) parameters = [{ 'id': TEST_PARAMETER_USERS_PK, 'value': '42' }] # Test without authentication - this should not be allowed response = self.client.patch(widget_url, parameters) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) 
self._login() response = self.client.patch(widget_url, parameters) self.assertEqual(response.status_code, status.HTTP_200_OK) parameter = Input.objects.get(pk=TEST_PARAMETER_USERS_PK) self.assertEqual(parameter.value, '42') # Test on other user's widgets parameters[0]['id'] = TEST_PARAMETER_OTHER_USER_PRIVATE_PK response = self.client.patch(widget_url_private, parameters) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) parameters[0]['id'] = TEST_PARAMETER_OTHER_USER_PUBLIC_PK response = self.client.patch(widget_url_public, parameters) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self._logout()
2.28125
2
app.py
clebertsilva28/desafio9
13
12773270
<reponame>clebertsilva28/desafio9<filename>app.py from flask import Flask, render_template, request, json, jsonify import os import json import numpy as np import io from PIL import Image app = Flask(__name__) app.config.from_object(__name__) port = int(os.getenv('PORT', 8080)) @app.route("/", methods=['GET']) def hello(): error=None return render_template('index.html', error=error) @app.route("/iot", methods=['GET']) def result(): print(request) # Implemente sua lógica aqui e insira as respostas na variável 'resposta' resposta = { "iotData": "data", "itu": "data", "volumeAgua": "data", "fahrenheit": "data" } response = app.response_class( response=json.dumps(resposta), status=200, mimetype='application/json' ) return response def prepare_image(image): image = image.resize(size=(96,96)) image = np.array(image, dtype="float") / 255.0 image = np.expand_dims(image,axis=0) image = image.tolist() return image @app.route('/predict', methods=['POST']) def predict(): print(request) image = request.files["image"].read() image = Image.open(io.BytesIO(image)) image = prepare_image(image) # Faça uma requisição para o serviço Watson Machine Learning aqui e retorne a classe detectada na variável 'resposta' resposta = { "class": "data" } return resposta if __name__ == '__main__': app.run(host='0.0.0.0', port=port)
3.015625
3
pharedox/image_processing.py
omarvaneer/pharynx_redox
2
12773271
<reponame>omarvaneer/pharynx_redox<gh_stars>1-10 import logging from typing import Union, Tuple import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import numpy.ma as ma import pandas as pd import SimpleITK as sITK import xarray as xr from numpy.polynomial.polynomial import Polynomial from scipy import ndimage as ndi from scipy.stats import norm, zscore from skimage import io, measure, transform from skimage.measure import label, regionprops from skimage.transform import AffineTransform, warp def measure_under_labels( imgs: xr.DataArray, masks: xr.DataArray, ref_wvl: str = "410", ratio_numerator="410", ratio_denominator="470", ): """Measure the intensities of each channel under the label image""" df = [] imgs = imgs.where(imgs.wavelength != "TL", drop=True) for a in imgs.animal.values: for tp in imgs.timepoint.values: for p in imgs.pair.values: for wvl in imgs.wavelength.values: img_selector = dict(animal=a, timepoint=tp, pair=p, wavelength=wvl) if "wavelength" in masks.dims: seg_frame = masks.sel( animal=a, timepoint=tp, pair=p, wavelength=ref_wvl ) else: # single wavelength was passed seg_frame = masks.sel(animal=a, timepoint=tp, pair=p) labels = measure.label(seg_frame) sub_df = pd.DataFrame( measure.regionprops_table( labels, intensity_image=imgs.sel(**img_selector).values, properties=["label", "mean_intensity", "area"], ) ) sub_df["animal"] = a sub_df["timepoint"] = tp sub_df["pair"] = p sub_df["wavelength"] = wvl sub_df["strain"] = imgs.sel(**img_selector).strain.values df.append(sub_df) df = pd.concat(df) df = df.set_index(["animal", "timepoint", "pair", "wavelength", "label"]).unstack( "wavelength" ) df[("mean_intensity", "r")] = ( df["mean_intensity"][ratio_numerator] / df["mean_intensity"][ratio_denominator] ) df[("area", "r")] = df[("area", ratio_numerator)] df[("strain", "r")] = df[("strain", ratio_numerator)] df = df.stack("wavelength") return df def subtract_medians( data: xr.DataArray, image_data: xr.DataArray = None ) -> 
xr.DataArray: """ Subtract medians from data, optionally calculating them from a separate piece of data. Parameters ---------- data the data to subtract the median from. image_data the data to calculate the median with. Must include the dimensions `x` and `y`. If specified, all other dimensions must be identical to those in `data`. If not specified, the medians will be calculated with `data`. """ if image_data is None: image_data = data submed = data.copy() submed.values = np.maximum(data - image_data.median(dim=["x", "y"]), 0) return submed def get_lr_bounds( rot_seg_stack: xr.DataArray, pad: int = 0, ref_wvl: str = "410", ref_pair: int = 0 ) -> np.ndarray: """ Get the left and right boundaries of the rotated pharynxes Parameters ---------- rot_seg_stack the rotated segmented pharynxes pad the amount of padding on the left/right of the bounds ref_wvl the wavelength to use for calculating bounds ref_pair the pair to use for calculating bounds Returns ------- bounds An (m, 2) array where m = number of animals, the first column is the left bound and the second column is the right bound """ imgs = rot_seg_stack.sel(wavelength=ref_wvl, pair=ref_pair) bounds = np.zeros((imgs.animal.size, 2)) # (animal, (l, r)) for i, img in enumerate(imgs): _, l, _, r = measure.regionprops(measure.label(img))[0].bbox bounds[i, :] = [l - pad, r + pad - 1] return bounds.astype(np.int) def center_and_rotate_pharynxes( fl_images: xr.DataArray, seg_images: xr.DataArray ) -> Tuple[xr.DataArray, xr.DataArray]: """ Given a fluorescence stack and a pharyngeal mask stack, center and rotate each frame of both the FL and mask such that the pharynx is in the center of the image, with its anterior on the left. 
Parameters ---------- fl_images The fluorescence images to rotate and align seg_images The segmented images to rotate and align Returns ------- (rotated_fl_stack, rotated_seg_stack) A 2-tuple where the first item is the rotated fluorescence stack and the second is the rotated mask stack """ img_center_y, img_center_x = ( fl_images.y.size // 2, fl_images.x.size // 2, ) fl_rotated_stack = fl_images.copy() seg_rotated_stack = seg_images.copy() # STACK_ITERATION for img_idx in range(fl_images.animal.size): for wvl in fl_images.wavelength.data: for pair in fl_images.pair.data: for tp in fl_images.timepoint.values: # Optimization potential here... # this recalculates all region properties for the reference each time img = fl_images.isel(animal=img_idx).sel( wavelength=wvl, pair=pair, timepoint=tp ) try: # Old data, had wavelength attached ref_seg = seg_images.isel(animal=img_idx, wavelength=0).sel( pair=pair, timepoint=tp ) except ValueError: ref_seg = seg_images.isel(animal=img_idx).sel( pair=pair, timepoint=tp ) try: props = measure.regionprops(measure.label(ref_seg))[0] except IndexError: raise ValueError( f"No binary objects found in image @ [idx={img_idx} ; wvl={wvl} ; pair={pair}]" ) # pharynx_center_y, pharynx_center_x = props.centroid pharynx_center_y, pharynx_center_x = np.mean( np.nonzero(ref_seg), axis=1 ) pharynx_orientation = props.orientation translation_matrix = transform.EuclideanTransform( translation=( -(img_center_x - pharynx_center_x), -(img_center_y - pharynx_center_y), ) ) rotated_img = rotate( img.data, translation_matrix, pharynx_orientation ) rotated_seg = rotate( ref_seg.data, translation_matrix, pharynx_orientation, order=0, preserve_range=True, ) fl_rotated_stack.loc[dict(wavelength=wvl, pair=pair, timepoint=tp)][ img_idx ] = rotated_img seg_rotated_stack.loc[dict(pair=pair, timepoint=tp)][ img_idx ] = rotated_seg fl_rotated_stack.values = fl_rotated_stack.values.astype(fl_images.dtype) return fl_rotated_stack, seg_rotated_stack def 
extract_largest_binary_object( bin_img: Union[xr.DataArray, np.ndarray] ) -> Union[xr.DataArray, np.ndarray]: """ Extracts the largest binary object from the given binary image Parameters ---------- bin_img The binary image to process Returns ------- bin_img The binary image containing only the largest binary object from the input """ labels = measure.label(bin_img) if labels.max() == 0: # If there are no objects in the image... simply return the image return bin_img return labels == np.argmax(np.bincount(labels.flat)[1:]) + 1 def get_area_of_largest_object(mask: np.ndarray) -> int: """Returns the area (px) of the largest object in a binary image Parameters ---------- mask : np.ndarray the binary image Returns ------- int the area of the largest object """ try: return measure.regionprops(measure.label(mask))[0].area except IndexError: return 0 def segment_pharynx( fl_img: xr.DataArray, target_area: int = 450, area_range: int = 100 ) -> xr.DataArray: """Generate a mask for the given image containing a pharynx. Parameters ---------- fl_img : xr.DataArray a fluorescent image containing a single pharynx target_area : int, optional the presumptive area (in px) of a pharynx, by default 450 area_range : int, optional the acceptable range (in px) above/below the target_area, by default 100 Returns ------- xr.DataArray an image containing the segmented pharynx (dtype: np.uint8). Pixels of value=1 indicate the pharynx, pixels of value=0 indicate the background. 
""" # target_area = 450 # experimentally derived # area_range = 100 min_area = target_area - area_range max_area = target_area + area_range max_iter = 300 p = 0.15 t = fl_img.max() * p mask = fl_img > t area = get_area_of_largest_object(mask) i = 0 while (min_area > area) or (area > max_area): if i >= max_iter: return mask area = get_area_of_largest_object(mask) logging.debug(f"Setting p={p}") if area > max_area: p = p + 0.01 if area < min_area: p = p - 0.01 i = i + 1 t = fl_img.max() * p mask = fl_img > t if p < 0: # break out if loop gets stuck w/ sensible default logging.warning("Caught infinite loop") return fl_img > (fl_img.max() * 0.15) if p > 0.9: logging.warning("Caught infinite loop") return fl_img > (fl_img.max() * 0.15) mask = extract_largest_binary_object(mask).astype(np.uint8) return mask def segment_pharynxes( fl_stack: xr.DataArray, wvl: str = "410", target_area: int = 450, area_range: int = 100, ) -> xr.DataArray: """Segment a hyperstack of pharynxes Parameters ---------- fl_stack : xr.DataArray the fluorescent images to segment wvl : str, optional the wavelength to segment, by default "410" target_area : int, optional the presumptive area of a pharynx, in pixels, by default 450 area_range : int, optional the acceptable range of pharyngeal areas, by default 100 Returns ------- xr.DataArray the masks for the specified wavelength """ to_segment = fl_stack.sel(wavelength=wvl) seg = xr.apply_ufunc( segment_pharynx, to_segment, input_core_dims=[["y", "x"]], output_core_dims=[["y", "x"]], vectorize=True, kwargs={"target_area": target_area, "area_range": area_range}, ) return seg def rotate( img: Union[np.ndarray, xr.DataArray], tform, orientation, order=1, preserve_range=True, ): """ Rotate the given image with the given translation matrix and orientation angle Parameters ---------- img the image to rotate tform the translation matrix to apply orientation the angle of orientation (radians) order the order of the interpolation preserve_range preserve the 
input data range Returns ------- rotated the translated and rotated image """ return transform.rotate( transform.warp( img, tform, preserve_range=preserve_range, mode="wrap", order=order ), np.degrees(np.pi / 2 - orientation), mode="edge", order=order, ) def calculate_midlines(rot_seg_stack: xr.DataArray, degree: int = 4) -> xr.DataArray: """ Calculate a midline for each animal in the given stack Parameters ---------- rot_seg_stack The rotated mask with which midlines should be calculated. degree The degree of the polynomial fit Returns ------- midlines a DataArray containing the midline objects See Also -------- calculate_midline """ return xr.apply_ufunc( calculate_midline, rot_seg_stack, input_core_dims=[["y", "x"]], vectorize=True, keep_attrs=True, kwargs={"degree": degree}, ) def calculate_midline( rot_seg_img: Union[np.ndarray, xr.DataArray], degree: int = 4, pad: int = 10 ) -> Polynomial: """ Calculate a the midline for a single image by fitting a polynomial to the segmented pharynx Parameters ---------- rot_seg_img: Union[np.ndarray, xr.DataArray] The rotated masked pharynx image degree the degree of the polynomial pad the number of pixels to "pad" the domain of the midline with respect to the boundaries of the segmentation mask Returns ------- Polynomial the estimated midline Notes ----- Right now this only works for images that this have been centered and aligned with their anterior-posterior along the horizontal. 
""" import warnings with warnings.catch_warnings(): warnings.simplefilter("ignore") try: rp = measure.regionprops(measure.label(rot_seg_img))[0] xs, ys = rp.coords[:, 1], rp.coords[:, 0] left_bound, _, _, right_bound = rp.bbox return Polynomial.fit( xs, ys, degree, domain=[left_bound - pad, right_bound + pad] ) except IndexError: # Indicates trying to measure on TL for example return None def measure_under_midline( fl: xr.DataArray, mid: Polynomial, n_points: int = 100, thickness: float = 0.0, order=1, norm_scale=1, flatten=True, ) -> np.ndarray: """ Measure the intensity profile of the given image under the given midline at the given x-coordinates. Parameters ---------- flatten norm_scale order the interpolation order fl The fluorescence image to measure mid The midline under which to measure n_points The number of points to measure under thickness The thickness of the line to measure under. Notes ----- Using thickness is slower, depending on the amount of thickness On my machine (2GHz Intel Core i5), as of 12/4/19: 0-thickness: 492 µs ± 16.6 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each) 2-thickness: 1.99 ms ± 65.8 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) 10-thickness: 3.89 ms ± 92.1 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) Returns ------- zs: np.ndarray The intensity profile of the image measured under the midline at the given x-coordinates. """ # Make sure the image orientation matches with the expected order of map_coordinates try: if thickness == 0: xs, ys = mid.linspace(n=n_points) fl = np.asarray(fl) return ndi.map_coordinates(fl, np.stack([xs, ys]), order=order) else: # Gets a bit wonky, but makes sense # We need to get the normal lines from each point in the midline # then measure under those lines. 
# First, get the coordinates of the midline xs, ys = mid.linspace(n=n_points) # Now, we get the angles of each normal vector der = mid.deriv() normal_slopes = -1 / der(xs) normal_thetas = np.arctan(normal_slopes) # We get the x and y components of the start/end of the normal vectors mag = thickness / 2 x0 = np.cos(normal_thetas) * mag y0 = np.sin(normal_thetas) * mag x1 = np.cos(normal_thetas) * -mag y1 = np.sin(normal_thetas) * -mag # These are the actual coordinates of the starts/ends of the normal vectors as they move # from (x,y) coordinates in the midline xs0 = xs + x0 xs1 = xs + x1 ys0 = ys + y0 ys1 = ys + y1 # We need to measure in a consistent direction along the normal line # if y0 < y1, we're going to be measuring in an opposite direction along the line... so we need flip the coordinates for y0, y1, x0, x1, i in zip(ys0, ys1, xs0, xs1, range(len(xs0))): if y0 < y1: tx = xs0[i] xs0[i] = xs1[i] xs1[i] = tx ty = ys0[i] ys0[i] = ys1[i] ys1[i] = ty n_line_pts = thickness all_xs = np.linspace(xs0, xs1, n_line_pts) all_ys = np.linspace(ys0, ys1, n_line_pts) straightened = ndi.map_coordinates(fl, [all_xs, all_ys], order=order) if flatten: # Create a normal distribution centered around 0 with the given scale (see scipy.norm.pdf) # the distribution is then tiled to be the same shape as the straightened pharynx # then, this resultant matrix is the weights for averaging w = np.tile( norm.pdf(np.linspace(-1, 1, n_line_pts), scale=norm_scale), (n_points, 1), ).T profile = np.average(straightened, axis=0, weights=w) return profile else: return straightened except AttributeError: # This happens if the image is TL. 
Then it will have `None` instead of # a midline object pass except Exception as e: # Here, something actually went wrong logging.warning(f"measuring under midline failed with error {e}") return np.zeros((1, n_points)) def measure_under_midlines( fl_stack: xr.DataArray, midlines: xr.DataArray, n_points: int = 300, order=1, thickness=0, ) -> xr.DataArray: """ Measure under all midlines in stack Parameters ---------- order fl_stack The fluorescence stack under which to measure midlines: dict A DataArray containing the midlines n_points: int the number of points to sample under the midline thickness: float the thickness of the midline to measure under Returns ------- profile_data: xr.DataArray the intensity profiles for each image in the stack """ measurements = xr.apply_ufunc( measure_under_midline, fl_stack, midlines, input_core_dims=[["x", "y"], []], output_core_dims=[["position"]], vectorize=True, keep_attrs=True, kwargs={ "n_points": n_points, "thickness": thickness, "order": order, "flatten": True, }, ) measurements = measurements.assign_coords( {"position": np.linspace(0, 1, measurements.position.size)}, ) try: measurements = measurements.assign_coords(time=fl_stack.time) except AttributeError: pass return measurements def shift(image: np.ndarray, vector: np.ndarray) -> np.ndarray: """ Translate the image according to the given movement vector Parameters ---------- image the image to translate vector : translation parameters ``(dx, dy)`` Returns ------- img: np.ndarray the translated image """ tform = AffineTransform(translation=vector) shifted = warp(image, tform, mode="wrap", preserve_range=True) shifted = shifted.astype(image.dtype) return shifted def normalize_images_by_wvl_pair( fl_imgs: xr.DataArray, profiles: xr.DataArray, percent_to_clip: float = 2.0 ): """ Normalize images by subtracting mean profile then min-max rescaling to [0, 1] Parameters ---------- fl_imgs the images to normalize profiles the intensity profiles corresponding to the images 
percent_to_clip how much to clip the profile when calculating mean/min/max, expressed as a percentage of the length of the profile Returns ------- xr.DataArray the normalized images """ idx_to_clip = int(profiles.shape[-1] * percent_to_clip / 100) profiles = profiles[:, idx_to_clip:-idx_to_clip] norm_fl = fl_imgs.copy().astype(np.float) for pair in fl_imgs.pair: for wvl in fl_imgs.wavelength.values: if wvl not in profiles.wavelength.values: continue for animal in range(fl_imgs.animal.size): prof = profiles.sel(wavelength=wvl, pair=pair).isel(animal=animal) img = fl_imgs.sel(wavelength=wvl, pair=pair)[animal].astype(np.float) # First, center according to mean img = img - prof.mean() # Then rescale to [0, 1] img = (img - prof.min()) / (prof.max() - prof.min()) norm_fl.loc[dict(wavelength=wvl, pair=pair)][animal] = img return norm_fl def normalize_images_single_wvl( fl_imgs: Union[np.ndarray, xr.DataArray], profiles: Union[np.ndarray, xr.DataArray], percent_to_clip: float = 2.0, ) -> Union[np.ndarray, xr.DataArray]: """ Normalize single wavelength image stack by subtracting the mean of the corresponding intensity profile, then min-max rescaling to [0, 1] Parameters ---------- fl_imgs an array-like structure of shape (frame, row, col) profiles an array-like structure of shape (frame, position_along_midline) percent_to_clip how much to clip the profile when calculating mean/min/max, expressed as a percentage of the length of the profile Returns ------- Union[np.ndarray, xr.DataArray] normalized images """ if fl_imgs.ndim != 3: raise ValueError("images must have shape (frame, row, col)") if profiles.ndim != 2: raise ValueError("profiles must have shape (frame, position_along_midline)") normed_imgs = fl_imgs.copy().astype(np.float32) idx_to_clip = int(profiles.shape[-1] * percent_to_clip / 100) profiles = profiles[:, idx_to_clip:-idx_to_clip] prof_means = np.mean(profiles, axis=1) profiles = profiles - prof_means normed_imgs = normed_imgs - prof_means prof_mins = 
np.min(profiles, axis=1) prof_maxs = np.max(profiles, axis=1) normed_imgs = (normed_imgs - prof_mins) / (prof_maxs - prof_mins) return normed_imgs def z_normalize_with_masks(imgs, masks): """ Perform z-normalization [0] on the entire image (relative to the content within the masks). That is to say, we center the pixels (within the mask) such that their mean is 0, and ensure their standard deviation is ~1. This allows us to see spatial patterns within the masked region (even if pixels outside of the masked region fall very far above or below those inside) by setting the colormap center around 0. [0] - https://jmotif.github.io/sax-vsm_site/morea/algorithm/znorm.html """ masked = ma.masked_array(imgs, np.logical_not(masks)) mu = np.mean(masked, axis=(-2, -1), keepdims=True) sigma = np.std(masked, axis=(-2, -1), keepdims=True) return (imgs - mu) / sigma def create_normed_rgb_ratio_stack( r_imgs, seg_imgs, vmin=-7, vmax=7, cmap="coolwarm", output_filename=None ): """ Z-normalize the images (relative to the masks), then transform them into RGB with the given colormap """ r_znormed = z_normalize_with_masks(r_imgs, seg_imgs) # noinspection PyUnresolvedReferences normalizer = mpl.colors.Normalize(vmin=vmin, vmax=vmax) if isinstance(cmap, str): cmap = plt.get_cmap(cmap) # TODO generalize dtype? 
for now, 32-bit only rgb_img = cmap(normalizer(r_znormed))[:, :, :, :3].astype(np.float16) if output_filename is not None: io.imsave(output_filename, rgb_img) return rgb_img def get_bbox(m, pad=5): try: y_min, x_min, y_max, x_max = np.array(regionprops(label(m))[0].bbox) y_min = max(int(y_min - (pad / 2)), 0) x_min = max(int(x_min - (pad / 2)), 0) y_max = min(int(y_max + (pad / 2)), m.shape[0]) x_max = min(int(x_max + (pad / 2)), m.shape[1]) return np.array([y_min, x_min, y_max, x_max]).astype(np.float) except IndexError: return [np.nan, np.nan, np.nan, np.nan] def bspline_intra_modal_registration( fixed_image, moving_image, fixed_image_mask=None, point_width=5.0, ): registration_method = sITK.ImageRegistrationMethod() # Determine the number of BSpline control points using the physical spacing we want # for the control grid. grid_physical_spacing = [ point_width, point_width, point_width, ] image_physical_size = [ size * spacing for size, spacing in zip(fixed_image.GetSize(), fixed_image.GetSpacing()) ] mesh_size = [ int(image_size / grid_spacing + 0.5) for image_size, grid_spacing in zip(image_physical_size, grid_physical_spacing) ] initial_transform = sITK.BSplineTransformInitializer( image1=fixed_image, transformDomainMeshSize=mesh_size, order=2 ) registration_method.SetInitialTransform(initial_transform) registration_method.SetMetricAsMeanSquares() if fixed_image_mask: registration_method.SetMetricFixedMask(fixed_image_mask) registration_method.SetInterpolator(sITK.sitkLinear) registration_method.SetOptimizerAsLBFGSB( gradientConvergenceTolerance=1e-5, numberOfIterations=10 ) return registration_method.Execute(fixed_image, moving_image) def register_image(fixed, moving, mask=None, point_width=5.0): z_fixed = zscore(fixed.values) z_moving = zscore(moving.values) if mask is not None: mask = sITK.GetImageFromArray(mask * 255) tx = bspline_intra_modal_registration( sITK.GetImageFromArray(z_fixed), sITK.GetImageFromArray(z_moving), fixed_image_mask=mask, 
point_width=point_width, ) reg_moving = sITK.GetArrayFromImage( sITK.Resample( sITK.GetImageFromArray(moving), sITK.GetImageFromArray(fixed), tx, sITK.sitkLinear, ) ) return reg_moving def crop(img, bbox): y_min, x_min, y_max, x_max = bbox.values.astype(np.int) return img[y_min:y_max, x_min:x_max] def register_all_images( imgs, masks, bbox_pad=10, point_width=6.0, fixed_wvl="410", moving_wvl="470", mask_wvl="410", ): bboxes = xr.apply_ufunc( get_bbox, masks, input_core_dims=[["y", "x"]], output_core_dims=[["pos"]], vectorize=True, kwargs={"pad": bbox_pad}, ).assign_coords({"pos": ["min_row", "max_row", "min_col", "max_col"]}) reg_imgs = imgs.copy() for animal in imgs.animal: for pair in imgs.pair: for timepoint in imgs.timepoint: fixed = imgs.sel( animal=animal, pair=pair, timepoint=timepoint, wavelength=fixed_wvl ) moving = imgs.sel( animal=animal, pair=pair, timepoint=timepoint, wavelength=moving_wvl ) mask = masks.sel(animal=animal, pair=pair, timepoint=timepoint) bbox = bboxes.sel(animal=animal, pair=pair, timepoint=timepoint) # crop image crop_fixed = crop(fixed, bbox) crop_moving = crop(moving, bbox) crop_mask = crop(mask, bbox) # register image reg_moving = register_image( crop_fixed, crop_moving, mask=crop_mask, point_width=point_width ) # paste cropped images back into correct location (from bbox) y_min, x_min, y_max, x_max = bbox.values.astype(np.int) reg_imgs.loc[ dict( animal=animal, pair=pair, timepoint=timepoint, wavelength=moving_wvl, ) ][y_min:y_max, x_min:x_max] = reg_moving return reg_imgs
2
2
nengo_loihi/hardware/tests/test_interface.py
Michaeljurado24/nengo-loihi
0
12773272
import socket import nengo import numpy as np import pytest from nengo.exceptions import SimulationError from nengo_loihi.block import Axon, LoihiBlock, Synapse from nengo_loihi.builder.builder import Model from nengo_loihi.builder.discretize import discretize_model from nengo_loihi.hardware import interface as hardware_interface from nengo_loihi.hardware.allocators import Greedy from nengo_loihi.hardware.builder import build_board from nengo_loihi.hardware.nxsdk_shim import NxsdkBoard class MockNxsdk: def __init__(self): self.__version__ = None def test_error_on_old_version(monkeypatch): mock = MockNxsdk() mock.__version__ = "0.5.5" monkeypatch.setattr(hardware_interface, "nxsdk", mock) with pytest.raises(ImportError, match="nxsdk"): hardware_interface.HardwareInterface.check_nxsdk_version() def test_no_warn_on_current_version(monkeypatch): mock = MockNxsdk() mock.__version__ = str(hardware_interface.HardwareInterface.max_nxsdk_version) monkeypatch.setattr(hardware_interface, "nxsdk", mock) monkeypatch.setattr(hardware_interface, "assert_nxsdk", lambda: True) with pytest.warns(None) as record: hardware_interface.HardwareInterface.check_nxsdk_version() assert len(record) == 0 def test_warn_on_future_version(monkeypatch): mock = MockNxsdk() mock.__version__ = "100.0.0" monkeypatch.setattr(hardware_interface, "nxsdk", mock) monkeypatch.setattr(hardware_interface, "assert_nxsdk", lambda: True) with pytest.warns(UserWarning): hardware_interface.HardwareInterface.check_nxsdk_version() def test_builder_poptype_errors(): pytest.importorskip("nxsdk") # Test error in build_synapse model = Model() block = LoihiBlock(1) block.compartment.configure_lif() model.add_block(block) synapse = Synapse(1) synapse.set_weights([[1]]) synapse.pop_type = 8 block.add_synapse(synapse) discretize_model(model) allocator = Greedy() # one core per ensemble board = allocator(model, n_chips=1) with pytest.raises(ValueError, match="unrecognized pop_type"): build_board(board) # Test error in 
build_axon model = Model() block0 = LoihiBlock(1) block0.compartment.configure_lif() model.add_block(block0) block1 = LoihiBlock(1) block1.compartment.configure_lif() model.add_block(block1) axon = Axon(1) block0.add_axon(axon) synapse = Synapse(1) synapse.set_weights([[1]]) synapse.pop_type = 8 axon.target = synapse block1.add_synapse(synapse) discretize_model(model) board = allocator(model, n_chips=1) with pytest.raises(ValueError, match="unrecognized pop_type"): build_board(board) def test_host_snip_recv_bytes(): host_snip = hardware_interface.HostSnip(None) # We bypass the host_snip.connect method and connect manually host_address = "127.0.0.1" # Standard loopback interface address # Configure socket to send data to itself host_snip.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) host_snip.socket.bind((host_address, host_snip.port)) host_snip.socket.connect((host_address, host_snip.port)) # Generate random data to send data = np.random.randint(0, 8192, size=1100, dtype=np.int32) # Correctly receive data in two chunks # Note that chunks are 4096 bytes at the smallest (HostSnip.recv_size) host_snip.send_all(data) received = host_snip.recv_bytes(1024 * 4) assert np.all(received == data[:1024]) rest = 1100 - 1024 received = host_snip.recv_bytes(rest * 4) assert np.all(received == data[-rest:]) # Send too little data host_snip.send_all(data) with pytest.raises(RuntimeError, match="less than expected"): host_snip.recv_bytes(1536 * 4) # Send shutdown signal at the end data[-1] = -1 host_snip.send_all(data) with pytest.raises(RuntimeError, match="shutdown signal from chip"): host_snip.recv_bytes(1100 * 4) # Too little data with shutdown signal still raises too little data host_snip.send_all(data) with pytest.raises(RuntimeError, match="less than expected"): host_snip.recv_bytes(2048 * 4) @pytest.mark.target_loihi def test_interface_connection_errors(Simulator, monkeypatch): with nengo.Network() as net: nengo.Ensemble(2, 1) # test opening closed interface 
error sim = Simulator(net) interface = sim.sims["loihi"] interface.close() with pytest.raises(SimulationError, match="cannot be reopened"): with interface: pass sim.close() # test failed connection error def start(*args, **kwargs): raise Exception("Mock failure to connect") monkeypatch.setattr(NxsdkBoard, "start", start) with pytest.raises(SimulationError, match="Mock failure to connect"): with Simulator(net): pass @pytest.mark.filterwarnings("ignore:Model is precomputable.") @pytest.mark.target_loihi def test_snip_input_count(Simulator, seed, plt): with nengo.Network(seed=seed) as model: a = nengo.Ensemble(100, 1) for i in range(30): stim = nengo.Node(0.5) nengo.Connection(stim, a, synapse=None) with Simulator(model, precompute=False) as sim: with pytest.warns(UserWarning, match="Too many spikes"): sim.run(0.01)
1.960938
2
image2dice_pattern/dice_art_pattern_from_image_create_dice_image_by_paste.py
MajeedAskari/educational_python_scripts
0
12773273
<reponame>MajeedAskari/educational_python_scripts from PIL import Image, ImageOps, ImageDraw dicew = 300 quality = 6 # minimum = 1, maximum = resolution of your dice image / dicesize im = Image.open("image.png") im = ImageOps.grayscale(im) im = ImageOps.equalize(im) diceh = im.height / im.width * dicew dicesize = int(im.width / dicew) nim = Image.new("L", (im.width * quality, im.height * quality), 'white') # nimd = ImageDraw.Draw(nim) dices = [] for i in range(1, 7): dim = Image.open("dice/" + str(i) + ".jpg") dim = dim.resize((dicesize * quality, dicesize * quality), Image.ANTIALIAS) dim = ImageOps.equalize(dim) dices.append(dim) for y in range(0, im.height-dicesize, dicesize): for x in range(0, im.width-dicesize, dicesize): thisSectorColor = 0 for dicex in range(0, dicesize): for dicey in range(0, dicesize): thisSectorColor += im.getpixel((x+dicex, y+dicey)) thisSectorColor = thisSectorColor / (dicesize **2 ) #nimd.rectangle(((x, y),(x+dicesize, y+dicesize)), thisSectorColor) diceNumber = (255-thisSectorColor) * 5 / 255 + 1 #print (x, y, thisSectorColor, diceNumber) # print diceNumber, nim.paste(dices[diceNumber - 1], (x * quality, y * quality)) # print nim.save("diceimage.png") # nim.show()
3.703125
4
python-is-easy/assignments/basic-loops/main.py
eDyablo/pirple
0
12773274
''' Homework assignment for the 'Python is easy' course by Pirple. Written by <NAME>. It pprints the numbers from 1 to 100. But for multiples of three print "Fizz" instead of the number and for the multiples of five print "Buzz". For numbers which are multiples of both three and five print "FizzBuzz". It also adds "prime" to the output when the number is a prime (divisible only by itself and one). ''' for number in range(1, 101): output = "" # collects output for the number if number % 3 == 0: output += "Fizz" if number % 5 == 0: output += "Buzz" if output == "": output = str(number) isPrime = True # assume the number is a prime by default for divisor in range(2, number): if number % divisor == 0: isPrime = False # mark the number as not a prime break if isPrime: output += " prime" print(output)
4.34375
4
stubs/m5stack_flowui-v1_4_0-beta/flowlib/units/_color.py
mattytrentini/micropython-stubs
0
12773275
<filename>stubs/m5stack_flowui-v1_4_0-beta/flowlib/units/_color.py """ Module: 'flowlib.units._color' on M5 FlowUI v1.4.0-beta """ # MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32') # Stubber: 1.3.1 - updated from typing import Any class Color: """""" def _available(self, *argv) -> Any: pass def _read_u16(self, *argv) -> Any: pass def _read_u8(self, *argv) -> Any: pass def _register_char(self, *argv) -> Any: pass def _register_short(self, *argv) -> Any: pass def _valid(self, *argv) -> Any: pass def _write_u16(self, *argv) -> Any: pass def _write_u8(self, *argv) -> Any: pass blue = None def deinit(self, *argv) -> Any: pass def enable(self, *argv) -> Any: pass def getRGB(self, *argv) -> Any: pass green = None portMethod = 255 rawData = None red = None def setGains(self, *argv) -> Any: pass def setIntegrationTime(self, *argv) -> Any: pass INT_TIME_DELAY = None _CYCLES = None _GAINS = None _INTEGRATION_TIME_THRESHOLD_HIGH = 614.4 _INTEGRATION_TIME_THRESHOLD_LOW = 2.4 def const(): pass i2c_bus = None time = None unit = None ustruct = None
2.1875
2
constants.py
OnRails-IN/backend
0
12773276
<gh_stars>0 import os ENV = os.environ.get("PYTHON_ENV") INDEX_KEYS = { 'a':'Amelie', 'b':'Basterds', 'c':'Corleone', 'd':'Django', 'e':'Edgar', 'f':'Floorgang', 'g':'Gandalf', 'h':'HansLanda', 'i':'Ireland', 'j':'Jeeves', 'k':'Kubrick', 'l':'Lebowski', 'm':'Masterpiece', 'n':'Norman', 'o':'Ozymandias', 'p':'Pikachu', 'q':'Quasimodo', 'r':'Reddit', 's':'Strangelove', 't':'Tambourine', 'u':'Updog', 'v':'Vader', 'w':'Waffles', 'x':'Xenon', 'y':'Yoda', 'z':'Zulu' } SECRET_KEY = os.environ.get('SECRET_KEY') DOMAIN = os.environ.get('DOMAIN') ES_URI = os.environ.get('ES_URI') AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY') AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY') AWS_REGION = os.environ.get('AWS_REGION') REDIS_HOST = os.environ.get('REDIS_HOST') REDIS_PORT = os.environ.get('REDIS_PORT') REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD') DYNAMO_URL = os.environ.get('DYNAMO_URL') COORDINATES = { "loco_number": { "x": 140, "y": 123 }, "loco_class": { "x": 120, "y": 62 }, "loco_shed": { "x": 140, "y": 62 }, "train_number": { "x": 140, "y": 170 }, "train_name": { "x": 140, "y": 200 }, "username": { "x": 140, "y": 227 }, "timestamp": { "x": 5, "y": 250 }, "location": { "x": 275, "y": 250 } }
1.8125
2
backend/kale/tests/unit_tests/test_kfputils.py
brness/kale
502
12773277
<reponame>brness/kale # Copyright 2020 The Kale Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import json from testfixtures import mock from kale.common import kfputils @mock.patch('kale.common.kfputils.workflowutils') @mock.patch('kale.common.kfputils.podutils') def test_update_uimetadata_not_exists(podutils, workflowutils, tmpdir): """Test the uimetadata file is created when it does not exists.""" podutils.get_pod_name.return_value = 'test_pod' podutils.get_namespace.return_value = 'test_ns' workflowutils.get_workflow_name.return_value = 'test_wk' filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') # update tmp file kfputils.update_uimetadata('test', uimetadata_path=filepath) # check file has been updated correctly updated = json.loads(open(filepath).read()) target = {"outputs": [{ 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.common.kfputils.workflowutils') @mock.patch('kale.common.kfputils.podutils') def test_update_uimetadata_from_empty(podutils, workflowutils, tmpdir): """Test that the uimetadata file is updated inplace correctly.""" podutils.get_pod_name.return_value = 'test_pod' podutils.get_namespace.return_value = 'test_ns' workflowutils.get_workflow_name.return_value = 'test_wk' # create base tmp file base = {"outputs": []} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp file 
kfputils.update_uimetadata('test', uimetadata_path=filepath) # check file has been updated correctly updated = json.loads(open(filepath).read()) target = {"outputs": [{ 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.common.kfputils.workflowutils') @mock.patch('kale.common.kfputils.podutils') def test_update_uimetadata_from_not_empty(podutils, workflowutils, tmpdir): """Test that the uimetadata file is updated inplace correctly.""" podutils.get_pod_name.return_value = 'test_pod' podutils.get_namespace.return_value = 'test_ns' workflowutils.get_workflow_name.return_value = 'test_wk' # create base tmp file markdown = { 'type': 'markdown', 'storage': 'inline', 'source': '#Some markdown' } base = {"outputs": [markdown]} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp file kfputils.update_uimetadata('test', uimetadata_path=filepath) # check file has been updated correctly updated = json.loads(open(filepath).read()) target = {"outputs": [markdown, { 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target
1.617188
2
Algorithms_easy/1309. Decrypt String from Alphabet to Integer Mapping.py
VinceW0/Leetcode_Python_solutions
4
12773278
""" 1309. Decrypt String from Alphabet to Integer Mapping Given a string s formed by digits ('0' - '9') and '#' . We want to map s to English lowercase characters as follows: Characters ('a' to 'i') are represented by ('1' to '9') respectively. Characters ('j' to 'z') are represented by ('10#' to '26#') respectively. Return the string formed after mapping. It's guaranteed that a unique mapping will always exist. Example 1: Input: s = "10#11#12" Output: "jkab" Explanation: "j" -> "10#" , "k" -> "11#" , "a" -> "1" , "b" -> "2". Example 2: Input: s = "1326#" Output: "acz" Example 3: Input: s = "25#" Output: "y" Example 4: Input: s = "12345678910#11#12#13#14#15#16#17#18#19#20#21#22#23#24#25#26#" Output: "abcdefghijklmnopqrstuvwxyz" Constraints: 1 <= s.length <= 1000 s[i] only contains digits letters ('0'-'9') and '#' letter. s will be valid string such that mapping is always possible. """ class Solution: def freqAlphabets(self, s: str): return s.replace("10#", "j")\ .replace("11#", "k")\ .replace("12#", "l")\ .replace("13#", "m")\ .replace("14#", "n")\ .replace("15#", "o")\ .replace("16#", "p")\ .replace("17#", "q")\ .replace("18#", "r")\ .replace("19#", "s")\ .replace("20#", "t")\ .replace("21#", "u")\ .replace("22#", "v")\ .replace("23#", "w")\ .replace("24#", "x")\ .replace("25#", "y")\ .replace("26#", "z")\ .replace("1", "a")\ .replace("2", "b")\ .replace("3", "c")\ .replace("4", "d")\ .replace("5", "e")\ .replace("6", "f")\ .replace("7", "g")\ .replace("8", "h")\ .replace("9", "i") class Solution: def freqAlphabets(self, s: str): return re.sub(r'\d{2}#|\d', lambda x: chr(int(x.group()[:2])+96), s)
3.984375
4
sendtoairport/groupingdata.py
hellodu-dev/team_schedule
6
12773279
<reponame>hellodu-dev/team_schedule # coding:utf-8 """ <NAME> June 9,2017 修改 <NAME> June 22,2017 June 29 ,2017 """ import json from itertools import groupby from operator import itemgetter from sendtoairport import overwritestartschedule import func_timeout # 将订单信息转换为json数组[{numberoforder:3,OrderInfos:[{orderpoid:gfg,pickupTime:1800},{orderpoid:jhj,pickupTime:1800}]},{....}] def incodejs(timepointVec, scheduleDataVec): car = [] for i in xrange(len(timepointVec)): for element in scheduleDataVec[i]: d = {} d['numberoforder'] = len(element) ord = [] for element2 in element: f = {} f['BID'] = element2 f['timetable'] = timepointVec[i] ord.append(f) d['OrderInfos'] = ord car.append(d) jsondatar = json.dumps(car, ensure_ascii=False, separators=(',', ':')).encode('utf-8') return jsondatar # def incodejs(scheduleDataAndTime): # car = [] # for element in scheduleDataAndTime: # d = {} # d['numberoforder'] = len(element) # ord = [] # for element2 in element: # f = {} # f['BID'] = element2['BID'] # f['pickupTime'] = element2['pickupTime'] # ord.append(f) # d['OrderInfos'] = ord # car.append(d) # jsondatar = json.dumps(car, ensure_ascii=False, separators=(',', ':')).encode('utf-8') # return jsondatar # def incodejs(AllscheduleData, scheduleDataVec): # car = [] # tmpIdx = 1 # for element4 in scheduleDataVec: # for element1 in element4: # for element2 in element1: # for element3 in AllscheduleData: # if element2 == element3['BID']: # f = {} # f["orderNum"] = element3['BID'] # f["lng"] = element3['bdlng'] # f["lat"] = element3['bdlat'] # f["idx"] = tmpIdx # car.append(f) # tmpIdx = tmpIdx + 1 # jsondatar = json.dumps(car, ensure_ascii=False, separators=(',', ':')).encode('utf-8') # return jsondatar @func_timeout.func_set_timeout(20) # 超过20秒说明程序出了问题,自动停止这次进程 def geteachTimepointSchedule(AllscheduleData): AllscheduleData = sorted(AllscheduleData, key=itemgetter('date')) allDataGroups = groupby(AllscheduleData, key=itemgetter('date')) timepointVec = [] scheduleDataVec = [] # 
[[[a,b,c],[c,d,e]]] for today, todaydata in allDataGroups: todayScheduleData = list(todaydata) todayScheduleData = sorted(todayScheduleData, key=itemgetter('timetable')) groups = groupby(todayScheduleData, key=itemgetter('timetable')) for key, value in groups: timepointVec.append(key) timepointorder = list(value) if len(timepointorder) > 1: scheduleDataVec.append(overwritestartschedule.startschedul(timepointorder)) else: onlyone = [[timepointorder[0]['BID']]] scheduleDataVec.append(onlyone) # jsondata = incodejs(AllscheduleData, scheduleDataVec) jsondata = incodejs(timepointVec, scheduleDataVec) # sc = schedulerecomtime.SCHEDULETIME() # scheduleAndTime = sc.incressPickupTime(scheduleDataVec, AllscheduleData) # jsondata = incodejs(scheduleAndTime) return jsondata
2.328125
2
CodeUP/Python basic 100/6014.py
cmsong111/NJ_code
0
12773280
a=input() print(a) print(a) print(a)
1.71875
2
hooks/post_gen_project.py
daleal/cookiecutter-pypackage
18
12773281
import shutil import subprocess import json import os # CLEANUP MANIFEST_FILE = "manifest.json" def cleanup_disabled_features(): print("⚒ Cleaning up...") with open(MANIFEST_FILE) as manifest_file: manifest = json.load(manifest_file) for feature in manifest["features"]: if not feature["enabled"]: for resource in feature["resources"]: remove_resource(resource) remove_resource(MANIFEST_FILE) print("✔ Cleanup complete") def remove_resource(resource): if os.path.isfile(resource): os.remove(resource) elif os.path.isdir(resource): shutil.rmtree(resource) # ENVIRONMENT SETUP def build_environment(): print("⚒ Building the environment...") try: create_virtual_environment() install_dependencies() print("✔ The environment is ready") except subprocess.CalledProcessError: print( "❌ The environment could not be built, you can do it later " "using the included command `make build-env`." ) def create_virtual_environment(): subprocess.check_call(["python3", "-m", "venv", ".venv"]) def install_dependencies(): subprocess.check_call( ["poetry", "run", "pip", "install", "--upgrade", "pip"] ) subprocess.check_call(["poetry", "run", "poetry", "install"]) # GIT INITIALIZATION def execute_git_initialization(): print("⚒ Initializing git repository...") try: initialize_git_repository() initial_git_commit() rename_git_branch() create_git_stable_branch() print("✔ git initialization complete") except subprocess.CalledProcessError: print("❌ The git repository could not be correctly initialized.") def initialize_git_repository(): subprocess.check_call(["git", "init"]) def initial_git_commit(): subprocess.check_call(["git", "add", "."]) subprocess.check_call(["git", "commit", "-m", "'Initial commit'"]) def rename_git_branch(): subprocess.check_call(["git", "branch", "-M", "{{ cookiecutter.git_main_branch }}"]) def create_git_stable_branch(): subprocess.check_call(["git", "branch", "stable"]) if __name__ == "__main__": cleanup_disabled_features() build_environment() execute_git_initialization()
2.5
2
pyqmc/energy.py
gcassella/pyqmc
0
12773282
#import numpy as np import jax.numpy as jnp import pyqmc.eval_ecp as eval_ecp from pyqmc.distance import RawDistance def ee_energy(configs): ne = configs.shape[1] if ne == 1: return jnp.zeros(configs.shape[0]) ee = jnp.zeros(configs.shape[0]) ee, ij = RawDistance().dist_matrix(configs) ee = jnp.linalg.norm(ee, axis=2) return jnp.sum(1.0 / ee, axis=1) def ei_energy(mol, configs): ei = 0.0 for c, coord in zip(mol.atom_charges(), mol.atom_coords()): delta = configs - coord[jnp.newaxis, jnp.newaxis, :] deltar = jnp.sqrt(jnp.sum(delta ** 2, axis=2)) ei += -c * jnp.sum(1.0 / deltar, axis=1) return ei def ii_energy(mol): ei = 0.0 d = RawDistance() rij, ij = d.dist_matrix(mol.atom_coords()[jnp.newaxis, :, :]) if len(ij) == 0: return jnp.array([0.0]) rij = jnp.linalg.norm(rij, axis=2)[0, :] iitot = 0 c = mol.atom_charges() for (i, j), r in zip(ij, rij): iitot += c[i] * c[j] / r return iitot def get_ecp(mol, configs, wf, threshold): return eval_ecp.ecp(mol, configs, wf, threshold) def kinetic(configs, wf): nconf, nelec, ndim = configs.shape ke = jnp.zeros(nconf) ke += -0.5 * jnp.real(wf["laplacian"](configs)) return ke def energy(mol, configs, wf, threshold): """Compute the local energy of a set of configurations. Args: mol: A pyscf-like 'Mole' object. nelec, atom_charges(), atom_coords(), and ._ecp are used. configs: a nconfiguration x nelectron x 3 numpy array wf: A Wavefunction-like object. Functions used include recompute(), lapacian(), and testvalue() Returns: a dictionary with energy components ke, ee, ei, and total """ ee = ee_energy(configs) ei = ei_energy(mol, configs) ecp_val = get_ecp(mol, configs, wf, threshold) ii = ii_energy(mol) ke = kinetic(configs, wf) # print(ke,ee,ei,ii) return { "ke": ke, "ee": ee, "ei": ei, "ecp": ecp_val, "total": ke + ee + ei + ecp_val + ii, }
1.859375
2
fdk_client/platform/models/SearchLogReq.py
kavish-d/fdk-client-python
0
12773283
<reponame>kavish-d/fdk-client-python """Platform Models.""" from marshmallow import fields, Schema from marshmallow.validate import OneOf from ..enums import * from ..models.BaseSchema import BaseSchema class SearchLogReq(BaseSchema): # Analytics swagger.json marketplace_name = fields.Str(required=False) start_date = fields.Str(required=False) company_id = fields.Str(required=False) end_date = fields.Str(required=False) identifier = fields.Str(required=False) identifier_value = fields.Str(required=False)
1.765625
2
shared_functions.py
scottmanderson/minerva
0
12773284
from app.models import DataSource, DataSourcePoll def fetch_all_data_sources(): try: all_data_sources = DataSource.query.all() except Exception: print("error fetching data sources; table likely empty") all_data_sources = [] return all_data_sources def fetch_all_data_source_polls(): try: all_data_source_polls = DataSourcePoll.query.all() except Exception: print("error fetching data source polls; table likely empty") all_data_source_polls = [] return all_data_source_polls
2.28125
2
EnglishCheck.py
williamHuang5468/LearnCircleTool
0
12773285
# -*- coding: utf-8 -*- from datetime import datetime from datetime import timedelta from datetime import date import sys def countLearnCirlce(date, itemName): days = [0,1,2,3,4,5,6] result = [] for item in days: result.append((date + timedelta(days=item)).strftime('%Y/%m/%d')) return result def getTitle(date): days = [0,6] result = [] for item in days: result.append((date + timedelta(days=item)).strftime('%Y/%m/%d')) first = "每週練習:".decode('utf-8') resultString = "%s%s~%s" %(first, result[0],result[1]) return resultString if __name__ == '__main__': separated = "-" input = sys.argv[1].split(',') itemName = ["閱讀,句子分段 30Min", "聽力,Echo Method,15Min"] date = datetime(int(input[0]), int(input[1]), int(input[2])) result = countLearnCirlce(date, itemName) resultString = getTitle(date) print resultString for item in result: print "%s%s%s"%(item, separated, itemName[0].decode('utf-8')) print "%s%s%s"%(item, separated, itemName[1].decode('utf-8'))
3.421875
3
comments/serializers.py
syqu22/django-react-blog
0
12773286
<filename>comments/serializers.py from rest_framework import serializers from users.serializers import UserSerializer from comments.models import Comment class CommentSerializer(serializers.ModelSerializer): author = UserSerializer(read_only=True) class Meta: model = Comment fields = ['id', 'author', 'body', 'created_at'] class CreateCommentSerializer(serializers.ModelSerializer): class Meta: model = Comment fields = ['body']
2.265625
2
dotnet/private/rules/common.bzl
wbiker/rules_dotnet
0
12773287
<gh_stars>0 load( "@io_bazel_rules_dotnet//dotnet/private:providers.bzl", "DotnetLibrary", ) def collect_transitive_info(deps): """Collects transitive information. Args: deps: Dependencies that the DotnetLibrary depends on. Returns: A depsets of the references, runfiles and deps. References and deps also include direct dependencies provided by deps. However, runtfiles do not include direct runfiles. """ direct_refs = [] direct_deps = [] transitive_refs = [] transitive_runfiles = [] transitive_deps = [] for dep in deps: assembly = dep[DotnetLibrary] if assembly.ref: direct_refs += assembly.ref.files.to_list() elif assembly.result: direct_refs.append(assembly.result) if assembly.transitive_refs: transitive_refs.append(assembly.transitive_refs) transitive_runfiles.append(assembly.runfiles) direct_deps.append(assembly) if assembly.transitive: transitive_deps.append(assembly.transitive) return ( depset(direct = direct_refs, transitive = transitive_refs), depset(direct = [], transitive = transitive_runfiles), depset(direct = direct_deps, transitive = transitive_deps), )
2
2
data_load_cv_siamese.py
bionlplab/poagnet
0
12773288
import os
import numpy as np
# from skimage.io import imread
import cv2
import copy
from skimage.transform import resize


def _build_image_pairs(names, ran, lr, tracking, labels_col, data_path, x_size, y_size):
    """Build siamese image pairs for a list of (ran_number, side) entries.

    For each entry, the first image whose ran number and side match becomes
    the "base"; every later matching image is paired with it.  The pair label
    is 1 when both images share the same class label, else 0.

    Returns (base_imgs, pair_imgs, pair_labels, start_indices) where
    start_indices[k] is the index into the pair arrays where entry k's pairs
    begin (one value per entry that had at least one matching image).
    """
    max_pairs = len(names) * 20  # generous upper bound, trimmed below
    imgs1 = np.ndarray((max_pairs, x_size, y_size, 3))
    imgs2 = np.ndarray((max_pairs, x_size, y_size, 3))
    pair_labels = []
    start_indices = []
    le = 0        # number of pairs emitted so far
    ll_index = 0  # running pair index used for start_indices
    for name in names:
        ind = np.argwhere(ran == name[0])
        have_base = False
        for j in range(len(ind)):
            k = int(ind[j])
            if lr[k] != name[1]:
                continue
            img_path = os.path.join(data_path, ran[k] + '-' + tracking[k] + '.jpg')
            im = cv2.imread(img_path)
            if not have_base:
                base = cv2.resize(im, (x_size, y_size))
                gt = labels_col[k]
                have_base = True
                start_indices = np.append(start_indices, ll_index)
            else:
                imgs1[le] = base
                imgs2[le] = cv2.resize(im, (x_size, y_size))
                le += 1
                # 1 = same label as the base image, 0 = different
                pair_labels = np.append(pair_labels, 1 if gt == labels_col[k] else 0)
                ll_index += 1
    return imgs1[0:le], imgs2[0:le], pair_labels, start_indices


def load_data_siamese(x_size, y_size, data_path, label_path, image_s_path,
                      uncentain_path, validation_name, test_name):
    """Load validation and test siamese pairs from the label CSV and image dir.

    Returns (val_images, val_labels, test_images, test_labels, ind_start)
    where val_images/test_images are [imgs1, imgs2] lists and ind_start marks
    where each test eye's pairs begin.

    The large blocks that built '-s' and 'uncertain' subsets were dead
    (commented out) and have been removed; the side CSVs are still read so
    missing-file errors surface exactly as before.
    """
    # BUG FIX: np.str was a deprecated alias of the builtin; use str directly.
    tmp = np.loadtxt(label_path, dtype=str, delimiter=",")
    # Drop one row because its jpg image is missing; 8252 is the data position
    # and +1 accounts for the CSV title row.
    tmp = np.delete(tmp, 8252 + 1, axis=0)
    # Strip the header row while slicing out the columns.
    ran = tmp[1:, 0]
    lr = tmp[1:, 1]
    tracking = tmp[1:, 2]
    tmp1 = tmp[1:, 3]

    # These side files fed the now-removed '-s' / 'uncertain' test subsets;
    # keep loading them to preserve the original I/O (and failure) behavior.
    np.loadtxt(image_s_path, dtype=str, delimiter=",")
    np.loadtxt(uncentain_path, dtype=str, delimiter=",")

    v1, v2, val_labels, _ = _build_image_pairs(
        validation_name, ran, lr, tracking, tmp1, data_path, x_size, y_size)
    val_images = [v1, v2]

    t1, t2, test_labels, ind_start = _build_image_pairs(
        test_name, ran, lr, tracking, tmp1, data_path, x_size, y_size)
    test_images = [t1, t2]

    return val_images, val_labels, test_images, test_labels, ind_start
2.765625
3
camel_bubblesort/__init__.py
camel-ci/camel-ci-python3
0
12773289
# Package metadata: the distribution name used by the packaging tooling.
name = "camel-ci-python"
1.0625
1
classes/logger.py
tmcdonagh/Autorippr
162
12773290
# -*- coding: utf-8 -*-
"""
Simple logging class
Released under the MIT license
Copyright (c) 2012, <NAME>
@category   misc
@version    $Id: 1.7.0, 2016-08-22 14:53:29 ACST $;
@author     <NAME>
@license    http://opensource.org/licenses/MIT
"""

import logging
import os
import sys


class Logger(object):
    """Thin wrapper around the stdlib logger.

    Always logs to <repo root>/autorippr.log; additionally mirrors output to
    stdout unless `silent` is true.  `debug` switches the level between
    DEBUG and INFO.
    """

    def __init__(self, name, debug, silent):
        self.silent = silent

        frmt = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            "%Y-%m-%d %H:%M:%S"
        )

        if debug:
            loglevel = logging.DEBUG
        else:
            loglevel = logging.INFO

        self.createhandlers(frmt, name, loglevel)

    def __del__(self):
        # Detach handlers so repeated Logger construction for the same name
        # does not accumulate duplicate handlers.
        # BUG FIX: the file handler was previously removed only when not
        # silent, leaking a FileHandler for every silent Logger.
        if not self.silent:
            self.log.removeHandler(self.sh)
        self.log.removeHandler(self.fh)
        self.log = None

    def createhandlers(self, frmt, name, loglevel):
        """Attach the stdout handler (unless silent) and the file handler."""
        self.log = logging.getLogger(name)
        self.log.setLevel(loglevel)

        if not self.silent:
            self.sh = logging.StreamHandler(sys.stdout)
            self.sh.setLevel(loglevel)
            self.sh.setFormatter(frmt)
            self.log.addHandler(self.sh)

        # Log file lives in the parent directory of this package.
        DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        self.fh = logging.FileHandler('%s/autorippr.log' % DIR)
        self.fh.setLevel(loglevel)
        self.fh.setFormatter(frmt)
        self.log.addHandler(self.fh)

    def debug(self, msg):
        self.log.debug(msg)

    def info(self, msg):
        self.log.info(msg)

    def warn(self, msg):
        # Public name kept for callers; Logger.warn() is deprecated in the
        # stdlib, so delegate to warning().
        self.log.warning(msg)

    def error(self, msg):
        self.log.error(msg)

    def critical(self, msg):
        self.log.critical(msg)
2.859375
3
api/tacticalrmm/agents/migrations/0015_note.py
BaDTaG/tacticalrmm
1
12773291
<gh_stars>1-10
# Generated by Django 3.1.1 on 2020-09-22 20:57
# Auto-generated migration: creates the agents.Note model (free-text note
# attached to an Agent, optionally authored by a user).  Do not hand-edit
# the operations; Django relies on them matching the recorded state.

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    # Requires the swappable user model and the previous agents migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("agents", "0014_remove_agent_managed_by_wsus"),
    ]

    operations = [
        migrations.CreateModel(
            name="Note",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("note", models.TextField(blank=True, null=True)),
                ("entry_time", models.DateTimeField(auto_now_add=True)),
                (
                    # Deleting the agent deletes its notes.
                    "agent",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="notes",
                        to="agents.agent",
                    ),
                ),
                (
                    # Deleting the user keeps the note but nulls the author.
                    "user",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="user",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
    ]
1.617188
2
policyhandler/policy_consts.py
alex-sh2020/dcaegen2-platform-policy-handler
2
12773292
<reponame>alex-sh2020/dcaegen2-platform-policy-handler
# ================================================================================
# Copyright (c) 2017-2019 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
"""contants of policy-handler"""

# Field names of a single policy record.
POLICY_ID = 'policy_id'
POLICY_BODY = 'policy_body'

# Catch-up / reconfigure event names.
CATCH_UP = "catch_up"
AUTO_CATCH_UP = "auto catch_up"
AUTO_RECONFIGURE = "auto reconfigure"

# Keys of the policy-update message sent to deployment-handler.
LATEST_POLICIES = "latest_policies"
REMOVED_POLICIES = "removed_policies"
ERRORED_POLICIES = "errored_policies"

# Policy-filter related keys.
POLICY_FILTER = "policy_filter"
POLICY_FILTERS = "policy_filters"
POLICIES = "policies"
POLICY_VERSIONS = "policy_versions"
POLICY_NAMES = "policy_names"
POLICY_FILTER_MATCHES = "policy_filter_matches"

TARGET_ENTITY = "target_entity"
1.109375
1
cpa/tests/test2.py
DavidStirling/CellProfiler-Analyst
98
12773293
# Fancy-pants method for getting a where clause that groups adjacent image keys
# using "BETWEEN X AND Y" ... unfortunately this usually takes far more
# characters than using "ImageNumber IN (X,Y,Z...)" since we don't run into
# queries asking for consecutive image numbers very often (except when we do it
# deliberately). It is also slower than the "IN" method unless the ImageNumbers
# come in a smaller list of consecutive groups.
#
# ...still, this may be very useful since it is notably faster when ImageNumbers
# are consecutive.
# NOTE(review): relies on module globals `p` (properties) and
# `object_key_columns()` defined elsewhere in this file.
def get_where_clause_for_images(keys, is_sorted=False):
    '''
    takes a list of keys and returns a (hopefully) short where clause that
    includes those keys.
    '''
    def in_sequence(k1, k2):
        # True when k2 is the key immediately after k1: same prefix
        # (e.g. table number) and last field exactly one greater.
        if len(k1) > 1:
            if k1[:-1] != k2[:-1]:
                return False
        return k1[-1] == (k2[-1] - 1)

    def optimize_for_query(keys, is_sorted=False):
        # Collapse a sorted key list into groups: [key] for isolated keys,
        # [first, last] for a run of consecutive keys.
        if not is_sorted:
            keys.sort()
        groups = []
        in_run = False
        for i in range(len(keys)):
            if i == len(keys) - 1:
                if in_run:
                    groups[-1] += [keys[i]]
                else:
                    groups += [[keys[i]]]
                break
            if in_run:
                if in_sequence(keys[i], keys[i + 1]):
                    continue
                else:
                    groups[-1] += [keys[i]]
                    in_run = False
            else:
                if in_sequence(keys[i], keys[i + 1]):
                    in_run = True
                groups += [[keys[i]]]
        return groups

    # BUG FIX: pass the caller's is_sorted flag through; it was previously
    # ignored, forcing a sort even when the caller declared the keys sorted.
    groups = optimize_for_query(keys, is_sorted)
    wheres = []
    for k in groups:
        if len(k) == 1:
            # BUG FIX: the per-column conditions of one composite key must be
            # ANDed together; they were previously appended individually and
            # ended up ORed with every other condition by the final join.
            conds = ['%s=%s' % (col, value)
                     for col, value in zip(object_key_columns(), k[0])]
            wheres += ['(%s)' % ' AND '.join(conds)]
        else:
            # expect 2 keys: the first and last of a contiguous run
            first, last = k
            if p.table_id:
                wheres += ['(%s=%s AND %s BETWEEN %s and %s)' %
                           (p.table_id, first[0], p.image_id, first[1], last[1])]
            else:
                wheres += ['(%s BETWEEN %s and %s)' %
                           (p.image_id, first[0], last[0])]
    return ' OR '.join(wheres)
2.96875
3
modules/btcppi.py
halesyy/ip-share
0
12773294
<reponame>halesyy/ip-share<gh_stars>0
from packer import Packer
from fdates import parse

P = Packer()

# Dataset 1: bitcoinppi "global PPI" series, fetched as CSV, re-scraped daily.
P.dataset(1, {
    "url": "http://bitcoinppi.com/v1.1/global_ppi.csv?from=2011-07-01%2000%3A00&to=2020-10-26%2000%3A00",
    "parse_as": "csv",
    "name": "Global PPI Bitcoin",
    "index": "tick",
    "subsets": ["global_ppi"],
    "scrape_every": "1 day",
    "reverse": True
})
# P.sma(1, "Rec_prob", period=5)

# Dataset 2: VIX daily closes from Yahoo Finance.
P.dataset(2, {
    "url": "^VIX.yfi",
    "range": "daily",
    "name": "VIX",
    "index": "date",
    "subsets": ["close"],
    "scrape_every": "1 day"
})
# P.sma(2, "close", period=5)

P.minimize([1, 2], normalize=True)

# our "callables" for date parsing our datasets
def nyfed_dp(input):
    # e.g. "2020-01-02T00:00:00" -> keep only the date part, normalized.
    s = str(input).split(":")[0].split("T")[0]
    return parse("year-month-day", s, normalize=True)

def yahoofin_dp(input):
    return parse("year-month-day", str(input), normalize=True)

def ppi(input):
    # PPI ticks look like "YYYY-MM-DD HH:MM:SS"; keep only the date part.
    return parse("year-month-day", str(input.split(" ")[0]), normalize=True)

# NOTE(review): dataset 2 is parsed with nyfed_dp while yahoofin_dp is defined
# but unused — both reduce to a plain date, but verify this is intended.
P.parse_indexes_as_date([
    [1, ppi],
    [2, nyfed_dp]
])

# Meta-creators
mid = P.index([2], by=1)
mid = P.clean(mid)

# Emit the bridge JSON consumed by the charting front-end.
P.meta_line_chart("./../bridges/bitcoin_ppi.bridge.json", {
    "use": mid,
    "bottom": 0,
    "left": [1],
    "right": [2],
    "names": ["string:Date", "number:Bitcoin PPI", "number:VIX"]
})
2.21875
2
scitokens_protect.py
jbasney/scitokens-heroku
0
12773295
import scitokens
from functools import wraps
from flask import request
import traceback
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
import base64
import os


def protect(**outer_kwargs):
    """Decorator factory guarding a Flask view with a SciToken bearer check.

    Required keyword arguments:
      audience -- expected token audience
      scp      -- scope value the token's 'scp' claim must equal

    Returns 401 with a WWW-Authenticate header when the token is missing or
    cannot be deserialized, 403 when claim validation fails.
    """
    def real_decorator(some_function):
        @wraps(some_function)
        def wrapper(*args, **kwargs):
            if 'Authorization' not in request.headers:
                headers = {'WWW-Authenticate': 'Bearer'}
                return ("No Authentication Header", 401, headers)
            bearer = request.headers.get("Authorization")
            if len(bearer.split()) != 2:
                headers = {'WWW-Authenticate': 'Bearer'}
                return ("Authentication header incorrect format", 401, headers)
            serialized_token = bearer.split()[1]
            try:
                # Read in the private key environment variable (base64-encoded
                # PEM).  BUG FIX: the password argument held a redaction
                # placeholder (`<PASSWORD>`) that was a syntax error; the key
                # is assumed unencrypted — verify against the deployment.
                private_key = serialization.load_pem_private_key(
                    base64.b64decode(os.environ['PRIVATE_KEY']),
                    password=None,
                    backend=default_backend()
                )
                # Derive the public half for token verification.
                public_key = private_key.public_key()
                public_pem = public_key.public_bytes(
                    encoding=serialization.Encoding.PEM,
                    format=serialization.PublicFormat.SubjectPublicKeyInfo
                )
                token = scitokens.SciToken.deserialize(
                    serialized_token,
                    audience=outer_kwargs['audience'],
                    public_key=public_pem)
            except Exception as e:
                print(str(e))
                traceback.print_exc()
                headers = {'WWW-Authenticate': 'Bearer'}
                # BUG FIX: message mixed %-style and str.format ("%{}").
                return ("Unable to deserialize: {}".format(str(e)), 401, headers)

            def check_scope(value):
                return value == outer_kwargs['scp']

            def check_iss(value):
                return value == "https://demo.scitokens.org"

            def return_true(value):
                return True

            validator = scitokens.Validator()
            validator.add_validator('scp', check_scope)
            validator.add_validator('iss', check_iss)
            # the jwt library already validates the below in the deserialization
            validator.add_validator('iat', return_true)
            validator.add_validator('exp', return_true)
            validator.add_validator('nbf', return_true)
            validator.add_validator('aud', return_true)
            validator.add_validator('jti', return_true)

            try:
                validator.validate(token)
            except scitokens.scitokens.ClaimInvalid as ce:
                headers = {'WWW-Authenticate': 'Bearer'}
                return ("Validation incorrect", 403, headers)

            return some_function(*args, **kwargs)
        return wrapper
    return real_decorator


# BUG FIX: the demo previously passed aud="asdf", but the wrapper reads
# outer_kwargs['audience'] and outer_kwargs['scp'] — it would KeyError on
# every request.  Supply both required keys.
@protect(audience="asdf", scp="stuff")
def stuff(blah, stuff, **kwargs):
    print(blah)
    print(stuff)
    # BUG FIX: dict.iteritems() is Python 2 only.
    for key, value in kwargs.items():
        print("%s = %s" % (key, value))
2.4375
2
restaurant.py
julencosme/python-crash-course
0
12773296
# A module for class Restaurant
"""A set of classes that can be used to represent a restaurant."""


class Restaurant():
    """A model of a restaurant."""

    def __init__(self, name, cuisine):
        """Remember the restaurant's name and its cuisine type."""
        self.name = name
        self.cuisine = cuisine

    def describe_restaurant(self):
        """Simulate a description of restaurant."""
        name_line = self.name.title() + " is the restaurant."
        cuisine_line = self.cuisine.title() + " is the type of cuisine."
        print(name_line)
        print(cuisine_line)

    def open_restaurant(self):
        """Simulate a message alerting that the restaurant is open."""
        message = self.name.title() + " is open."
        print(message)


class IceCreamStand(Restaurant):
    """Represent aspects of a restaurant, specific to ice cream stands."""

    def __init__(self, name, cuisine):
        """Initialize the parent restaurant, then set the stand's flavors."""
        super().__init__(name, cuisine)
        self.flavors = "vanilla, chocolate, strawberry, and rocky road."

    def describe_ice_cream_flavors(self):
        """Print a statement describing the ice cream flavors offered."""
        print("This ice cream stand has the following flavors: " + self.flavors)
4.125
4
wsgi.py
livingstack/RiverFlows
0
12773297
import os
# NOTE(review): signal, sys, traceback and time are imported but unused here.
import signal
import sys
import traceback
import time

from django.core.wsgi import get_wsgi_application
#from whitenoise.django import DjangoWhiteNoise

# Point Django at the project settings before building the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RiverFlows.settings")

# WSGI entry point used by the application server (gunicorn/uwsgi/etc.).
application = get_wsgi_application()
# WhiteNoise static-file wrapping is currently disabled.
#application = DjangoWhiteNoise(application)
1.34375
1
bokchoy/compat.py
ulule/bokchoy
0
12773298
<filename>bokchoy/compat.py<gh_stars>0
# Python 2/3 text-type compatibility shims.
import six

if not six.PY2:
    # Python 3.x and up
    text_type = str
    string_types = (str,)

    def as_text(v):
        # Coerce bytes/str to str; None passes through unchanged.
        if v is None:
            return None
        elif isinstance(v, bytes):
            return v.decode('utf-8')
        elif isinstance(v, str):
            return v
        else:
            raise ValueError('Unknown type %r' % type(v))
else:
    # Python 2.x
    text_type = unicode
    string_types = (str, unicode)

    def as_text(v):
        # Decode byte strings to unicode; None passes through unchanged.
        if v is None:
            return None
        return v.decode('utf-8')


def decode_redis_hash(h):
    # NOTE(review): currently the identity function — the hash is returned
    # as-is with no key/value decoding; presumably a placeholder. Verify
    # callers before relying on decoded output.
    return h
2.671875
3
figuras/PycharmKayStatisticalReport/problem_2_7.py
bor9/estudiando_el_kay
0
12773299
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
import math
import matplotlib.colors as colors
from matplotlib import cm
from matplotlib import rc

__author__ = 'ernesto'

# if use latex or mathtext
rc('text', usetex=True)
rc('mathtext', fontset='cm')

# auxiliar function for plot ticks of equal length in x and y axis despite its scales.
def convert_display_to_data_coordinates(transData, length=10):
    """Return (x_tick_len, y_tick_len) in data units for a `length`-pixel tick."""
    # create a transform which will take from display to data coordinates
    inv = transData.inverted()
    # transform from display coordinates to data coordinates in x axis
    data_coords = inv.transform([(0, 0), (length, 0)])
    # get the length of the segment in data units
    yticks_len = data_coords[1, 0] - data_coords[0, 0]
    # transform from display coordinates to data coordinates in y axis
    data_coords = inv.transform([(0, 0), (0, length)])
    # get the length of the segment in data units
    xticks_len = data_coords[1, 1] - data_coords[0, 1]
    return xticks_len, yticks_len

#####################################
# PARAMETERS - This can be modified #
#####################################

# normal pdf variances
var1 = 0.5
var2 = 2
var_std = 1
# normal pdf mean
theta = 6
epsilon = 1.5
# maximum deviation from the mean where to plot each gaussian
max_mean_dev = 3.1 * var2

#####################
# END OF PARAMETERS #
#####################

# abscissa values
xmin = theta - max_mean_dev
xmax = theta + max_mean_dev
x = np.linspace(xmin, xmax, 300)
# normal distribution and density values in x
pdf_var1 = norm.pdf(x, theta, math.sqrt(var1))
pdf_var2 = norm.pdf(x, theta, math.sqrt(var2))
pdf_std = norm.pdf(x, theta, math.sqrt(var_std))

# axis parameters
dx = xmax / 20
xmin_ax = xmin - dx
xmax_ax = xmax + dx
ym = np.amax(pdf_var1)
ymax_ax = ym + ym / 10
ymin_ax = -ym / 10

# areas to fill limits
pdf1_xinf = np.linspace(xmin, theta-epsilon, 50)
pdf1_inf = norm.pdf(pdf1_xinf, theta, math.sqrt(var1))
pdf1_xsup = np.linspace(theta+epsilon, xmax, 50)
pdf1_sup = norm.pdf(pdf1_xsup, theta, math.sqrt(var1))

pdf2_xinf = np.linspace(xmin, theta-epsilon, 50)
pdf2_inf = norm.pdf(pdf2_xinf, theta, math.sqrt(var2))
pdf2_xsup = np.linspace(theta+epsilon, xmax, 50)
pdf2_sup = norm.pdf(pdf2_xsup, theta, math.sqrt(var2))

# epsilon scaled to standard-normal units for each estimator variance
epsilon1 = epsilon / math.sqrt(var1)
epsilon2 = epsilon / math.sqrt(var2)
pdfstd1_xinf = np.linspace(xmin, theta-epsilon1, 50)
pdfstd1_inf = norm.pdf(pdfstd1_xinf, theta, math.sqrt(var_std))
pdfstd1_xsup = np.linspace(theta+epsilon1, xmax, 50)
pdfstd1_sup = norm.pdf(pdfstd1_xsup, theta, math.sqrt(var_std))

pdfstd2_xinf = np.linspace(xmin, theta-epsilon2, 50)
pdfstd2_inf = norm.pdf(pdfstd2_xinf, theta, math.sqrt(var_std))
pdfstd2_xsup = np.linspace(theta+epsilon2, xmax, 50)
pdfstd2_sup = norm.pdf(pdfstd2_xsup, theta, math.sqrt(var_std))

# length of the ticks for all subplot (6 pixels)
display_length = 6  # in pixels
# x ticks labels margin
xtm = -0.09
ytm = 0.4
# font size
fontsize = 14

# colors from coolwarm
cNorm = colors.Normalize(vmin=0, vmax=1)
scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cm.coolwarm)
col10 = scalarMap.to_rgba(0)
col20 = scalarMap.to_rgba(1)

fig = plt.figure(0, figsize=(10, 6), frameon=False)

# Subplot 1: pdf of the low-variance estimator in theta units.
ax = plt.subplot2grid((2, 8), (0, 0), rowspan=1, colspan=4)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)

# horizontal and vertical ticks length
xtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)

# axis arrows
plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
             arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
             arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))

plt.plot(x, pdf_var1, color='k', linewidth=2)
# filled areas
ax.fill_between(pdf1_xinf, 0, pdf1_inf, color=col10)
ax.fill_between(pdf1_xsup, 0, pdf1_sup, color=col10)

# xlabels and xtickslabels
plt.plot([theta, theta], [0, xtl], 'k')
plt.plot([theta-epsilon, theta-epsilon], [0, xtl], 'k')
plt.plot([theta+epsilon, theta+epsilon], [0, xtl], 'k')
plt.text(theta, xtm, '$\\theta$', fontsize=fontsize, ha='center', va='baseline')
plt.text(theta-epsilon, xtm, '$\\theta-\epsilon$', fontsize=fontsize, ha='center', va='baseline')
plt.text(theta+epsilon, xtm, '$\\theta+\epsilon$', fontsize=fontsize, ha='center', va='baseline')
plt.text(xmax_ax, xtm, '$\hat{\\theta}$', fontsize=fontsize, ha='right', va='baseline')
plt.text(ytm, ymax_ax, '$p(\hat{\\theta})=\mathcal{N}(\\theta,\,\sigma^2_{\hat{\\theta}})$',
         fontsize=fontsize, ha='left', va='center')
plt.text(xmax_ax+0.4, ymax_ax, '$\sigma^2_{\hat{\\theta}}<\sigma^2_{\check{\\theta}}$',
         fontsize=fontsize, ha='center', va='center')
plt.axis('off')

# Subplot 2: same estimator standardized to N(0, 1).
ax = plt.subplot2grid((2, 8), (0, 4), rowspan=1, colspan=4)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)

# axis arrows
plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
             arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(theta, ymin_ax), xycoords='data', xy=(theta, ymax_ax), textcoords='data',
             arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))

plt.plot(x, pdf_std, color='k', linewidth=2)
# filled areas
ax.fill_between(pdfstd1_xinf, 0, pdfstd1_inf, color=col10)
ax.fill_between(pdfstd1_xsup, 0, pdfstd1_sup, color=col10)

xtm2 = -0.11
# xlabels and xtickslabels
plt.plot([theta-epsilon1, theta-epsilon1], [0, xtl], 'k')
plt.plot([theta+epsilon1, theta+epsilon1], [0, xtl], 'k')
plt.text(theta-epsilon1, xtm2, '$-\epsilon/\sigma_{\hat{\\theta}}$', fontsize=fontsize,
         ha='center', va='baseline')
plt.text(theta+epsilon1, xtm2, '$\epsilon/\sigma_{\hat{\\theta}}$', fontsize=fontsize,
         ha='center', va='baseline')
# BUG FIX: the $...$ math delimiters were unbalanced in the next two strings,
# leaving \sigma outside math mode (LaTeX "Missing $ inserted" with usetex).
plt.text(xmax_ax, xtm2, '$(\hat{\\theta}-\\theta)/\sigma_{\hat{\\theta}}$',
         fontsize=fontsize, ha='center', va='baseline')
plt.text(theta + ytm, ymax_ax, '$p((\hat{\\theta}-\\theta)/\sigma_{\hat{\\theta}})=\mathcal{N}(0,\,1)$',
         fontsize=fontsize, ha='left', va='center')
plt.axis('off')

# Subplot 3: pdf of the high-variance estimator in theta units.
ax = plt.subplot2grid((2, 8), (1, 0), rowspan=1, colspan=4)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)

# axis arrows
plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
             arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
             arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))

plt.plot(x, pdf_var2, color='k', linewidth=2)
# filled areas
ax.fill_between(pdf2_xinf, 0, pdf2_inf, color=col10)
ax.fill_between(pdf2_xsup, 0, pdf2_sup, color=col10)

# xlabels and xtickslabels
plt.plot([theta, theta], [0, xtl], 'k')
plt.plot([theta-epsilon, theta-epsilon], [0, xtl], 'k')
plt.plot([theta+epsilon, theta+epsilon], [0, xtl], 'k')
plt.text(theta, xtm, '$\\theta$', fontsize=fontsize, ha='center', va='baseline')
plt.text(theta-epsilon, xtm, '$\\theta-\epsilon$', fontsize=fontsize, ha='center', va='baseline')
plt.text(theta+epsilon, xtm, '$\\theta+\epsilon$', fontsize=fontsize, ha='center', va='baseline')
plt.text(xmax_ax, xtm, '$\check{\\theta}$', fontsize=fontsize, ha='right', va='baseline')
plt.text(ytm, ymax_ax, '$p(\check{\\theta})=\mathcal{N}(\\theta,\,\sigma^2_{\check{\\theta}})$',
         fontsize=fontsize, ha='left', va='center')
plt.axis('off')

# Subplot 4: high-variance estimator standardized to N(0, 1).
ax = plt.subplot2grid((2, 8), (1, 4), rowspan=1, colspan=4)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)

# axis arrows
plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
             arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(theta, ymin_ax), xycoords='data', xy=(theta, ymax_ax), textcoords='data',
             arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))

plt.plot(x, pdf_std, color='k', linewidth=2)
# filled areas
ax.fill_between(pdfstd2_xinf, 0, pdfstd2_inf, color=col10)
ax.fill_between(pdfstd2_xsup, 0, pdfstd2_sup, color=col10)

xtm2 = -0.11
# xlabels and xtickslabels
plt.plot([theta-epsilon2, theta-epsilon2], [0, xtl], 'k')
plt.plot([theta+epsilon2, theta+epsilon2], [0, xtl], 'k')
plt.text(theta-epsilon2, xtm2, '$-\epsilon/\sigma_{\check{\\theta}}$', fontsize=fontsize,
         ha='center', va='baseline')
plt.text(theta+epsilon2, xtm2, '$\epsilon/\sigma_{\check{\\theta}}$', fontsize=fontsize,
         ha='center', va='baseline')
# BUG FIX: unbalanced $ delimiters, same as subplot 2.
plt.text(xmax_ax, xtm2, '$(\check{\\theta}-\\theta)/\sigma_{\check{\\theta}}$',
         fontsize=fontsize, ha='center', va='baseline')
plt.text(theta + ytm, ymax_ax, '$p((\check{\\theta}-\\theta)/\sigma_{\check{\\theta}})=\mathcal{N}(0,\,1)$',
         fontsize=fontsize, ha='left', va='center')
plt.axis('off')

# save as pdf image
plt.savefig('problem_2_7.pdf', bbox_inches='tight')
plt.show()
3.0625
3
smoke-test/tests/cypress/integration_test.py
pramodbiligiri/datahub
9
12773300
import pytest
import subprocess

from tests.utils import ingest_file_via_rest
from tests.utils import delete_urns_from_file


@pytest.fixture(scope="module", autouse=True)
def ingest_cleanup_data():
    """Ingest fixture data before the cypress run and remove it afterwards."""
    print("ingesting test data")
    ingest_file_via_rest("tests/cypress/data.json")
    yield
    print("removing test data")
    delete_urns_from_file("tests/cypress/data.json")


def test_run_cypress(frontend_session, wait_for_healthchecks):
    """Run the cypress suite; fail if it exits non-zero."""
    command = "npx cypress run"  # FIX: was an f-string with no placeholders
    print('starting?')
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, cwd="tests/cypress")
    # BUG FIX: reading stdout to EOF before touching stderr can deadlock when
    # the child fills the stderr pipe buffer; communicate() drains both.
    stdout, stderr = proc.communicate()
    return_code = proc.returncode
    print(stdout.decode("utf-8"))
    print('stderr output:')
    print(stderr.decode("utf-8"))
    print('return code', return_code)
    assert(return_code == 0)
2.015625
2
PythonProject/my_photoshop/blur.py
curqder/MyProjects
0
12773301
"""
File: blur.py
Name: 楊翔竣 <NAME>
-------------------------------
This file shows the original image first, smiley-face.png, and then compare
to its blurred image. The blur algorithm uses the average RGB values of a
pixel's nearest neighbors
"""
from simpleimage import SimpleImage


def blur(img):
    """
    :param img: the original image
    :return: a new image where each pixel is the average RGB of its up-to-9
             in-bounds neighbors (3x3 window including itself)
    """
    blurred = SimpleImage.blank(img.width, img.height)
    for y in range(img.height):
        for x in range(img.width):
            count = 0
            sum_r = 0
            sum_g = 0
            sum_b = 0
            for i in range(-1, 2):      # vertical offset: -1, 0, 1
                for j in range(-1, 2):  # horizontal offset: -1, 0, 1
                    pixel_x = x + j
                    # BUG FIX: was `y + j`, which used the horizontal offset
                    # for both axes and only averaged diagonal neighbors.
                    pixel_y = y + i
                    if 0 <= pixel_x < img.width and 0 <= pixel_y < img.height:
                        pixel = img.get_pixel(pixel_x, pixel_y)
                        count += 1
                        sum_r += pixel.red
                        sum_g += pixel.green
                        sum_b += pixel.blue
            new_pixel = blurred.get_pixel(x, y)
            new_pixel.red = sum_r / count
            new_pixel.green = sum_g / count
            new_pixel.blue = sum_b / count
    return blurred


def main():
    """Show the original image, then the result of six blur passes."""
    old_img = SimpleImage("images/smiley-face.png")
    old_img.show()
    blurred_img = blur(old_img)
    for i in range(5):
        blurred_img = blur(blurred_img)
    blurred_img.show()


if __name__ == '__main__':
    main()
3.71875
4
src/examples/__init__.py
flipdot/manim-gallery
14
12773302
from os.path import dirname, join
import glob
from pathlib import Path

# Every .py file below this package, searched recursively.
modules = glob.glob(join(dirname(__file__), '**/*.py'), recursive=True)
paths = [Path(x) for x in modules]

# Expose each example module as "<subpackage>.<module>", skipping private
# modules (leading underscore) and files sitting directly in the package root.
__all__ = []
for p in paths:
    if not p.is_file():
        continue
    if p.name.startswith('_') or p.parent.name == 'examples':
        continue
    __all__.append(f'{p.parent.name}.{p.stem}')
2.765625
3
research/cv/dlinknet/src/loss.py
mindspore-ai/models
77
12773303
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore
import mindspore.nn as nn


class dice_bce_loss(nn.LossBase):
    """Combined BCE + soft-Dice loss for binary segmentation.

    When `batch` is true the Dice statistics are reduced over the whole
    batch; otherwise per-sample (summing axes 1..3) and then averaged.
    """

    def __init__(self, batch=True, reduction="mean"):
        super(dice_bce_loss, self).__init__(reduction)
        self.batch = batch
        self.bce_loss = nn.BCELoss(reduction='mean')
        self.sum = mindspore.ops.ReduceSum(keep_dims=False)

    def soft_dice_coeff(self, y_true, y_pred):
        """Mean soft Dice coefficient: 2*|A∩B| / (|A|+|B|)."""
        smooth = 0.0  # may change
        if self.batch:
            i = self.sum(y_true)
            j = self.sum(y_pred)
            intersection = self.sum(y_true * y_pred)
        else:
            # Per-sample reduction over the three non-batch axes.
            i = y_true.sum(1).sum(1).sum(1)
            j = y_pred.sum(1).sum(1).sum(1)
            intersection = (y_true * y_pred).sum(1).sum(1).sum(1)
        score = (2. * intersection + smooth) / (i + j + smooth)
        return score.mean()

    def soft_dice_loss(self, y_true, y_pred):
        """Dice loss = 1 - Dice coefficient."""
        loss = 1 - self.soft_dice_coeff(y_true, y_pred)
        return loss

    def construct(self, predict, target):
        # Loss = BCE(predict, target) + soft-Dice loss.
        a = self.bce_loss(predict, target)
        b = self.soft_dice_loss(target, predict)
        return a + b


class iou_bce_loss(nn.LossBase):
    """Combined BCE + soft-IoU loss for binary segmentation.

    Identical to dice_bce_loss except the overlap score is IoU
    (|A∩B| / |A∪B|) instead of Dice; the helper names are kept for parity.
    """

    def __init__(self, batch=True, reduction="mean"):
        super(iou_bce_loss, self).__init__(reduction)
        self.batch = batch
        self.bce_loss = nn.BCELoss(reduction='mean')
        self.sum = mindspore.ops.ReduceSum(keep_dims=False)

    def soft_dice_coeff(self, y_true, y_pred):
        """Mean soft IoU (despite the name, kept for symmetry with dice_bce_loss)."""
        smooth = 0.0  # may change
        if self.batch:
            i = self.sum(y_true)
            j = self.sum(y_pred)
            intersection = self.sum(y_true * y_pred)
        else:
            i = y_true.sum(1).sum(1).sum(1)
            j = y_pred.sum(1).sum(1).sum(1)
            intersection = (y_true * y_pred).sum(1).sum(1).sum(1)
        score = (intersection + smooth) / (i + j - intersection + smooth)  # iou
        return score.mean()

    def soft_dice_loss(self, y_true, y_pred):
        """IoU loss = 1 - IoU score."""
        loss = 1 - self.soft_dice_coeff(y_true, y_pred)
        return loss

    def construct(self, predict, target):
        # Loss = BCE(predict, target) + soft-IoU loss.
        a = self.bce_loss(predict, target)
        b = self.soft_dice_loss(target, predict)
        return a + b
2.390625
2
ceilometer/storage/impl_sqlalchemy.py
dreamhost/ceilometer
0
12773304
# -*- encoding: utf-8 -*- # # Author: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQLAlchemy storage backend.""" from __future__ import absolute_import import copy import os from sqlalchemy import func from ceilometer.openstack.common import log from ceilometer.openstack.common import timeutils from ceilometer.storage import base from ceilometer.storage import models as api_models from ceilometer.storage.sqlalchemy import migration from ceilometer.storage.sqlalchemy.models import Meter, Project, Resource from ceilometer.storage.sqlalchemy.models import Source, User, Base import ceilometer.storage.sqlalchemy.session as sqlalchemy_session LOG = log.getLogger(__name__) class SQLAlchemyStorage(base.StorageEngine): """Put the data into a SQLAlchemy database. 
Tables:: - user - { id: user uuid } - source - { id: source id } - project - { id: project uuid } - meter - the raw incoming data - { id: meter id counter_name: counter name user_id: user uuid (->user.id) project_id: project uuid (->project.id) resource_id: resource uuid (->resource.id) resource_metadata: metadata dictionaries counter_type: counter type counter_unit: counter unit counter_volume: counter volume timestamp: datetime message_signature: message signature message_id: message uuid } - resource - the metadata for resources - { id: resource uuid resource_metadata: metadata dictionaries project_id: project uuid (->project.id) user_id: user uuid (->user.id) } - sourceassoc - the relationships - { meter_id: meter id (->meter.id) project_id: project uuid (->project.id) resource_id: resource uuid (->resource.id) user_id: user uuid (->user.id) source_id: source id (->source.id) } """ OPTIONS = [] def register_opts(self, conf): """Register any configuration options used by this engine.""" conf.register_opts(self.OPTIONS) @staticmethod def get_connection(conf): """Return a Connection instance based on the configuration settings. """ return Connection(conf) def make_query_from_filter(query, event_filter, require_meter=True): """Return a query dictionary based on the settings in the filter. :param filter: EventFilter instance :param require_meter: If true and the filter does not have a meter, raise an error. 
""" if event_filter.meter: query = query.filter(Meter.counter_name == event_filter.meter) elif require_meter: raise RuntimeError('Missing required meter specifier') if event_filter.source: query = query.filter(Meter.sources.any(id=event_filter.source)) if event_filter.start: ts_start = event_filter.start query = query.filter(Meter.timestamp >= ts_start) if event_filter.end: ts_end = event_filter.end query = query.filter(Meter.timestamp < ts_end) if event_filter.user: query = query.filter_by(user_id=event_filter.user) if event_filter.project: query = query.filter_by(project_id=event_filter.project) if event_filter.resource: query = query.filter_by(resource_id=event_filter.resource) if event_filter.metaquery: raise NotImplementedError('metaquery not implemented') return query class Connection(base.Connection): """SqlAlchemy connection.""" def __init__(self, conf): url = conf.database_connection if url == 'sqlite://': url = os.environ.get('CEILOMETER_TEST_SQL_URL', url) LOG.info('connecting to %s', url) self.session = sqlalchemy_session.get_session(url, conf) def upgrade(self, version=None): migration.db_sync(self.session.get_bind(), version=version) def clear(self): engine = self.session.get_bind() for table in reversed(Base.metadata.sorted_tables): engine.execute(table.delete()) def record_metering_data(self, data): """Write the data to the backend storage system. 
:param data: a dictionary such as returned by ceilometer.meter.meter_message_from_counter """ if data['source']: source = self.session.query(Source).get(data['source']) if not source: source = Source(id=data['source']) self.session.add(source) else: source = None # create/update user && project, add/update their sources list if data['user_id']: user = self.session.merge(User(id=str(data['user_id']))) if not filter(lambda x: x.id == source.id, user.sources): user.sources.append(source) else: user = None if data['project_id']: project = self.session.merge(Project(id=str(data['project_id']))) if not filter(lambda x: x.id == source.id, project.sources): project.sources.append(source) else: project = None # Record the updated resource metadata rmetadata = data['resource_metadata'] resource = self.session.merge(Resource(id=str(data['resource_id']))) if not filter(lambda x: x.id == source.id, resource.sources): resource.sources.append(source) resource.project = project resource.user = user # Current metadata being used and when it was last updated. resource.resource_metadata = rmetadata # autoflush didn't catch this one, requires manual flush self.session.flush() # Record the raw data for the event. meter = Meter(counter_type=data['counter_type'], counter_unit=data['counter_unit'], counter_name=data['counter_name'], resource=resource) self.session.add(meter) if not filter(lambda x: x.id == source.id, meter.sources): meter.sources.append(source) meter.project = project meter.user = user meter.timestamp = data['timestamp'] meter.resource_metadata = rmetadata meter.counter_volume = data['counter_volume'] meter.message_signature = data['message_signature'] meter.message_id = data['message_id'] return def get_users(self, source=None): """Return an iterable of user id strings. :param source: Optional source filter. 
""" query = self.session.query(User.id) if source is not None: query = query.filter(User.sources.any(id=source)) return (x[0] for x in query.all()) def get_projects(self, source=None): """Return an iterable of project id strings. :param source: Optional source filter. """ query = self.session.query(Project.id) if source: query = query.filter(Project.sources.any(id=source)) return (x[0] for x in query.all()) def get_resources(self, user=None, project=None, source=None, start_timestamp=None, end_timestamp=None, metaquery={}, resource=None): """Return an iterable of api_models.Resource instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param source: Optional source filter. :param start_timestamp: Optional modified timestamp start range. :param end_timestamp: Optional modified timestamp end range. :param metaquery: Optional dict with metadata to match on. :param resource: Optional resource filter. """ query = self.session.query(Meter,).group_by(Meter.resource_id) if user is not None: query = query.filter(Meter.user_id == user) if source is not None: query = query.filter(Meter.sources.any(id=source)) if start_timestamp: query = query.filter(Meter.timestamp >= start_timestamp) if end_timestamp: query = query.filter(Meter.timestamp < end_timestamp) if project is not None: query = query.filter(Meter.project_id == project) if resource is not None: query = query.filter(Meter.resource_id == resource) if metaquery: raise NotImplementedError('metaquery not implemented') for meter in query.all(): yield api_models.Resource( resource_id=meter.resource_id, project_id=meter.project_id, user_id=meter.user_id, metadata=meter.resource_metadata, meter=[ api_models.ResourceMeter( counter_name=m.counter_name, counter_type=m.counter_type, counter_unit=m.counter_unit, ) for m in meter.resource.meters ], ) def get_meters(self, user=None, project=None, resource=None, source=None, metaquery={}): """Return an 
iterable of api_models.Meter instances :param user: Optional ID for user that owns the resource. :param project: Optional ID for project that owns the resource. :param resource: Optional ID of the resource. :param source: Optional source filter. :param metaquery: Optional dict with metadata to match on. """ query = self.session.query(Resource) if user is not None: query = query.filter(Resource.user_id == user) if source is not None: query = query.filter(Resource.sources.any(id=source)) if resource: query = query.filter(Resource.id == resource) if project is not None: query = query.filter(Resource.project_id == project) query = query.options( sqlalchemy_session.sqlalchemy.orm.joinedload('meters')) if metaquery: raise NotImplementedError('metaquery not implemented') for resource in query.all(): meter_names = set() for meter in resource.meters: if meter.counter_name in meter_names: continue meter_names.add(meter.counter_name) yield api_models.Meter( name=meter.counter_name, type=meter.counter_type, unit=meter.counter_unit, resource_id=resource.id, project_id=resource.project_id, user_id=resource.user_id, ) def get_samples(self, event_filter): """Return an iterable of api_models.Samples """ query = self.session.query(Meter) query = make_query_from_filter(query, event_filter, require_meter=False) samples = query.all() for s in samples: # Remove the id generated by the database when # the event was inserted. It is an implementation # detail that should not leak outside of the driver. yield api_models.Sample( # Replace 'sources' with 'source' to meet the caller's # expectation, Meter.sources contains one and only one # source in the current implementation. 
source=s.sources[0].id, counter_name=s.counter_name, counter_type=s.counter_type, counter_unit=s.counter_unit, counter_volume=s.counter_volume, user_id=s.user_id, project_id=s.project_id, resource_id=s.resource_id, timestamp=s.timestamp, resource_metadata=s.resource_metadata, message_id=s.message_id, message_signature=s.message_signature, ) def _make_volume_query(self, event_filter, counter_volume_func): """Returns complex Meter counter_volume query for max and sum.""" subq = self.session.query(Meter.id) subq = make_query_from_filter(subq, event_filter, require_meter=False) subq = subq.subquery() mainq = self.session.query(Resource.id, counter_volume_func) mainq = mainq.join(Meter).group_by(Resource.id) return mainq.filter(Meter.id.in_(subq)) def get_event_interval(self, event_filter): """Return the min and max timestamps from samples, using the event_filter to limit the samples seen. ( datetime.datetime(), datetime.datetime() ) """ query = self.session.query(func.min(Meter.timestamp), func.max(Meter.timestamp)) query = make_query_from_filter(query, event_filter) results = query.all() a_min, a_max = results[0] return (a_min, a_max) def _make_stats_query(self, event_filter): query = self.session.query( func.min(Meter.timestamp).label('tsmin'), func.max(Meter.timestamp).label('tsmax'), func.avg(Meter.counter_volume).label('avg'), func.sum(Meter.counter_volume).label('sum'), func.min(Meter.counter_volume).label('min'), func.max(Meter.counter_volume).label('max'), func.count(Meter.counter_volume).label('count')) return make_query_from_filter(query, event_filter) @staticmethod def _stats_result_to_model(result, period, period_start, period_end): duration = (timeutils.delta_seconds(result.tsmin, result.tsmax) if result.tsmin is not None and result.tsmax is not None else None) return api_models.Statistics( count=int(result.count), min=result.min, max=result.max, avg=result.avg, sum=result.sum, duration_start=result.tsmin, duration_end=result.tsmax, duration=duration, 
period=period, period_start=period_start, period_end=period_end, ) def get_meter_statistics(self, event_filter, period=None): """Return an iterable of api_models.Statistics instances containing meter statistics described by the query parameters. The filter must have a meter value set. """ if not period or not event_filter.start or not event_filter.end: res = self._make_stats_query(event_filter).all()[0] if not period: yield self._stats_result_to_model(res, 0, res.tsmin, res.tsmax) return query = self._make_stats_query(event_filter) # HACK(jd) This is an awful method to compute stats by period, but # since we're trying to be SQL agnostic we have to write portable # code, so here it is, admire! We're going to do one request to get # stats by period. We would like to use GROUP BY, but there's no # portable way to manipulate timestamp in SQL, so we can't. for period_start, period_end in base.iter_period( event_filter.start or res.tsmin, event_filter.end or res.tsmax, period): q = query.filter(Meter.timestamp >= period_start) q = q.filter(Meter.timestamp < period_end) r = q.all()[0] # Don't return results that didn't have any event if r.count: yield self._stats_result_to_model( result=r, period=int(timeutils.delta_seconds(period_start, period_end)), period_start=period_start, period_end=period_end, )
2.140625
2
plaza_routing/setup.py
PlazaNav/PlazaNav
10
12773305
from setuptools import setup, find_packages with open('README.md') as f: readme = f.read() setup( name='plaza_routing', version='0.0.1', description='Plaza routing service for plazaroute', long_description=readme, author='<NAME>, <NAME>', author_email='<EMAIL>', url='https://github.com/PlazaRoute/plazaroute', license="MIT License", packages=find_packages(exclude=('tests', 'docs')), package_data={'': ['integration/routing_strategy/graphhopper_swagger.json']} )
1.21875
1
algorithm/challenges/misere-nim-1.py
rishabhiitbhu/hackerrank
0
12773306
<reponame>rishabhiitbhu/hackerrank g = int(input().strip()) for _ in range(g): n = int(input().strip()) s = [int(x) for x in input().strip().split(' ')] x = 0 for i in s: x ^= i if len(set(s))==1 and 1 in s: #here x=0 or 1 if x:#odd no. of ones print("Second") else:#even no. of ones print("First") else: if x: print("First") else: print("Second")
3.546875
4
flaskTest.py
thejonty/sensoRaspi
0
12773307
from flask import Flask, render_template, request, url_for from datetime import datetime from readWeather import readWeather app = Flask(__name__) @app.route("/", methods=['GET', 'POST']) def print_form(): now = datetime.now() timeString = now.strftime("%Y-%m-%d %H:%M") templateData = { 'title' : 'Humidity and Temperature Log', 'time' : timeString } if request.method == 'POST': readW = readWeather('humidity_temp_log.h5') result=request.form['fooput'] readW.generatePlot(result) filename1 = 'temp_humid_' + result + '.png' return render_template('form.html',result=result, filename1=filename1, **templateData) if request.method == 'GET': #filename1 = 'temp.png' return render_template("form.html", **templateData) if __name__ == "__main__": #app.run(host='0.0.0.0', debug=True) app.run(host='192.168.0.24', port=5000, debug=False)
3.0625
3
django/docs/releases/1.7.7.txt.py
roshanba/mangal
0
12773308
<filename>django/docs/releases/1.7.7.txt.py XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXX XXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXX XXXXX XXXXXX XXXXX XXXXX XXXXXXX XXXX XXX XXXXXXXX XXXXXX XX XXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXX XXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXX XX XXXX XXXXXXXXXXXX XXX XXXXXXX XX XXXX XXX XXXX XX XXX XXXXX XXXX XXXXXXXXXX XXX XXXXXXXX XX XXXX XXXXXXXXX XXXXX XXXXXXX XX XX XXXXXXXX XXXX XX XXXXXXXXXXXXXXXXX XXXX XXXXX XXXX XXXXXXX XXXXXXXX XX XXXXXX XXXX XXXXXXX XXXXXXXX XX XXXXXX XX XXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXX X XXXXX XXX XXXXXX XXXX XXXXXXXXX XXXXXX XXXXXXX XXXX XXXX XXXXXXXXXX XXX XXX XXX XXX XXXXXX XXX XXXX XXXXX XXXXXXXX XX XXXXXXX XXXXXXXXX XX XXXXXX XXXX XXXXXX XXXXXXXXXXXXXXXX XXXX XXX XXXXXX XXX XXXXXXXX XXXXX XX XX XXXXXXX XXX XXXXXX XX XXX XXXXXX XXXX XXXXXXXXXX XXXXXXXXXX XXXXXXXX XXXX XXXXXXXXXX XX XXXXXXXXX XX XXXXXXXX XXXXX XXX XXXXXXX XX XXXXXXXXXXXXXXXX XXXXX XXXX XXXXX XX XXXXX XXXX XXXX XXX XXXXXX XX X XXXXXXXXXXXXXXXX XXXX XXXXXXX XXXXXXXX XX XXXXXX XXX XXXXXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXXXXXXX XXX XXXXXX XXX XXXXXXXXXXXXX XXXXXXXX XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXX XX XXXX XXXXX XX XXXX XXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XX XXXXXXXX XXX XXXX XX XX XXX XXXXXXXX XXXX XXX XXXXXXXX XXXXXX XXX XXXXX XXXXXXXXX XXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXX XXXX XXXX XXXXXXX XXXXXXX XXXXXXXXXX XXX XX XXXXXXXXXX XXXX XXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXX XXXXX XXXXXXX XXXXXX XXXXXX XXXXXXXXXX XXXXX XX XXXX XXX XXXX XXX XXXX XXX XXXXXXXXXXXX XXXXXXXX XXXXXX XXX XXXXXXXX XXXX XX XXXXXX XXXXXXXXXX XXXXXX XXXXXXXX XX XXXXXX XXXX XXXXX XXXX XXXXXXXX XXXX XXXXXXX XXXXXXXXXX XXXX XX XXXXXXXXXXXXXXXXXXXX XX XXXXXXXX XXXXX XX XXXXXXXXXXX XX XX XXXXXX XXXXXX XXXXX X XXXXXXX 
XXXXXXX XXXXXXXX XX X XXXXXXXXX XXXXXX XX XXXXXXXXXXXXXXXXX XX XXXXXXX XXXX XXXXXXXX XXXXXXX XXX XXXX XXXX X XXX XXXX X XXXXX XXXX XXXXX XXXXXX XXXX XX XXX XXXXXX XX XXXX XXXXXXXX XXXX XX XXXXXX XXXXXX XXXXXX XXXXXXX XXXXXXXXXX XX XXX XXXXX XX X XXX XX XX XXXXXX XXXXXXXXX XXXXXXXX XXXXXXXX X XXXXX XXXXXXXX XX XXXXXXX XX XXXXXXXXXX XXXXX XXXXXXXX X XXXXXXXX XXXXX XXXXX XXXXXXXXX XXXXX XX XX XXXXXXXX XXX XXXXXXX XXXX XXXXXXXXXX XXX XXXXXXXXXX XXXXXXXXXXXXXXXXXX X XXXXXXX XXXXXXX XXXXXXXXX XXXXX XX XXX XXX XXXX XXXX XXXXXXX XXXXXXXXX XXXXXXXXXX XXXX XXXXXXXXXXXXXXXX XXXXXXXXX XX XXXXXX XXXX XXXXXXXXXXXXXXX XXX XXXXXXXXX XXXX XX XXXXXXX XX XXXXXXXXXX XXXXXXXXXXXXXXXXXX
1.53125
2
dags/somalia_dag.py
flatten-official/flatten-scripts
2
12773309
from datetime import datetime from airflow import DAG from airflow.operators.bash_operator import BashOperator from airflow.operators.python_operator import PythonOperator from somalia.form.main import main from somalia.form.somalia_form_data import run_form_data_scraping from somalia.gsheets.somalia_sheet import main from utils.dags import default_args from utils.debugger import enable_cloud_debugger enable_cloud_debugger() #################### # CONFIRMED SCRIPT # #################### somalia_confirmed_data_dag = DAG( dag_id='somalia-confirmed', start_date=datetime(2020, 4, 22), schedule_interval='15 4,16 * * *', # 15 minutes past hour 4 and 16. default_args=default_args, catchup=True ) echo_confirmed = BashOperator( task_id='Echo', bash_command='echo "Getting Somalia Confirmed Data"' ) run_confirmed_service = PythonOperator( task_id='get_somalia_confirmed_data', python_callable=main, dag=somalia_confirmed_data_dag ) #################### # FORM SCRIPT # #################### somalia_form_data_dag = DAG( dag_id='somalia-form', start_date=datetime(2020, 4, 25), schedule_interval='0 * * * *', # every hour default_args=default_args, catchup=True ) echo_form = BashOperator( task_id='Echo', bash_command='echo "Getting Somalia Form Data"' ) run_form_service = PythonOperator( task_id='get_somalia_form_data', python_callable=run_form_data_scraping, dag=somalia_form_data_dag ) #################### # SHEETS SCRIPT # #################### somalia_sheets_upload = DAG( dag_id='somalia-sheets-upload', start_date=datetime(2020, 4, 25), schedule_interval='5 * * * *', # 5 minutes past every hour default_args=default_args, catchup=True ) echo_sheet = BashOperator( task_id='Echo', bash_command='echo "Getting Somalia Form Data"' ) upload_sheet = PythonOperator( task_id='upload-to-sheets', python_callable=main, dag=somalia_sheets_upload ) # Three separate DAGs since none are dependant on the other # Start times are offset to not overload the instances echo_confirmed >> 
run_confirmed_service echo_form >> run_form_service echo_sheet >> upload_sheet
2.171875
2
src/gtk3/treeview-treestore/glade/MainWindow.py
alexandrebarbaruiva/gui-python-gtk
42
12773310
# -*- coding: utf-8 -*- """Gtk.TreeView(), Gtk.TreeStore().""" import gi gi.require_version(namespace='Gtk', version='3.0') from gi.repository import Gtk class Handler: brazilian_cities = { 'SP': ['Botucatu', 'São Manuel'], 'SC': ['Florianópolis', 'Joinville'] } def __init__(self): # Acessando o `Gtk.ListStore()`. tree_store = builder.get_object(name='tree_store') for state, cities_list in self.brazilian_cities.items(): iter = tree_store.append(parent=None, row=[state]) for city in cities_list: tree_store.append(parent=iter, row=[city]) def on_row_double_click(self, widget, tree_path, tree_view_column): model = widget.get_model() print(model) tree_iter = model.get_iter(tree_path) print(tree_iter) column = tree_view_column.get_sort_column_id() print(column) column_title = tree_view_column.get_title() print(column_title) print(f'Coluna: {column} - Título: {column_title}') value = model.get_value(iter=tree_iter, column=column) print(f'Texto da linha {value}') if __name__ == '__main__': builder = Gtk.Builder.new() builder.add_from_file(filename='MainWindow.glade') builder.connect_signals(obj_or_map=Handler()) win = builder.get_object(name='MainWindow') win.connect('destroy', Gtk.main_quit) win.show_all() Gtk.main()
2.84375
3
tensorflow/contrib/rnn/python/ops/lstm_ops.py
returncode13/tensorflow
0
12773311
<filename>tensorflow/contrib/rnn/python/ops/lstm_ops.py # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """LSTM Fused Cell ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import load_library from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import variable_scope as vs from tensorflow.python.platform import resource_loader _lstm_ops_so = load_library.load_op_library( resource_loader.get_path_to_datafile("_lstm_ops.so")) assert _lstm_ops_so, "Could not load _lstm_ops.so." # pylint: disable=invalid-name def _lstm_fused_cell(x, cs_prev, h_prev, w, b, wci=None, wcf=None, wco=None, forget_bias=None, cell_clip=None, use_peephole=None, name=None): r"""Computes the LSTM cell forward propagation for 1 time step. This implementation uses 1 weight matrix and 1 bias vector, there is no diagonal peephole connection. 
This kernel op implements the following mathematical equations: ```python xh = [x, h_prev] [i, f, ci, o] = xh * w + b f = f + forget_bias i = sigmoid(i) f = sigmoid(f) ci = tanh(ci) o = sigmoid(o) cs = ci .* i + cs_prev .* f co = tanh(cs) h = co .* o ``` Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`. The input to the LSTM cell. cs_prev: A `Tensor`. Must have the same type as `x`. h_prev: A `Tensor`. Must have the same type as `x`. w: A `Tensor`. Must have the same type as `x`. The weight matrix. b: A `Tensor`. Must have the same type as `x`. The bias vector. wci: A `Tensor`. Must have the same type as `x`. wcf: A `Tensor`. Must have the same type as `x`. wco: A `Tensor`. Must have the same type as `x`. forget_bias: An optional `float`. Defaults to `1`. The forget gate bias. cell_clip: An optional `float`. Defaults to `3`. use_peephole: An optional `bool`. Defaults to `False`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (i, cs, f, o, ci, co, h). i: A `Tensor`. Has the same type as `x`. The input gate. cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh. f: A `Tensor`. Has the same type as `x`. The forget gate. o: A `Tensor`. Has the same type as `x`. The output gate. ci: A `Tensor`. Has the same type as `x`. The cell input. co: A `Tensor`. Has the same type as `x`. The cell after the tanh. h: A `Tensor`. Has the same type as `x`. The output h vector. Raises: ValueError: If cell_size is None. 
""" if wci is None: cell_size = cs_prev.get_shape().with_rank(2)[1].value if cell_size is None: raise ValueError("cell_size from `cs_prev` should not be None.") wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size]) wco = wci wcf = wci # pylint: disable=protected-access return _lstm_ops_so.lstm_fused_cell(x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wco=wco, wcf=wcf, b=b, forget_bias=forget_bias, cell_clip=cell_clip, use_peephole=use_peephole, name=name) # pylint: enable=protected-access def _fused_lstm(seq_len_max, x, w, b, cs_prev=None, h_prev=None, wci=None, wcf=None, wco=None, forget_bias=None, cell_clip=None, use_peephole=None, name=None): r"""TODO(williamchan): add doc. Args: seq_len_max: A `Tensor` of type `int64`. x: A list of at least 1 `Tensor` objects of the same type in: `float32`. w: A `Tensor`. Must have the same type as `x`. b: A `Tensor`. Must have the same type as `x`. cs_prev: A `Tensor`. Must have the same type as `x`. h_prev: A `Tensor`. Must have the same type as `x`. wci: A `Tensor`. Must have the same type as `x`. wcf: A `Tensor`. Must have the same type as `x`. wco: A `Tensor`. Must have the same type as `x`. forget_bias: An optional `float`. Defaults to `1`. cell_clip: An optional `float`. Defaults to `3`. use_peephole: An optional `bool`. Defaults to `False`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (i, cs, f, o, ci, co, h). i: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. cs: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. f: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. o: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. ci: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. 
co: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. h: A list with the same number of `Tensor` objects as `x` of `Tensor` objects of the same type as x. Raises: ValueError: If `b` does not have a valid shape. """ batch_size = x[0].get_shape().with_rank(2)[0].value cell_size4 = b.get_shape().with_rank(1)[0].value if cell_size4 is None: raise ValueError("`b` shape must not be None.") cell_size = cell_size4 / 4 zero_state = None if cs_prev is None or h_prev is None: zero_state = array_ops.constant(0, dtype=dtypes.float32, shape=[batch_size, cell_size]) if cs_prev is None: cs_prev = zero_state if h_prev is None: h_prev = zero_state if wci is None: wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size]) wco = wci wcf = wci # pylint: disable=protected-access return _lstm_ops_so.fused_lstm(seq_len_max=seq_len_max, x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci, wco=wco, wcf=wcf, b=b, forget_bias=forget_bias, cell_clip=cell_clip, name=name, use_peephole=use_peephole) # pylint: enable=protected-access # pylint: enable=invalid-name ops.RegisterShape("LSTMFusedCell")(None) _lstm_fused_cell_grad_outputs = ["cs_prev_grad", "dicfo"] @ops.RegisterShape("LSTMFusedCell") def _LSTMFusedCellShape(op): batch_size = op.inputs[0].get_shape().with_rank(2)[0].value cell_size = op.inputs[1].get_shape().with_rank(2)[1].value return (tensor_shape.TensorShape([batch_size, cell_size]), tensor_shape.TensorShape([batch_size, cell_size]), tensor_shape.TensorShape([batch_size, cell_size]), tensor_shape.TensorShape([batch_size, cell_size]), tensor_shape.TensorShape([batch_size, cell_size]), tensor_shape.TensorShape([batch_size, cell_size]), tensor_shape.TensorShape([batch_size, cell_size])) @ops.RegisterGradient("LSTMFusedCell") def _LSTMFusedCellGrad(op, *grad): """Gradient for LSTMFusedCell.""" (x, cs_prev, h_prev, w, wci, wco, wcf, b) = op.inputs (i, cs, f, o, ci, co, _) = op.outputs (_, cs_grad, _, _, _, _, h_grad) = grad 
batch_size = x.get_shape().with_rank(2)[0].value if batch_size is None: batch_size = -1 input_size = x.get_shape().with_rank(2)[1].value if input_size is None: raise ValueError("input_size from `x` should not be None.") cell_size = cs_prev.get_shape().with_rank(2)[1].value if cell_size is None: raise ValueError("cell_size from `cs_prev` should not be None.") (cs_prev_grad, dicfo, wci_grad, wcf_grad, wco_grad) = _lstm_ops_so.lstm_fused_cell_grad( x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole=op.get_attr("use_peephole")) # Backprop from dicfo to xh. xh_grad = math_ops.matmul(dicfo, w, transpose_b=True) x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size)) x_grad.get_shape().merge_with(x.get_shape()) h_prev_grad = array_ops.slice(xh_grad, (0, input_size), (batch_size, cell_size)) h_prev_grad.get_shape().merge_with(h_prev.get_shape()) # Backprop from dicfo to w. xh = array_ops.concat(1, [x, h_prev]) w_grad = math_ops.matmul(xh, dicfo, transpose_a=True) w_grad.get_shape().merge_with(w.get_shape()) # Backprop from dicfo to b. 
b_grad = nn_ops.bias_add_grad(dicfo) b_grad.get_shape().merge_with(b.get_shape()) return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad, b_grad) @ops.RegisterShape("LSTMFusedCellGrad") def _LSTMFusedCellGradShape(op): batch_size = op.inputs[0].get_shape().with_rank(2)[0].value cell_size = op.inputs[1].get_shape().with_rank(2)[1].value return [tensor_shape.TensorShape([batch_size, cell_size]), tensor_shape.TensorShape([batch_size, cell_size * 4]), tensor_shape.TensorShape([cell_size]), tensor_shape.TensorShape([cell_size]), tensor_shape.TensorShape([cell_size])] @ops.RegisterShape("FusedLSTM") def _FusedLSTMShape(op): max_len = op.get_attr("max_len") x = op.inputs[1] b = op.inputs[-1] batch_size = x.get_shape().with_rank(2)[0].value cell_size = b.get_shape().with_rank(1)[0].value / 4 return [tensor_shape.TensorShape([batch_size, cell_size])] * max_len * 7 @ops.RegisterGradient("FusedLSTM") def _FusedLSTMGrad(op, *grad): """Gradient for FusedLSTM.""" max_len = op.get_attr("max_len") seq_len_max = op.inputs[0] x = op.inputs[1:1 + max_len] cs_prev = op.inputs[-7] h_prev = op.inputs[-6] w = op.inputs[-5] wci = op.inputs[-4] wco = op.inputs[-3] wcf = op.inputs[-2] b = op.inputs[-1] i = op.outputs[0 * max_len:1 * max_len] cs = op.outputs[1 * max_len:2 * max_len] f = op.outputs[2 * max_len:3 * max_len] o = op.outputs[3 * max_len:4 * max_len] ci = op.outputs[4 * max_len:5 * max_len] co = op.outputs[5 * max_len:6 * max_len] h = op.outputs[6 * max_len:7 * max_len] cs_grad = grad[-max_len * 2:-max_len] h_grad = grad[-max_len:] (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wco_grad, wcf_grad, b_grad) = _lstm_ops_so.fused_lstm_grad( seq_len_max, x, cs_prev, h_prev, w, wci, wco, wcf, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole=op.get_attr("use_peephole")) return [None] + x_grad + [cs_prev_grad, h_prev_grad, w_grad, wci_grad, wco_grad, wcf_grad, b_grad] @ops.RegisterShape("FusedLSTMGrad") def _FusedLSTMGradShape(op): """Shape for 
FusedLSTM.""" max_len = op.get_attr("max_len") x = op.inputs[1] cs_prev = op.inputs[1 + max_len] h_prev = op.inputs[2 + max_len] w = op.inputs[3 + max_len] wci = op.inputs[4 + max_len] wco = op.inputs[5 + max_len] wcf = op.inputs[6 + max_len] b = op.inputs[7 + max_len] x_shape = x.get_shape().with_rank(2) cs_prev_shape = cs_prev.get_shape().with_rank(2) h_prev_shape = h_prev.get_shape().with_rank(2) w_shape = w.get_shape().with_rank(2) wci_shape = wci.get_shape().with_rank(1) wco_shape = wco.get_shape().with_rank(1) wcf_shape = wcf.get_shape().with_rank(1) b_shape = b.get_shape().with_rank(1) return [x_shape] * max_len + [cs_prev_shape, h_prev_shape, w_shape, wci_shape, wco_shape, wcf_shape, b_shape] class LSTMFusedCell(rnn_cell.RNNCell): """Basic LSTM recurrent network cell. The implementation is based on: http://arxiv.org/abs/1409.2329. We add forget_bias (default: 1) to the biases of the forget gate in order to reduce the scale of forgetting in the beginning of the training. Unlike BasicLSTMCell, this is a monolithic op and should be much faster. The weight and bias matrixes should be compatible as long as the variabel scope matches. """ def __init__(self, num_units, forget_bias=1.0, use_peephole=False): """Initialize the basic LSTM cell. Args: num_units: int, The number of units in the LSTM cell. forget_bias: float, The bias added to forget gates (see above). use_peephole: Whether to use peephole connectios or not. 
""" self._num_units = num_units self._forget_bias = forget_bias self._use_peephole = use_peephole @property def state_size(self): return (self._num_units,) * 2 @property def output_size(self): return self._num_units def __call__(self, x, states_prev, scope=None): """Long short-term memory cell (LSTM).""" with vs.variable_scope(scope or type(self).__name__): x_shape = x.get_shape().with_rank(2) if not x_shape[1]: raise ValueError("Expecting x_shape[1] to be sets: %s" % str(x_shape)) if len(states_prev) != 2: raise ValueError("Expecting states_prev to be a tuple with length 2.") input_size = x_shape[1] w = vs.get_variable("W", [input_size + self._num_units, self._num_units * 4]) b = vs.get_variable("b", [w.get_shape().with_rank(2)[1]], initializer=init_ops.constant_initializer(0.0)) wci = vs.get_variable("wci", [self._num_units]) wco = vs.get_variable("wco", [self._num_units]) wcf = vs.get_variable("wcf", [self._num_units]) (cs_prev, h_prev) = states_prev (_, cs, _, _, _, _, h) = _lstm_fused_cell(x, cs_prev, h_prev, w, b, wci=wci, wco=wco, wcf=wcf, forget_bias=self._forget_bias, use_peephole=self._use_peephole) return (h, (cs, h))
1.9375
2
venv/Lib/site-packages/pandas/tseries/api.py
arnoyu-hub/COMP0016miemie
1
12773312
<filename>venv/Lib/site-packages/pandas/tseries/api.py<gh_stars>1-10 """ Timeseries API """ # flake8: noqa from pandas.tseries.frequencies import infer_freq import pandas.tseries.offsets as offsets
1.179688
1
rpcpy/exceptions.py
william-wambua/rpc.py
152
12773313
<gh_stars>100-1000
class SerializerNotFound(Exception):
    """
    Raised when no registered serializer matches the requested
    serializer name / content type.
    """
1.382813
1
gamelib/overlay.py
etmaifo/CleanerBot
1
12773314
<reponame>etmaifo/CleanerBot
import pygame, os
from gamelib.constants import COLOR, SCREEN, ASSET, GAME, FONT
from gamelib.physicsbody import PhysicsBody
import pygame.mixer as mixer


class CountDownOverlay(object):
    """Blinking pre-round countdown banner ("3", "2", "1", "GO!").

    Owns its own banner sprite, font, and start sound. The caller drives it
    by calling ``update(remainingSeconds)`` once per frame and then ``draw``.
    """

    def __init__(self):
        mixer.init()
        # Banner strip centered horizontally, one fifth down the screen.
        self.overlay = PhysicsBody(0, SCREEN.height/5, SCREEN.width * 3/4, 37, ASSET.countdown_overlay)
        self.overlay.rect.centerx = SCREEN.width/2

        self.font = pygame.font.Font(FONT.default, 20)
        self.font.set_bold(True)
        # Pre-render an initial "3" so draw() has something before the
        # first update() call.
        self.text = self.font.render("3", True, COLOR.white)
        self.text_rect = self.text.get_rect()
        self.text_rect.center = self.overlay.rect.center

        self.sound = mixer.Sound(os.path.join("assets", "sfx", "start.wav"))
        self.sound.set_volume(0.3)
        # Latch so the start sound plays only once when "GO!" is reached.
        self.sound_playing = False

        # Frame counter driving the blink cycle (wraps at GAME.fps).
        self.blink = 0

    def update(self, remainingSeconds):
        """Advance the blink counter and re-render the countdown text.

        remainingSeconds is expressed in frames; it is divided by GAME.fps
        to obtain whole seconds.
        """
        self.blink += 1
        if self.blink > GAME.fps:
            self.blink = 0

        seconds = str(int(remainingSeconds/GAME.fps)-1)
        if seconds == "0":
            seconds = "GO!"
            if not self.sound_playing:
                self.sound.play()
                self.sound_playing = True
        self.text = self.font.render(seconds, True, COLOR.white)
        self.text_rect = self.text.get_rect()
        self.text_rect.center = self.overlay.rect.center

    def draw(self, screen):
        """Blit banner and text, but only for the first half of each blink
        cycle — this is what produces the blinking effect."""
        if self.blink < GAME.fps/2:
            screen.blit(self.overlay.image, self.overlay.rect)
            screen.blit(self.text, self.text_rect)
2.890625
3
src/GimelStudio/node_importer.py
iwoithe/Gimel-Studio
47
12773315
<reponame>iwoithe/Gimel-Studio<gh_stars>10-100 # ---------------------------------------------------------------------------- # Gimel Studio Copyright 2019-2021 by <NAME> and contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # FILE: node_importer.py # AUTHOR(S): <NAME> # PURPOSE: Imports the core and custom nodes so that they are registered # ---------------------------------------------------------------------------- import os from GimelStudio.utils import LoadPythonScripts # First, we import the custom nodes # directly from the corenode directory. 
# Importing each core node module triggers its node-registration side
# effect — nothing from these modules is referenced directly below.
from GimelStudio.corenodes.output import output_node
from GimelStudio.corenodes.input import (image_node, color_image_node,
                                         noise_image_node,
                                         gradient_image_node)  # from_blender_node
from GimelStudio.corenodes.mask import edge_detect_node
# from GimelStudio.corenodes.draw import text_node
from GimelStudio.corenodes.color import (color_balance_node, contrast_node,
                                         brightness_node, invert_alpha_node,
                                         get_channel_node)
from GimelStudio.corenodes.blend import (mix_node, composite_node,
                                         alpha_composite_node)
from GimelStudio.corenodes.distort import (flip_node, crop_node)
from GimelStudio.corenodes.filter import (blur_node, opacity_node,
                                          sharpness_node, effect_spread_node,
                                          invert_node, dilate_erode_node)
from GimelStudio.corenodes.convert import (to_normal_map_node,
                                           to_bump_map_node,
                                           to_roughness_map_node,
                                           to_specular_map_node,
                                           to_ao_map_node)

print("[INFO] Registered core nodes")

# Next, we load the custom nodes
# from the 'customnodes' directory.
# A failure here is non-fatal by design: a broken custom node must not
# prevent the application from starting.
try:
    LoadPythonScripts("customnodes")
    print("[INFO] Registered custom node scripts")
except Exception as error:
    print("[WARNING] Error registering custom nodes: \n", error)
finally:
    # NOTE(review): this finally clause is a no-op and could be removed.
    pass
1.78125
2
demo/euleriansolid/main00_mesh.py
Technologicat/extrafeathers
0
12773316
# -*- coding: utf-8; -*- import pathlib import matplotlib.pyplot as plt import dolfin from extrafeathers import meshfunction from extrafeathers import meshiowrapper from extrafeathers import plotmagic print(pathlib.Path.cwd()) meshiowrapper.import_gmsh(src="demo/meshes/box.msh", dst="demo/meshes/box.h5") # for use by the flow solvers mesh, domain_parts, boundary_parts = meshiowrapper.read_hdf5_mesh("demo/meshes/box.h5") # Visualize the fluid mesh plt.figure(1) plt.clf() # mesh itself plt.subplot(2, 2, 1) dolfin.plot(mesh) plt.ylabel("Mesh") # local mesh size plt.subplot(2, 2, 2) theplot = dolfin.plot(meshfunction.meshsize(mesh)) plt.colorbar(theplot) plt.ylabel("Local mesh size") # domain parts (subdomains) plt.subplot(2, 2, 3) theplot = dolfin.plot(domain_parts) plt.colorbar(theplot) plt.ylabel("Phys. surfaces") # boundary parts plt.subplot(2, 2, 4) plotmagic.plot_facet_meshfunction(boundary_parts, invalid_values=[2**64 - 1]) plt.axis("scaled") plt.legend(loc="best") plt.ylabel("Phys. boundaries") plt.suptitle("Structure") plt.show()
2.296875
2
physical_education/tasks.py
skrapi/physical_education
0
12773317
from typing import Tuple, TYPE_CHECKING

from . import utils
from pyomo.environ import Constraint

if TYPE_CHECKING:
    from .system import System3D


def periodic(robot: 'System3D', but_not: Tuple[str, ...],
             but_not_vel: Tuple[str, ...] = tuple()):
    """ Make all position and velocity states in `robot` periodic,
    except for the positions (q) defined in `but_not` and the
    velocities (dq) in `but_not_vel`

    ```
    >>> periodic(robot, but_not=('x',))
    >>> periodic(robot, but_not=('x', 'y'), but_not_vel=('x', 'y'))
    ```
    """
    assert robot.m is not None,\
        'robot does not have a pyomo model defined on it'

    # Number of finite elements and collocation points in the model.
    nfe, ncp = len(robot.m.fe), len(robot.m.cp)

    # Drop any previously attached periodicity constraints so this
    # function is safe to call more than once on the same model.
    utils.remove_constraint_if_exists(robot.m, 'periodic_q')
    utils.remove_constraint_if_exists(robot.m, 'periodic_dq')

    # periodic positions: pair each state's value at the first finite
    # element with its value at the last one.
    qs = [(link['q'][1, ncp, q], link['q'][nfe, ncp, q])
          for link in robot.links
          for q in link.pyomo_sets['q_set']
          if q not in but_not]

    robot.m.add_component(
        'periodic_q',
        # The lambda closes over `qs`; each constraint index i equates
        # the start/end pair.
        Constraint(range(len(qs)), rule=lambda m, i: qs[i][0] == qs[i][1])
    )

    # periodic velocities: same pairing for the dq states.
    dqs = [(link['dq'][1, ncp, q], link['dq'][nfe, ncp, q])
           for link in robot.links
           for q in link.pyomo_sets['q_set']
           if q not in but_not_vel]

    robot.m.add_component(
        'periodic_dq',
        Constraint(range(len(dqs)), rule=lambda m, i: dqs[i][0] == dqs[i][1])
    )
2.84375
3
djangofeeds/optimization.py
operasoftware/django-feeds
11
12773318
import BeautifulSoup
from HTMLParser import HTMLParseError
from django.conf import settings
# NOTE(review): `re` appears unused in this module — verify before removing.
import re

# Master switch: strip tracker images/links from post HTML (default on).
DJANGOFEEDS_REMOVE_TRACKERS = getattr(settings,
    "DJANGOFEEDS_REMOVE_TRACKERS", True)

# The obvious tracker images
DJANGOFEEDS_TRACKER_SERVICES = getattr(settings,
    "DJANGOFEEDS_TRACKER_SERVICES", [
        'http://feedads',
        'http://feeds.feedburner.com/~r/',
        'http://feeds.feedburner.com/~ff/',
        'http://rss.feedsportal.com/c/',
        'http://ads.pheedo.com/',
        'http://a.rfihub.com/',
        'http://segment-pixel.invitemedia.com/',
        'http://pixel.quantserve.com/',
        'http://feeds.newscientist.com/',
        'http://mf.feeds.reuters.com/c/',
        'http://telegraph.feedsportal.com/c/',
])

# Images narrower than this (in pixels, per their HTML width attribute)
# are treated as probable trackers and removed.
DJANGOFEEDS_SMALL_IMAGE_LIMIT = getattr(settings,
    "DJANGOFEEDS_SMALL_IMAGE_LIMIT", 50)


class PostContentOptimizer(object):
    """Remove diverse aberrations and annoying content in the posts.

    The idea is to remove some tracker images in the feeds because these
    images are a pollution to the user.

    Identified tools that add tracker images and tools into the feeds

        * Feedburner toolbar -- 4 toolbar images, 1 tracker image.
        * Pheedcontent.com toolbar -- 4 toolbar images, 1 advertisement
          image.
        * Digg/Reddit generic toolbar - 3 toolbar, no tracker image.
        * http://res.feedsportal.com/ -- 2 toolbar images, 1 tracker image.
        * http://a.rfihub.com/ -- associated with
          http://rocketfuelinc.com/, used for ads or tracking.
          Not quite sure.

    About 80% of them use feedburner.

    Few use cases of feeds:

        * feedburner toolbar and tracker
            * WULFMORGENSTALLER
            * MarketWatch.com - Top Stories
            * Hollywood.com - Recent News
            * Wired: entertainement
            * Livescience.com
            * Reader Digest
        * Pheedcontent.com toolbar
            * Sports News : CBSSports.com
        * Digg/Reddit toolbar
            * Abstruse goose
        * http://res.feedsportal.com/
            * New scientist.com
    """

    def looks_like_tracker(self, url):
        """Return True if the image URL has to be removed.

        A URL counts as a tracker when it starts with any of the known
        service prefixes in DJANGOFEEDS_TRACKER_SERVICES.
        """
        for service in DJANGOFEEDS_TRACKER_SERVICES:
            if url.startswith(service):
                return True
        return False

    def optimize(self, html):
        """Remove unnecessary spaces, <br> and image trackers.

        On any HTML parse error the original (stripped) markup is
        returned unmodified rather than raising.
        """
        # Remove unnecessary white spaces
        html = html.strip()
        try:
            soup = BeautifulSoup.BeautifulSoup(html)
            self.remove_excessive_br(soup)
            if DJANGOFEEDS_REMOVE_TRACKERS:
                self.remove_trackers(soup)
        except HTMLParseError:
            return html
        return str(soup).strip()

    def remove_excessive_br(self, soup):
        """Collapse runs of consecutive <br> tags, keeping only the first
        of each run (and dropping any <br> at the very start)."""
        # start with true to remove any starting br tag
        last_one_is_br = True
        children = soup.childGenerator()
        for el in children:
            if isinstance(el, BeautifulSoup.Tag):
                if el.name == 'br':
                    if last_one_is_br:
                        el.replaceWith("")
                    last_one_is_br = True
                else:
                    last_one_is_br = False

    def remove_trackers(self, soup):
        """Remove the trackers.

        Strips <img> tags with empty/tracker src attributes, <img> tags
        whose declared width is below DJANGOFEEDS_SMALL_IMAGE_LIMIT, and
        <a> tags whose href points at a known tracker service.
        """
        # NOTE(review): stripped_count is never incremented or read —
        # dead variable.
        stripped_count = 0
        for image in soup("img"):
            already_removed = False
            # remove images that look like trackers
            image_source = image.get("src", "")
            if (len(image_source) == 0
                    or self.looks_like_tracker(image_source)):
                image.replaceWith("")
                already_removed = True

            # remove small images; a non-numeric width attribute means
            # the size check is skipped (image_width = None).
            try:
                image_width = int(image.get("width",
                                            DJANGOFEEDS_SMALL_IMAGE_LIMIT))
            except ValueError:
                image_width = None
            if (image_width is not None
                    and image_width < DJANGOFEEDS_SMALL_IMAGE_LIMIT
                    and not already_removed):
                image.replaceWith("")

        # remove links that look like trackers
        for link in soup("a"):
            link_href = link.get("href")
            if link_href and "://" in link_href:
                if self.looks_like_tracker(link_href):
                    link.replaceWith("")
2.5
2
commander/src/commander/commands/AddFilterCommand.py
ugnelis/ros-cameras-controller
4
12773319
import uuid
import json

from commander.commands.Command import Command
from commander.data_classes.Filter import Filter


class AddFilterCommand(Command):
    """Command that attaches a new filter to an existing camera."""

    def __init__(self):
        Command.__init__(self)

    def execute(self, **kwargs):
        """
        Execute the command.

        :param kwargs: key-worded arguments.
        :keyword cameras: List of the cameras.
        :keyword filter_types: List of the filter types.
        :keyword camera_id: Camera's ID.
        :keyword image_topic: Video camera's topic.
        :keyword filter_type: Filter type.
        :return: Response.
        """
        cameras = kwargs.get('cameras')
        filter_types = kwargs.get('filter_types')
        camera_id = kwargs.get('camera_id')
        filter_type = kwargs.get('filter_type')

        # Validate the target camera before doing anything else.
        if camera_id not in cameras:
            return [json.dumps({"message": "Camera with this ID does not exist.", "code": 404})]

        image_topic = "/{}/video_stream_to_topic/stream/image".format(camera_id)
        # Dash-free UUID used as the filter's identifier and ROS namespace.
        filter_id = str(uuid.uuid1()).replace("-", "")

        if filter_type not in filter_types:
            return [json.dumps({"message": "Filter type does not exist.", "code": 404})]

        # Instantiate and start the requested filter executor.
        executor = filter_types[filter_type]()
        executor.execute(image_topic=image_topic, namespace=filter_id)

        # Record the running filter on the camera.
        new_filter = Filter()
        new_filter.id = filter_id
        new_filter.type = filter_type
        new_filter.executor = executor

        cameras[camera_id].add_filter(new_filter)

        return [json.dumps({"message": "Filter is added.", "code": 200, "camera": new_filter.to_dict()})]
2.5625
3
src/rpc/farmer_rpc_client.py
DONG-Jason/chia-blockchain
0
12773320
<filename>src/rpc/farmer_rpc_client.py<gh_stars>0 from typing import Dict, List, Optional from src.rpc.rpc_client import RpcClient from src.types.sized_bytes import bytes32 class FarmerRpcClient(RpcClient): """ Client to Chia RPC, connects to a local farmer. Uses HTTP/JSON, and converts back from JSON into native python objects before returning. All api calls use POST requests. Note that this is not the same as the peer protocol, or wallet protocol (which run Chia's protocol on top of TCP), it's a separate protocol on top of HTTP that provides easy access to the full node. """ async def get_signage_point(self, sp_hash: bytes32) -> Optional[Dict]: try: return await self.fetch("get_signage_point", {"sp_hash": sp_hash.hex()}) except ValueError: return None async def get_signage_points(self) -> List[Dict]: return (await self.fetch("get_signage_points", {}))["signage_points"]
3.078125
3
setup.py
ptpt/zhub
0
12773321
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Packaging script for zhub, a command line tool for ZenHub.

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup


def readme():
    """Return the package long description read from README.md."""
    with open('README.md') as f:
        return f.read()


def requirements():
    """Return the requirement lines from requirements.txt.

    Fixed: the previous ``list(open('requirements.txt'))`` left the file
    handle open until garbage collection; a context manager closes it
    deterministically while returning the same list of lines.
    """
    with open('requirements.txt') as f:
        return f.readlines()


setup(name='zhub',
      version='0.1',
      description='Command line tool for ZenHub',
      long_description=readme(),
      classifiers=[
          'Development Status :: 4 - Beta',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Environment :: Console',
          'Intended Audience :: End Users/Desktop',
          'Topic :: Utilities'],
      url='https://github.com/ptpt/zhub',
      author='<NAME>',
      author_email='<EMAIL>',
      keywords='github, zenhub, command-line, client',
      license='MIT',
      py_modules=['zhub'],
      install_requires=requirements(),
      entry_points='''
      [console_scripts]
      zhub=zhub:cli
      ''',
      zip_safe=False)
1.382813
1
basics/color/hue.py
abhikpal/p5-examples
16
12773322
<filename>basics/color/hue.py
#
# Hue
#
# Hue is the color reflected from or transmitted through an object.
# and is typically referred to as the name of the color (red, blue,
# yellow, etc). Move the cursor vertically over each bar to alter its
# hue.
#

from p5 import *

# Width in pixels of each vertical hue bar.
bar_width = 20
# Index of the bar repainted on the previous draw() call; None until the
# mouse first moves over the sketch.
last_bar = None


def setup():
    """p5 setup hook: window, HSB color mode scaled to window height."""
    size(640, 360)
    title("Hue")

    # All three HSB channels range over [0, height] so mouse_y can be
    # used directly as the hue value.
    color_mode('HSB', height, height, height)
    no_stroke()
    background(0)


def draw():
    """p5 draw hook: repaint the bar under the cursor when it changes."""
    global last_bar
    which_bar = mouse_x // bar_width

    # Only repaint when the cursor has crossed into a different bar.
    if which_bar is not last_bar:
        bar_x = which_bar * bar_width
        fill(mouse_y, height, height)
        rect((bar_x, 0), bar_width, height)

    last_bar = which_bar


if __name__ == '__main__':
    run()
4.09375
4
plugins/modules/hashivault_identity_entity.py
fastlorenzo/ansible-modules-hashivault
0
12773323
<reponame>fastlorenzo/ansible-modules-hashivault #!/usr/bin/env python from ansible_collections.terryhowe.hashivault.plugins.module_utils.hashivault import hashivault_argspec from ansible_collections.terryhowe.hashivault.plugins.module_utils.hashivault import hashivault_auth_client from ansible_collections.terryhowe.hashivault.plugins.module_utils.hashivault import hashivault_init from ansible_collections.terryhowe.hashivault.plugins.module_utils.hashivault import hashiwrapper ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'} DOCUMENTATION = ''' --- module: hashivault_identity_entity version_added: "3.13.0" short_description: Hashicorp Vault entity create module description: - Module to manage identity entity in Hashicorp Vault. options: name: description: - entity name to create or update. id: description: - entity id to update. metadata: description: - metadata to be associated with entity disabled: description: - whether the entity is disabled policies: description: - entity policies. 
state: description: - whether create/update or delete the entity extends_documentation_fragment: hashivault ''' EXAMPLES = ''' --- - hosts: localhost tasks: - hashivault_identity_entity: name: 'bob' policies: 'bob' ''' def main(): argspec = hashivault_argspec() argspec['name'] = dict(required=False, type='str', default=None) argspec['id'] = dict(required=False, type='str', default=None) argspec['metadata'] = dict(required=False, type='dict', default=None) argspec['disabled'] = dict(required=False, type='bool', default=None) argspec['policies'] = dict(required=False, type='list', default=None) argspec['state'] = dict(required=False, choices=['present', 'absent'], default='present') module = hashivault_init(argspec) result = hashivault_identity_entity(module.params) if result.get('failed'): module.fail_json(**result) else: module.exit_json(**result) def hashivault_identity_entity_update(entity_details, client, entity_id, entity_name, entity_metadata, entity_disabled, entity_policies): if entity_metadata is None: entity_metadata = entity_details['metadata'] if entity_policies is None: entity_policies = entity_details['policies'] if entity_disabled is None: entity_disabled = entity_details['disabled'] if entity_details['name'] != entity_name or entity_details['disabled'] != entity_disabled or \ entity_details['metadata'] != entity_metadata or \ set([] if entity_details['policies'] is None else entity_details['policies']) != set(entity_policies): try: client.secrets.identity.update_entity( entity_id=entity_id, name=entity_name, metadata=entity_metadata, policies=entity_policies, disabled=entity_disabled ) except Exception as e: return {'failed': True, 'msg': str(e)} return {'changed': True} return {'changed': False} def hashivault_identity_entity_create_or_update(params): client = hashivault_auth_client(params) entity_name = params.get('name') entity_id = params.get('id') entity_metadata = params.get('metadata') entity_disabled = params.get('disabled') entity_policies = 
params.get('policies') if entity_id is not None: try: entity_details = client.secrets.identity.read_entity(entity_id=entity_id) except Exception as e: return {'failed': True, 'msg': str(e)} return hashivault_identity_entity_update(entity_details['data'], client, entity_name, entity_id, entity_metadata, entity_disabled, entity_policies) elif entity_name is not None: try: entity_details = client.secrets.identity.read_entity_by_name(name=entity_name) except Exception: response = client.secrets.identity.create_or_update_entity_by_name( name=entity_name, metadata=entity_metadata, policies=entity_policies, disabled=entity_disabled ) return {'changed': True, 'data': response['data']} return hashivault_identity_entity_update(entity_details['data'], client, entity_name=entity_name, entity_id=entity_details['data']['id'], entity_metadata=entity_metadata, entity_disabled=entity_disabled, entity_policies=entity_policies) return {'failed': True, 'msg': "Either name or id must be provided"} def hashivault_identity_entity_delete(params): client = hashivault_auth_client(params) entity_id = params.get('id') entity_name = params.get('name') if entity_id is not None: try: client.secrets.identity.read_entity(entity_id=entity_id) except Exception: return {'changed': False} client.secrets.identity.delete_entity(entity_id=entity_id) return {'changed': True} elif entity_name is not None: try: client.secrets.identity.read_entity_by_name(name=entity_name) except Exception: return {'changed': False} client.secrets.identity.delete_entity_by_name(name=entity_name) return {'changed': True} return {'failed': True, 'msg': "Either name or id must be provided"} @hashiwrapper def hashivault_identity_entity(params): state = params.get('state') if state == 'present': return hashivault_identity_entity_create_or_update(params) elif state == 'absent': return hashivault_identity_entity_delete(params) else: return {'failed': True, 'msg': 'Unknown state'} if __name__ == '__main__': main()
1.8125
2
app/main/errors.py
hettlage/salt-data-quality-site
0
12773324
<reponame>hettlage/salt-data-quality-site
from flask import current_app, render_template

from . import main


@main.errorhandler(500)
def internal_server_error(e):
    """Log the error with traceback and render the 500 page."""
    current_app.logger.error(str(e), exc_info=1)
    return render_template('500.html'), 500


@main.errorhandler(404)
def file_not_found_error(e):
    """Render the 404 page; nothing is logged for missing routes."""
    return render_template('404.html'), 404


@main.errorhandler(Exception)
def exception_raised(e):
    """Catch-all for unhandled exceptions: log and render the 500 page."""
    current_app.logger.error(str(e), exc_info=1)
    return render_template('500.html'), 500
2.34375
2
CIM14/IEC61968/Customers/__init__.py
MaximeBaudette/PyCIM
58
12773325
<gh_stars>10-100
# Copyright (C) 2010-2011 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

"""This package contains the core information classes that support customer
billing applications.
"""

# Re-export the package's public classes at this level.
from CIM14.IEC61968.Customers.Customer import Customer
from CIM14.IEC61968.Customers.CustomerAccount import CustomerAccount
from CIM14.IEC61968.Customers.ServiceCategory import ServiceCategory
from CIM14.IEC61968.Customers.PricingStructure import PricingStructure
from CIM14.IEC61968.Customers.ServiceLocation import ServiceLocation
from CIM14.IEC61968.Customers.CustomerAgreement import CustomerAgreement
from CIM14.IEC61968.Customers.Tariff import Tariff

# CIM namespace URI and prefix for this package.
nsURI = "http://iec.ch/TC57/2009/CIM-schema-cim14#Customers"
nsPrefix = "cimCustomers"


# The *Kind classes below are string-valued enumerations: thin `str`
# subclasses whose permitted values are listed in their docstrings.

class RevenueKind(str):
    """Accounting classification of the type of revenue collected for the
    CustomerAgreement, typically used to break down accounts for revenue
    accounting. Values are: industrial, streetLight, other, nonResidential,
    irrigation, residential, commercial
    """
    pass


class CustomerKind(str):
    """Kind of customer. Values are: residentialAndCommercial,
    residentialStreetlightOthers, residentialAndStreetlight, pumpingLoad,
    energyServiceSupplier, windMachine, residential, internalUse,
    residentialFarmService, other, energyServiceScheduler,
    commercialIndustrial
    """
    pass


class ServiceKind(str):
    """Kind of service. Values are: water, time, electricity, heat, rates,
    gas, internet, refuse, other, tvLicence, sewerage
    """
    pass
1.210938
1
webapp/tests/api-tests.py
clairewlliams/cs257
0
12773326
# <NAME> and <NAME>
"""Unit tests for the games API endpoints.

Fixes applied in review:
  * ``self.asertEqual`` (typo, AttributeError at runtime) -> ``assertEqual``.
  * Second categories assertion queried ``url`` instead of ``url_wrong``.
  * ``json.load`` expects a file-like object; the endpoints return JSON
    strings (other assertions compare results against ``'[]'``), so
    ``json.loads`` is the correct call.
  * ``dict.keys()`` is a view on Python 3 and never equals a list; wrap
    in ``list()`` before comparing.
"""

import unittest
import games_api
import json


class APITester(unittest.TestCase):
    def setUp(self):
        self.games_api = games_api.GamesApi()  # change depending on how we code it, will it be a class?

    def tearDown(self):
        pass

    def test_games_endpoint(self):
        url = '/games'
        self.assertIsNotNone(self.games_api.get_games(url))
        self.assertEqual(list(json.loads(self.games_api.get_games(url))[0].keys()),
                         ['name', 'global_sales', 'publisher', 'platform', 'genre', 'year'])
        # Unknown query parameters should be ignored by the endpoint.
        url_wrong = '/games?random'
        self.assertEqual(list(json.loads(self.games_api.get_games(url_wrong))[0].keys()),
                         ['name', 'global_sales', 'publisher', 'platform', 'genre', 'year'])

    def test_platforms_endpoint(self):
        url = '/platforms'
        self.assertIsNotNone(self.games_api.get_platform(url))

    def test_publishers_endpoint(self):
        url = '/publishers'
        self.assertIsNotNone(self.games_api.get_publisher(url))

    def test_genres_endpoint(self):
        url = '/genres'
        self.assertIsNotNone(self.games_api.get_genre(url))

    def test_categories_endpoint(self):
        url = '/categories'
        self.assertIsNotNone(self.games_api.get_categories(url))
        self.assertEqual(list(self.games_api.get_categories(url).keys()),
                         ['platforms', 'genres', 'publishers'])
        url_wrong = '/categories?random'
        # Fixed: previously this re-queried `url`, so `url_wrong` was never
        # exercised (same pattern as test_games_endpoint).
        self.assertEqual(list(self.games_api.get_categories(url_wrong).keys()),
                         ['platforms', 'genres', 'publishers'])

    def test_publisher_endpoint(self):
        url = '/publisher?name=Nintendo'
        self.assertIsNotNone(self.games_api.get_publisher_by_name(url))
        self.assertEqual(list(json.loads(self.games_api.get_publisher_by_name(url))[0].keys()),
                         ['name', 'global_sales', 'publisher', 'platform', 'genre', 'year',
                          'na', 'eu', 'jp', 'user_score', 'critic_score'])
        url_empty = '/publisher?name='
        self.assertEqual(self.games_api.get_publisher_by_name(url_empty), '[]')
        url_publisher_not_in_set = '/publisher?name=ThisDoesNotMakeSense'
        self.assertEqual(self.games_api.get_publisher_by_name(url_publisher_not_in_set), '[]')

    def test_platform_endpoint(self):
        url = '/platform?name=Wii'
        self.assertIsNotNone(self.games_api.get_platform_by_name(url))
        self.assertEqual(list(json.loads(self.games_api.get_platform_by_name(url))[0].keys()),
                         ['name', 'global_sales', 'publisher', 'platform', 'genre', 'year',
                          'na', 'eu', 'jp', 'user_score', 'critic_score'])
        url_empty = '/platform?name='
        self.assertEqual(self.games_api.get_platform_by_name(url_empty), '[]')
        url_publisher_not_in_set = '/platform?name=ThisDoesNotMakeSense'
        self.assertEqual(self.games_api.get_platform_by_name(url_publisher_not_in_set), '[]')

    def test_genre_endpoint(self):
        url = '/genre?name=Action'
        self.assertIsNotNone(self.games_api.get_genre_by_name(url))
        self.assertEqual(list(json.loads(self.games_api.get_genre_by_name(url))[0].keys()),
                         ['name', 'global_sales', 'publisher', 'platform', 'genre', 'year',
                          'na', 'eu', 'jp', 'user_score', 'critic_score'])
        url_empty = '/genre?name='
        self.assertEqual(self.games_api.get_genre_by_name(url_empty), '[]')
        url_genre_not_in_set = '/genre?name=ThisDoesNotMakeSense'
        self.assertEqual(self.games_api.get_genre_by_name(url_genre_not_in_set), '[]')


if __name__ == '__main__':
    unittest.main()
3.1875
3
_src/demo.py
badboy315/omooc.py
0
12773327
<reponame>badboy315/omooc.py<filename>_src/demo.py #!/usr/bin/python # -*- coding: utf-8 -*- def hello(words="Hollo World!-)"): print words #return words if __name__ == '__main__': hello('是也乎')
2.296875
2
voltha/adapters/brcm_openomci_onu/omci/brcm_vlan_filter_task.py
sathishms77/test
72
12773328
# # Copyright 2018 the original author or authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from voltha.extensions.omci.tasks.task import Task from twisted.internet import reactor from twisted.internet.defer import inlineCallbacks, failure, returnValue from voltha.extensions.omci.omci_defs import ReasonCodes, EntityOperations from voltha.extensions.omci.omci_me import * from voltha.adapters.brcm_openomci_onu.uni_port import UniType from voltha.adapters.brcm_openomci_onu.pon_port import BRDCM_DEFAULT_VLAN, DEFAULT_TPID RC = ReasonCodes OP = EntityOperations RESERVED_VLAN = 4095 class BrcmVlanFilterException(Exception): pass class BrcmVlanFilterTask(Task): """ Apply Vlan Tagging Filter Data and Extended VLAN Tagging Operation Configuration on an ANI and UNI """ task_priority = 200 name = "Broadcom VLAN Filter Task" def __init__(self, omci_agent, device_id, uni_port, set_vlan_id, add_tag=True, priority=task_priority): """ Class initialization :param omci_agent: (OmciAdapterAgent) OMCI Adapter agent :param device_id: (str) ONU Device ID :param uni_port: (UniPort) UNI port :param set_vlan_id: (int) VLAN to filter for and set :param add_tag: (bool) Flag to identify VLAN Tagging or Untagging :param priority: (int) OpenOMCI Task priority (0..255) 255 is the highest """ self.log = structlog.get_logger(device_id=device_id, uni_port=uni_port.port_number) super(BrcmVlanFilterTask, self).__init__(BrcmVlanFilterTask.name, omci_agent, device_id, priority=priority, exclusive=True) 
self._device = omci_agent.get_device(device_id) self._uni_port = uni_port self._set_vlan_id = set_vlan_id self._results = None self._local_deferred = None self._config = self._device.configuration self._add_tag = add_tag # Port numbers self._input_tpid = DEFAULT_TPID self._output_tpid = DEFAULT_TPID self._cvid = BRDCM_DEFAULT_VLAN def cancel_deferred(self): super(BrcmVlanFilterTask, self).cancel_deferred() d, self._local_deferred = self._local_deferred, None try: if d is not None and not d.called: d.cancel() except: pass def start(self): """ Start Vlan Tagging Task """ super(BrcmVlanFilterTask, self).start() self._local_deferred = reactor.callLater(0, self.perform_vlan_tagging, add_tag=self._add_tag) @inlineCallbacks def perform_vlan_tagging(self, add_tag=True): """ Perform the vlan tagging """ if add_tag: self.log.info('setting-vlan-tagging') else: self.log.info('removing-vlan-tagging') try: # TODO: parameterize these from the handler, or objects in the handler # TODO: make this a member of the onu gem port or the uni port _mac_bridge_service_profile_entity_id = 0x201 _mac_bridge_port_ani_entity_id = 0x2102 # TODO: can we just use the entity id from the anis list? 
vlan_tagging_entity_id = _mac_bridge_port_ani_entity_id + self._uni_port.mac_bridge_port_num extended_vlan_tagging_entity_id = _mac_bridge_service_profile_entity_id + \ self._uni_port.mac_bridge_port_num # Delete bridge ani side vlan filter yield self._send_msg(VlanTaggingFilterDataFrame(vlan_tagging_entity_id), 'delete', 'flow-delete-vlan-tagging-filter-data') forward_operation = 0x10 # VID investigation # When the PUSH VLAN is RESERVED_VLAN (4095), let ONU be transparent if self._set_vlan_id == RESERVED_VLAN: forward_operation = 0x00 # no investigation, ONU transparent if add_tag: # Re-Create bridge ani side vlan filter msg = VlanTaggingFilterDataFrame( vlan_tagging_entity_id, # Entity ID vlan_tcis=[self._set_vlan_id], # VLAN IDs forward_operation=forward_operation ) yield self._send_msg(msg, 'create', 'flow-create-vlan-tagging-filter-data') else: # Delete bridge ani side vlan filter msg = VlanTaggingFilterDataFrame( vlan_tagging_entity_id # Entity ID ) yield self._send_msg(msg, 'delete', 'flow-delete-vlan-tagging-filter-data') # Delete uni side extended vlan filter msg = ExtendedVlanTaggingOperationConfigurationDataFrame( extended_vlan_tagging_entity_id # Bridge Entity ID ) yield self._send_msg(msg, 'delete', 'flow-delete-ext-vlan-tagging-op-config-data') # Create uni side extended vlan filter if add_tag: # When flow is removed and immediately re-added tech_profile specific task is not re-played, hence # Extended VLAN Tagging Operation configuration which is part of tech_profile specific task is not # getting created. To create it, we do Extended VLAN Tagging Operation configuration here. # TODO: do this for all uni/ports... # TODO: magic. 
static variable for assoc_type omci_cc = self._device.omci_cc # default to PPTP if self._uni_port.type is UniType.VEIP: association_type = 10 elif self._uni_port.type is UniType.PPTP: association_type = 2 else: association_type = 2 attributes = dict( association_type=association_type, # Assoc Type, PPTP/VEIP Ethernet UNI associated_me_pointer=self._uni_port.entity_id, # Assoc ME, PPTP/VEIP Entity Id # See VOL-1311 - Need to set table during create to avoid exception # trying to read back table during post-create-read-missing-attributes # But, because this is a R/W attribute. Some ONU may not accept the # value during create. It is repeated again in a set below. input_tpid=self._input_tpid, # input TPID output_tpid=self._output_tpid, # output TPID ) msg = ExtendedVlanTaggingOperationConfigurationDataFrame( extended_vlan_tagging_entity_id, # Bridge Entity ID attributes=attributes ) yield self._send_msg(msg, 'create', 'create-extended-vlan-tagging-operation-configuration-data') attributes = dict( # Specifies the TPIDs in use and that operations in the downstream direction are # inverse to the operations in the upstream direction input_tpid=self._input_tpid, # input TPID output_tpid=self._output_tpid, # output TPID downstream_mode=0, # inverse of upstream ) msg = ExtendedVlanTaggingOperationConfigurationDataFrame( extended_vlan_tagging_entity_id, # Bridge Entity ID attributes=attributes ) yield self._send_msg(msg, 'set', 'set-extended-vlan-tagging-operation-configuration-data') # parameters: Entity Id ( 0x900), Filter Inner Vlan Id(0x1000-4096,do not filter on Inner vid, # Treatment Inner Vlan Id : 2 # Update uni side extended vlan filter # filter for untagged # probably for eapol # TODO: lots of magic # TODO: magic 0x1000 / 4096? 
attributes = self._generate_attributes( filter_outer_priority=15, # This entry is not a double-tag rule filter_outer_vid=4096, # Do not filter on the outer VID value filter_outer_tpid_de=0, # Do not filter on the outer TPID field filter_inner_priority=15, filter_inner_vid=4096, filter_inner_tpid_de=0, filter_ether_type=0, treatment_tags_to_remove=0, treatment_outer_priority=15, treatment_outer_vid=0, treatment_outer_tpid_de=0, treatment_inner_priority=0, treatment_inner_vid=self._cvid, treatment_inner_tpid_de=4) msg = ExtendedVlanTaggingOperationConfigurationDataFrame( extended_vlan_tagging_entity_id, # Bridge Entity ID attributes=attributes ) yield self._send_msg(msg, 'set', 'set-extended-vlan-tagging-operation-configuration-data-table') if self._set_vlan_id == RESERVED_VLAN: # Transparently send any single tagged packet. # Any other specific rules will take priority over this attributes = self._generate_attributes( filter_outer_priority=15, filter_outer_vid=4096, filter_outer_tpid_de=0, filter_inner_priority=14, filter_inner_vid=4096, filter_inner_tpid_de=0, filter_ether_type=0, treatment_tags_to_remove=0, treatment_outer_priority=15, treatment_outer_vid=0, treatment_outer_tpid_de=0, treatment_inner_priority=15, treatment_inner_vid=0, treatment_inner_tpid_de=4) msg = ExtendedVlanTaggingOperationConfigurationDataFrame( extended_vlan_tagging_entity_id, # Bridge Entity ID attributes=attributes # See above ) yield self._send_msg(msg, 'set', 'flow-set-ext-vlan-tagging-op-config-data-single-tag-fwd-transparent') else: # Update uni side extended vlan filter # filter for untagged # probably for eapol # TODO: Create constants for the operation values. 
See omci spec attributes = self._generate_attributes( filter_outer_priority=15, filter_outer_vid=4096, filter_outer_tpid_de=0, filter_inner_priority=15, filter_inner_vid=4096, filter_inner_tpid_de=0, filter_ether_type=0, treatment_tags_to_remove=0, treatment_outer_priority=15, treatment_outer_vid=0, treatment_outer_tpid_de=0, treatment_inner_priority=0, treatment_inner_vid=self._set_vlan_id, treatment_inner_tpid_de=4) msg = ExtendedVlanTaggingOperationConfigurationDataFrame( extended_vlan_tagging_entity_id, # Bridge Entity ID attributes=attributes # See above ) yield self._send_msg(msg, 'set', 'flow-set-ext-vlan-tagging-op-config-data-untagged') # Update uni side extended vlan filter # filter for vlan 0 # TODO: Create constants for the operation values. See omci spec attributes = self._generate_attributes( filter_outer_priority=15, # This entry is not a double-tag rule filter_outer_vid=4096, # Do not filter on the outer VID value filter_outer_tpid_de=0, # Do not filter on the outer TPID field filter_inner_priority=8, # Filter on inner vlan filter_inner_vid=0x0, # Look for vlan 0 filter_inner_tpid_de=0, # Do not filter on inner TPID field filter_ether_type=0, # Do not filter on EtherType treatment_tags_to_remove=1, treatment_outer_priority=15, treatment_outer_vid=0, treatment_outer_tpid_de=0, treatment_inner_priority=8, # Add an inner tag and insert this value as the priority treatment_inner_vid=self._set_vlan_id, # use this value as the VID in the inner VLAN tag treatment_inner_tpid_de=4) # set TPID msg = ExtendedVlanTaggingOperationConfigurationDataFrame( extended_vlan_tagging_entity_id, # Bridge Entity ID attributes=attributes # See above ) yield self._send_msg(msg, 'set', 'flow-set-ext-vlan-tagging-op-config-data-zero-tagged') else: msg = ExtendedVlanTaggingOperationConfigurationDataFrame( extended_vlan_tagging_entity_id # Bridge Entity ID ) yield self._send_msg(msg, 'delete', 'flow-delete-ext-vlan-tagging-op-config-data') self.deferred.callback(self) except 
Exception as e: self.log.exception('setting-vlan-tagging', e=e) self.deferred.errback(failure.Failure(e)) def check_status_and_state(self, results, operation=''): """ Check the results of an OMCI response. An exception is thrown if the task was cancelled or an error was detected. :param results: (OmciFrame) OMCI Response frame :param operation: (str) what operation was being performed :return: True if successful, False if the entity existed (already created) """ omci_msg = results.fields['omci_message'].fields status = omci_msg['success_code'] error_mask = omci_msg.get('parameter_error_attributes_mask', 'n/a') failed_mask = omci_msg.get('failed_attributes_mask', 'n/a') unsupported_mask = omci_msg.get('unsupported_attributes_mask', 'n/a') self.log.debug("OMCI Result: %s", operation, omci_msg=omci_msg, status=status, error_mask=error_mask, failed_mask=failed_mask, unsupported_mask=unsupported_mask) if status == RC.Success: self.strobe_watchdog() return True elif status == RC.InstanceExists: return False @inlineCallbacks def _send_msg(self, msg, operation, vlan_tagging_operation_msg): """ Send frame to ONU. 
:param msg: (VlanTaggingFilterDataFrame/ExtendedVlanTaggingOperationConfigurationDataFrame) message used to generate OMCI frame :param operation: (str) type of CUD(Create/Update/Delete) operation :param vlan_tagging_operation_msg: (str) what operation was being performed """ if operation == 'create': frame = msg.create() elif operation == 'set': frame = msg.set() else: frame = msg.delete() self.log.debug('openomci-msg', omci_msg=msg) self.strobe_watchdog() results = yield self._device.omci_cc.send(frame) self.check_status_and_state(results, vlan_tagging_operation_msg) def _generate_attributes(self, **kwargs): """ Generate ExtendedVlanTaggingOperation attributes :return: (dict) ExtendedVlanTaggingOperation attributes dictinary """ return dict( received_frame_vlan_tagging_operation_table= VlanTaggingOperation( filter_outer_priority=kwargs['filter_outer_priority'], filter_outer_vid=kwargs['filter_outer_vid'], filter_outer_tpid_de=kwargs['filter_outer_tpid_de'], filter_inner_priority=kwargs['filter_inner_priority'], filter_inner_vid=kwargs['filter_inner_vid'], filter_inner_tpid_de=kwargs['filter_inner_tpid_de'], filter_ether_type=kwargs['filter_ether_type'], treatment_tags_to_remove=kwargs['treatment_tags_to_remove'], treatment_outer_priority=kwargs['treatment_outer_priority'], treatment_outer_vid=kwargs['treatment_outer_vid'], treatment_outer_tpid_de=kwargs['treatment_outer_tpid_de'], treatment_inner_priority=kwargs['treatment_inner_priority'], treatment_inner_vid=kwargs['treatment_inner_vid'], treatment_inner_tpid_de=kwargs['treatment_inner_tpid_de'], ) )
1.984375
2
Hereditariedade_e_Polimorfismo/21.1-Multiple-Inheritance_and_The-Lookup-Tree_Diamond-Shape.py
nnsdtr/OOP-Python
0
12773329
<reponame>nnsdtr/OOP-Python class A(object): def do_this(self): print('do_this() in A') class B(A): pass class C(A): def do_this(self): print('do_this() in C') class D(B, C): pass D_instance = D() D_instance.do_this() print(D.mro()) # Method resolution order
2.765625
3
monitoring/migrations/0002_alter_aspek_kegiatan.py
eewinkk/sdnantwr
0
12773330
<gh_stars>0 # Generated by Django 4.0.3 on 2022-03-10 15:16 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('monitoring', '0001_initial'), ] operations = [ migrations.AlterField( model_name='aspek', name='kegiatan', field=models.ManyToManyField(blank=True, to='monitoring.kegiatan'), ), ]
1.359375
1
ch1/fibonacci.py
OMGZui/getSister
1
12773331
<reponame>OMGZui/getSister a, b = 0, 1 while b < 10: print(b) a, b = b, a + b # 1 # 1 # 2 # 3 # 5 # 8 a, b = 0, 1 while b < 1000: print(b, end='->') a, b = b, a + b # 1->1->2->3->5->8->13->21->34->55->89->144->233->377->610->987->%
3.234375
3
2008/1A/Milkshakes/Milkshakes.py
alexcasgarcia/GCJ
0
12773332
<reponame>alexcasgarcia/GCJ<filename>2008/1A/Milkshakes/Milkshakes.py import pdb #5 (milkshake flavors) #4 (customers) #2 1 0 2 0 or 0 0 None None None #2 1 1 2 1 or 1 1 None None None #2 1 0 2 1 or 1 0 None None None #0 0 #1 1 #1 0 #0 1 #find # for a set of customers with the same # of preferences, figure out which the set of milkshakes that will satisfy them # for each customer ## for each flavor ### compare flavor preference with all other customer's preference for that flavor #### if match, set = None #who is the pickiest customer? ##find min first number def milkshakeMatch(milkshakeFlavorCount,customers,caseNumber): print milkshakeFlavorCount print customers print caseNumber # find the set of pickiest customers # if customer picks 1 milkshake ## compare his milkshake preference with all other customers ###if it conflicts with someone elses preference, remove the other persons preference, if they only have one preference, mark as impossible ###remove that customer from the list of customers to compare to # if customer can pick multiple milkshakes ## compare customer with customers with the same number of preferences #method to remove a milkshake flavor from a customer def readTestFile(fileName): r = open('milkshakes.out', 'w') with open(fileName) as f: pdb.set_trace() i=0 customerNumber=0 caseNumber=1 customers=[] for line in f: line=[int(x) for x in line.strip('\n').split()] if i==0: testCaseCount=line[0] elif i==1: milkshakeFlavorCount=line[0] elif i==2: customerCount=line[0] else: customers.append(line) customerNumber+=1 if customerCount==customerNumber: milkshakeMatch(milkshakeFlavorCount,customers,caseNumber) customerNumber=0 customers=[] caseNumber+=1 i=0 i+=1 r.close() readTestFile('milkshakes.in')
3.546875
4
networkapi/requisicaovips/resource/RequestVipValidateResource.py
vinicius-marinho/GloboNetworkAPI
73
12773333
<reponame>vinicius-marinho/GloboNetworkAPI # -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ """ from __future__ import with_statement import logging from networkapi.admin_permission import AdminPermission from networkapi.auth import has_perm from networkapi.distributedlock import distributedlock from networkapi.distributedlock import LOCK_VIP from networkapi.exception import InvalidValueError from networkapi.infrastructure.xml_utils import dumps_networkapi from networkapi.requisicaovips.models import RequisicaoVips from networkapi.requisicaovips.models import RequisicaoVipsError from networkapi.requisicaovips.models import RequisicaoVipsNotFoundError from networkapi.rest import RestResource from networkapi.util import is_valid_int_greater_zero_param class RequestVipValidateResource(RestResource): log = logging.getLogger('RequestVipValidateResource') def handle_get(self, request, user, *args, **kwargs): """Handles get requests to validate Vip Requests by id. 
URLs: /vip/validate/<id_vip>/ """ self.log.info('Validate Vip Request by id') try: # Commons Validations # User permission if not has_perm(user, AdminPermission.VIP_VALIDATION, AdminPermission.WRITE_OPERATION): self.log.error( u'User does not have permission to perform the operation.') return self.not_authorized() # Business Validations id_vip = kwargs.get('id_vip') # Valid vip id if not is_valid_int_greater_zero_param(id_vip): self.log.error( u'Parameter id_vip is invalid. Value: %s.', id_vip) raise InvalidValueError(None, 'id_vip', id_vip) vip = RequisicaoVips.get_by_pk(id_vip) with distributedlock(LOCK_VIP % id_vip): vip.validado = True vip.save() return self.response(dumps_networkapi({})) except RequisicaoVipsNotFoundError: return self.response_error(152) except RequisicaoVipsError: return self.response_error(150, 'Failed to validate vip request.') except InvalidValueError, e: self.log.error( u'Parameter %s is invalid. Value: %s.', e.param, e.value) return self.response_error(269, e.param, e.value) except BaseException, e: return self.response_error(1)
1.742188
2
utilities.py
DannMensah/Project-Kodo
8
12773334
<gh_stars>1-10 import os from pathlib import Path import webbrowser import threading import socket import re import numpy as np from cv2 import resize from operator import itemgetter from PIL import Image import colorsys import random import copy def stack_npy_files_in_dir(directory): merged_array = None arrays = [] for filename in os.listdir(directory): if filename.endswith(".npy"): loaded_arr = np.load(directory / filename) arrays.append(loaded_arr) else: continue merged_array = np.stack(arrays, axis=0) return merged_array def try_make_dirs(dir): try: os.makedirs(dir) except FileExistsError: pass def launch_tensorboard(log_dir): t = threading.Thread(target=run_tensorboard_server, args=([log_dir])) t.start() #url = "http://{}:6006/".format(socket.gethostname()) url = "http://localhost:6006/" webbrowser.open(url, new=0, autoraise=True) def run_tensorboard_server(log_dir): os.system("tensorboard --logdir=" + log_dir + " --port 6006") def sorted_alphanumeric(data): convert = lambda text: int(text) if text.isdigit() else text.lower() alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] return sorted(data, key=alphanum_key) def change_colors(img_arr): im = copy.copy(Image.fromarray(img_arr)) pixdata = im.load() colors = im.getcolors(1024) sorted_colors = sorted(colors, key=itemgetter(0)) relevant_colors = sorted_colors[-3:] relevant_colors = tuple(col[1] for col in relevant_colors) # Clean the background noise, if color != white, then set to black. 
color_map = {} hue = random.random() for y in range(im.size[1]): for x in range(im.size[0]): orig_rgba = pixdata[x,y] norm_color = tuple(col/255 for col in orig_rgba) hls_color = list(colorsys.rgb_to_hls(*norm_color[:3])) hls_color[0] = hue rgb_color = colorsys.hls_to_rgb(*hls_color) rgb_color = tuple(int(col*255) for col in rgb_color) pixdata[x,y] = rgb_color return np.array(im) def rgb_to_hsv(rgb): # Translated from source of colorsys.rgb_to_hsv # r,g,b should be a numpy arrays with values between 0 and 255 # rgb_to_hsv returns an array of floats between 0.0 and 1.0. rgb = rgb.astype('float') hsv = np.zeros_like(rgb) # in case an RGBA array was passed, just copy the A channel hsv[..., 3:] = rgb[..., 3:] r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2] maxc = np.max(rgb[..., :3], axis=-1) minc = np.min(rgb[..., :3], axis=-1) hsv[..., 2] = maxc mask = maxc != minc hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask] rc = np.zeros_like(r) gc = np.zeros_like(g) bc = np.zeros_like(b) rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask] gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask] bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask] hsv[..., 0] = np.select( [r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc) hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0 return hsv def hsv_to_rgb(hsv): # Translated from source of colorsys.hsv_to_rgb # h,s should be a numpy arrays with values between 0.0 and 1.0 # v should be a numpy array with values between 0.0 and 255.0 # hsv_to_rgb returns an array of uints between 0 and 255. 
rgb = np.empty_like(hsv) rgb[..., 3:] = hsv[..., 3:] h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2] i = (h * 6.0).astype('uint8') f = (h * 6.0) - i p = v * (1.0 - s) q = v * (1.0 - s * f) t = v * (1.0 - s * (1.0 - f)) i = i % 6 conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5] rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v) rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t) rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p) return rgb.astype('uint8') def shift_hue(arr,hout): hsv=rgb_to_hsv(arr) hsv[...,0]=hout rgb=hsv_to_rgb(hsv) return rgb
2.390625
2
graph-measures/features_algorithms/accelerated_graph_features/src/accelerated_graph_features/graph_plotter.py
Unknown-Data/QGCN
3
12773335
import sys import os # Leave the path changes here!!! sys.path.append(os.path.join(os.path.dirname(__file__), '..')) sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..')) import networkx as nx import matplotlib.pyplot as plt from src.accelerated_graph_features.test_python_converter import create_graph N = 3 def plot_graph(i): G = create_graph(i) pos = nx.spring_layout(G) nx.draw(G,pos) # labels nx.draw_networkx_labels(G, pos, font_size=10, font_family='sans-serif') plt.axis('off') plt.show() if __name__ == '__main__': for i in range(1,N+1): plot_graph(i)
3
3
app.py
ruyueshuo/YOLOv4_Deployment
11
12773336
<reponame>ruyueshuo/YOLOv4_Deployment import argparse import io import os import time import numpy as np from importlib import import_module from flask import Flask, render_template, Response, jsonify, request import cv2 from PIL import Image import darknet # darknet = import_module(".").daknet app = Flask(__name__) @app.route('/') def index(): """Video streaming home page.""" return render_template('index.html') def parser(): parser = argparse.ArgumentParser(description="YOLO Object Detection") parser.add_argument("--weights", default="./results/yolov4-tiny-reflective_best.weights", help="yolo weights path") parser.add_argument("--config_file", default="./cfg/yolov4-tiny-reflective.cfg", help="path to config file") parser.add_argument("--data_file", default="./data/reflective.data", help="path to data file") parser.add_argument("--thresh", type=float, default=.25, help="remove detections with confidence below this value") return parser.parse_args() def load_model(): """load the pre-trained model.""" global network, class_names, class_colors, args args = parser() network, class_names, class_colors = darknet.load_network( args.config_file, args.data_file, args.weights, batch_size=1 ) def preprocess_img(frame): # Darknet doesn't accept numpy images. 
# Create one with image we reuse for each detect width = darknet.network_width(network) height = darknet.network_height(network) darknet_image = darknet.make_image(width, height, 3) frame_rgb = cv2.cvtColor(np.asarray(frame),cv2.COLOR_RGB2BGR) frame_resized = cv2.resize(frame_rgb, (width, height), interpolation=cv2.INTER_LINEAR) darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes()) return darknet_image @app.route('/predict',methods=['POST']) def predict(): """For rendering results on HTML GUI.""" # initialize the data dictionary that will be returned data = {"success": False} # ensure an image was properly uploaded to our endpoint if request.method == "POST": now = time.localtime(time.time() ) print("POST at :", time.strftime("%Y--%m--%d %H:%M:%S", now)) if request.files.get("image"): print("Inference started ...") # read the image in PIL format image = request.files["image"].read() image = Image.open(io.BytesIO(image)) # preprocess the image darknet_image = preprocess_img(image) # inference start_time = time.time() detections = darknet.detect_image(network, class_names, darknet_image, thresh=args.thresh) end_time = time.time() data["inference_time"] = end_time - start_time print("Time cost : {0:.3f}s.".format(data["inference_time"])) data["predictions"] = detections # indicate that the request was a success data["success"] = True print("Inference finished ...") return render_template('index.html', prediction_text='Employee Salary should be $ {}'.format(output)) @app.route("/predict_api", methods=["POST"]) def predict_api(): # initialize the data dictionary that will be returned data = {"success": False} # ensure an image was properly uploaded to our endpoint if request.method == "POST": now = time.localtime(time.time() ) print("POST at :", time.strftime("%Y--%m--%d %H:%M:%S", now)) if request.files.get("image"): print("Inference started ...") # read the image in PIL format image = request.files["image"].read() image = Image.open(io.BytesIO(image)) 
# preprocess the image darknet_image = preprocess_img(image) # inference start_time = time.time() detections = darknet.detect_image(network, class_names, darknet_image, thresh=args.thresh) end_time = time.time() data["inference_time"] = end_time - start_time print("Time cost : {0:.3f}s.".format(data["inference_time"])) data["predictions"] = detections # indicate that the request was a success data["success"] = True print("Inference finished ...") # return the data dictionary as a JSON response return jsonify(data) if __name__ == '__main__': args = parser() network, class_names, class_colors = darknet.load_network( args.config_file, args.data_file, args.weights, batch_size=1 ) app.run(host='0.0.0.0', debug=True, threaded=True)
2.640625
3
vo/AutoScaleSettingVO.py
liujiage/DevOpsK8s
1
12773337
class AutoScaleSettingVO: def __init__(self, mini_size=1, max_size=1, mem_exc=0, deploy_name = ""): self.miniSize = mini_size self.maxSize = max_size self.memExc = mem_exc self.deployName = deploy_name self.operationResult = ""
1.898438
2
cornflow/tests/custom_liveServer.py
pchtsp/corn
5
12773338
from flask_testing import LiveServerTestCase import cornflow_client as cf import json from cornflow.app import create_app from cornflow.commands import AccessInitialization from cornflow.shared.utils import db from cornflow.tests.const import PREFIX from cornflow.models import UserModel, UserRoleModel from cornflow.shared.const import ADMIN_ROLE, SERVICE_ROLE from cornflow.tests.const import LOGIN_URL, SIGNUP_URL class CustomTestCaseLive(LiveServerTestCase): def create_app(self): app = create_app("testing") return app def set_client(self, server): self.client = cf.CornFlow(url=server) return self.client def login_or_signup(self, user_data): try: response = self.client.login(user_data["username"], user_data["pwd"]) except cf.CornFlowApiError: response = self.client.sign_up(**user_data).json() return response def setUp(self, create_all=True): if create_all: db.create_all() AccessInitialization().run() user_data = dict( username="testname", email="<EMAIL>", pwd="<PASSWORD>", ) self.set_client(self.get_server_url()) response = self.login_or_signup(user_data) self.client.token = response["token"] self.url = None self.model = None self.items_to_check = [] def tearDown(self): db.session.remove() db.drop_all() def create_user_with_role(self, role_id, data=None): if data is None: data = { "username": "testuser" + str(role_id), "email": "testemail" + str(role_id) + "@test.org", "password": "<PASSWORD>", } response = self.login_or_signup(data) user_role = UserRoleModel({"user_id": response["id"], "role_id": role_id}) user_role.save() db.session.commit() return self.login_or_signup(data)["token"] def create_service_user(self, data=None): return self.create_user_with_role(SERVICE_ROLE, data=data) def create_admin(self, data=None): return self.create_user_with_role(ADMIN_ROLE, data=data) def get_server_url(self): """ Return the url of the test server """ prefix = PREFIX if prefix: prefix += "/" return "http://localhost:%s" % self._port_value.value + prefix
2.140625
2
edk2toolext/tests/test_edk2_logging.py
kuqin12/edk2-pytool-extensions
32
12773339
<reponame>kuqin12/edk2-pytool-extensions<gh_stars>10-100 ## @file test_edk2_logging.py # This contains unit tests for the edk2_logging ## # Copyright (c) Microsoft Corporation # # SPDX-License-Identifier: BSD-2-Clause-Patent ## import os import tempfile import unittest import logging from edk2toolext import edk2_logging class Test_edk2_logging(unittest.TestCase): def test_can_create_console_logger(self): console_logger = edk2_logging.setup_console_logging(False, False) self.assertIsNot(console_logger, None, "We created a console logger") edk2_logging.stop_logging(console_logger) def test_can_create_txt_logger(self): test_dir = tempfile.mkdtemp() location, txt_logger = edk2_logging.setup_txt_logger(test_dir, "test_txt") logging.info("Testing") self.assertTrue(os.path.isfile(location), "We should have created a file") self.assertIsNot(txt_logger, None, "We created a txt logger") edk2_logging.stop_logging(txt_logger) def test_can_create_md_logger(self): test_dir = tempfile.mkdtemp() location, txt_logger = edk2_logging.setup_markdown_logger(test_dir, "test_md") logging.info("Testing") self.assertTrue(os.path.isfile(location), "We should have created a file") self.assertIsNot(txt_logger, None, "We created a txt logger") edk2_logging.stop_logging(txt_logger) def test_none_to_close(self): edk2_logging.stop_logging(None) def test_can_close_logger(self): test_dir = tempfile.mkdtemp() location, txt_logger = edk2_logging.setup_txt_logger(test_dir, "test_close") logging.critical("Testing") self.assertTrue(os.path.isfile(location), "We should have created a file") file = open(location, "r") num_lines = len(file.readlines()) file.close() self.assertEqual(num_lines, 1, "We should only have one line") edk2_logging.stop_logging(txt_logger) logging.critical("Test 2") file = open(location, "r") num_lines2 = len(file.readlines()) file.close() self.assertEqual(num_lines, num_lines2, "We should only have one line") if __name__ == '__main__': unittest.main()
2.390625
2
src/monitor.py
javtges/CEES-Automated-Saturation-System
1
12773340
<gh_stars>1-10 import cv2 from .roi import ROI class Monitor(ROI): """Vision Monitor. Contains all methods and values that handle machine vision. This includes camera interfacing, region of interest (ROI), and data processing, among others. """ def __init__(self, **kwargs): """Initialize Python-Camera interface. Choose video with: 0 for laptop camera 1 for USB camera cv2.samples.findFileOrKeep(<FILEPATH>) for a file Required background subtractor parameters are: [history, varThreshold, detectShadows] Tweaking may be necessary for optimal results. More on background subtraction methods at https://docs.opencv.org/4.5.0/de/de1/group__video__motion.html Uses source in **kwargs if given. Passes kywd=arg pairs down MRO chain. """ super().__init__(**kwargs) src = kwargs.pop('src') if 'src' in kwargs else 0 self.__capture = cv2.VideoCapture(src) if not self.__capture.isOpened: raise Exception("Unable to open {}".format(src)) self.__frame = None self.frame_no = 0 self.__roi_frame = None self.__backSub = cv2.createBackgroundSubtractorMOG2(40, 60, False) bounds = (int(self.__capture.get(4)), int(self.__capture.get(3))) self.set_bounds(bounds) print(":: MONITOR INITIALIZED ::\n") def get_frame(self): """Call next frame from camera. Draw rectangle around region of interest on each frame. If next frame not found raise Exception. Returns: OpenCV frame (numerical array) """ __, self.__frame = self.__capture.read() if self.__frame is None: raise Exception("Camera error! Next frame not found.") self.__draw_rectangle() return self.__frame def show_frame(self): """Show frame in a resizeable window. Shows each frame for 30ms (~30 FPS), or until a key is pressed. Returns: ASCII value of key pressed (int) """ cv2.namedWindow('Frame', cv2.WINDOW_NORMAL) cv2.imshow('Frame', self.__frame) return cv2.waitKey(30) & 0xFF def image_processing(self): """Process image to capture moving pixels. Crop image to region of interest (ROI), then convert to grayscale. 
After that use background subtraction on ROI. TODO: Better variable names Returns: Amount of pixels detected to have moved (int) """ west, north, east, south = self._coordinates gray = cv2.cvtColor(self.__roi_frame, cv2.COLOR_BGR2GRAY) fg_mask = self.__backSub.apply(gray) fg_mask_rgb = cv2.cvtColor(fg_mask, cv2.COLOR_GRAY2RGB) cv2.rectangle(self.__frame, (10, 2), (100, 20), (0, 0, 0), -1 ) cv2.putText(self.__frame, str(self.__capture.get(cv2.CAP_PROP_POS_FRAMES)), (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255) ) self.__frame[north: south, west: east] = fg_mask_rgb noise = cv2.countNonZero(fg_mask) self.frame_no += 1 return noise def shutoff_vision(self): """Release camera interface. Destroy any associated windows.""" print("Shutting off vision...") self.__capture.release() cv2.destroyAllWindows() print("Vision released.\n") def __draw_rectangle(self): """Draw rectangle around ROI. Crop for later processing.""" west, north, east, south = self._coordinates self.__frame = cv2.rectangle(self.__frame, (west, north), (east, south), (100, 50, 200), 2, ) self.__roi_frame = self.__frame[north: south, west: east]
3.15625
3
model/user.py
sookocheff/modelviewcontroller
0
12773341
from google.appengine.ext import ndb class User(ndb.Model): name = ndb.StringProperty() @classmethod def get_or_create(cls, name): if not name: return None user = ndb.Key('User', name).get() if not user: user = User(name=name, id=name) user.put() return user
2.78125
3
scales/pool/watermark.py
steveniemitz/scales
48
12773342
from collections import deque import logging import gevent from .base import PoolSink from ..asynchronous import AsyncResult from ..constants import (Int, ChannelState, SinkProperties, SinkRole) from ..sink import ( ClientMessageSink, SinkProvider, FailingMessageSink ) from ..dispatch import ServiceClosedError from ..varz import ( Gauge, Source, VarzBase ) class QueuingMessageSink(ClientMessageSink): def __init__(self, queue): super(QueuingMessageSink, self).__init__() self._queue = queue def AsyncProcessRequest(self, sink_stack, msg, stream, headers): self._queue.append((sink_stack, msg, stream, headers)) def AsyncProcessResponse(self, sink_stack, context, stream, msg): raise NotImplementedError("This should never be called") @property def state(self): return ChannelState.Open class MaxWaitersError(Exception): pass class WatermarkPoolSink(PoolSink): """A watermark pool keeps a cached number of sinks active (the low watermark). Once the low watermark is hit, the pool will create new sinks until it hits the high watermark. At that point, it will begin queuing requests. The pool guarantees only a single request will be active on any underlying sink at any given time, that is, each sink processes requests serially. """ ROOT_LOG = logging.getLogger('scales.pool.WatermarkPool') class Varz(VarzBase): """ size - The current size of the pool. queue_size - The length of the waiter queue. min_size - The configured low-watermark. max_size - The configured high-watermark. 
""" _VARZ_BASE_NAME = 'scales.pool.WatermarkPool' _VARZ = { 'size': Gauge, 'queue_size': Gauge } def __init__(self, next_provider, sink_properties, global_properties): endpoint = global_properties[SinkProperties.Endpoint] name = global_properties[SinkProperties.Label] self._cache = deque() self._waiters = deque() self._min_size = sink_properties.min_watermark self._max_size = sink_properties.max_watermark self._max_queue_size = sink_properties.max_queue_len self._current_size = 0 self._state = ChannelState.Idle socket_name = '%s:%s' % (endpoint.host, endpoint.port) self.endpoint = socket_name self._varz = self.Varz(Source(service=name, endpoint=socket_name)) self._log = self.ROOT_LOG.getChild('[%s.%s]' % (name, socket_name)) super(WatermarkPoolSink, self).__init__(next_provider, global_properties) def __PropagateShutdown(self, value): self.on_faulted.Set(value) def _DiscardSink(self, sink): """Close the sink and unsubscribe from fault notifications Args: sink - The sink to discard. """ sink.on_faulted.Unsubscribe(self.__PropagateShutdown) sink.Close() def _Dequeue(self): """Attempt to get a sink from the cache. Returns: A sink if one can be taken from the cache, else None. """ while any(self._cache): item = self._cache.popleft() if item.state <= ChannelState.Open: return item else: self._DiscardSink(item) return None def _Get(self): cached = self._Dequeue() if cached: return cached elif self._current_size < self._max_size: self._current_size += 1 self._varz.size(self._current_size) sink = self._sink_provider.CreateSink(self._properties) # TODO: we could get a better failure case here by detecting that Open() # failed and retrying, however for now the simplest option is to just fail. 
sink.Open().wait() sink.on_faulted.Subscribe(self.__PropagateShutdown) return sink else: if len(self._waiters) + 1 > self._max_queue_size: return FailingMessageSink(MaxWaitersError()) else: self._varz.queue_size(len(self._waiters) + 1) return QueuingMessageSink(self._waiters) def _Release(self, sink): # Releasing a queuing sink is a noop if (isinstance(sink, QueuingMessageSink) or isinstance(sink, FailingMessageSink)): self._varz.queue_size(len(self._waiters)) return do_close = False # This sink is already shutting down if self.state == ChannelState.Closed: self._current_size -= 1 # One of the underlying sinks failed, shut down elif sink.state == ChannelState.Closed: self._current_size -= 1 self.Close() # There are some waiters queued, reuse this sink to process another request. elif any(self._waiters): gevent.spawn(self._ProcessQueue, sink) # We're below the min-size specified, cache this sink elif self._current_size <= self._min_size: self._cache.append(sink) # We're above the min-size, close the sink. else: self._current_size -= 1 do_close = True self._varz.size(self._current_size) if do_close: self._DiscardSink(sink) def _ProcessQueue(self, sink): """Called as a continuation of an underlying sink completing. Get the next waiter and use 'sink' to process it. Args: sink - An open sink. """ sink_stack, msg, stream, headers = self._waiters.popleft() self._varz.queue_size(len(self._waiters)) # The stack has a QueuingChannelSink on the top now, pop it off # and push the real stack back on. 
orig_sink, ctx = sink_stack.Pop() sink_stack.Push(orig_sink, sink) sink.AsyncProcessRequest(sink_stack, msg, stream, headers) def Open(self): ar = AsyncResult() ar.SafeLink(self._OpenImpl) return ar def _OpenImpl(self): sink = self._Get() self._Release(sink) self._state = ChannelState.Open def _FlushCache(self): [self._DiscardSink(sink) for sink in self._cache] def Close(self): self._state = ChannelState.Closed self._FlushCache() fail_sink = FailingMessageSink(ServiceClosedError) [fail_sink.AsyncProcessRequest(sink_stack, msg, stream, headers) for sink_stack, msg, stream, headers in self._waiters] @property def state(self): return self._state WatermarkPoolSink.Builder = SinkProvider( WatermarkPoolSink, SinkRole.Pool, min_watermark = 1, max_watermark = Int.MaxValue, max_queue_len = Int.MaxValue )
2
2
features/hooks/config.py
mrmayfield/pyethereum
1
12773343
<gh_stars>1-10 import uuid import mock import tempfile from pyethereum.utils import sha3 class ConfigHook(object): def before_feature(self, context, feature): ''' .. note:: `context.conf` is used instead of `context.config` because `config` is used internally in `context` by *behave* ''' context.conf = conf = mock.MagicMock() node_id = sha3(str(uuid.uuid1())).encode('hex') tempdir = tempfile.mkdtemp() def get_side_effect(section, option): if section == 'network' and option == 'client_id': return 'client id' if section == 'network' and option == 'node_id': return node_id if section == 'wallet' and option == 'coinbase': return '0'*40 if section == 'misc' and option == 'data_dir': return tempdir def getint_side_effect(section, option): if section == 'network' and option == 'listen_port': return 1234 if section == 'network' and option == 'num_peers': return 10 conf.get.side_effect = get_side_effect conf.getint.side_effect = getint_side_effect hook = ConfigHook()
2.203125
2
textX-LS/core/textx_ls_core/languages/textxfile/__init__.py
goto40/textX-LS
0
12773344
from .. import LanguageTemplate class TextxfileLang(LanguageTemplate): @property def extensions(self): return ['textxfile'] @property def language_name(self): return 'Textxfile'
2
2
log/27_remove_element.py
uteuliyeva/leetcode_python_solutions
0
12773345
#Date: 031622 #Difficulty: Easy class Solution(object): def removeElement(self, nums, val): """ :type nums: List[int] :type val: int :rtype: int """ write=0 for read in range(len(nums)): if nums[read]!=val: nums[write]=nums[read] write+=1 return write
3.375
3
setup.py
jkaluzka/flask-model-mommy
0
12773346
<reponame>jkaluzka/flask-model-mommy """Setup file for flask-model-mommy.""" import setuptools from os.path import join, dirname setuptools.setup( name='flask-model-mommy', version='0.1.0', packages=['flask_model_mommy'], include_package_data=True, # declarations in MANIFEST.in install_requires=open(join(dirname(__file__), 'requirements.txt')).readlines(), tests_require=[ 'flask>=0.12.2', 'mock>=1.0.1', 'tox==2.9.1', ], test_suite='runtests.runtests', author='jkaluzka', author_email='<EMAIL>', url='http://github.com/jkaluzka/flask-model-mommy', license='MIT', description='Simple and easy models object creation package for applic ations written in Flask.', long_description=open(join(dirname(__file__), 'README.rst')).read(), keywords='flask testing factory python model mommy', classifiers=[ 'Framework :: Flask', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Topic :: Software Development', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], )
1.390625
1
src/som/primitives/ast/system_primitives.py
smarr/RTruffleSOM
9
12773347
from som.primitives.primitives import Primitives from som.vm.globals import nilObject, falseObject, trueObject from som.vmobjects.primitive import AstPrimitive as Primitive from som.vm.universe import std_print, std_println from rpython.rlib import rgc, jit import time def _load(ivkbl, rcvr, args): argument = args[0] result = ivkbl.get_universe().load_class(argument) return result if result else nilObject def _exit(ivkbl, rcvr, args): error = args[0] return ivkbl.get_universe().exit(error.get_embedded_integer()) def _global(ivkbl, rcvr, args): argument = args[0] result = ivkbl.get_universe().get_global(argument) return result if result else nilObject def _has_global(ivkbl, rcvr, args): if ivkbl.get_universe().has_global(args[0]): return trueObject else: return falseObject def _global_put(ivkbl, rcvr, args): value = args[1] argument = args[0] ivkbl.get_universe().set_global(argument, value) return value def _print_string(ivkbl, rcvr, args): argument = args[0] std_print(argument.get_embedded_string()) return rcvr def _print_newline(ivkbl, rcvr, args): std_println() return rcvr def _time(ivkbl, rcvr, args): since_start = time.time() - ivkbl.get_universe().start_time return ivkbl.get_universe().new_integer(int(since_start * 1000)) def _ticks(ivkbl, rcvr, args): since_start = time.time() - ivkbl.get_universe().start_time return ivkbl.get_universe().new_integer(int(since_start * 1000000)) @jit.dont_look_inside def _fullGC(ivkbl, rcvr, args): rgc.collect() return trueObject class SystemPrimitives(Primitives): def install_primitives(self): self._install_instance_primitive(Primitive("load:", self._universe, _load)) self._install_instance_primitive(Primitive("exit:", self._universe, _exit)) self._install_instance_primitive(Primitive("hasGlobal:", self._universe, _has_global)) self._install_instance_primitive(Primitive("global:", self._universe, _global)) self._install_instance_primitive(Primitive("global:put:", self._universe, _global_put)) 
self._install_instance_primitive(Primitive("printString:", self._universe, _print_string)) self._install_instance_primitive(Primitive("printNewline", self._universe, _print_newline)) self._install_instance_primitive(Primitive("time", self._universe, _time)) self._install_instance_primitive(Primitive("ticks", self._universe, _ticks)) self._install_instance_primitive(Primitive("fullGC", self._universe, _fullGC))
2.15625
2
cvplus/model/videoController.py
AndyTsangChun/cvutil
0
12773348
#! /usr/bin/env python import os,sys import cv2, re import numpy as np try: from pyutil import PyLogger except ImportError: from .. import PyLogger __author__ = "<NAME>" __credits__ = ["<NAME>"] __version__ = "0.0.1" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" SRC_TYPE_NAME = ["WebCam","Video","IPCam"] OUTPUT_VIDEO_NAME = "source{}.avi" SAVE_FORMAT = 'XVID' DEFAULT_FPS = 20 class VideoController(): def __init__(self, video_src, video_ratio=1, record_prefix="", record_name="", isRecord=False, log=False, debug=False): # init logger self.__logger = PyLogger(log=log,debug=debug) self.__vid_caps = list() self.__vid_writers = list() self.__record_path = os.path.join(record_prefix,record_name) if record_name != "" else os.path.join(record_prefix,OUTPUT_VIDEO_NAME) self.__video_ratio = video_ratio self.fps = DEFAULT_FPS # create a VideoCapture for each src for src in video_src: self.__initVideoSource(src) # init writer parameters self.__fourcc = cv2.VideoWriter_fourcc(*SAVE_FORMAT) if isRecord: self.__initVideoWriter() def __initVideoSource(self, src, camId=-1): """ Initialise video input source Args: src (object): video source used by Opencv, could be int or String camId (int): if any cameraId was given """ if src is None or src == "": return sourceType = -1 # usb cam/web cam if type(src) is int: sourceType = 0 # search for ipcams elif re.search( r'(http)|(rstp)|(https) & *', src, re.M|re.I): sourceType = 2 # videos else: sourceType = 1 cap = cv2.VideoCapture(src) if cap.isOpened(): if camId == -1: camId = len(self.__vid_caps) if len(self.__vid_caps) > 0: cams = np.array(self.__vid_caps)[:,0] if camId in cams: camId = np.amax(cams) + 1 fps = int(cap.get(cv2.CAP_PROP_FPS)) self.__vid_caps.append([camId, sourceType, cap, src,fps]) self.__logger.info("Video Input Connected to {}".format(src)) else: self.__logger.error("No {} Source Found From {}".format(SRC_TYPE_NAME[sourceType], src)) def __initVideoWriter(self): """ Initialise video writer """ for cap_info in 
self.__vid_caps: cap = cap_info[2] # get cv2.cap object fps = cap_info[4] if fps == 0 or self.fps < fps: fps = self.fps self.__vid_writers.append([cap_info[0],cv2.VideoWriter(self.__record_path.format(cap_info[0]), self.__fourcc, fps, (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)/self.__video_ratio),int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)/self.__video_ratio)))]) def writeVideo(self, camId, frame): """ Write video to output Args: camId (int): if any cameraId was given frame (np.array): video frame to be written """ if len(self.__vid_writers) > 0: ids = np.array(self.__vid_writers)[:,0] if frame is not None: self.__vid_writers[np.where(ids == camId)[0][0]][1].write(frame) def getFrame(self, camId): """ Return frame from video source Args: camId (int): camera ID Returns: **frame** (np.array) - current frame """ # Capture frame-by-frame frame = None try: cap = self.__vid_caps[np.where(np.array(self.__vid_caps)[:,0]==camId)[0][0]][2] if cap is not None: ret, frame = cap.read() frame = cv2.resize(frame, (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)/self.__video_ratio),int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)/self.__video_ratio))) #frame = cv2.resize(frame, (420,240)) except cv2.error: return None return frame def showFrame(self, frame, title="Video"): """ Using OpenCV to display the current frame Title is important if need to display multi window Args: frame (np.array): frame given to be shown title (string): display window title, associate frame and display window """ # Display the resulting frame cv2.imshow(title,frame) # This line is important to keep the video showing if cv2.waitKey(1) & 0xFF == ord('q'): cap.release() cv2.destroyAllWindows() def onClose(self): for cap in self.__vid_caps: cap[2].release() for writer in self.__vid_writers: writer[1].release() cv2.destroyAllWindows() def printVideoSrcInfo(self): # header self.__logger.info("{:5}|{:10}".format("CamID","Source")) # body for cap in self.__vid_caps: src = cap[3] if type(src) is int: src = SRC_TYPE_NAME[0]+ " 
{}".format(src) self.__logger.info("{:5}|{}".format(cap[0],src)) def getVideoSrcInfo(self): """ Return Camera Information Returns: * **cam_info** (numpy.array) - camera information (camId, src) """ if len(self.__vid_caps) <= 0: return None return np.array(self.__vid_caps)[:,[0,3]] def drawInfo(self, frame, fps, color=(255,255,255), num_people=-1): """ Draw frame info Args: frame (numpy.array): input frame fps (int): Frame per second color (tuple): BGR color code num_people (int): number of people detected Returns: * **frame** (numpy.array) - modified frame """ frame_size = frame.shape cv2.putText(frame, "FPS:{}".format(fps), (20,frame_size[0]-20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) if num_people >= 0: cv2.putText(frame, "Num.Person:{}".format(num_people), (frame_size[1]-150,frame_size[0]-20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2) return frame def setIsRecord(self, isRecord): """ Set is recorded video or not Args: isRecord (boolean): record video or not """ if isRecord and not self.isRecord: self.__initVideoWriter() self.isRecord = isRecord
2.40625
2
src/utils/pkldf2jsonl.py
fatterbetter/CodeSearchNet
1,681
12773349
<filename>src/utils/pkldf2jsonl.py import pandas as pd from .general_utils import chunkify from dpu_utils.utils import RichPath from multiprocessing import Pool, cpu_count def df_to_jsonl(df: pd.DataFrame, RichPath_obj: RichPath, i: int, basefilename='codedata') -> str: dest_filename = f'{basefilename}_{str(i).zfill(5)}.jsonl.gz' RichPath_obj.join(dest_filename).save_as_compressed_file(df.to_dict(orient='records')) return str(RichPath_obj.join(dest_filename)) def chunked_save_df_to_jsonl(df: pd.DataFrame, output_folder: RichPath, num_chunks: int=None, parallel: bool=True) -> None: "Chunk DataFrame (n chunks = num cores) and save as jsonl files." df.reset_index(drop=True, inplace=True) # parallel saving to jsonl files on azure n = cpu_count() if num_chunks is None else num_chunks dfs = chunkify(df, n) args = zip(dfs, [output_folder]*len(dfs), range(len(dfs))) if not parallel: for arg in args: dest_filename = df_to_jsonl(*arg) print(f'Wrote chunk to {dest_filename}') else: with Pool(cpu_count()) as pool: pool.starmap(df_to_jsonl, args)
2.78125
3
time_feature_extraction.py
paidamoyo/DEAP_classification
3
12773350
<filename>time_feature_extraction.py import os import numpy as np from scipy.io import loadmat class TimeFeatureExtraction(object): def __init__(self, ): self.dir_path = os.path.dirname(os.path.realpath(__file__)) self.subjects = 32 self.num_labels = 4 def extract_features(self, test_idx, valid_idx): train_data = [] train_lab = [] valid_data = [] valid_lab = [] test_data = [] test_lab = [] print("valid_idx:{}, test_idx:{}".format(valid_idx, test_idx)) for subj in np.arange(start=1, stop=self.subjects + 1, step=1): print("subject:{}".format(subj)) file = "DEAP_s/s_{}.mat".format(subj) path = os.path.abspath(os.path.join(self.dir_path, '', file)) print("path:{}".format(path)) s = loadmat(path) s_label = s['label'] s_data = s['data'] print("data:{}, label:{}".format(s_data.shape, s_label.shape)) for obs in np.arange(s_data.shape[0]): if subj == valid_idx: valid_data.append(s_data[obs, :, :]) valid_lab.append(s_label[obs, :]) elif subj == test_idx: test_data.append(s_data[obs, :, :]) test_lab.append(s_label[obs, :]) else: train_data.append(s_data[obs, :, :]) train_lab.append(s_label[obs, :]) data = {'train': [np.array(train_data), np.array(train_lab)], 'valid': [np.array(valid_data), np.array(valid_lab)], 'test': [np.array(test_data), np.array(test_lab)]} self.shuffle_obs(data['train'], name='train') self.shuffle_obs(data['valid'], name='valid') self.shuffle_obs(data['test'], name='test') return data def shuffle_obs(self, observations, name): signal = observations[0] lab = observations[1] print('{} cwt_signal:{}, labels:{}'.format(name, signal.shape, lab.shape)) trials = signal.shape[0] idx_range = np.arange(trials) np.random.shuffle(idx_range) data = signal[idx_range] label = lab[idx_range] np.save('CONV/{}_label'.format(name), label) np.save('CONV/{}_data'.format(name), data) return data, label if __name__ == '__main__': np.random.seed(31415) time = TimeFeatureExtraction() data = time.extract_features(valid_idx=1, test_idx=2)
2.65625
3