content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import pytest
from pathlib import Path
root = Path(__file__).parent.resolve()
@pytest.fixture(scope="session")
def csv_path() -> str:
    """Session-scoped path to the PA county population CSV fixture.

    The file has a column called 'fips' holding unique county IDs, which
    joins with the 'GEOID' column in the topojson file at topo_path.
    """
    fixture_file = root.joinpath("fixtures", "pa-county-pop.csv")
    return str(fixture_file)
@pytest.fixture(scope="session")
def csv_path_non_matching():
    """Session-scoped path to a deliberately incomplete county CSV fixture.

    Like csv_path, the file's 'fips' column joins with 'GEOID' in the
    topojson at topo_path — but rows are missing, so the join will not be
    clean. Used to exercise the non-matching code path.
    """
    fixture_file = root.joinpath("fixtures", "pa-county-pop__non-matching-rows.csv")
    return str(fixture_file)
@pytest.fixture(scope="session")
| [
11748,
12972,
9288,
198,
6738,
3108,
8019,
1330,
10644,
628,
198,
15763,
796,
10644,
7,
834,
7753,
834,
737,
8000,
13,
411,
6442,
3419,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
2625,
29891,
4943,
198,
4299,
269,
21370,
62,
... | 2.896194 | 289 |
"""
3. MNE Interface Cycle Feature Distributions
============================================
Compute bycycle feature distributions using MNE objects.
"""
####################################################################################################
# Import Packages and Load Data
# -----------------------------
#
# First let's import the packages we need. This example depends on mne.
####################################################################################################
import numpy as np
import matplotlib.pyplot as plt
from mne.io import read_raw_fif
from mne.datasets import sample
from mne import pick_channels
from neurodsp.plts import plot_time_series
from bycycle.group import compute_features_2d
from bycycle.plts import plot_feature_hist
####################################################################################################
# Frequencies of interest: the alpha band
f_alpha = (8, 15)
# Get the data path for the MNE example data
raw_fname = sample.data_path() + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Load the file of example MNE data
raw = read_raw_fif(raw_fname, preload=True, verbose=False)
# Select EEG channels from the dataset
raw = raw.pick_types(meg=False, eeg=True, eog=False, exclude='bads')
# Grab the sampling rate from the data
fs = raw.info['sfreq']
# filter to alpha
raw = raw.filter(l_freq=None, h_freq=20.)
# Settings for exploring example channels of data
chs = ['EEG 042', 'EEG 043', 'EEG 044']
t_start = 20000
t_stop = int(t_start + (10 * fs))
# Extract an example channels to explore
sigs, times = raw.get_data(pick_channels(raw.ch_names, chs),
start=t_start, stop=t_stop, return_times=True)
####################################################################################################
#
# Plot time series for each recording
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Now let's see how each signal looks in time. This looks like standard EEG
# data.
#
####################################################################################################
# Plot the signal
plot_time_series(times, [sig * 1e6 for sig in sigs], labels=chs, title='EEG Signal')
####################################################################################################
# Compute cycle-by-cycle features
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Here we use the bycycle compute_features function to compute the cycle-by-
# cycle features of the three signals.
#
####################################################################################################
# Set parameters for defining oscillatory bursts
threshold_kwargs = {'amp_fraction_threshold': 0.3,
'amp_consistency_threshold': 0.4,
'period_consistency_threshold': 0.5,
'monotonicity_threshold': 0.8,
'min_n_cycles': 3}
# Create a dictionary of cycle feature dataframes, corresponding to each channel
kwargs = dict(threshold_kwargs=threshold_kwargs, center_extrema='trough')
dfs = compute_features_2d(sigs, fs, f_alpha, axis=0,
compute_features_kwargs=kwargs)
dfs = {ch: df for df, ch in zip(dfs, chs)}
####################################################################################################
#
# Plot feature distributions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# As it turns out, none of the channels in the mne example audio and visual
# task has waveform asymmetry. These data were collected from a healthy
# person while they listened to beeps or saw gratings on a screen
# so this is not unexpected.
#
####################################################################################################
fig, axes = plt.subplots(figsize=(15, 15), nrows=2, ncols=2)
for ch, df in dfs.items():
# Rescale amplitude and period features
df['volt_amp'] = df['volt_amp'] * 1e6
df['period'] = df['period'] / fs * 1000
# Plot feature histograms
plot_feature_hist(df, 'volt_amp', only_bursts=False, ax=axes[0][0], label=ch,
xlabel='Cycle amplitude (mV)', bins=np.arange(0, 40, 4))
plot_feature_hist(df, 'period', only_bursts=False, ax=axes[0][1], label=ch,
xlabel='Cycle period (ms)', bins=np.arange(0, 250, 25))
plot_feature_hist(df, 'time_rdsym', only_bursts=False, ax=axes[1][0], label=ch,
xlabel='Rise-decay asymmetry', bins=np.arange(0, 1, .1))
plot_feature_hist(df, 'time_ptsym', only_bursts=False, ax=axes[1][1], label=ch,
xlabel='Peak-trough asymmetry', bins=np.arange(0, 1, .1))
| [
37811,
198,
18,
13,
337,
12161,
26491,
26993,
27018,
46567,
507,
198,
10052,
25609,
198,
198,
7293,
1133,
416,
13696,
3895,
24570,
1262,
337,
12161,
5563,
13,
198,
37811,
198,
198,
29113,
29113,
29113,
4242,
198,
2,
17267,
6400,
1095,
2... | 3.156271 | 1,459 |
import string
#Translate in python has 2 pieces, a translation table and the translate call.
#The translation table is a list of 256 characters. Changing the order of the #characters is used for mapping
norm = string.maketrans('', '') #builds list of all characters
print len(norm) #256 characters
print string.maketrans('', '')[100] #is the letter d
print string.maketrans('', '')[101] #is the letter e
print string.maketrans('d','e')[100] #is now also the letter e
#The second piece of translate, is the translate function itself.
#The translate function has 3 parts:
#1)string to translate
#2)translation table -- always required
#3)deletion list
#Let's start simple and build
#use translate to get groups of characters
#This can be done because translate's 3rd arg is to delete characters
#build list of all characters
norm = string.maketrans('', '')
#delete letters
non_letters = string.translate(norm, norm, string.letters)
#then take the list of non_letters and remove digits
non_alnum = string.translate(non_letters, all_chars, string.digits)
#You'll notice the length shrinks appropriately as we delete
print len(all_chars),'\t256-(26*2 letters)=',len(non_letters),'\t204-10 digits=',len(non_alnum)
#Norm is a handy list to have around if all you are going to do is delete
#characters. It would be nice if translate assumed Norm if the translation table arg was null.
#To translate all non-text to a '#', you have to have a one to one mapping for #each character in translate.
#Thus we make use of the python * operator to make a string of '#'
#of the appropriate length
trans_nontext=string.maketrans(non_alnum,'#'*len(non_alnum))
#A full program to examine strings in a binary file for Regents
# would look like this. We use regular expressions to convert all groups
# of '#' to a single '#'
import string,re
norm = string.maketrans('', '') #builds list of all characters
non_alnum = string.translate(norm, norm, string.letters+string.digits)
#now examine the binary file. If Regents is in it. It contains the copyright
ftp_file=open('f:/tmp/ftp.exe','rb').read()
trans_nontext=string.maketrans(non_alnum,'#'*len(non_alnum))
cleaned=string.translate(ftp_file, trans_nontext)
for i in re.sub('#+','#',cleaned).split('#'):
if i.find('Regents')!=-1:
print 'found it!',i
break
if i>5:
print i
| [
11748,
4731,
198,
198,
2,
8291,
17660,
287,
21015,
468,
362,
5207,
11,
257,
11059,
3084,
290,
262,
15772,
869,
13,
198,
2,
464,
11059,
3084,
318,
257,
1351,
286,
17759,
3435,
13,
33680,
262,
1502,
286,
262,
1303,
10641,
19858,
318,
... | 3.119048 | 756 |
from django.urls import path
from manti_by.apps.gallery import views
# Gallery app URLconf: the app root serves the gallery index (list) view.
urlpatterns = [path("", views.index, name="gallery_list")]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
285,
17096,
62,
1525,
13,
18211,
13,
24460,
1330,
5009,
628,
198,
6371,
33279,
82,
796,
685,
6978,
7203,
1600,
5009,
13,
9630,
11,
1438,
2625,
24460,
62,
4868,
4943,
60,
... | 3.046512 | 43 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from omegaconf import DictConfig, OmegaConf
from hydra.core.object_type import ObjectType
from hydra.core.singleton import Singleton
from hydra.plugins.config_source import ConfigLoadError
@dataclass
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
6923,
33876,
198,
11748,
4866,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
32233,
198,
198,
6738... | 3.717172 | 99 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains Google Cloud Looker sensors."""
from typing import TYPE_CHECKING, Optional
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.looker import JobStatus, LookerHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class LookerCheckPdtBuildSensor(BaseSensorOperator):
"""
Check for the state of a previously submitted PDT materialization job.
:param materialization_id: Required. The materialization job ID to poll. (templated)
:param looker_conn_id: Required. The connection ID to use connecting to Looker.
:param cancel_on_kill: Optional. Flag which indicates whether cancel the hook's job or not,
when on_kill is called.
"""
template_fields = ["materialization_id"]
| [
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
... | 3.723112 | 437 |
#
# Tests the Component class bindings
#
import unittest
from test_resources import file_contents
# NOTE(review): no TestCase classes are visible in this chunk — the test
# cases were presumably defined between the imports and this guard in the
# full file; running this as-is discovers nothing.
if __name__ == '__main__':
    unittest.main()
| [
2,
198,
2,
30307,
262,
35100,
1398,
34111,
198,
2,
198,
11748,
555,
715,
395,
198,
198,
6738,
1332,
62,
37540,
1330,
2393,
62,
3642,
658,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
... | 2.98 | 50 |
import pandas as pd
from ..utils._docs import fill_doc
from .average import add_average_column
@fill_doc
def ratio(df_alpha, df_delta):
    """Compute the ratio of alpha/delta band power.

    Parameters
    ----------
    %(df_psd)s
        Contains alpha-band PSD.
    %(df_psd)s
        Contains delta-band PSD.

    Returns
    -------
    df : DataFrame
        PSD ratio alpha/delta averaged by bin and channels. Columns:
            participant : int - Participant ID
            session : int - Session ID (1 to 15)
            run : int - Run ID
            phase : str - 'regulation' or 'non-regulation'
            idx : ID of the phase within the run (0 to 9)
            ratio : float - Averaged ratio alpha/delta
    """
    # Add the across-channel average column to each frame if missing.
    if "avg" not in df_alpha.columns:
        df_alpha = add_average_column(df_alpha)
    if "avg" not in df_delta.columns:
        # BUG FIX: the result was previously assigned to df_alpha, so
        # df_delta never received its 'avg' column.
        df_delta = add_average_column(df_delta)
    # check keys: both frames must carry the identifier columns and agree.
    keys = ["participant", "session", "run", "phase", "idx"]
    assert len(set(keys).intersection(df_alpha.columns)) == len(keys)
    assert len(set(keys).intersection(df_delta.columns)) == len(keys)
    assert sorted(df_alpha.columns) == sorted(df_delta.columns)
    # container for new df with ratio of power
    data = {key: [] for key in keys + ["ratio"]}
    ratio = df_alpha["avg"] / df_delta["avg"]
    ratio = ratio[ratio.notna()]  # drop rows where either PSD was NaN
    # fill new df dict
    # .items() replaces .iteritems(), which was removed in pandas 2.0.
    for i, r in ratio.items():
        alpha_ = df_alpha.loc[i]
        delta_ = df_delta.loc[i]
        # sanity-check: skip rows whose identifiers disagree between frames
        try:
            assert alpha_["participant"] == delta_["participant"]
            assert alpha_["session"] == delta_["session"]
            assert alpha_["run"] == delta_["run"]
            assert alpha_["phase"] == delta_["phase"]
            assert alpha_["idx"] == delta_["idx"]
        except AssertionError:
            continue
        data["participant"].append(alpha_["participant"])
        data["session"].append(alpha_["session"])
        data["run"].append(alpha_["run"])
        data["phase"].append(alpha_["phase"])
        data["idx"].append(alpha_["idx"])
        data["ratio"].append(r)
    # create df
    df = pd.DataFrame.from_dict(data, orient="columns")
    return df
| [
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
11485,
26791,
13557,
31628,
1330,
6070,
62,
15390,
198,
6738,
764,
23913,
1330,
751,
62,
23913,
62,
28665,
628,
198,
31,
20797,
62,
15390,
198,
4299,
8064,
7,
7568,
62,
26591,
11,
47764... | 2.291022 | 969 |
import numpy as np
import pytest
import numgrad as ng
@pytest.mark.parametrize('function, expect', [
(lambda: np.asarray(ng.Variable([0, 1])), np.array([0., 1.])),
(lambda: 0. in ng.Variable(0.), TypeError),
(lambda: 0. in ng.Variable([0.]), True),
(lambda: 1. in ng.Variable([[0., 1.], [2., 3.]]), True),
(lambda: -1. not in ng.Variable([[0., 1.], [2., 3.]]), True),
(lambda: float(ng.Variable(-1)), -1.),
(lambda: float(ng.Variable([0, -1])), TypeError),
(lambda: int(ng.Variable(-1)), -1),
(lambda: int(ng.Variable([0, -1])), TypeError),
(lambda: len(ng.Variable(-1)), TypeError),
(lambda: len(ng.Variable([0, -1])), 2),
(lambda: ng.Variable(0.).item(), 0.),
(lambda: ng.Variable([0.]).item(), 0.),
(lambda: ng.Variable([0., 1.]).item(), ValueError),
(lambda: ng.Variable(1).ndim, 0),
(lambda: ng.Variable([0, 1]).ndim, 1),
(lambda: ng.Variable(0).shape, tuple()),
(lambda: ng.Variable([0, 1]).shape, (2,)),
(lambda: ng.Variable(0).size, 1),
(lambda: ng.Variable([0, 1]).size, 2),
(lambda: ng.Variable(0.).tolist(), 0.),
(lambda: ng.Variable([0., 1.]).tolist(), [0., 1.]),
(lambda: ng.Variable([[0., 1.], [2., 3.]]).tolist(), [[0., 1.], [2., 3.]]),
])
@pytest.mark.parametrize('self, method, args', [
(ng.Variable([1, -1]), '__iadd__', 1),
(ng.Variable([1, -1]), '__isub__', 1),
(ng.Variable([1, -1]), '__imul__', 2),
(ng.Variable([1, -1]), '__itruediv__', 2),
])
if __name__ == '__main__':
pytest.main([__file__])
| [
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
11748,
997,
9744,
355,
23370,
628,
628,
628,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
10786,
8818,
11,
1607,
3256,
685,
198,
220,
220,
220,
357,
... | 2.289318 | 674 |
# From https://stackoverflow.com/questions/30988033/
# sending-live-video-frame-over-network-in-python-opencv
from numpysocket import NumpySocket
import cv2

# Receive numpy video frames over the network and display them with OpenCV.
npSocket = NumpySocket()
# NOTE(review): startClient(9999) is paired with endServer() below — the
# numpysocket API naming mixes client/server; confirm this is the intended
# teardown call for a client connection.
npSocket.startClient(9999)
# Read until video is completed
while True:
    # Capture frame-by-frame ('recieveNumpy' spelling is the library's API)
    frame = npSocket.recieveNumpy()
    cv2.imshow('Frame', frame)
    # Press Q on keyboard to exit
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
npSocket.endServer()
print("Closing")
| [
2,
3574,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
1270,
4089,
1795,
2091,
14,
198,
2,
220,
220,
220,
220,
220,
7216,
12,
12583,
12,
15588,
12,
14535,
12,
2502,
12,
27349,
12,
259,
12,
29412,
12,
9654,
33967,
198... | 2.587302 | 189 |
import datetime, os, sys
import subprocess

#### #### #### #### ####
#### Functions ####
#### #### #### #### ####

#### #### #### #### ####
#### Main ####
#### #### #### #### ####
# Thin wrapper: collect user settings, then invoke ./main.exe with the
# matching command-line flags.
# NOTE(review): GetTargetDirectory(), GetAlternativeDate(),
# GetIsRecursiveSearch() and ExitHandler() are not defined in this chunk —
# they presumably live in the 'Functions' section above in the full file.
try:
    ## Variables initialization
    targetDirectory = GetTargetDirectory()
    alternativeDate = GetAlternativeDate()
    isRecursiveSearch = GetIsRecursiveSearch()
    ## Main
    command = ["./main.exe", "--dir", targetDirectory]
    # Only pass --date when a real date object was supplied.
    if isinstance(alternativeDate, datetime.date):
        command.extend( ["--date", alternativeDate.isoformat() ] )
    if isRecursiveSearch is True:
        command.append("-r")
    subprocess.call(command)
except KeyboardInterrupt:
    # Ctrl-C: exit quietly; cleanup still runs in finally.
    pass
finally:
    ExitHandler()
| [
11748,
4818,
8079,
11,
28686,
11,
25064,
198,
11748,
850,
14681,
628,
198,
4242,
1303,
21017,
1303,
21017,
1303,
21017,
1303,
21017,
220,
198,
4242,
40480,
1303,
21017,
220,
198,
4242,
1303,
21017,
1303,
21017,
1303,
21017,
1303,
21017,
2... | 2.819277 | 249 |
from os import environ
import pytest
from opentsdb import TSDBClient, TSDBConnectProtocols, Counter
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
6738,
28686,
1330,
551,
2268,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
1034,
658,
9945,
1330,
309,
10305,
33,
11792,
11,
309,
10305,
2749,
261,
1606,
19703,
4668,
82,
11,
15034,
628,
198,
31,
9078,
9288,
13,
69,
9602,
628,
198,
... | 2.573171 | 82 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
#!/usr/bin/env python2.7
# Build binary minibatch files of (image_a, image_b, label) records for a
# same/different pair-classification task (Python 2 script).
#
# Usage: script.py <dataset_dir> [<output_dir>]   (output defaults to 'data')
import numpy as np
import os
import random
import sys

from scipy.ndimage import imread

dataset = sys.argv[1]
outputdir = sys.argv[2] if len(sys.argv) == 3 else 'data'

minibatches = 10
minibatch_size = 500

# (H, W, 3) interleaved image -> (3, H, W) planar channels
split_channels = lambda x: np.array((x[:,:,0], x[:,:,1], x[:,:,2]))

images = os.listdir(dataset)
images.sort()
# Consecutive groups of four files; sorted order groups related images together.
image_groups = [images[i:i+4] for i in xrange(0, len(images), 4)]

path = lambda p: os.path.join(dataset, p)

records = []
random.seed()

# Positive pairs: two pairs drawn from within the same group of four (label 1).
for image_group in image_groups:
    random.shuffle(image_group)
    a_file, b_file = image_group[:2]
    a_array = split_channels(imread(path(a_file)))
    b_array = split_channels(imread(path(b_file)))
    records.append((a_array, b_array, np.uint8(1)))
    a_file, b_file = image_group[2:]
    a_array = split_channels(imread(path(a_file)))
    b_array = split_channels(imread(path(b_file)))
    records.append((a_array, b_array, np.uint8(1)))

# Random pairs: label by whether the two filenames share the same 4-char prefix.
images = os.listdir(dataset)
random.shuffle(images)
images_2 = os.listdir(dataset)
random.shuffle(images_2)
for a_file, b_file in zip(images, images_2):
    a_array = split_channels(imread(path(a_file)))
    b_array = split_channels(imread(path(b_file)))
    label = 1 if a_file[:4] == b_file[:4] else 0
    # BUG FIX: the computed label was previously ignored and every random
    # pair was written as np.uint8(0), mislabelling same-prefix pairs.
    records.append((a_array, b_array, np.uint8(label)))

random.shuffle(records)

# Write the first minibatches*minibatch_size records as training batches...
for i in xrange(minibatches):
    outfile_path = os.path.join(outputdir, 'data_batch_{}.bin'.format(i+1))
    with open(outfile_path, 'wb') as fh:
        for j in xrange(i*minibatch_size, (i+1)*minibatch_size):
            for r in records[j]:
                r.tofile(fh)

# ...and the remainder as the test set.
outfile_path = os.path.join(outputdir, 'data_test.bin')
with open(outfile_path, 'wb') as fh:
    for j in xrange(minibatches*minibatch_size, len(records)):
        for r in records[j]:
            r.tofile(fh)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
22,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
25064,
198,
198,
6738,
629,
541,
88,
13,
358,
9060,
1330,
545,
961,
198,
198,
19608,
... | 2.249377 | 802 |
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from requests import Session as BaseSession
from log_request_id import DEFAULT_NO_REQUEST_ID, OUTGOING_REQUEST_ID_HEADER_SETTING, REQUEST_ID_HEADER_SETTING, local
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,
306,
16934,
1522,
198,
6738,
7007,
1330,
23575,
355,
7308,
36044,
198,
198,
6738,
2604,
62,
25927,
62,
312,
1330,
5550,
388... | 3.298701 | 77 |
# future imports
from __future__ import unicode_literals
# stdlib imports
import logging
import time
import threading
# third-party imports
import pykka
from mopidy.core import CoreListener
logger = logging.getLogger(__name__)
class WebhookPlayback(pykka.ThreadingActor, CoreListener):
    """Control the tracklist and playback functionality of mopidy.
    Fetches the head track, adds to tracklist, and starts playback.
    If a timelapse is set, then the track is seeked to the given position.
    """
    # True once the current head track has been popped (deleted) on the server
    popped = False
    # Server-side queue ID associated with the current track
    queue = None
    # Current track payload; used here as a dict with 'id', 'track',
    # 'time_position' and (for next_track) 'queue' keys
    track = None
    # Track payload fetched ahead of time while the current one finishes
    next_track = None
    # Flags that terminate the Timer-based polling loops when True
    stop_update_thread = True
    stop_track_thread = True

    def on_start(self):
        """Grab the current head track, and add it to the tracklist.
        Starts the play method.
        """
        logger.info('ON START CALLED')
        # NOTE(review): initiate() is not defined on this class in this
        # chunk — presumably provided elsewhere (it should also set
        # self.session and self.track, which other methods rely on); confirm.
        self.initiate()

    def on_stop(self):
        """Stops the playback of the current track,
        and cleans up all the treads and tracklist.
        """
        # Stop playback
        self.core.playback.stop()
        # Stop any new timers (both polling loops check these flags)
        self.stop_update_thread = True
        self.stop_track_thread = True
        # Empty queue
        self.core.tracklist.clear()

    def on_event(self, event):
        """Fires functions base of mopidy tracklist events
        """
        state = self.core.playback.state.get()
        # A changed tracklist while stopped means a new track was queued:
        # advance to it.
        if event == 'tracklist_changed' and state == 'stopped':
            logger.info('CALLING NEXT')
            # NOTE(review): next() is not defined in this chunk; presumably
            # implemented elsewhere on this class.
            return self.next()

    def play(self):
        """Starts playing the first track in the tracklist.
        If the track has a "time_position" value then seek the track to that postion.
        """
        logger.info('PLAY CALLED')
        # Start track
        self.core.playback.play()
        # Annoyingly cant start a track at a given time,
        # So once the track has started we can seek it to the correct position
        if self.track['time_position']:
            # NOTE(review): seek() is not defined in this chunk; presumably
            # implemented elsewhere on this class.
            self.seek()
        # Enable both polling loops, then kick them off.
        self.stop_update_thread = False
        self.stop_track_thread = False
        self.update_thread()
        self.track_thread()

    def update_thread(self):
        """Sends updates to the server every 3 seconds
        on the status of the playing track.
        """
        # If stop_thread is set, then return causing the loop to break
        if self.stop_update_thread:
            return
        # Ensure there is a track to report on
        if self.core.playback.current_track.get():
            # Ensure track has started and that it is also not about to end.
            time_position = self.core.playback.time_position.get()
            total = self.track['track']['duration_ms']
            if 1000 < time_position < (total - 9000):
                # Send updates to the server
                kwargs = {
                    'track_id': self.track['id'],
                    'queue_id': self.queue,
                    'state': self.core.playback.state.get(),
                    'time_position': self.core.playback.time_position.get(),
                }
                # NOTE(review): self.session is never assigned in this class
                # within this chunk — presumably set by initiate(); confirm.
                self.session.update_head(kwargs)
        # Loop method every 3 seconds
        thread_timer = threading.Timer(3, self.update_thread)
        thread_timer.start()

    def track_thread(self):
        """Watches the track to know when to trigger fetching a the next track.
        """
        # If stop_thread is set, then return causing the loop to break
        if self.stop_track_thread:
            return
        if self.track.get('track'):
            # Work out the time remaining on the track
            if self.track['track']['duration_ms'] is not None:
                t_end = self.track['track']['duration_ms']
                t_current = self.core.playback.time_position.get()
                time_til_end = t_end - t_current
                # If there is less than 5 seconds left on the track,
                # add the next track to the tracklist,
                # or if no track is currently playing
                if time_til_end < 5000 or not self.core.playback.current_track.get():
                    # Stop updates
                    self.stop_update_thread = True
                    # Delete the current track from the server and fetch the next.
                    # popped param is set to ensure only one delete request is sent.
                    # Futher requests should be fetches rather than deletes.
                    logger.info('POPPING TRACK')
                    if self.popped:
                        next_track = self.session.fetch_head()
                    else:
                        self.popped = True
                        kwargs = {'queue_id': self.queue}
                        next_track = self.session.pop_head(kwargs)
                    logger.info('############')
                    # If a track is found, added it
                    if next_track.get('track'):
                        self.next_track = next_track
                        self.queue = self.next_track['queue']
                        self.popped = False
                    # Exit loop
                    return
        # Reschedule this check (Timer interval is 1 second, despite the
        # original "1/2 second" comment).
        thread_timer = threading.Timer(1, self.track_thread)
        thread_timer.start()
| [
2,
2003,
17944,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
2,
14367,
8019,
17944,
198,
11748,
18931,
198,
11748,
640,
198,
11748,
4704,
278,
198,
198,
2,
2368,
12,
10608,
17944,
198,
11748,
12972,
74,
4... | 2.189144 | 2,395 |
from lxml import html
import requests
import sqlite3

# NOTE(review): `html` and `requests` are unused in this chunk — presumably
# needed by code that follows in the full file; confirm before removing.
# Open the local scores database and dump every row of the users table.
c = sqlite3.connect('bling.db')
oldscore={}
# NOTE(review): the connection is never closed in this visible chunk.
for row in c.execute('SELECT * FROM users'):
    print(row)
| [
6738,
300,
19875,
1330,
27711,
198,
11748,
7007,
198,
11748,
44161,
578,
18,
198,
66,
796,
44161,
578,
18,
13,
8443,
10786,
11108,
13,
9945,
11537,
198,
198,
727,
26675,
34758,
92,
198,
198,
1640,
5752,
287,
269,
13,
41049,
10786,
465... | 2.925926 | 54 |
from vgn.exceptions import VgnGetError
from vgn.data_classes import *
import vgn.converter as conv
import datetime
import asyncio
import aiohttp

# NOTE(review): main() is not defined in this chunk — it is presumably an
# async coroutine defined between the imports and this guard in the full file.
if __name__ == '__main__':
    asyncio.run(main())
| [
6738,
410,
4593,
13,
1069,
11755,
1330,
569,
4593,
3855,
12331,
198,
6738,
410,
4593,
13,
7890,
62,
37724,
1330,
1635,
198,
11748,
410,
4593,
13,
1102,
332,
353,
355,
3063,
198,
11748,
4818,
8079,
198,
11748,
30351,
952,
198,
11748,
2... | 2.898551 | 69 |
"""Package for all services."""
from .validator_service import Config, ValidatorService
| [
37811,
27813,
329,
477,
2594,
526,
15931,
198,
6738,
764,
12102,
1352,
62,
15271,
1330,
17056,
11,
48951,
1352,
16177,
198
] | 4.190476 | 21 |
#!/usr/bin/env python3
# encoding: utf-8
import os
import random
from typing import NoReturn
import numpy as np
import torch as th
from rls.utils.display import colorize
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
def check_or_create(dicpath: str, name: str = '') -> None:
    """Create the directory *dicpath* (and parents) if it does not exist.

    Logs a message only when the directory had to be created.

    params:
        dicpath: directory path to check/create
        name: human-readable name used in the log message

    NOTE: the return annotation was fixed from NoReturn (which means "never
    returns") to None — the function returns normally.
    """
    if not os.path.exists(dicpath):
        # exist_ok=True guards the TOCTOU race: another process may create
        # the directory between the exists() check and makedirs().
        os.makedirs(dicpath, exist_ok=True)
        logger.info(colorize(
            ''.join([f'create {name} directionary :', dicpath]), color='green'))
def set_global_seeds(seed: int) -> NoReturn:
    """Seed every random number generator used by the framework.

    Seeds Python's `random`, NumPy, and PyTorch (CPU and all CUDA devices),
    and forces deterministic cuDNN kernels.

    params:
        seed: an integer refers to the random seed
    """
    random.seed(seed)
    np.random.seed(seed)
    th.manual_seed(seed)
    th.cuda.manual_seed_all(seed)
    th.backends.cudnn.deterministic = True
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
11748,
28686,
198,
11748,
4738,
198,
6738,
19720,
1330,
1400,
13615,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
355,
294,... | 2.495845 | 361 |
import torch.nn as nn
from mmdet.core import bbox2result
from .. import builder
from ..registry import DETECTORS
from .seq_base import SeqBaseDetector
@DETECTORS.register_module
| [
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
6738,
8085,
15255,
13,
7295,
1330,
275,
3524,
17,
20274,
198,
6738,
11485,
1330,
27098,
198,
6738,
11485,
2301,
4592,
1330,
38267,
9782,
20673,
198,
6738,
764,
41068,
62,
8692,
1330,
1001... | 3.196429 | 56 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Python 2 script: tally per-name label counts from log.txt and print a
# per-label report sorted by descending count.
import operator

# Mapping of label -> {name: count}.
# NOTE(review): safe_add() is not defined in this chunk — it is presumably
# defined alongside this code in the full file and must populate `labels`.
labels = {}

f = open('log.txt', 'r')
for line in f.readlines():
    # Expected line shape: "<name> ...| label1;label2;..."
    s = line.strip().split('| ')
    name = s[0].split(' ')[0].strip()
    if len(s) != 2:
        # No label section: count this entry under 'other'.
        safe_add(name, 'other')
        continue
    lbls = s[1].strip().split(';')
    for label in lbls:
        if len(label) < 1:
            continue
        safe_add(name, label.strip())
f.close()

# Report: each label followed by its names ordered by count, highest first.
for label in labels.keys():
    print "\n" + label
    for name, num in sorted(labels[label].items(), key=operator.itemgetter(1), reverse=True):
        print "\t" + name + ": " + str(num)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
10088,
198,
198,
23912,
1424,
796,
23884,
628,
220,
220,
220,
220,
198,
198,
69,
796,
1280,
10786,
6404,
13,
141... | 2.167247 | 287 |
# Copyright 2021, 2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import click
from cpo.lib.fyre.types.ocp_quick_burn_max_hours_response import OCPQuickBurnMaxHoursResponse
| [
2,
220,
15069,
33448,
11,
33160,
19764,
10501,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.624365 | 197 |
# -*- coding: utf-8 -*-
"""
2.5D prism-mesh EM inversion driver (custEM + pyGIMLi).

Created on Tue Nov 7 12:21:17 2017

@author: Rochlitz.R
"""

import matplotlib.pyplot as plt
import numpy as np
import pygimli as pg
from custEM.meshgen.invmesh_tools import PrismWorld
from custEM.meshgen import meshgen_utils as mu
from custEM.inv.inv_utils import MultiFWD

# Surface topography profile: x positions and (absolute) elevations.
xt, zt = np.loadtxt("topo.txt", unpack=True)
zt = np.abs(zt)

# %% define mesh paramters
dataname = 'GOS_raw_inversion_ByBz_B_Tx123.npz'
invmod = dataname + '_l40'
invmesh = 'Prisms'
dataR, dataI = [], []
errorR, errorI = [], []
# NOTE(review): dataname already ends in '.npz', so this opens
# 'GOS_...npz.npz' — confirm which filename actually exists on disk.
with np.load(dataname+".npz", allow_pickle=True) as ALL:
    freqs = list(ALL["freqs"])
    tx = ALL["tx"]
    print(tx)
    DATA = ALL["DATA"]
    rxs = [data["rx"] for data in DATA]
    # tx_ids = [[int(txi) for txi in data["tx_ids"]] for data in DATA]
    tx_ids = [data["tx_ids"] for data in DATA]
    cmps = [data["cmp"] for data in DATA]
    # Concatenate real/imaginary data and errors across all datasets.
    for i, data in enumerate(DATA):
        dataR = np.concatenate([dataR, data["dataR"].ravel()])
        dataI = np.concatenate([dataI, data["dataI"].ravel()])
        errorR = np.concatenate([errorR, data["errorR"].ravel()])
        errorI = np.concatenate([errorI, data["errorI"].ravel()])

skip_domains = [0, 1]
sig_bg = 3e-3          # background conductivity [S/m]
refm_size = 1.         # receiver refinement size
rxs_resolved = mu.resolve_rx_overlaps(rxs, refm_size)
rx_tri = mu.refine_rx(rxs_resolved, refm_size, 30.)
bound = 200            # lateral padding around the receiver spread [m]
minrx = min([min(data["rx"][:, 0]) for data in DATA])
maxrx = max([max(data["rx"][:, 0]) for data in DATA])
##############################################################################
# %% generate 2.5D prism inversion mesh
P = PrismWorld(name=invmesh,
               x_extent=[minrx-bound, maxrx+bound],
               x_reduction=500.,
               y_depth=1500.,
               z_depth=1200.,
               n_prisms=200,
               tx=[txi for txi in tx],
               orthogonal_tx=[True] * len(tx),
               #surface_rx=rx_tri,
               prism_area=50000,
               prism_quality=34,
               x_dim=[-1e5, 1e5],
               y_dim=[-1e5, 1e5],
               z_dim=[-1e5, 1e5],
               # NOTE(review): topo_f is not defined anywhere in this chunk
               # (only xt/zt above) — presumably a topography interpolation
               # function built from xt/zt; confirm in the full file.
               topo=topo_f,
               )
P.PrismWorld.add_paths(rx_tri)
for rx in rxs:
    P.PrismWorld.add_rx(rx)
# %%
P.PrismWorld.call_tetgen(tet_param='-pDq1.3aA', print_infos=False)
pgmesh = pg.load('meshes/mesh_create/' + invmesh + '.bms')
# pgmesh = P.xzmesh # is 3D
if 0:
    # Debug visualization of mesh, receivers and transmitters (disabled).
    ax, cb = pg.show(pgmesh)
    for rx in rxs:
        ax.plot(rx[:, 0], rx[:, 2], ".")
    for txi in tx:
        for txii in txi:
            print(txii)
            ax.plot(txii[0], txii[2], "mv")
# %% run inversion
# Keep only entries where data and errors are all finite.
mask = np.isfinite(dataR+dataI+errorR+errorI)
datavec = np.hstack((dataR[mask], dataI[mask]))
errorvec = np.hstack((errorR[mask], errorI[mask]))
relerror = np.abs(errorvec/datavec)
fop = MultiFWD(invmod, invmesh, pgmesh, list(freqs), cmps, tx_ids,
               skip_domains, sig_bg, n_cores=140, ini_data=datavec,
               data_mask=mask)
fop.setRegionProperties("*", limits=[1e-4, 1])
# set up inv
inv = pg.Inversion(verbose=True)  # , debug=True)
inv.setForwardOperator(fop)
C = pg.matrix.GeostatisticConstraintsMatrix(mesh=pgmesh, I=[500, 80])
# fop.setConstraints(C)
dT = pg.trans.TransSymLog(1e-3)
inv.dataTrans = dT
# run inversion
invmodel = inv.run(datavec, relerror, lam=40,  # zWeight=0.3,
                   startModel=sig_bg, maxIter=10,
                   verbose=True, robustData=True)
# %% save results
np.save(fop.inv_dir + 'inv_model.npy', invmodel)
res = 1. / invmodel  # conductivity -> resistivity
pgmesh['sigma'] = invmodel  # np.load(fop.inv_dir + 'inv_model.npy')
pgmesh['res'] = res  # np.load(fop.inv_dir + 'inv_model.npy')
# pgmesh.setDimension(3)
# pgmesh.swapCoordinates(1, 2)
pgmesh.exportVTK(fop.inv_dir + invmod + '_final_invmodel.vtk')
# %% plot inv model
fig, ax = plt.subplots(figsize=(14, 8))
ax2, cbar = pg.show(pgmesh, res, ax=ax, cMap="Spectral", colorBar=True,
                    logScale=True, cMin=5, cMax=5000,
                    xlabel='x [m]', ylabel='z [m]',
                    label=r'$\rho$ [$\Omega$m]', pad=0.8)
# cbar.ax.set_xlabel(r'$\sigma$ [S/m]', labelpad=4)
# ax.figure.savefig("out.pdf")
np.save(invmod+"-response.npy", inv.response)
fop.jacobian().save("jacobian.bmat")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
5267,
220,
767,
1105,
25,
2481,
25,
1558,
2177,
198,
198,
31,
9800,
25,
371,
5374,
75,
4224,
13,
49,
198,
37811,
198,
198,
11748,
2603,
... | 2.006229 | 2,087 |
"""
Test Prolog script.
"""
from unittest import mock
import pytest
from lm_agent.workload_managers.slurm.slurmctld_prolog import prolog as main
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.update_report")
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.update_report")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.make_booking_request")
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.sys")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.settings")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.get_config_from_backend")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.update_report")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_prolog.make_booking_request")
| [
37811,
198,
14402,
1041,
6404,
4226,
13,
198,
37811,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
300,
76,
62,
25781,
13,
1818,
2220,
62,
805,
10321,
13,
6649,
333,
76,
13,
6649,
333,
76,
310,... | 2.236868 | 1,009 |
"""Context processors and other useful functions"""
# NOTE(review): ``template``, ``app`` and ``datetime`` appear unused in this
# module as shown — confirm before removing.
from re import template
from flask import Blueprint, current_app as app
from datetime import datetime
# ``fc`` holds the form-constraint values injected into templates below.
from blogapp import fc
# Blueprint that carries the app-wide context processors and hooks.
contexts_bp = Blueprint("contexts_bp", __name__)
@contexts_bp.app_context_processor
def datetime_processor():
    """Inject current date/time into each template before rendering.

    Returns:
        dict: maps the template name ``get_datetime`` to the helper callable.
    """
    # NOTE(review): ``get_datetime`` is neither defined nor imported in this
    # module as shown — presumably provided elsewhere in the package; confirm.
    return dict(get_datetime=get_datetime)
@contexts_bp.app_context_processor
def form_constraints():
    """Inject form constraints into login/signup fields.

    Returns:
        dict: the selected constraint values looked up in ``fc``.
    """
    wanted = (
        "min_name_length",
        "max_name_length",
        "min_username_length",
        "max_username_length",
        "min_pass_length",
    )
    return {key: fc[key] for key in wanted}
@contexts_bp.after_request
def after_request(response):
    """Ensure responses aren't cached.

    Sets the standard no-cache headers on every outgoing response.
    """
    no_cache_headers = {
        "Cache-Control": "no-cache, no-store, must-revalidate",
        "Expires": 0,
        "Pragma": "no-cache",
    }
    for name, value in no_cache_headers.items():
        response.headers[name] = value
    return response
37811,
21947,
20399,
290,
584,
4465,
5499,
37811,
198,
6738,
302,
1330,
11055,
198,
6738,
42903,
1330,
39932,
11,
1459,
62,
1324,
355,
598,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
4130,
1324,
1330,
277,
66,
628,
198,
... | 2.898667 | 375 |
from setuptools import setup
from pathlib import Path

# Absolute path to the vendored sub-package used as a direct URL requirement.
subpackage_path = (Path(__file__).parent / "deps" / "subpackage").resolve()

setup(
    name="mainpackage",
    version="0.1",
    # BUG FIX: ``packages`` must be a list of package names. The original
    # passed the bare string "mainpackage", which setuptools iterates
    # character by character (i.e. packages 'm', 'a', 'i', ...).
    packages=["mainpackage"],
    install_requires=[
        # Direct reference to the local sub-package via a file:// git URL.
        f"subpackage @ git+file://{subpackage_path}#subpackage-0.1",
    ],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
7266,
26495,
62,
6978,
796,
357,
15235,
7,
834,
7753,
834,
737,
8000,
1220,
366,
10378,
82,
1,
1220,
366,
7266,
26495,
11074,
411,
6442,
3419,
198,
198... | 2.613445 | 119 |
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation of managing physical router configuration
"""
# Import kazoo.client before monkey patching
from cfgm_common.zkclient import ZookeeperClient
from gevent import monkey
monkey.patch_all()
from cfgm_common.vnc_kombu import VncKombuClient
import cgitb
import sys
import argparse
import requests
import ConfigParser
import socket
import time
from pprint import pformat
from pysandesh.sandesh_base import *
from pysandesh.sandesh_logger import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from cfgm_common.uve.virtual_network.ttypes import *
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, Module2NodeType, \
NodeTypeNames, INSTANCE_ID_DEFAULT
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionType as ConnType
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
import discoveryclient.client as client
from cfgm_common.exceptions import ResourceExhaustionError
from vnc_api.vnc_api import VncApi
from cfgm_common.uve.nodeinfo.ttypes import NodeStatusUVE, \
NodeStatus
from db import DBBaseDM, BgpRouterDM, PhysicalRouterDM, PhysicalInterfaceDM,\
ServiceInstanceDM, LogicalInterfaceDM, VirtualMachineInterfaceDM, \
VirtualNetworkDM, RoutingInstanceDM, GlobalSystemConfigDM, \
GlobalVRouterConfigDM, FloatingIpDM, InstanceIpDM, DMCassandraDB, PortTupleDM
from physical_router_config import PushConfigState
from cfgm_common.dependency_tracker import DependencyTracker
from cfgm_common.utils import cgitb_hook
def parse_args(args_str):
    '''
    Parse command-line (and optional ini config file) arguments for the
    device manager and return the resulting argparse namespace.

    Eg. python device_manager.py --rabbit_server localhost
                         -- rabbit_port 5672
                         -- rabbit_user guest
                         -- rabbit_password guest
                         --cassandra_server_list 10.1.2.3:9160
                         --api_server_ip 10.1.2.3
                         --api_server_port 8082
                         --api_server_use_ssl False
                         --zk_server_ip 10.1.2.3
                         --zk_server_port 2181
                         --collectors 127.0.0.1:8086
                         --disc_server_ip 127.0.0.1
                         --disc_server_port 5998
                         --http_server_port 8090
                         --log_local
                         --log_level SYS_DEBUG
                         --log_category test
                         --log_file <stdout>
                         --use_syslog
                         --syslog_facility LOG_USER
                         --cluster_id <testbed-name>
                         --repush_interval 15
                         --repush_max_interval 300
                         --push_delay_per_kb 0.01
                         --push_delay_max 100
                         --push_delay_enable True
                         [--reset_config]
    '''
    # Source any specified config/ini file
    # Turn off help, so we print all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())
    # Built-in fall-back defaults; config-file values override these, and
    # explicit CLI options override both.
    defaults = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': 'guest',
        'rabbit_password': 'guest',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'cassandra_server_list': '127.0.0.1:9160',
        'api_server_ip': '127.0.0.1',
        'api_server_port': '8082',
        'api_server_use_ssl': False,
        'zk_server_ip': '127.0.0.1',
        'zk_server_port': '2181',
        'collectors': None,
        'disc_server_ip': None,
        'disc_server_port': None,
        'http_server_port': '8096',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'cluster_id': '',
        'repush_interval': '15',
        'repush_max_interval': '600',
        'push_delay_per_kb': '0.01',
        'push_delay_max': '100',
        'push_delay_enable': 'True',
        'sandesh_send_rate_limit': SandeshSystem.get_sandesh_send_rate_limit(),
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
    }
    # Defaults for the [SECURITY] section (ifmap certificates).
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
        'ifmap_certauth_port': "8444",
    }
    # Defaults for the [KEYSTONE] section.
    ksopts = {
        'admin_user': 'user1',
        'admin_password': 'password1',
        'admin_tenant_name': 'default-domain',
    }
    # Defaults for the [CASSANDRA] section.
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None
    }
    # Merge values from each -c/--conf_file into the per-section defaults.
    if args.conf_file:
        config = ConfigParser.SafeConfigParser()
        config.read(args.conf_file)
        defaults.update(dict(config.items("DEFAULTS")))
        if ('SECURITY' in config.sections() and
                'use_certs' in config.options('SECURITY')):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))
    # Override with CLI options
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(cassandraopts)
    parser.set_defaults(**defaults)
    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument("--api_server_ip",
                        help="IP address of API server")
    parser.add_argument("--api_server_port",
                        help="Port of API server")
    parser.add_argument("--api_server_use_ssl",
                        help="Use SSL to connect with API server")
    parser.add_argument("--zk_server_ip",
                        help="IP address:port of zookeeper server")
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--disc_server_ip",
                        help="IP address of the discovery server")
    parser.add_argument("--disc_server_port",
                        help="Port of the discovery server")
    parser.add_argument("--http_server_port",
                        help="Port of local HTTP server")
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument("--admin_user",
                        help="Name of keystone admin user")
    parser.add_argument("--admin_password",
                        help="Password of keystone admin user")
    parser.add_argument("--admin_tenant_name",
                        help="Tenant name for keystone admin user")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument("--repush_interval",
                        help="time interval for config re push")
    parser.add_argument("--repush_max_interval",
                        help="max time interval for config re push")
    parser.add_argument("--push_delay_per_kb",
                        help="time delay between two successful commits per kb config size")
    parser.add_argument("--push_delay_max",
                        help="max time delay between two successful commits")
    parser.add_argument("--push_delay_enable",
                        help="enable delay between two successful commits")
    parser.add_argument("--cassandra_user",
                        help="Cassandra user name")
    parser.add_argument("--cassandra_password",
                        help="Cassandra password")
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    args = parser.parse_args(remaining_argv)
    # List-valued options sourced from the config file arrive as one
    # space-separated string; normalize them to real lists.
    if type(args.cassandra_server_list) is str:
        args.cassandra_server_list = args.cassandra_server_list.split()
    if type(args.collectors) is str:
        args.collectors = args.collectors.split()
    return args
# end parse_args
# end main
# end run_device_manager
# end server_main
# NOTE(review): ``server_main`` (and ``main``/``run_device_manager`` named by
# the end-markers above) are not visible in this chunk — presumably defined
# in the truncated portion of the file; confirm.
if __name__ == '__main__':
    server_main()
| [
2,
198,
2,
15069,
357,
66,
8,
1946,
7653,
9346,
27862,
11,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
198,
37811,
198,
1212,
2393,
4909,
7822,
286,
11149,
3518,
20264,
8398,
198,
37811,
198,
198,
2,
17267,
479,
1031,
2238,
13,
... | 2.160902 | 4,568 |
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from basic_test import OrderedUnitTestCase, separated
from openpaisdk import to_screen
| [
2,
15069,
357,
66,
8,
5413,
10501,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
17168,
13789,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
428,
3788,
290,
3917,
... | 3.986842 | 304 |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
from pathlib import Path
from unittest.mock import Mock, patch
import botocore
import pytest
from runtool.dispatcher import JobDispatcher, group_by_instance_type
# Canned SageMaker training-job API response used as a test fixture for the
# dispatcher tests below (shape mirrors a real boto3 response payload).
RESPONSE = {
    "TrainingJobArn": "arn:aws:sagemaker:eu-west-1:012345678901:training-job/test-60a848663fa1",
    "ResponseMetadata": {
        "RequestId": "00924112-abcd-4aed-6d4d-28190dba0b68",
        "HTTPStatusCode": 200,
        "HTTPHeaders": {
            "x-amzn-requestid": "00924112-abcd-4aed-6d4d-28190dba0b68",
            "content-type": "application/x-amz-json-1.1",
            "content-length": "92",
            "date": "Tue, 16 Mar 2021 11:19:06 GMT",
        },
        "RetryAttempts": 0,
    },
}
def client_side_effects(behaviour: list):
    """
    Emulates the behaviour of a `boto3.Sagemaker.client` for
    mocking purposes. The return value of this function is to
    be used as a `unittest.mock().return_value`.

    Takes a list of responses which will happen in sequence from
    last to first.

    If an item in the list is the string "busy", a
    `ResourceLimitExceeded` exception is triggered.
    If an item in the list is the string "throttle", a
    `ThrottlingException` exception is triggered.
    Otherwise the item will be returned.

    >>> side_effects = client_side_effects([{}, "throttle", "busy"])
    >>> side_effects()
    Traceback (most recent call last):
        ...
    botocore.exceptions.ClientError: An error occurred (ResourceLimitExceeded) when calling the  operation: Unknown
    >>> side_effects()
    Traceback (most recent call last):
        ...
    botocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the  operation: Unknown
    >>> side_effects()
    {}
    """
    # BUG FIX: the original returned ``client_side_effect`` without ever
    # defining it (NameError at call time). The closure below is
    # reconstructed from the docstring/doctests; the docstring also wrongly
    # said "throttle" raises ResourceLimitExceeded — it raises
    # ThrottlingException, as the doctest shows.
    def client_side_effect(*args, **kwargs):
        # Scripted responses are consumed from the end of the list
        # ("from last to first").
        action = behaviour.pop()
        if action == "busy":
            raise botocore.exceptions.ClientError(
                {"Error": {"Code": "ResourceLimitExceeded", "Message": "Unknown"}},
                "",
            )
        if action == "throttle":
            raise botocore.exceptions.ClientError(
                {"Error": {"Code": "ThrottlingException", "Message": "Unknown"}},
                "",
            )
        return action

    return client_side_effect
@patch.object(JobDispatcher, "timeout_with_printer")
@patch("time.sleep", return_value=None)
@patch.object(JobDispatcher, "timeout_with_printer")
@patch("time.sleep", return_value=None)
@patch.object(JobDispatcher, "timeout_with_printer")
@patch("time.sleep", return_value=None)
| [
2,
15069,
33448,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
198,
2,
921,
743,
407,
779,
428,
2393,
... | 2.78481 | 948 |
"""A full binary tree example"""
from dbcbet.dbcbet import pre, post, inv, bet, finitize, finitize_method
from dbcbet.helpers import state, argument_types
@inv(full_tree_invariant)
# NOTE(review): ``FullBinaryTree`` (and the invariant decorated above) are
# not visible in this chunk — the class definition appears truncated; confirm.
if __name__ == "__main__":
    bet(FullBinaryTree).run()
| [
37811,
32,
1336,
13934,
5509,
1672,
37811,
198,
198,
6738,
288,
15630,
11181,
13,
9945,
66,
11181,
1330,
662,
11,
1281,
11,
800,
11,
731,
11,
957,
270,
1096,
11,
957,
270,
1096,
62,
24396,
198,
6738,
288,
15630,
11181,
13,
16794,
36... | 2.645161 | 93 |
import argparse
import random
import numpy as np
if __name__ == "__main__":
    # Build the CLI: an image list file and the held-out test fraction.
    parser = argparse.ArgumentParser(description="Create train and test splits.")
    parser.add_argument("-i", "--image_list", help="List of images to split.")
    # NOTE(review): the default is a float (0.1) but a CLI-supplied value
    # arrives as a str — ``split`` presumably handles both; confirm.
    parser.add_argument(
        "-p", "--percent", help="Percent of data used to test.", default=0.1
    )
    args = parser.parse_args()
    # Fixed seeds make the split reproducible across runs.
    np.random.seed(0)
    random.seed(0)
    # NOTE(review): ``split`` is not defined in this chunk — presumably
    # defined earlier in the file; confirm.
    split(args.image_list, args.percent)
| [
11748,
1822,
29572,
198,
11748,
4738,
198,
198,
11748,
299,
32152,
355,
45941,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
30751,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
2625,
... | 2.757396 | 169 |
import heapq
# Ad-hoc driver: sample input and k for the frequency helper.
# NOTE(review): ``kFrequent`` is not defined in this chunk — presumably
# defined earlier in the file (heapq is imported above for it); confirm.
arr = [3, 0, 1, 0]
k = 1
li = kFrequent(arr, k)
print(li)
11748,
24575,
80,
628,
198,
3258,
796,
685,
18,
11,
657,
11,
352,
11,
657,
60,
198,
74,
796,
352,
198,
4528,
796,
479,
37,
46018,
7,
3258,
11,
479,
8,
198,
4798,
7,
4528,
8
] | 2 | 36 |
from math.utils import *
def main():
    """
    main() -> None

    Demo entry point: prints a default complex number and exercises the
    sumatoria / volume helpers.
    """
    # NOTE(review): ``sumatoria`` and ``calVolumenParalelepipedo`` are not
    # defined here; presumably pulled in by ``from math.utils import *`` —
    # confirm they exist in that module.
    myVariable = complex()  # complex() with no args is 0j
    print(myVariable)
    sumatoria(3)  # return value discarded; printed again two lines below
    print(calVolumenParalelepipedo(2, 3, 10))
    print(sumatoria(3))
    print(sumatoriaLambda(3))
    return None
def sumatoriaLambda(x):
    """Return the sum 1 + 2 + ... + x via Gauss' closed form.

    Args:
        x: upper bound of the summation.

    Returns:
        float: x * (x + 1) / 2 (true division always yields a float).
    """
    # IDIOM: PEP 8 discourages assigning a lambda to a name; an equivalent
    # ``def`` keeps the same callable interface and a useful __name__.
    return (x * (x + 1)) / 2
def count_substring(string, sub_string):
    """Count the non-overlapping occurrences of *sub_string* in *string*.

    Args:
        string (str): text to search in.
        sub_string (str): pattern to count.

    Returns:
        int: number of non-overlapping matches (same semantics as
        ``str.count``).
    """
    # str.count("") is len(string) + 1; mirror that edge case explicitly.
    if not sub_string:
        return len(string) + 1
    total = 0
    position = 0
    while True:
        found = string.find(sub_string, position)
        if found < 0:
            return total
        total += 1
        # Skip past the whole match so occurrences never overlap.
        position = found + len(sub_string)
if __name__ == "__main__":
    main()
    # Demo of count_substring with hard-coded values (interactive input
    # left commented out).
    string = "Hola Codo a Codo" # input().strip()
    sub_string = "codo" # input().strip()
    count = count_substring(string, sub_string)
    print(count)
    # NOTE(review): this rebinds the builtin ``str`` — harmless at script
    # exit, but worth renaming.
    str = "este es un string que tiene varias coincidencias de strings con el sub-str"
    sub_str = "string"
    print("La palabra [", sub_str, "] aparece ", count_substring(str, sub_str), " veces")
| [
6738,
10688,
13,
26791,
1330,
1635,
198,
198,
4299,
1388,
33529,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1388,
3419,
4613,
6045,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
616,
43015,
796,
3716,
3419,
198,
220,
220,
220,
... | 2.335714 | 420 |
from numba import *
# Module-level values read by the jitted function under test.
a = 10
b = 11
c = 12
# NOTE(review): ``jitter`` is not defined in this chunk and is not a numba
# public name — presumably defined in the truncated portion; confirm.
func = jitter()
assert func() == (20, 22)
| [
6738,
997,
7012,
1330,
1635,
198,
198,
64,
796,
838,
198,
65,
796,
1367,
198,
66,
796,
1105,
198,
198,
20786,
796,
474,
1967,
3419,
198,
30493,
25439,
3419,
6624,
357,
1238,
11,
2534,
8,
198
] | 2.361111 | 36 |
"""
A generic tab that displays a serie of items in a scrollable, searchable,
sortable list. It should be inherited, to actually provide methods that
insert items in the list, and that lets the user interact with them.
"""
import curses
import collections
import logging
from typing import Dict, Callable
from poezio import windows
from poezio.core.structs import Command
from poezio.decorators import refresh_wrapper
from poezio.tabs import Tab
# Module-level logger, namespaced to this module's import path.
log = logging.getLogger(__name__)
| [
37811,
198,
32,
14276,
7400,
326,
11298,
257,
1055,
494,
286,
3709,
287,
257,
10743,
540,
11,
2989,
540,
11,
198,
30619,
540,
1351,
13,
220,
632,
815,
307,
19552,
11,
284,
1682,
2148,
5050,
326,
198,
28463,
3709,
287,
262,
1351,
11,... | 3.70229 | 131 |
from django.urls import path
from . import views
# URL routes for the board app: list, create, and per-board detail /
# delete / edit views (keyed by integer board_id).
urlpatterns = [
    path('', views.home, name='board_home'),
    path('new/', views.new, name='board_new'),
    path('detail/<int:board_id>', views.detail, name='board_detail'),
    path('delete/<int:board_id>', views.delete, name='board_delete'),
    path('edit/<int:board_id>', views.edit, name='board_edit'),
]
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
11195,
11,
1438,
11639,
3526,
62,
11195,
33809,
198,
220,
220,
220,
3108,
... | 2.669118 | 136 |
#!/usr/bin/env python
# encoding: utf-8
"""
Advent of Code 2020 - Day 9 - Challenge 2
https://adventofcode.com/2020/day/9
Solution: 35602097
"""
__author__ = "Filippo Corradino"
__email__ = "filippo.corradino@gmail.com"
from day09_1 import find_invalid
# NOTE(review): ``main`` is not defined in this chunk and ``find_invalid``
# is imported but unused as shown — the function body appears truncated;
# confirm.
if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
37811,
198,
2782,
1151,
286,
6127,
12131,
532,
3596,
860,
532,
13879,
362,
198,
5450,
1378,
324,
1151,
1659,
8189,
13,
785,
14,
42334,
14,
820,
... | 2.504202 | 119 |
# PreviesSearchPage.py
from selenium import webdriver
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
import BusinessPaths
import time
import PrettifyPage
import CreateDict
import json
import sys
# NOTE(review): ``PreviewSearchPage`` is not defined in this chunk —
# presumably defined in the truncated portion of the file; confirm.
if __name__ == '__main__':
    PreviewSearchPage()
| [
2,
43280,
444,
18243,
9876,
13,
9078,
198,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
732... | 3.22093 | 86 |
from typing import List, Union
from pydantic import BaseModel, validator
from app.event_models import EventModel
# Event-name constants (each value mirrors its name) referenced by the
# event models in this module.
JOIN_ROOM = "JOIN_ROOM"
REJOIN_ROOM = "REJOIN_ROOM"
ROOM_JOINED = "ROOM_JOINED"
NEW_ROOM_JOINED = "NEW_ROOM_JOINED"
KICK_PLAYER = "KICK_PLAYER"
PLAYER_KICKED = "PLAYER_KICKED"
PLAYER_DISCONNECTED = "PLAYER_DISCONNECTED"
HOST_DISCONNECTED = "HOST_DISCONNECTED"
START_GAME = "START_GAME"
GAME_STARTED = "GAME_STARTED"
| [
6738,
19720,
1330,
7343,
11,
4479,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
11,
4938,
1352,
198,
198,
6738,
598,
13,
15596,
62,
27530,
1330,
8558,
17633,
198,
198,
45006,
1268,
62,
13252,
2662,
796,
366,
45006,
1268,
62,
13... | 2.283505 | 194 |
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
# Class-name lookup table: index in the tuple == integer label id.
# NOTE(review): these are the 100 CIFAR-100 fine-label names — confirm the
# ordering matches the dataset loader used with torchvision above.
classes = ('beaver','dolphin','otter','seal','whale','aquarium fish','flatfish','ray','shark','trout','orchids','poppies','roses','sunflowers','tulips','bottles','bowls','cans','cups','plates','apples','mushrooms','oranges','pears','sweet peppers','clock','computer keyboard','lamp','telephone','television','bed','chair','couch','table','wardrobe','bee','beetle','butterfly','caterpillar','cockroach','bear','leopard','lion','tiger','wolf','bridge','castle','house','road','skyscraper','cloud','forest','mountain','plain','sea','camel','cattle','chimpanzee','elephant','kangaroo','fox','porcupine','possum','raccoon','skunk','crab','lobster','snail','spider','worm','baby','boy','girl','man','woman','crocodile','dinosaur','lizard','snake','turtle','hamster','mouse','rabbit','shrew','squirrel','maple','oak','palm','pine','willow','bicycle','bus','motorcycle','pickup truck','train','lawn-mower','rocket','streetcar','tank','tractor')
# function to show an image
11748,
28034,
198,
11748,
28034,
10178,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
198,
37724,
796,
19203,
1350,
8770,
4170... | 3.175287 | 348 |
# Generated by Django 2.1.7 on 2019-04-26 09:16
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
22,
319,
13130,
12,
3023,
12,
2075,
7769,
25,
1433,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from keras_efficientnets.efficientnet import *
from keras_efficientnets.config import BlockArgs
__version__ = '0.1.6.1'
| [
6738,
41927,
292,
62,
16814,
45938,
13,
16814,
3262,
1330,
1635,
198,
6738,
41927,
292,
62,
16814,
45938,
13,
11250,
1330,
9726,
42035,
198,
198,
834,
9641,
834,
796,
705,
15,
13,
16,
13,
21,
13,
16,
6,
198
] | 3.102564 | 39 |
from pikapi.spiders.spider_by_browser import *
from pikapi.spiders.spider_by_cookie import *
from pikapi.spiders.spider_by_req import *
# Registry of every proxy-list spider implementation; consumers iterate this
# list to crawl all providers. The classes come from the star imports above.
all_providers = [
    SpiderXiladaili,
    SpiderYqie,
    SpiderZdaye,
    SpiderSuperfastip,
    SpiderXsdaili,
    SpiderCrossincode,
    SpiderTxt,
    SpiderKxdaili,
    SpiderJiangxianli,
    SpiderProxylistplus,
    SpiderProxyListen,
    SpiderIp3366,
    Spider31f,
    SpiderFeilong,
    SpiderIphai,
    Spider89ip,
    SpiderCnProxy,
    SpiderData5u,
    SpiderMrhinkydink,
    SpiderKuaidaili,
    SpiderIpaddress,
    SpiderXici,
    Spider66ipcn,
    Spider66ip,
    SpiderGoubanjia,
    SpiderCoolProxy,
]
| [
6738,
279,
1134,
15042,
13,
2777,
4157,
13,
2777,
1304,
62,
1525,
62,
40259,
1330,
1635,
198,
6738,
279,
1134,
15042,
13,
2777,
4157,
13,
2777,
1304,
62,
1525,
62,
44453,
1330,
1635,
198,
6738,
279,
1134,
15042,
13,
2777,
4157,
13,
... | 2.188119 | 303 |
import argparse
import os
from scipy.io import loadmat
import numpy as np
import cv2
import matplotlib
matplotlib.use('agg') # use matplotlib without GUI support
import matplotlib.pyplot as plt
# CLI configuration; path defaults point at the author's machines.
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='/home/xuchong/Projects/occ_edge_order/data/dataset_real/NYUv2/data/val_occ_order_raycasting_woNormal_avgROI_1mm')
parser.add_argument('--gt_depth', type=str, default='/space_sdd/NYU/nyuv2_depth.npy')
parser.add_argument('--refine_dir', type=str,
                    default='/space_sdd/NYU/depth_refine/depth1_grad1_occ0.1_change1_1e-5/eigen/depth_npy')
opt = parser.parse_args()
# load rgb list
img_list = sorted([name for name in os.listdir(opt.data_dir) if name.endswith("-rgb.png")])
# load gt depth
gt_depths = np.load(opt.gt_depth)
# load initial depth map list
# NOTE(review): ``read_eigen`` is not defined or imported in this chunk —
# presumably defined earlier in the file; confirm.
init_depths = read_eigen()
# load refined depth map list
refine_list = sorted(os.listdir(opt.refine_dir))
# Crop bounds [row_min, row_max, col_min, col_max]; the same 21:461, 25:617
# slices are applied to every depth map below.
eigen_crop = [21, 461, 25, 617]
index = 120  # sample index to visualize
row = 300  # image row whose depth profile is plotted
img = cv2.imread(os.path.join(opt.data_dir, img_list[index]), -1)
print('img shape is {}'.format(img.shape))
gt_depth = gt_depths[index][21:461, 25:617]
print('gt depth shape is {}'.format(gt_depth.shape))
init_depth = init_depths[index][21:461, 25:617]
print('init depth shape is {}'.format(init_depth.shape))
refine_depth = np.load(os.path.join(opt.refine_dir, refine_list[index]))[21:461, 25:617]
print('refine depth shape is {}'.format(refine_depth.shape))
# draw the figure
fig, (ax1, ax2) = plt.subplots(nrows=2)
# Brighten a thin band around the selected row so it is visible in the image.
img[row - 3: row + 3, :, :] = (img[row - 3: row + 3, :, :] + 255) / 2
ax1.imshow(img)
t = np.arange(592)  # 592 = 617 - 25 columns after the crop
# red: ground truth, blue: initial depth, green: refined depth
ax2.plot(t, gt_depth[row, t], 'r-', t, init_depth[row, t], 'b-', t, refine_depth[row, t], 'g-')
# Match the profile plot's aspect ratio to the image axes above it.
asp = np.diff(ax2.get_xlim())[0] / np.diff(ax2.get_ylim())[0]
asp /= np.abs(np.diff(ax1.get_xlim())[0] / np.diff(ax1.get_ylim())[0])
ax2.set_aspect(asp)
fig.savefig('vis_row_depth.eps')
plt.close(fig)
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
6738,
629,
541,
88,
13,
952,
1330,
3440,
6759,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
9460,
... | 2.347774 | 831 |
import os
import os.path as osp
import re
import numpy as np
from numpy import array, int32
from scipy.io import loadmat
from .base import BaseDataset
| [
11748,
28686,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
11748,
302,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
7177,
11,
493,
2624,
198,
6738,
629,
541,
88,
13,
952,
1330,
3440,
6759,
198,
198,
6738,... | 3.08 | 50 |
"""
A list of functions that return gameObject classes of primitive shapes.
"""
from math import cos, sin, pi
from .object2d import Object2D
# ----------------------------------------------------------------------------------------
def draw_square(x:float, y:float, height:float = 1, width:float = 1, fill:bool = False):
	"""
	Returns a Object2D class that draws a square.

	Arguments:
	x      : float : The x cord of the bottom-left corner.
	y      : float : The y cord of the bottom-left corner.
	height : float : The height of the square.
	width  : float : The width of the square.
	fill   : bool  : Should the shape be filled.
	"""
	# Vertices in counter-clockwise order starting at the bottom-left corner.
	bottom_left = [x, y]
	bottom_right = [x + width, y]
	top_right = [x + width, y + height]
	top_left = [x, y + height]
	cords = [bottom_left, bottom_right, top_right, top_left]

	if fill:
		# Two triangles sharing the bottom-left / top-right diagonal.
		faces = [[0, 1, 2], [0, 3, 2]]
		return Object2D(cords, faces, draw_type='triangles')
	# Outline: each side as a separate edge, closing back to vertex 0.
	outline = [[0, 1], [1, 2], [2, 3], [3, 0]]
	return Object2D(cords, outline, draw_type='lines')
# ----------------------------------------------------------------------------------------
def draw_triangle(cords:list, fill=False):
	"""
	Returns a Object2D class that draws a triangle.

	Arguments:
	cords : list : The x and y cords for each vertex of the triangle, should look like [[x1,y1]...]
	fill  : bool : Should the shape be filled.

	Raises:
	TypeError : if ``cords`` does not contain exactly 3 vertices.
	"""
	# BUG FIX: the original only rejected MORE than 3 cords, silently
	# accepting 1 or 2 and producing a degenerate object; a triangle needs
	# exactly 3 vertices.
	if len(cords) != 3:
		raise TypeError("At primitives.draw_triangle(): Expected exactly 3 cords for a triangle, got %d." % len(cords))

	if fill:
		return Object2D(cords, [[0,1,2]], draw_type='triangles')
	else:
		return Object2D(cords, [[0,1],[1,2],[2,0]], draw_type='lines')
# ----------------------------------------------------------------------------------------
def draw_circle(center_x:float, center_y:float, radius:float = 0.3, segments:int = 360, fill:bool=False):
	"""
	Returns an Object2D class that draws a circle.

	Arguments:
	center_x : float : The x cord for the center of the circle.
	center_y : float : The y cord for the center of the circle.
	radius   : float : The radius of the circle.
	segments : int   : How many segments to make the circle from.
	fill     : bool  : Should the shape be filled.
	"""
	# One rim vertex per segment, evenly spaced around the circumference.
	cords = []
	for i in range(segments):
		theta = (2 * pi * i)/segments
		cords.append([radius * cos(theta) + center_x,
		              radius * sin(theta) + center_y])

	if fill:
		# Triangle fan pivoting on the center vertex (index 0 after insert).
		cords.insert(0, [center_x, center_y])
		edges = [[0, i + 1, i + 2] for i in range(len(cords) - 2)]
		edges.append([0, segments, 1])  # close the fan back to the first rim vertex
		return Object2D(cords, edges, draw_type='triangles')

	# Outline: consecutive rim vertices, then close the loop.
	edges = [[i, i + 1] for i in range(len(cords) - 1)]
	edges.append([segments - 1, 0])
	return Object2D(cords, edges, draw_type='lines')
# ----------------------------------------------------------------------------------------
def draw_arc(center_x:float, center_y:float, radius:float = 0.3, arc_angle:float = 90, start_angle:float = 0, segments:int = 360, fill:bool=False):
	"""
	Returns an Object2D class that draws an arc; angles are given in degrees,
	not radians.

	Arguments:
	center_x    : float : The x cord for the center of the arc.
	center_y    : float : The y cord for the center of the arc.
	radius      : float : The radius of the arc.
	arc_angle   : float : The angular span of the arc, in degrees.
	start_angle : float : The angle the arc starts from, in degrees.
	segments    : int   : How many segments to make the arc from.
	fill        : bool  : Should the shape be filled.
	"""
	edges = []
	cords = []
	# BUG FIX: degrees convert to radians with pi/180. The original offset
	# used ``start_angle * 90 / 180`` (a factor of 0.5), which placed the
	# arc at the wrong starting angle for any start_angle other than 0.
	start_rad = start_angle * pi / 180
	for i in range(segments):
		theta = ((arc_angle * pi * i / 180) / segments) + start_rad  # current angle in radians
		x = radius * cos(theta) + center_x  # Get the x cord
		y = radius * sin(theta) + center_y  # Get the y cord
		cords.append([x, y])

	if fill:
		# BUG FIX: the fan pivot for a filled arc is the arc's center. The
		# original inserted ``[center_x, center_y - (center_y - cords[0][1])]``,
		# which reduces to ``[center_x, cords[0][1]]`` and skewed the fill.
		cords.insert(0, [center_x, center_y])
		for i in range(len(cords) - 2):
			edges.append([0, i + 1, i + 2])
		return Object2D(cords, edges, draw_type='triangles')
	else:
		for i in range(len(cords) - 1):
			edges.append([i, i + 1])
		return Object2D(cords, edges, draw_type='lines')
| [
37811,
201,
198,
197,
32,
1351,
286,
5499,
326,
1441,
983,
10267,
6097,
286,
20049,
15268,
13,
201,
198,
201,
198,
37811,
201,
198,
201,
198,
6738,
10688,
1330,
8615,
11,
7813,
11,
31028,
201,
198,
201,
198,
6738,
764,
15252,
17,
67... | 2.664434 | 1,642 |
# Copyright (c) 2021, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms, as
# designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Sub-Module to manage OCI Networking"""
from mysqlsh.plugin_manager import plugin_function
from mds_plugin import core, configuration
# cSpell:ignore vcns
def format_load_balancer_listing(items, current=None) -> str:
    """Formats a given list of load balancer objects in a human readable form

    Args:
        items: Either a list of load balancer objects or a single object
        current (str): OCID of the current item; its row is marked with '*'

    Returns:
        The load balancers formatted as str, one row per item
    """
    # If a single item was given, wrap it in a list
    if not isinstance(items, list):
        items = [items]
    # return objects in READABLE text output
    out = ""
    item_no = 1
    for i in items:
        # Mark the row of the current item with a leading '*'
        index = f"*{item_no:>3} " if current == i.id else f"{item_no:>4} "
        # Build the IP address list; public addresses get a '*' marker.
        # Fix: the conditional must only select the separator. Previously it
        # bound to the whole 'ip.ip_address + "*, "' expression, so private
        # IP addresses were silently dropped from the listing.
        ips = ""
        for ip in i.ip_addresses:
            ips += ip.ip_address + ("*, " if ip.is_public else ", ")
        # Strip the trailing ", " separator
        if len(ips) > 2:
            ips = ips[0:-2]
        out += (index +
                core.fixed_len(i.display_name, 24, ' ', True) +
                core.fixed_len(i.lifecycle_state, 8, ' ') +
                core.fixed_len(f"{ips}", 24, '\n'))
        item_no += 1
    return out
@plugin_function('mds.list.networks')
def list_networks(**kwargs):
    """Lists all networks of the given compartment
    Args:
        **kwargs: Additional options
    Keyword Args:
        public_subnet (bool): Whether only public or private subnets should be
            considered
        compartment_id (str): OCID of the parent compartment.
        config (object): An OCI config object or None.
        return_formatted (bool): If set to true, a list object is returned.
        check_privileges (bool): Checks if the user has privileges for the
            subnet
    Returns:
        a network object
    """
    public_subnet = kwargs.get("public_subnet")
    compartment_id = kwargs.get("compartment_id")
    config = kwargs.get("config")
    return_formatted = kwargs.get("return_formatted", True)
    check_privileges = kwargs.get("check_privileges", False)

    try:
        # Resolve the effective config and compartment first
        config = configuration.get_current_config(config=config)
        compartment_id = configuration.get_current_compartment_id(
            compartment_id=compartment_id, config=config)

        import oci.exceptions

        # Client used for all VCN lookups below
        net_client = core.get_oci_virtual_network_client(config=config)

        # Fetch every VCN of the compartment
        networks = net_client.list_vcns(
            compartment_id=compartment_id).data

        if public_subnet is not None:
            # Keep only VCNs exposing a suitable (public/private) subnet;
            # VCNs we are not allowed to inspect are silently dropped.
            usable = []
            for network in networks:
                try:
                    if network_has_subnet(
                            network=network, compartment_id=compartment_id,
                            config=config,
                            public_subnet=public_subnet,
                            check_privileges=check_privileges):
                        usable.append(network)
                except oci.exceptions.ServiceError:
                    pass
            networks = usable

        if return_formatted:
            return format_network_listing(networks)
        return oci.util.to_dict(networks)
    except ValueError as e:
        print(f"ERROR: {str(e)}")
        return
@plugin_function('mds.get.network')
def get_network(**kwargs):
    """Returns a network object
    If multiple or no networks are available in the current compartment,
    let the user select a different compartment
    Args:
        **kwargs: Additional options
    Keyword Args:
        network_name (str): The display_name of the network
        network_id (str): The OCID of the network
        public_subnet (bool): Whether only public or private subnets should be
            considered
        compartment_id (str): OCID of the parent compartment.
        config (object): An OCI config object or None.
        interactive (bool): Whether to query the user for input
        ignore_current (bool): Whether to ignore the current
    Returns:
        a network object
    """
    network_name = kwargs.get("network_name")
    network_id = kwargs.get("network_id")
    public_subnet = kwargs.get("public_subnet")
    compartment_id = kwargs.get("compartment_id")
    config = kwargs.get("config")
    interactive = kwargs.get("interactive", True)
    ignore_current = kwargs.get("ignore_current", False)
    # Get the active config and compartment
    try:
        config = configuration.get_current_config(config=config)
        compartment_id = configuration.get_current_compartment_id(
            compartment_id=compartment_id, config=config)
        import oci.exceptions
        from mds_plugin import compartment
        # Create VirtualNetworkClient
        virtual_network = core.get_oci_virtual_network_client(
            config=config)
        # If a specific network was specified, return this network
        if network_id:
            vcn = virtual_network.get_vcn(vcn_id=network_id).data
            return vcn
        # Loop until the user selected a compartment with vcns
        vcns = []
        # VCNs already found unusable (no matching subnet type, or 404 when
        # inspected); they are filtered out on subsequent passes.
        rejected_vcns = []
        while len(vcns) == 0:
            try:
                # List the virtual networks, filter by network_name if given
                vcns = virtual_network.list_vcns(
                    compartment_id=compartment_id,
                    display_name=network_name).data
                # Filter out rejected VCNs
                vcns = [n for n in vcns if n not in rejected_vcns]
                if len(vcns) == 0:
                    # Nothing usable here; in interactive mode ask the user
                    # for another compartment, otherwise give up.
                    network_comp = compartment.get_compartment_by_id(
                        compartment_id=compartment_id, config=config)
                    print(f"The compartment {network_comp.name} does not "
                          "contain a suitable virtual network.")
                    if interactive:
                        print("Please select another compartment.\n")
                    else:
                        return
                    compartment_id = compartment.get_compartment_id(
                        compartment_id=compartment_id, config=config)
                    if compartment_id == None:
                        print("Operation cancelled.")
                        return
                else:
                    # Filter out all sub-nets that are not conforming to the
                    # public_subnet options
                    if public_subnet is not None:
                        # Loop over VCNs to see if access is granted
                        good_vcns = []
                        for vcn in vcns:
                            newly_rejected_vcns = []
                            try:
                                if network_has_subnet(
                                        network=vcn,
                                        compartment_id=compartment_id,
                                        config=config,
                                        public_subnet=public_subnet):
                                    good_vcns.append(vcn)
                                else:
                                    newly_rejected_vcns.append(vcn)
                            except oci.exceptions.ServiceError as e:
                                # 404 means the VCN is gone/inaccessible;
                                # remember it so it is not retried.
                                if e.status == 404:
                                    newly_rejected_vcns.append(vcn)
                            rejected_vcns = rejected_vcns + newly_rejected_vcns
                        vcns = good_vcns
            except oci.exceptions.ServiceError as e:
                if e.code == "NotAuthorizedOrNotFound":
                    print(f'You do not have privileges to list the '
                          f'networks of this compartment.')
                else:
                    # NOTE(review): network_comp is only bound in the
                    # len(vcns) == 0 branch above; if list_vcns raises on the
                    # very first pass this would hit an unbound local —
                    # confirm and guard if needed.
                    print(f'Could not list networks of compartment '
                          f'{network_comp.name}\n')
                    print(
                        f'ERROR: {e.message}. (Code: {e.code}; '
                        f'Status: {e.status})')
                vcns = []
            except (ValueError, oci.exceptions.ClientError) as e:
                print(f'ERROR: {e}')
                vcns = []
        # If there is a single network in this compartment, return this
        # one if it matches the network_name (if given)
        if len(vcns) == 1 and not ignore_current:
            return vcns[0]
        if not interactive:
            print("Error: There are multiple virtual networks in this "
                  "compartment.")
            return
        # Let the user choose from the list
        vcn = core.prompt_for_list_item(
            item_list=vcns, prompt_caption=("Please enter the name or index "
                                            "of the virtual network: "),
            item_name_property="display_name",
            print_list=True)
        return vcn
    except oci.exceptions.ServiceError as e:
        if e.code == "NotAuthorizedOrNotFound":
            print(f'You do not have privileges to access this network.')
        else:
            print(f'Could not get the network.')
            print(
                f'ERROR: {e.message}. (Code: {e.code}; '
                f'Status: {e.status})')
    except (ValueError, oci.exceptions.ClientError) as e:
        print(f'ERROR: {e}')
@plugin_function('mds.list.subnets')
def list_subnets(**kwargs):
    """Lists all subnets of the given network
    Args:
        **kwargs: Additional options
    Keyword Args:
        network_id (str): The OCID of the parent network_id
        public_subnet (bool): Whether only public subnets should be considered
        availability_domain (str): The name if the availability_domain
        ignore_current_network (bool): Whether to ignore the current network
        compartment_id (str): OCID of the parent compartment.
        config (object): An OCI config object or None.
        interactive (bool): Whether to query the user for input
        return_formatted (bool): If set to true, a list object is returned.
    Returns:
        A list of subnets
    """
    network_id = kwargs.get("network_id")
    public_subnet = kwargs.get("public_subnet")
    # availability_domain = kwargs.get("availability_domain")
    ignore_current_network = kwargs.get("ignore_current_network")
    compartment_id = kwargs.get("compartment_id")
    config = kwargs.get("config")
    interactive = kwargs.get("interactive", True)
    return_formatted = kwargs.get("return_formatted", True)
    # Get the active config and compartment
    try:
        config = configuration.get_current_config(config=config)
        # compartment_id = configuration.get_current_compartment_id(
        #     compartment_id=compartment_id, config=config)
        if not ignore_current_network:
            network_id = configuration.get_current_network_id(
                network_id=network_id, config=config)
        import oci.exceptions
        from mds_plugin import compartment
        # Create VirtualNetworkClient
        virtual_network = core.get_oci_virtual_network_client(
            config=config)
        # If a subnet_id was given, return the subnet of that subnet_id
        # if subnet_id is not None:
        #     try:
        #         return virtual_network.get_subnet(subnet_id=subnet_id).data
        #     except oci.exceptions.ServiceError as e:
        #         print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
        #         return
        #     except (ValueError, oci.exceptions.ClientError) as e:
        #         print(f'ERROR: {e}')
        #         return
        # Resolve the parent network, prompting the user if needed
        network = get_network(network_id=network_id,
                              compartment_id=compartment_id, config=config,
                              public_subnet=public_subnet, interactive=interactive)
        if network is None:
            return
        network_name = network.display_name if network.display_name else \
            network.id
        network_compartment = network.compartment_id
        # Get the compartment
        # NOTE(review): this rebinds the imported 'compartment' module name to
        # a Compartment object; it works because the module is not used again
        # below, but the shadowing is fragile — consider renaming the local.
        compartment = compartment.get_compartment_by_id(
            compartment_id=network_compartment, config=config)
        if compartment is None:
            return
        # If no availability_domain was specified, use a random one
        # if availability_domain is None:
        #     availability_domain = compartment.get_availability_domain(
        #         compartment_id=compartment_id,
        #         availability_domain=availability_domain, config=config)
        subnets = virtual_network.list_subnets(
            compartment_id=network_compartment, vcn_id=network.id).data
        # Filter subnets by Availability Domain, None means the subnet
        # spans across all Availability Domains
        # subnets = [s for s in subnets
        #            if s.availability_domain == availability_domain or
        #            s.availability_domain is None]
        # Filter out all sub-nets that are not conforming to the
        # public_subnet options
        if public_subnet is not None and public_subnet:
            out = "All public "
            subnets = [s for s in subnets
                       if subnet_is_public(subnet=s, config=config)]
        elif public_subnet is not None and not public_subnet:
            out = "All private "
            subnets = [s for s in subnets
                       if not subnet_is_public(subnet=s, config=config)]
        else:
            out = "All "
        out += f"subnets of Network '{network_name}' in compartment " + \
            f"'{compartment.name}':\n\n"
        if return_formatted:
            return out + format_subnet_listing(subnets)
        else:
            return oci.util.to_dict(subnets)
    except oci.exceptions.ServiceError as e:
        print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
        return
    except (ValueError, oci.exceptions.ClientError) as e:
        print(f'ERROR: {e}')
        return
@plugin_function('mds.get.subnet')
def get_subnet(**kwargs):
    """Returns a subnet object
    If multiple or no networks are available in the current compartment,
    let the user select a different compartment
    Args:
        **kwargs: Additional options
    Keyword Args:
        subnet_name (str): The display_name of the subnet
        subnet_id (str): The OCID of the subnet
        network_id (str): The OCID of the parent network_id
        public_subnet (bool): Whether only public subnets should be considered
        availability_domain (str): The name if the availability_domain
        compartment_id (str): OCID of the parent compartment.
        config (object): An OCI config object or None.
        interactive (bool): Whether to query the user for input
    Returns:
        a subnet object
    """
    subnet_name = kwargs.get("subnet_name")
    subnet_id = kwargs.get("subnet_id")
    network_id = kwargs.get("network_id")
    public_subnet = kwargs.get("public_subnet")
    availability_domain = kwargs.get("availability_domain")
    compartment_id = kwargs.get("compartment_id")
    config = kwargs.get("config")
    interactive = kwargs.get("interactive", True)
    # Get the active config and compartment
    try:
        config = configuration.get_current_config(config=config)
        compartment_id = configuration.get_current_compartment_id(
            compartment_id=compartment_id, config=config)
        network_id = configuration.get_current_network_id(
            network_id=network_id, config=config)
    except ValueError as e:
        print(f"ERROR: {str(e)}")
        return
    import oci.exceptions
    from mds_plugin import compartment
    import re
    # Create VirtualNetworkClient
    virtual_network = core.get_oci_virtual_network_client(
        config=config)
    # If a subnet_id was given, return the subnet of that subnet_id
    if subnet_id:
        try:
            return virtual_network.get_subnet(subnet_id=subnet_id).data
        except oci.exceptions.ServiceError as e:
            print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
            return
        except (ValueError, oci.exceptions.ClientError) as e:
            print(f'ERROR: {e}')
            return
    # If no network_id was given, query the user for one
    network = get_network(
        network_id=network_id,
        compartment_id=compartment_id, config=config,
        public_subnet=public_subnet, interactive=interactive)
    if network is None:
        return
    network_id = network.id
    compartment_id = network.compartment_id
    # If no availability_domain was specified, use a random one
    if availability_domain is None:
        availability_domain_obj = compartment.get_availability_domain(
            compartment_id=compartment_id,
            random_selection=True,
            availability_domain=availability_domain, config=config,
            interactive=False, return_python_object=True)
        availability_domain = availability_domain_obj.name
    try:
        subnets = virtual_network.list_subnets(
            compartment_id=compartment_id, vcn_id=network_id).data
        # Filter subnets by Availability Domain, None means the subnet
        # spans across all Availability Domains
        subnets = [s for s in subnets
                   if s.availability_domain == availability_domain or
                   s.availability_domain is None]
        # Filter out all sub-nets that are not conforming to the
        # public_subnet options
        if public_subnet:
            subnets = [s for s in subnets
                       if subnet_is_public(subnet=s, config=config)]
        elif public_subnet is not None and not public_subnet:
            subnets = [s for s in subnets
                       if not subnet_is_public(subnet=s, config=config)]
        # If there are several subnets, let the user choose
        if len(subnets) == 0:
            return
        elif len(subnets) == 1:
            # If there is exactly 1 subnet, return that
            return subnets[0]
        print("\nPlease choose a subnet:\n")
        i = 1
        for s in subnets:
            # Truncate long display names to 24 chars and strip line breaks
            # so the listing stays aligned.
            s_name = re.sub(r'[\n\r]', ' ',
                            s.display_name[:22] + '..'
                            if len(s.display_name) > 24
                            else s.display_name)
            print(f"{i:>4} {s_name:24} {s.cidr_block:15}")
            i += 1
        print()
        return core.prompt_for_list_item(
            item_list=subnets, prompt_caption=(
                "Please enter the name or index of the subnet: "),
            item_name_property="display_name",
            given_value=subnet_name)
    except oci.exceptions.ServiceError as e:
        print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
        return
    except Exception as e:
        print(f'ERROR: {e}')
        return
@plugin_function('mds.list.loadBalancers', shell=True, cli=True, web=True)
def list_load_balancers(**kwargs):
    """Lists load balancers
    This function will list all load balancers of the compartment with the
    given compartment_id.
    Args:
        **kwargs: Optional parameters
    Keyword Args:
        compartment_id (str): OCID of the parent compartment
        config (dict): An OCI config object or None
        config_profile (str): The name of an OCI config profile
        interactive (bool): Indicates whether to execute in interactive mode
        return_type (str): "STR" will return a formatted string, "DICT" will
            return the object converted to a dict structure and "OBJ" will
            return the OCI Python object for internal plugin usage
        raise_exceptions (bool): If set to true exceptions are raised
    Returns:
        Based on return_type
    """
    compartment_id = kwargs.get("compartment_id")
    config = kwargs.get("config")
    config_profile = kwargs.get("config_profile")
    interactive = kwargs.get("interactive", core.get_interactive_default())

    # Interactive callers default to a formatted string, others to a dict
    return_type = kwargs.get(
        "return_type",
        core.RETURN_STR if interactive else core.RETURN_DICT)
    # Internal calls (RETURN_OBJ) always propagate exceptions
    raise_exceptions = kwargs.get(
        "raise_exceptions",
        True if return_type == core.RETURN_OBJ else not interactive)

    try:
        config = configuration.get_current_config(
            config=config, config_profile=config_profile,
            interactive=interactive)
        compartment_id = configuration.get_current_compartment_id(
            compartment_id=compartment_id, config=config)

        import oci.exceptions
        try:
            # Fetch all load balancers of the compartment
            lb_client = core.get_oci_load_balancer_client(config=config)
            balancers = lb_client.list_load_balancers(
                compartment_id=compartment_id).data

            # Drop entries that have already been deleted
            balancers = [lb for lb in balancers
                         if lb.lifecycle_state != "DELETED"]

            return core.oci_object(
                oci_object=balancers,
                return_type=return_type,
                format_function=format_load_balancer_listing)
        except oci.exceptions.ServiceError as e:
            if raise_exceptions:
                raise
            print(f'ERROR: {e.message}. (Code: {e.code}; Status: {e.status})')
    except Exception as e:
        if raise_exceptions:
            raise
        print(f'ERROR: {e}')
2,
15069,
357,
66,
8,
33448,
11,
18650,
290,
14,
273,
663,
29116,
13,
201,
198,
2,
201,
198,
2,
770,
1430,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
201,
198,
2,
340,
739,
262,
2846,
286,
262,
22961,
... | 2.169614 | 10,801 |
"""
This file is a part of My-PyChess application.
In this file, we define some basic gui-related functions
For a better understanding of the variables used here, checkout docs.txt
"""
import pygame
from tools.loader import CHESS, BACK, putNum, putLargeNum
from tools import sound
# Apply 'convert_alpha()' on all pieces to optimise images for speed.
# This function displays the choice menu when called, taking user input.
# Returns the piece chosen by the user
# This function draws the board
# This funtion draws all pieces onto the board
# This function displays the prompt screen when a user tries to quit
# User must choose Yes or No, this function returns True or False respectively
# This function shows a small animation when the game starts, while also
# optimising images for display - call only once per game
| [
37811,
201,
198,
1212,
2393,
318,
257,
636,
286,
2011,
12,
20519,
7376,
824,
3586,
13,
201,
198,
818,
428,
2393,
11,
356,
8160,
617,
4096,
11774,
12,
5363,
5499,
201,
198,
201,
198,
1890,
257,
1365,
4547,
286,
262,
9633,
973,
994,
... | 3.642857 | 238 |
from django.urls import path
from .views import (
TeamListView, TeamDetailView, CreateTeamView, InviteTeamMemberView,
JoinTeamView, ChangeTeamMemberRoleView, DeleteTeamMemberRoleView,
JoinTeamUserView, DeleteTeamView
)
# URL routes for the team app. Route names follow the "team-<action>"
# convention so templates and views can reverse() them consistently.
urlpatterns = [
    path('', TeamListView.as_view(), name='team-list'),
    path('create/', CreateTeamView.as_view(), name='team-create'),
    path('<int:pk>/', TeamDetailView.as_view(), name='team-detail'),
    # Team administration (invite/delete/manage members), keyed by team pk
    path('<int:pk>/invite/', InviteTeamMemberView.as_view(),
         name='team-invite'),
    path('<int:pk>/delete/', DeleteTeamView.as_view(),
         name='team-delete'),
    path('<int:pk>/change-member/', ChangeTeamMemberRoleView.as_view(),
         name='team-change_member'),
    path('<int:pk>/delete-member/', DeleteTeamMemberRoleView.as_view(),
         name='team-delete_member'),
    # Join flows: by user id, or by team id plus an invitation secret
    path('join/<int:pk>/', JoinTeamUserView.as_view(),
         name='team-join_user'),
    path('join/<int:pk>/<str:secret>/', JoinTeamView.as_view(),
         name='team-join'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
33571,
1330,
357,
198,
220,
220,
220,
4816,
8053,
7680,
11,
4816,
11242,
603,
7680,
11,
13610,
15592,
7680,
11,
10001,
578,
15592,
27608,
7680,
11,
198,
220,
220,
220,... | 2.477941 | 408 |
import time
import board
import pwmio
from adafruit_motor import servo
| [
11748,
640,
198,
11748,
3096,
198,
11748,
279,
26377,
952,
198,
6738,
512,
1878,
4872,
62,
76,
20965,
1330,
1113,
78,
628,
198
] | 3.173913 | 23 |
# -*- coding: utf-8 -*-
import time
from django.db.models import Q
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.http import HttpResponseNotFound
from django.contrib.auth.hashers import make_password
from django.contrib.auth.hashers import check_password
from django.views.generic.edit import FormView
from django.views.generic import TemplateView
from django.views.generic import RedirectView
from .forms import SignInForm
from .forms import LoginForm
from .forms import TicketCreateForm
from .models import User
from .models import Ticket
class LoginView(FormView):
    """ Login View.

    Renders the login form and redirects to the dashboard on success.
    """
    form_class = LoginForm
    template_name = 'ticketing_system/login.html'
    success_url = '/dashboard'
class LogoutView(RedirectView):
    """ Logout View.

    Redirects back to the login page.
    """
    url = '/login'
class RegisterView(TemplateView):
    """ Register View.

    Renders the static registration page template.
    """
    template_name = 'ticketing_system/register.html'
class DashboardView(TemplateView):
    """ Dashboard View.

    Renders the dashboard page template.
    """
    template_name = 'ticketing_system/dashboard.html'
class TicketView(FormView):
    """ Ticket View.

    Renders the ticket creation form and redirects back to /ticket on
    successful submission.
    """
    form_class = TicketCreateForm
    template_name = 'ticketing_system/ticket_form.html'
    success_url = '/ticket'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
640,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
... | 3.203608 | 388 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 20 15:40:51 2020
@author: zettergm
known issues:
1) Need to control number of decimal places in output printing to improve readability
"""
import numpy as np
from elimtools import Gauss_elim,backsub
# Problem size: a random dense 10x10 system with a matching RHS vector.
nrow=10
ncol=10
A=np.random.randn(nrow,ncol)
b=np.random.randn(nrow,1)
# Simple test problem for debugging
#A=np.array([[1.0, 4.0, 2.0], [3.0, 2.0, 1.0], [2.0, 1.0, 3.0]])   # system to be solved
#b=np.array([[15.0], [10.0], [13.0]])   # RHS of system
# Solve with elimtools
# Gauss_elim presumably returns the eliminated augmented matrix plus the row
# pivot order, and backsub solves the re-ordered triangular system — TODO
# confirm against elimtools' documentation.
[Awork,order]=Gauss_elim(A,b,True)
x=backsub(Awork[order,:],True)
print("Value of x computed via Gaussian elimination and backsubstitution: ")
print(x)
# Use built-in linear algebra routines to solve and compare
xpyth=np.linalg.solve(A,b);
print("Solution vector computed via built-in numpy routine")
print(xpyth)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
2447,
1160,
1315,
25,
1821,
25,
4349,
12131,
198,
198,
31,
9800,
25,
1976,
40088,
... | 2.392 | 375 |
################################################################################
# #### Hiking_project_trailsraper.py ##### #
# #### written by: Etienne Deneault ##### #
################################################################################
# Environment-import
import json
import requests
import sqlite3
import random
from shapely.geometry import Point, Polygon
# initalize coordinates at the center of mainland US
# Enter Hiking Project API key here. Further Dev needed for encrytion through env variable external program.
api_key = 'your_api_key'
# Base URL for getTrail_datas method
url = 'https://www.hikingproject.com/data/get-trails?'
# Bounding Box Coordinates for the Mainland US obtained on
coords = [(-82.4423472248,25.062761981), (-85.6719092689,29.570088284), (-89.362854179,29.650311372), (-97.9443083368,26.5579317639), (-117.2294984422,32.2612294633), (-125.072755035,41.8816215194), (-124.2423002086,48.2581198196), (-121.566356834,49.3519856698), (-96.4679400287,48.808034488), (-92.0388082824,48.5643678729), (-84.6569184623,46.8886898289), (-81.7964176498,42.8361752569), (-76.3522736392,43.7095457837), (-70.2622322402,46.3181308653), (-68.1399397216,47.014662257), (-66.4790086112,44.7016972202), (-69.7085706554,43.3080101182), (-76.1677269302,38.7876636101), (-75.7986335121,35.6284188995), (-80.7814268432,30.8459373794), (-79.5818678699,26.4753651459), (-82.4423472248,25.062761981)]
poly = Polygon(coords)
point_in_poly = get_all_point_in_polygon(poly)
print("Number of coordinate points to check:", len(point_in_poly))
# print(point_in_poly)
# Randomize list of coordinates
random.shuffle(point_in_poly)
##### Main Loop #####
# Interactively gathers search parameters, then walks the randomized grid of
# coordinate points, querying the Hiking Project API once per point and
# upserting the returned trails into a local SQLite database.
if __name__ == "__main__":
    while True:
        # --- Gather search parameters, re-prompting on invalid input ---
        try:
            maxDistance = input('Enter your search area (0-200 miles): ')
            maxDistance = int(maxDistance)
        except ValueError:
            print("I did not understand the input, please try again using a distance numeric value in miles.")
            continue
        try:
            maxResults = input('Enter your search area - Maximum bumber of trail results (0-500): ')
            maxResults = int(maxResults)
        except ValueError:
            print("I did not understand the input, please try again using a numeric value.")
            continue
        try:
            userReq_count = input('Enter the number of requests to the API (0-199): ')
            userReq_count = int(userReq_count)
        except ValueError:
            print("I did not understand the input, please try again using a numeric value.")
            continue

        # Open the database once (instead of once per request, as before) and
        # make sure the schema exists before issuing any API calls.
        db = sqlite3.connect("SQL_data/trails.sqlite")
        cur = db.cursor()
        cur.execute('''
        CREATE TABLE IF NOT EXISTS Trailsdb (id INTEGER NOT NULL PRIMARY KEY UNIQUE, name TEXT,
        type TEXT, summary TEXT UNIQUE, difficulty TEXT, stars INTEGER, starVotes INTEGER,
        location TEXT, url TEXT UNIQUE, length INTEGER, ascent INTEGER, descent INTEGER, high INTEGER,
        low INTEGER, longitude INTEGER, latitude INTEGER, conditionStatus TEXT, conditionDetails TEXT,
        conditionDate TEXT)''')

        req_count = 0
        # Note: Reverse cordinates to input (lat,lon) == (y,x)
        for (x, y) in point_in_poly:
            req_count = req_count + 1
            print('Query Count', req_count)
            # Fix: the user-supplied maxDistance/maxResults used to be
            # overwritten here with hard-coded values (100/400); they are
            # now honoured.
            parameters = {"lat": y, "lon": x, "maxDistance": maxDistance,
                          "maxResults": maxResults, "key": api_key}
            # Make a get request with the parameters.
            response = requests.get(url, params=parameters)
            print(response.url)
            print(response.headers)
            print(response.status_code)
            json_data = json.loads(response.text)
            # Upsert every returned trail; UNIQUE constraints on id/summary/url
            # make "insert or replace" idempotent across overlapping queries.
            for trail in json_data['trails']:
                cur.execute("Insert or replace into trailsdb values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                            (trail['id'], trail['name'], trail['type'], trail['summary'], trail['difficulty'],
                             trail['stars'], trail['starVotes'], trail['location'], trail['url'], trail['length'],
                             trail['ascent'], trail['descent'], trail['high'], trail['low'], trail['longitude'],
                             trail['latitude'], trail['conditionStatus'], trail['conditionDetails'],
                             trail['conditionDate']))
            db.commit()
            if req_count == userReq_count:
                break
        db.commit()
        db.close()  # release the database handle (was previously leaked)
        print("all done")
        break
| [
29113,
29113,
14468,
201,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
21017,
367,
14132,
62,
16302,
62,
9535,
4487,
38545,
13,
9078,
46424,
220,
220,
220,
220,
220,
220,
220,
... | 2.190189 | 2,324 |
# Copyright 2017,2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import random
import os
import six
import sqlite3
import threading
import uuid
import json
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import exception
from zvmsdk import log
CONF = config.CONF
LOG = log.LOG
_DIR_MODE = 0o755
_VOLUME_CONN = None
_NETWORK_CONN = None
_IMAGE_CONN = None
_GUEST_CONN = None
_FCP_CONN = None
_DBLOCK_VOLUME = threading.RLock()
_DBLOCK_NETWORK = threading.RLock()
_DBLOCK_IMAGE = threading.RLock()
_DBLOCK_GUEST = threading.RLock()
_DBLOCK_FCP = threading.RLock()
@contextlib.contextmanager
@contextlib.contextmanager
@contextlib.contextmanager
@contextlib.contextmanager
| [
2,
15069,
2177,
11,
1238,
2481,
19764,
11421,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,... | 3.011792 | 424 |
from typing import Callable
from async_couch import CouchClient
db_name = 'test_db_01'
invalid_db_name = 'invalid_%^^&_name'
non_existing_db = 'non_existing_database'
doc_id = None
| [
6738,
19720,
1330,
4889,
540,
198,
198,
6738,
30351,
62,
66,
7673,
1330,
48225,
11792,
628,
198,
9945,
62,
3672,
796,
705,
9288,
62,
9945,
62,
486,
6,
198,
259,
12102,
62,
9945,
62,
3672,
796,
705,
259,
12102,
62,
4,
18237,
5,
62,... | 2.71831 | 71 |
#!/usr/bin/python
'''Creates a folder containing text files of Cocoa keywords.'''
import os, commands, re
from sys import argv
def find(searchpath, ext):
    '''Recursively collect the files under searchpath whose names end in ext.

    Mimics the unix command "find searchpath -name '*ext'".
    '''
    return [os.path.join(dirpath, name)
            for dirpath, _subdirs, filenames in os.walk(searchpath)
            for name in filenames
            if name.endswith(ext)]
def find_headers(frameworks):
    '''Collect the ".h" header files shipped with each named framework.'''
    header_paths = []
    for name in frameworks:
        framework_dir = '/System/Library/Frameworks/%s.framework' % name
        header_paths += find(framework_dir, '.h')
    return header_paths
def default_headers():
    '''Header files for the common Cocoa frameworks.'''
    common_frameworks = ('Foundation', 'AppKit', 'AddressBook', 'CoreData',
                         'PreferencePanes', 'QTKit', 'ScreenSaver',
                         'SyncServices', 'WebKit')
    return find_headers(common_frameworks)
def match_output(command, regex, group_num):
    '''
    Run *command* through the shell and return the sorted, de-duplicated
    values of capture group *group_num* for every output line matching
    *regex*.
    '''
    values = []
    for line in commands.getoutput(command).split("\n"):
        hit = re.search(regex, line)
        if hit is None:
            continue
        value = hit.group(group_num)
        if value not in values:
            values.append(value)
    values.sort()
    return values
def get_functions(header_files):
    '''Return the sorted Cocoa function prototypes, with the Cocoa integer
    and float typedefs rewritten as plain C types.'''
    prototypes = match_output(r"grep -h '^[A-Z][A-Z_]* [^;]* \**NS\w\+ *(' "
                              + header_files, r'NS\w+\s*\(.*?\)', 0)
    substitutions = (('NSInteger', 'int'),
                     ('NSUInteger', 'unsigned int'),
                     ('CGFloat', 'float'))
    cleaned = []
    for prototype in prototypes:
        for objc_type, c_type in substitutions:
            prototype = prototype.replace(objc_type, c_type)
        cleaned.append(prototype)
    return cleaned
def get_types(header_files):
    '''Return the sorted names of typedef'd Cocoa (NS*) types.'''
    grep_cmd = r"grep -h 'typedef .* _*NS[A-Za-z]*' " + header_files
    name_pattern = r'(NS[A-Za-z]+)\s*(;|{)'
    return match_output(grep_cmd, name_pattern, 1)
def get_constants(header_files):
    '''Returns a list of Cocoa Constants.

    The awk program prints only the lines that lie inside an
    "enum { ... }" (or "typedef enum { ... }") block; the regex then
    captures the leading NS-prefixed identifier of each enum member.
    '''
    return match_output(r"awk '/^(typedef )?enum .*\{/ {pr = 1;} /\}/ {pr = 0;}"
                        r"{ if(pr) print $0; }' " + header_files,
                        r'^\s*(NS[A-Z][A-Za-z0-9_]*)', 1)
def get_notifications(header_files):
    '''Return the sorted names of Cocoa notification constants.'''
    grep_cmd = r"grep -h '\*NS.*Notification' " + header_files
    return match_output(grep_cmd, r'NS\w*Notification', 0)
def write_file(filename, lines):
    '''Write *lines* to *filename* joined by newlines.

    Exits with an error message if the file cannot be opened.
    '''
    try:
        f = open(filename, 'w')
    except IOError as error:  # fix: "except IOError, error" was Python-2-only syntax
        raise SystemExit(argv[0] + ': %s' % error)
    try:
        f.write("\n".join(lines))
    finally:
        f.close()  # close even if the write itself fails
def extract_files_to(dirname=None):
    '''Extracts .txt files to given directory or ./cocoa_indexes by default.'''
    target = './cocoa_indexes' if dirname is None else dirname
    if not os.path.isdir(target):
        os.mkdir(target)
    # Scan the default framework headers once and reuse them for every index.
    headers = ' '.join(default_headers())
    index_builders = (('functions', get_functions),
                      ('types', get_types),
                      ('constants', get_constants),
                      ('notifications', get_notifications))
    for basename, builder in index_builders:
        write_file('%s/%s.txt' % (target, basename), builder(headers))
if __name__ == '__main__':
    # Optional first command-line argument overrides the output directory.
    extract_files_to(argv[1] if len(argv) > 1 else None)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
7061,
6,
16719,
274,
257,
9483,
7268,
2420,
3696,
286,
18490,
12162,
26286,
2637,
7061,
198,
11748,
28686,
11,
9729,
11,
302,
198,
6738,
25064,
1330,
1822,
85,
198,
198,
4299,
1064,
7,
12947,... | 2.285156 | 1,536 |
import iteration_utilities
import more_itertools
import toolz
import cytoolz
import pydash
| [
11748,
24415,
62,
315,
2410,
198,
11748,
517,
62,
270,
861,
10141,
198,
11748,
2891,
89,
198,
11748,
3075,
25981,
89,
198,
11748,
279,
5173,
1077,
198
] | 3.37037 | 27 |
import threading
import sys
from traceback import print_tb,print_exc
from random import randint
from time import ctime as now
class JobThread(threading.Thread):
    """
    Thread that executes a job every N milliseconds

    The instance attributes used below are presumably set up in an
    ``__init__`` defined elsewhere — TODO confirm: ``_finished`` (a
    threading.Event acting as the shutdown flag), ``_func`` (the job
    callable, carrying ``_min``/``_max`` millisecond interval bounds),
    ``_ref`` (the object passed to each invocation; also provides the
    ``config`` dict used for error logging) and ``_error`` (tracks whether
    the previous invocation failed).
    """
    def shutdown(self):
        """
        Stop this thread
        """
        # Setting the event wakes any wait() and ends the loop in run().
        self._finished.set()
    def is_shutdown(self):
        """
        Boolean check on the thread's state
        """
        return bool(self._finished.isSet())
    def run(self):
        """
        Keep running this thread until it's shutdown
        """
        # Initial 10-second grace period before the first job execution.
        self._finished.wait(10)
        while not self._finished.isSet():
            try:
                self._func(self._ref)
                self._error = False
            except:  # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
                # Report only the first failure of an error streak to avoid
                # spamming the console and log on repeated failures.
                if not self._error:
                    print(" ")
                    print(">>>Exception occured in thread: {0}".format(sys.exc_info()[1]))
                    print_tb(sys.exc_info()[2])
                    print(" ")
                    # Append the full traceback to a per-connection log file.
                    f = open('{0} - ThreadLog.txt'.format(self._ref.config['name']),'a')
                    f.write("\r\n")
                    f.write(now())
                    f.write("\r\nConnection: {0}\r\n".format(self._ref.config['host']))
                    print_exc(None,f)
                    f.write("\r\n")
                    f.close()
                    self._error = True
            finally:
                # Sleep between runs: random jitter in [_min, _max] ms when a
                # max bound is set, otherwise a fixed _min milliseconds.
                if self._func._max:
                    self._finished.wait(randint(self._func._min,self._func._max)*0.001)
                else:
                    self._finished.wait(self._func._min*0.001)
| [
11748,
4704,
278,
198,
11748,
25064,
198,
6738,
12854,
1891,
1330,
3601,
62,
83,
65,
11,
4798,
62,
41194,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
640,
1330,
269,
2435,
355,
783,
198,
198,
4871,
15768,
16818,
7,
16663,
278,
13,
... | 1.825893 | 896 |
import mpi  # Kratos MPI wrapper; needed to run distributed (parallel)
##################################################################
##################################################################
# Spatial dimension of the problem to be solved (2D cantilever).
domain_size = 2
##################################################################
##################################################################
## ATTENTION: here the order is important
# Make the Kratos core, its applications and the structural-solver scripts
# importable (paths are relative to kratos_root).
kratos_libs_path = '../../../../libs' ##kratos_root/libs
kratos_applications_path = '../../../../applications' ##kratos_root/applications
kratos_python_scripts_path = '../../../../applications/structural_application/python_scripts'
import sys
sys.path.append(kratos_libs_path)
sys.path.append(kratos_applications_path)
sys.path.append(kratos_python_scripts_path)
# Import the Kratos main library and create the kernel that registers
# variables and applications.
from Kratos import *
kernel = Kernel()  # defining kernel
# Select and import the required applications: structural mechanics,
# Trilinos (distributed linear algebra) and Metis (mesh partitioning).
import applications_interface
applications_interface.Import_StructuralApplication = True
applications_interface.Import_KratosTrilinosApplication = True
applications_interface.Import_KratosMetisApplication = True
applications_interface.ImportApplications(kernel, kratos_applications_path)
from KratosStructuralApplication import *
from KratosTrilinosApplication import *
from KratosMetisApplication import *
## from now on the order is not anymore crucial
##################################################################
##################################################################
# Model part holding the mesh, variables and solution data.
model_part = ModelPart("FluidPart");
# Solver-specific nodal variables must be registered before the mesh is
# read; PARTITION_INDEX is additionally needed for MPI partitioning.
import trilinos_structural_solver_static
trilinos_structural_solver_static.AddVariables(model_part)
model_part.AddNodalSolutionStepVariable(PARTITION_INDEX)
# GiD post-processing options used when reading/writing the model.
gid_mode = GiDPostMode.GiD_PostBinary
multifile = MultiFileFlag.MultipleFiles
deformed_mesh_flag = WriteDeformedMeshFlag.WriteUndeformed
write_conditions = WriteConditionsFlag.WriteElementsOnly
gid_io = GidIO("cantilever2d",gid_mode,multifile,deformed_mesh_flag, write_conditions)
# Partition the mesh with Metis: one partition per MPI process.
number_of_partitions = mpi.size  # we set it equal to the number of processors
print "number_of_partitions", number_of_partitions
partitioner = MetisPartitioningProcess(model_part, gid_io, number_of_partitions, domain_size);
partitioner.Execute()
# Each MPI rank writes its own partition of the mesh.
mesh_name = mpi.rank
gid_io.InitializeMesh( mesh_name );
gid_io.WriteMesh((model_part).GetMesh());
gid_io.FinalizeMesh()
print "pippo"
print model_part
#print model_part.Properties
#writing the mesh
#gid_io.WriteMesh(model_part.GetMesh(),domain_size,GiDPostMode.GiD_PostBinary);
# The buffer size (time steps kept in memory) must be set after the mesh
# has been read for the first time.
model_part.SetBufferSize(2)
# Add the solver's degrees of freedom and build the static solver object.
trilinos_structural_solver_static.AddDofs(model_part)
solver = trilinos_structural_solver_static.StaticStructuralSolver(model_part,domain_size)
##pILUPrecond = ILU0Preconditioner()
##solver.structure_linear_solver = BICGSTABSolver(1e-9, 5000,pILUPrecond)
# Isotropic 2D linear-elastic constitutive law for property block 1.
model_part.Properties[1].SetValue(CONSTITUTIVE_LAW, Isotropic2D() )
print "Linear elastic model selected"
solver.Initialize()
(solver).SetEchoLevel(2);
# Pseudo-time stepping: 5 steps of 0.001 each.
Dt = 0.001
nsteps = 5
print("initializing results")
gid_io.InitializeResults(mesh_name,(model_part).GetMesh())
for step in range(0,nsteps):
    time = Dt*step
    model_part.CloneTimeStep(time)
    print time
    #print model_part.ProcessInfo()[TIME]
    # Solve the static structural problem for this step.
    solver.Solve()
    # Write displacement and reaction fields for post-processing in GiD.
    print "a"
    gid_io.WriteNodalResults(DISPLACEMENT,model_part.Nodes,time,0)
    gid_io.WriteNodalResults(REACTION,model_part.Nodes,time,0)
gid_io.FinalizeResults()
print "finito"
| [
11748,
285,
14415,
1303,
27938,
284,
779,
285,
14415,
198,
198,
29113,
29113,
2235,
198,
29113,
29113,
2235,
198,
2,
33990,
262,
7386,
2546,
329,
262,
1917,
284,
307,
16019,
198,
27830,
62,
7857,
796,
362,
198,
198,
29113,
29113,
2235,
... | 3.114548 | 1,196 |
import re
import time
import random
import IPython
from google.colab import output
# Global chat state.  NOTE(review): ``n`` looks like a turn/step counter --
# confirm against the rest of the notebook, which is not part of this section.
n = 0
# Avatar icon URLs for the chat UI (original comment: "アイコンの指定" =
# "icon selection").
BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png'
YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png'
# Conversation slots, e.g. 'name', 'birthday', 'asking'.
frame = {}
TYPE = []
| [
11748,
302,
198,
11748,
640,
198,
11748,
4738,
198,
11748,
6101,
7535,
198,
6738,
23645,
13,
4033,
397,
1330,
5072,
628,
198,
77,
796,
657,
220,
198,
198,
2,
220,
11839,
11482,
24679,
6527,
27032,
234,
229,
22522,
248,
198,
33,
2394,
... | 2 | 204 |
#!/usr/bin/env python
"""
_Repack_t_
Repack job splitting test
"""
import unittest
import threading
import logging
import time
from WMCore.WMBS.File import File
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMBS.Subscription import Subscription
from WMCore.WMBS.Workflow import Workflow
from WMCore.DataStructs.Run import Run
from WMCore.DAOFactory import DAOFactory
from WMCore.JobSplitting.SplitterFactory import SplitterFactory
from WMCore.Services.UUIDLib import makeUUID
from WMQuality.TestInit import TestInit
class RepackTest(unittest.TestCase):
    """
    _RepackTest_
    Test for Repack job splitter
    """
    def setUp(self):
        """
        _setUp_
        """
        import WMQuality.TestInit
        WMQuality.TestInit.deleteDatabaseAfterEveryTest("I'm Serious")
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer", "T0.WMBS"])
        self.splitterFactory = SplitterFactory(package = "T0.JobSplitting")
        myThread = threading.currentThread()
        daoFactory = DAOFactory(package = "T0.WMBS",
                                logger = logging,
                                dbinterface = myThread.dbi)
        # Minimal WMBS fixture: one location (site) mapped to one PNN.
        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state, state_time)
                                    VALUES (1, 'SomeSite', 1, 1)
                                    """, transaction = False)
        myThread.dbi.processData("""INSERT INTO wmbs_pnns
                                    (id, pnn)
                                    VALUES (2, 'SomePNN')
                                    """, transaction = False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
                                    (location, pnn)
                                    VALUES (1, 2)
                                    """, transaction = False)
        # T0 run configuration: run 1 with four lumi sections and stream "A".
        insertRunDAO = daoFactory(classname = "RunConfig.InsertRun")
        insertRunDAO.execute(binds = { 'RUN' : 1,
                                       'HLTKEY' : "someHLTKey" },
                             transaction = False)
        insertLumiDAO = daoFactory(classname = "RunConfig.InsertLumiSection")
        for lumi in [1, 2, 3, 4]:
            insertLumiDAO.execute(binds = { 'RUN' : 1,
                                            'LUMI' : lumi },
                                  transaction = False)
        insertStreamDAO = daoFactory(classname = "RunConfig.InsertStream")
        insertStreamDAO.execute(binds = { 'STREAM' : "A" },
                                transaction = False)
        insertStreamFilesetDAO = daoFactory(classname = "RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "A", "TestFileset1")
        # Subscription using the "Repack" split algorithm under test.
        self.fileset1 = Fileset(name = "TestFileset1")
        self.fileset1.load()
        workflow1 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow1", task="Test")
        workflow1.create()
        self.subscription1 = Subscription(fileset = self.fileset1,
                                          workflow = workflow1,
                                          split_algo = "Repack",
                                          type = "Repack")
        self.subscription1.create()
        # keep for later
        self.insertClosedLumiDAO = daoFactory(classname = "RunLumiCloseout.InsertClosedLumi")
        self.currentTime = int(time.time())
        # default split parameters (deliberately huge so nothing splits
        # until a test lowers one of them)
        self.splitArgs = {}
        self.splitArgs['maxSizeSingleLumi'] = 20*1024*1024*1024
        self.splitArgs['maxSizeMultiLumi'] = 10*1024*1024*1024
        self.splitArgs['maxInputEvents'] = 500000
        self.splitArgs['maxInputFiles'] = 1000
        self.splitArgs['maxLatency'] = 50000
        return
    def tearDown(self):
        """
        _tearDown_
        """
        self.testInit.clearDatabase()
        return
    def getNumActiveSplitLumis(self):
        """
        _getNumActiveSplitLumis_
        helper function that counts the number of active split lumis
        """
        myThread = threading.currentThread()
        results = myThread.dbi.processData("""SELECT COUNT(*)
                                              FROM lumi_section_split_active
                                              """, transaction = False)[0].fetchall()
        return results[0][0]
    def test00(self):
        """
        _test00_
        Test that the job name prefix feature works
        Test multi lumi size threshold
        Multi lumi input
        """
        mySplitArgs = self.splitArgs.copy()
        # 4 lumis x 2 files of 1000 bytes / 100 events each.
        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset1.addFile(newFile)
        self.fileset1.commit()
        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)
        # NOTE(review): no-op -- mySplitArgs is a copy of splitArgs, so this
        # just restates the default threshold.
        mySplitArgs['maxSizeMultiLumi'] = self.splitArgs['maxSizeMultiLumi']
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")
        mySplitArgs['maxSizeMultiLumi'] = 5000
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")
        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("Repack-"),
                        "ERROR: Job has wrong name")
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")
        # Closing the fileset releases the remaining files into a last job.
        self.fileset1.markOpen(False)
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")
        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("Repack-"),
                        "ERROR: Job has wrong name")
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")
        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")
        return
    def test01(self):
        """
        _test01_
        Test multi lumi event threshold
        Multi lumi input
        """
        mySplitArgs = self.splitArgs.copy()
        insertClosedLumiBinds = []
        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append( { 'RUN' : 1,
                                            'LUMI' : lumi,
                                            'STREAM' : "A",
                                            'FILECOUNT' : filecount,
                                            'INSERT_TIME' : self.currentTime,
                                            'CLOSE_TIME' : self.currentTime } )
        self.fileset1.commit()
        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)
        self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
                                         transaction = False)
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")
        # 800 events total, threshold 500 => split after the first 4 files.
        mySplitArgs['maxInputEvents'] = 500
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")
        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")
        self.fileset1.markOpen(False)
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")
        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")
        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")
        return
    def test02(self):
        """
        _test02_
        Test single lumi size threshold
        Single lumi input
        """
        mySplitArgs = self.splitArgs.copy()
        insertClosedLumiBinds = []
        for lumi in [1]:
            filecount = 8
            for i in range(filecount):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append( { 'RUN' : 1,
                                            'LUMI' : lumi,
                                            'STREAM' : "A",
                                            'FILECOUNT' : filecount,
                                            'INSERT_TIME' : self.currentTime,
                                            'CLOSE_TIME' : self.currentTime } )
        self.fileset1.commit()
        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)
        self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
                                         transaction = False)
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")
        # 8000 bytes in one lumi, threshold 6500 => forced 6/2 split.
        mySplitArgs['maxSizeSingleLumi'] = 6500
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")
        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 6,
                         "ERROR: Job does not process 6 files")
        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")
        self.assertEqual(self.getNumActiveSplitLumis(), 1,
                         "ERROR: Split lumis were not created")
        return
    def test03(self):
        """
        _test03_
        Test single lumi event threshold
        Single lumi input
        """
        mySplitArgs = self.splitArgs.copy()
        insertClosedLumiBinds = []
        for lumi in [1]:
            filecount = 8
            for i in range(filecount):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append( { 'RUN' : 1,
                                            'LUMI' : lumi,
                                            'STREAM' : "A",
                                            'FILECOUNT' : filecount,
                                            'INSERT_TIME' : self.currentTime,
                                            'CLOSE_TIME' : self.currentTime } )
        self.fileset1.commit()
        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)
        self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
                                         transaction = False)
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")
        # 800 events in one lumi, threshold 650 => forced 6/2 split.
        mySplitArgs['maxInputEvents'] = 650
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")
        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 6,
                         "ERROR: Job does not process 6 files")
        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")
        self.assertEqual(self.getNumActiveSplitLumis(), 1,
                         "ERROR: Split lumis were not created")
        return
    def test04(self):
        """
        _test04_
        Test streamer count threshold (only multi lumi)
        Multi lumi input
        """
        mySplitArgs = self.splitArgs.copy()
        insertClosedLumiBinds = []
        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append( { 'RUN' : 1,
                                            'LUMI' : lumi,
                                            'STREAM' : "A",
                                            'FILECOUNT' : filecount,
                                            'INSERT_TIME' : self.currentTime,
                                            'CLOSE_TIME' : self.currentTime } )
        self.fileset1.commit()
        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)
        self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
                                         transaction = False)
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")
        # 8 input files, threshold 5 => split after the first 4 files.
        mySplitArgs['maxInputFiles'] = 5
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")
        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")
        self.fileset1.markOpen(False)
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")
        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")
        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")
        return
    def test05(self):
        """
        _test05_
        Test repacking of multiple lumis with holes in the lumi sequence
        Multi lumi input
        """
        mySplitArgs = self.splitArgs.copy()
        insertClosedLumiBinds = []
        for lumi in [1, 2, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append( { 'RUN' : 1,
                                            'LUMI' : lumi,
                                            'STREAM' : "A",
                                            'FILECOUNT' : filecount,
                                            'INSERT_TIME' : self.currentTime,
                                            'CLOSE_TIME' : self.currentTime } )
        self.fileset1.commit()
        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)
        self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
                                         transaction = False)
        mySplitArgs['maxInputFiles'] = 5
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")
        # Closing the missing lumi 3 (with zero files) fills the hole and
        # lets the splitter proceed past it.
        self.insertClosedLumiDAO.execute(binds = { 'RUN' : 1,
                                                   'LUMI' : 3,
                                                   'STREAM' : "A",
                                                   'FILECOUNT' : 0,
                                                   'INSERT_TIME' : self.currentTime,
                                                   'CLOSE_TIME' : self.currentTime },
                                         transaction = False)
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")
        self.assertEqual(len(jobGroups[0].jobs[0].getFiles()), 4,
                         "ERROR: first job does not process 4 files")
        return
    def test06(self):
        """
        _test06_
        Test repacking of 3 lumis
        2 small lumis (single job), followed by a big one (multiple jobs)
        files for lumi 1 and 2 are below multi-lumi thresholds
        files for lumi 3 are above single-lumi threshold
        """
        mySplitArgs = self.splitArgs.copy()
        insertClosedLumiBinds = []
        for lumi in [1, 2, 3]:
            filecount = 2
            for i in range(filecount):
                if lumi == 3:
                    nevents = 500
                else:
                    nevents = 100
                newFile = File(makeUUID(), size = 1000, events = nevents)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append( { 'RUN' : 1,
                                            'LUMI' : lumi,
                                            'STREAM' : "A",
                                            'FILECOUNT' : filecount,
                                            'INSERT_TIME' : self.currentTime,
                                            'CLOSE_TIME' : self.currentTime } )
        self.fileset1.commit()
        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)
        self.insertClosedLumiDAO.execute(binds = insertClosedLumiBinds,
                                         transaction = False)
        mySplitArgs['maxInputEvents'] = 900
        jobGroups = jobFactory(**mySplitArgs)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 3,
                         "ERROR: JobFactory didn't create three jobs")
        self.assertEqual(len(jobGroups[0].jobs[0].getFiles()), 4,
                         "ERROR: first job does not process 4 files")
        self.assertEqual(len(jobGroups[0].jobs[1].getFiles()), 1,
                         "ERROR: second job does not process 1 file")
        self.assertEqual(len(jobGroups[0].jobs[2].getFiles()), 1,
                         "ERROR: third job does not process 1 file")
        return
if __name__ == '__main__':
    # Allow running this suite directly with the default unittest runner.
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
62,
6207,
441,
62,
83,
62,
198,
198,
6207,
441,
1693,
26021,
1332,
198,
198,
37811,
198,
198,
11748,
555,
715,
395,
198,
11748,
4704,
278,
198,
11748,
18931,
198,
11748,
... | 1.837599 | 11,644 |
#############################################################################
#
# Copyright (c) 2020 Teunis van Beelen
# All rights reserved.
#
# Email: teuniz@protonmail.com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES
# LOSS OF USE, DATA, OR PROFITS OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#############################################################################
import sys
import io
import os
import string
import array
from collections import namedtuple
import numpy as np
from datetime import datetime
if sys.version_info[0] != 3 or sys.version_info[1] < 5:
print("Must be using Python version >= 3.5.0")
sys.exit()
if np.__version__ < "1.17.0":
print("Must be using NumPy version >= 1.17.0")
sys.exit()
################################################################################
# START class EDFwriter
################################################################################
class EDFwriter:
"""A writer for EDF+ and BDF+ files.
EDF header:
offset (hex, dec) length
---------------------------------------------------------------------
0x00 0 8 ascii : version of this data format (0)
0x08 8 80 ascii : local patient identification
0x58 88 80 ascii : local recording identification
0xA8 168 8 ascii : startdate of recording (dd.mm.yy)
0xB0 176 8 ascii : starttime of recording (hh.mm.ss)
0xB8 184 8 ascii : number of bytes in header record
0xC0 192 44 ascii : reserved
0xEC 236 8 ascii : number of data records (-1 if unknown)
0xF4 244 8 ascii : duration of a data record, in seconds
0xFC 252 4 ascii : number of signals
0x00 0 ns * 16 ascii : ns * label (e.g. EEG Fpz-Cz or Body temp)
ns * 0x10 ns * 16 ns * 80 ascii : ns * transducer type (e.g. AgAgCl electrode)
ns * 0x60 ns * 96 ns * 8 ascii : ns * physical dimension (e.g. uV or degreeC)
ns * 0x68 ns * 104 ns * 8 ascii : ns * physical minimum (e.g. -500 or 34)
ns * 0x70 ns * 112 ns * 8 ascii : ns * physical maximum (e.g. 500 or 40)
ns * 0x78 ns * 120 ns * 8 ascii : ns * digital minimum (e.g. -2048)
ns * 0x80 ns * 128 ns * 8 ascii : ns * digital maximum (e.g. 2047)
ns * 0x88 ns * 136 ns * 80 ascii : ns * prefiltering (e.g. HP:0.1Hz LP:75Hz N:60)
ns * 0xD8 ns * 216 ns * 8 ascii : ns * nr of samples in each data record
ns * 0xE0 ns * 224 ns * 32 ascii : ns * reserved
ns: number of signals
All fields are left aligned and filled up with spaces, no NULL's.
Only printable ASCII characters are allowed.
Decimal separator (if any) must be a dot. No grouping characters in numbers.
For more info about the EDF and EDF+ format, visit: https://edfplus.info/specs/
For more info about the BDF and BDF+ format, visit: https://www.teuniz.net/edfbrowser/bdfplus%20format%20description.html
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
For a better explanation about the relation between digital data and physical data,
read the document "Coding Schemes Used with Data Converters" (PDF):
https://www.ti.com/general/docs/lit/getliterature.tsp?baseLiteratureNumber=sbaa042
note: An EDF file usually contains multiple so-called datarecords. One datarecord usually has a duration of one second (this is the default but it is not mandatory!).
In that case a file with a duration of five minutes contains 300 datarecords. The duration of a datarecord can be freely choosen but, if possible, use values from
0.1 to 1 second for easier handling. Just make sure that the total size of one datarecord, expressed in bytes, does not exceed 10MByte (15MBytes for BDF(+)).
The RECOMMENDATION of a maximum datarecordsize of 61440 bytes in the EDF and EDF+ specification was usefull in the time people were still using DOS as their main operating system.
Using DOS and fast (near) pointers (16-bit pointers), the maximum allocatable block of memory was 64KByte.
This is not a concern anymore so the maximum datarecord size now is limited to 10MByte for EDF(+) and 15MByte for BDF(+). This helps to accommodate for higher samplingrates
used by modern Analog to Digital Converters.
EDF header character encoding: The EDF specification says that only (printable) ASCII characters are allowed.
When writing the header info, EDFlib will assume you are using Latin1 encoding and it will automatically convert
characters with accents, umlauts, tilde, etc. to their "normal" equivalent without the accent/umlaut/tilde/etc.
in order to create a valid EDF file.
The description/name of an EDF+ annotation on the other hand, is encoded in UTF-8.
author: Teunis van Beelen
"""
EDFLIB_TIME_DIMENSION = 10000000
EDFLIB_MAXSIGNALS = 640
EDFLIB_MAX_ANNOTATION_LEN = 512
EDFSEEK_SET = 0
EDFSEEK_CUR = 1
EDFSEEK_END = 2
EDFLIB_FILETYPE_EDF = 0
EDFLIB_FILETYPE_EDFPLUS = 1
EDFLIB_FILETYPE_BDF = 2
EDFLIB_FILETYPE_BDFPLUS = 3
EDFLIB_MALLOC_ERROR = -1
EDFLIB_NO_SUCH_FILE_OR_DIRECTORY = -2
EDFLIB_FILE_CONTAINS_FORMAT_ERRORS = -3
EDFLIB_MAXFILES_REACHED = -4
EDFLIB_FILE_READ_ERROR = -5
EDFLIB_FILE_ALREADY_OPENED = -6
EDFLIB_FILETYPE_ERROR = -7
EDFLIB_FILE_WRITE_ERROR = -8
EDFLIB_NUMBER_OF_SIGNALS_INVALID = -9
EDFLIB_FILE_IS_DISCONTINUOUS = -10
EDFLIB_INVALID_READ_ANNOTS_VALUE = -11
EDFLIB_INVALID_ARGUMENT = -12
EDFLIB_FILE_CLOSED = -13
EDFLIB_DO_NOT_READ_ANNOTATIONS = 0
EDFLIB_READ_ANNOTATIONS = 1
EDFLIB_READ_ALL_ANNOTATIONS = 2
EDFLIB_NO_SIGNALS = -20
EDFLIB_TOO_MANY_SIGNALS = -21
EDFLIB_NO_SAMPLES_IN_RECORD = -22
EDFLIB_DIGMIN_IS_DIGMAX = -23
EDFLIB_DIGMAX_LOWER_THAN_DIGMIN = -24
EDFLIB_PHYSMIN_IS_PHYSMAX = -25
EDFLIB_DATARECORD_SIZE_TOO_BIG = -26
EDFLIB_VERSION = 100
# max size of annotationtext
__EDFLIB_WRITE_MAX_ANNOTATION_LEN = 40
# bytes in datarecord for EDF annotations, must be an integer multiple of three and two
__EDFLIB_ANNOTATION_BYTES = 114
# for writing only
__EDFLIB_MAX_ANNOTATION_CHANNELS = 64
__EDFLIB_ANNOT_MEMBLOCKSZ = 1000
__EDFAnnotationStruct = namedtuple("annotation", ["onset", "duration", "description"])
def close(self) -> int:
"""Finalizes and closes the file.
This function is required after writing. Failing to do so will cause a corrupted and incomplete file.
Returns 0 on success, otherwise -1.
"""
if self.__status_ok:
if self.__datarecords < 100000000:
self.__file_out.seek(236, io.SEEK_SET)
if self.__fprint_int_number_nonlocalized(self.__file_out, self.__datarecords, 0, 0) < 2:
self.__file_out.write(bytes(" ", encoding="ascii"))
self.__write_annotations()
self.__file_out.close()
self.__status_ok = 0
return 0
else:
return -1
def version(self) -> int:
"""If version is 1.00 then it will return 100."""
return self.EDFLIB_VERSION
def setSampleFrequency(self, s: int, sf: int) -> int:
"""Sets the samplefrequency of signal s.
(In reallity, it sets the number of samples in a datarecord.)
The samplefrequency of a signal is determined as: sf = number of samples in a datarecord / datarecord duration.
The samplefrequency equals the number of samples in a datarecord only when the datarecord duration is set to the default of one second.
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
sf is the samplefrequency.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0) or (sf < 1):
return -1
self.__param_smp_per_record[s] = sf
return 0
def setPhysicalMaximum(self, s: int, phys_max: float) -> int:
"""Sets the maximum physical value of signal s.
This is the value of the input of the ADC when the output equals the value of "digital maximum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
phys_max is the maximum input value.
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_phys_max[s] = phys_max
return 0
def setPhysicalMinimum(self, s: int, phys_min: float) -> int:
"""Sets the minimum physical value of signal s.
This is the value of the input of the ADC when the output equals the value of "digital minimum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
phys_min is the minimum input value.
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_phys_min[s] = phys_min
return 0
def setDigitalMaximum(self, s: int, dig_max: int) -> int:
"""Sets the maximum digital value of signal s.
This is the value of the output of the ADC when the input equals the value of "physical maximum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
dig_max is the maximum output value (<= 32767 for EDF and <= 8388607 for BDF).
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
if self.__edf != 0:
if dig_max > 32767:
return -1
else:
if dig_max > 8388607:
return -1
self.__param_dig_max[s] = dig_max
return 0
def setDigitalMinimum(self, s: int, dig_min: int) -> int:
"""Sets the minimum digital value of signal s.
This is the value of the output of the ADC when the input equals the value of "physical minimum".
This function is required for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
dig_min is the minimum output value (>= -32768 for EDF and >= -8388608 for BDF).
Returns 0 on success, otherwise -1.
note: In EDF, the sensitivity (e.g. uV/bit) and offset are stored using four parameters:
digital maximum and minimum, and physical maximum and minimum.
Here, digital means the raw data coming from a sensor or ADC. Physical means the units like uV.
Usually they are the extreme input and output values of the ADC.
The sensitivity in units/bit is calculated as follows:
units per bit = (physical max - physical min) / (digital max - digital min)
The digital offset is calculated as follows:
offset = (physical max / units per bit) - digital max
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
if self.__edf != 0:
if dig_min < -32768:
return -1
else:
if dig_min < -8388608:
return -1
self.__param_dig_min[s] = dig_min
return 0
def setSignalLabel(self, s: int, label: str) -> int:
"""Sets the label (name) of signal s.
(e.g. "FP1", "SaO2", etc.) String must contain printable ASCII only.
This function is recommended for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
label is the signallabel.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_label[s] = label
return 0
def setPreFilter(self, s: int, prefilter: str) -> int:
"""Sets the prefilter description of signal s.
(e.g. "HP:0.05Hz", "LP:250Hz", "N:60Hz", etc.) String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
s is the signal number (zero-based).
prefilter is the prefilter description.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_prefilter[s] = prefilter
return 0
def setTransducer(self, s: int, transducer: str) -> int:
"""Sets the transducer description of signal s.
("AgAgCl cup electrodes", etc.) String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
s is the signal number (zero-based).
transducer is the transducer description.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_transducer[s] = transducer
return 0
def setPhysicalDimension(self, s: int, physical_dimension: str) -> int:
"""Sets the physical_dimension (unit) of signal s.
("uV", "BPM", "mA", "Degr.", etc.) String must contain printable ASCII only.
This function recommended for every signal and can be called only before the first sample write action.
s is the signal number (zero-based).
physical_dimension is the physical dimension description.
Returns 0 on success, otherwise -1.
"""
if (s < 0) or (s >= self.__edfsignals) or (self.__datarecords != 0):
return -1
self.__param_physdimension[s] = physical_dimension
return 0
def setStartDateTime(self, year: int, month: int, day: int, hour: int, minute: int, second: int, subsecond: int) -> int:
"""
Sets the startdate and starttime.
If not called, the system date and time at runtime will be used.
This function is optional and can be called only before the first sample write action.
If subsecond precision is not needed or not applicable, leave it at zero.
year: 1970 - 3000
month: 1 - 12
day: 1 - 31
hour: 0 - 23
minute: 0 - 59
second: 0 - 59
subsecond: 0 - 9999 expressed in units of 100 microSeconds
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
if (year < 1970) or (year > 3000) or \
(month < 1) or (month > 12) or \
(day < 1) or (day > 31) or \
(hour < 0) or (hour > 23) or \
(minute < 0) or (minute > 59) or \
(second < 0) or (second > 59) or \
(subsecond < 0) or (subsecond > 9999):
return -1
self.__startdate_year = year
self.__startdate_month = month
self.__startdate_day = day
self.__starttime_hour = hour
self.__starttime_minute = minute
self.__starttime_second = second
self.__starttime_offset = subsecond * 1000
return 0
def setPatientName(self, name: str) -> int:
"""Sets the patientname.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_patient_name = name
return 0
def setPatientCode(self, code: str) -> int:
"""Sets the patientcode.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_patientcode = code
return 0
def setPatientGender(self, gender: int) -> int:
"""Sets the patient's gender.
gender: 0 = female, 1 = male, 2 = unknown or not applicable (default)
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
if (gender < 0) or (gender > 2):
return -1
self.__plus_gender = gender
return 0
def setPatientBirthDate(self, year: int, month: int, day: int) -> int:
"""Sets the patients' birthdate.
This function is optional and can be called only before the first sample write action.
year: 1800 - 3000
month: 1 - 12
day: 1 - 31
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
if (year < 1800) or (year > 3000) or \
(month < 1) or (month > 12) or \
(day < 1) or (day > 31):
return -1
self.__plus_birthdate_year = year
self.__plus_birthdate_month = month
self.__plus_birthdate_day = day
return 0
def setAdditionalPatientInfo(self, additional: str) -> int:
"""Sets the additional information related to the patient.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_patient_additional = additional
return 0
def setAdministrationCode(self, admin_code: str) -> int:
"""Sets the administration code.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_admincode = admin_code
return 0
def setTechnician(self, technician: str) -> int:
"""Sets the name or id of the technician who performed the recording.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_technician = technician
return 0
def setEquipment(self, equipment: str) -> int:
"""Sets the description of the equipment used for the recording.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_equipment = equipment
return 0
def setAdditionalRecordingInfo(self, additional: str) -> int:
"""Sets the additional info related to the recording.
String must contain printable ASCII only.
This function is optional and can be called only before the first sample write action.
Returns 0 on success, otherwise -1.
"""
if self.__datarecords != 0:
return -1
self.__plus_recording_additional = additional
return 0
    def writeSamples(self, buf: np.array) -> int:
        """Write one signal's worth (sf samples) of data into the file.

        Buf must be a one-dimensional numpy array containing samples of one
        signal, of datatype int32, float_ or float64 (for EDF, int16 may
        also be used).  Integer input is written without conversion; float
        input is treated as physical values and converted to digital values
        using the signal's physical/digital maxima and minima.
        The number of samples written equals the samplefrequency of the
        signal (the value set with setSampleFrequency()), so buf must be at
        least that long.
        Call this function for every signal in the file — the order is
        important!  With 4 signals the calling order must be:
        signal 0, 1, 2, 3, 0, 1, 2, ... and the recording must end at the
        end of a complete cycle.
        Returns 0 on success, otherwise -1.
        """
        # Reject calls on a writer that failed to open, and input that is
        # not a 1-D array of a supported dtype.
        if self.__status_ok == 0:
            return -1
        if buf.ndim != 1:
            return -1
        if (buf.dtype != np.int16) and (buf.dtype != np.int32) and (buf.dtype != np.float_) and (buf.dtype != np.float64):
            return -1
        # int16 input is only allowed for EDF, never for BDF.
        if (buf.dtype == np.int16) and (self.__bdf != 0):
            return -1
        # Signals are written round-robin; this is the signal expected next.
        edfsignal = self.__signal_write_sequence_pos
        # The header can only be emitted once all parameters are known,
        # i.e. immediately before the very first sample write.
        if self.__datarecords == 0:
            if edfsignal == 0:
                error = self.__write_edf_header()
                if error != 0:
                    return error
        sf = self.__param_smp_per_record[edfsignal]
        digmax = self.__param_dig_max[edfsignal]
        digmin = self.__param_dig_min[edfsignal]
        # buf must hold at least one datarecord's worth of samples.
        if sf > buf.size:
            return -1
        if self.__edf != 0:
            # EDF: samples are stored as little-endian 2-byte signed ints.
            if (buf.dtype == np.int16) or (buf.dtype == np.int32):
                # Integer input: clip to the digital range, write verbatim.
                for i in range(0, sf):
                    if buf[i] > digmax:
                        buf[i] = digmax
                    if buf[i] < digmin:
                        buf[i] = digmin
                    self.__file_out.write(buf[i].astype("int16").tobytes(order="C"))
            else:
                # Float input: convert physical -> digital with the
                # per-signal bitvalue (units/bit) and offset, then clip.
                for i in range(0, sf):
                    value = int((buf[i] / self.__param_bitvalue[edfsignal]) - self.__param_offset[edfsignal])
                    if value > digmax:
                        value = digmax
                    if value < digmin:
                        value = digmin
                    self.__file_out.write(value.to_bytes(2, byteorder="little", signed=True))
        else:
            # BDF: samples are stored as little-endian 3-byte signed ints.
            if buf.dtype == np.int32:
                for i in range(0, sf):
                    value = int(buf[i])
                    if value > digmax:
                        value = digmax
                    if value < digmin:
                        value = digmin
                    self.__file_out.write(value.to_bytes(3, byteorder="little", signed=True))
            else:
                for i in range(0, sf):
                    value = int((buf[i] / self.__param_bitvalue[edfsignal]) - self.__param_offset[edfsignal])
                    if value > digmax:
                        value = digmax
                    if value < digmin:
                        value = digmin
                    self.__file_out.write(value.to_bytes(3, byteorder="little", signed=True))
        # Advance the round-robin position; once every signal has been
        # written the datarecord is complete: append the TAL (annotation)
        # channel and start counting a new record.
        self.__signal_write_sequence_pos += 1
        if self.__signal_write_sequence_pos == self.__edfsignals:
            self.__signal_write_sequence_pos = 0
            if self.__write_tal(self.__file_out) != 0:
                return -1
            self.__datarecords += 1
        return 0
def setDataRecordDuration(self, duration: int) -> int:
"""Sets the datarecord duration.
This function is optional, normally you don't need to change the default value of one second.
This function is NOT REQUIRED but can be called only before the first sample write action.
This function can be used when you want to use a non-integer samplerate.
For example, if you want to use a samplerate of 0.5 Hz, set the samplefrequency to 5 Hz and
the datarecord duration to 10 seconds, or alternatively, set the samplefrequency to 1 Hz and
the datarecord duration to 2 seconds.
This function can also be used when you want to use a very high samplerate.
For example, if you want to use a samplerate of 5 GHz,
set the samplefrequency to 5000 Hz and the datarecord duration to 1 microSecond.
Do not use this function if not necessary.
duration is expressed in microSeconds, range: 1 - 60000000 (1uSec. - 60 sec.)
Returns 0 on success, otherwise -1.
"""
if (duration < 1) or (duration > 60000000) or (self.__datarecords != 0):
return -1
self.__long_data_record_duration = duration * 10
return 0
def setNumberOfAnnotationSignals(self, annot_signals: int) -> int:
"""Sets the number of annotation signals.
The default value is 1.
This function is optional and, if used, must be called before the first sample write action.
Normally you don't need to change the default value. Only when the number of annotations
you want to write is higher than the number of datarecords in the recording, you can use
this function to increase the storage space for annotations.
"""
if (annot_signals < 1) or (annot_signals >= self.__EDFLIB_MAX_ANNOTATION_CHANNELS) or (self.__datarecords != 0):
return -1
self.__nr_annot_chns = annot_signals
return 0
def writeAnnotation(self, onset: int, duration: int, description: str) -> int:
"""Writes an annotation/event to the file.
onset is relative to the starttime of the recording and must be >= 0.
onset and duration are in units of 100 microSeconds. Resolution is 0.0001 second.
E.g. 34.071 seconds must be written as 340710.
If duration is unknown or not applicable: set a negative number (-1).
Description is a string containing the text that describes the event.
This function is optional.
"""
if (self.__status_ok == 0) or (onset < 0):
return -1
self.__annotationslist.append(self.__EDFAnnotationStruct(onset = onset, duration = duration, description = description))
self.__annots_in_file += 1
return 0
################################################################################
# from here only internal utils
################################################################################
# writes the EDF header
# writes a TAL
# writes the annotations to the file
# minimum is the minimum digits that will be printed (minus sign not included), leading zero's will be added if necessary
# if sign is zero, only negative numbers will have the sign '-' character
# if sign is one, the sign '+' or '-' character will always be printed
# returns the number of characters printed
# minimum is the minimum digits that will be printed (minus sign not included), leading zero's will be added if necessary
# if sign is zero, only negative numbers will have the sign '-' character
# if sign is one, the sign '+' or '-' character will always be printed
# returns the number of characters printed
# minimum is the minimum digits that will be printed (minus sign not included), leading zero's will be added if necessary
# if sign is zero, only negative numbers will have the sign '-' character
# if sign is one, the sign '+' or '-' character will always be printed
# returns the amount of characters printed
# get string length
# copy a string
# converts Latin-1 to ASCII
################################################################################
# END class EDFwriter
################################################################################
################################################################################
# START class EDFexception
################################################################################
################################################################################
# END class EDFexception
################################################################################
| [
29113,
29113,
7804,
4242,
2,
198,
2,
198,
2,
15069,
357,
66,
8,
12131,
1665,
403,
271,
5719,
1355,
417,
268,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
9570,
25,
573,
403,
528,
31,
1676,
1122,
4529,
13,
785,
198,
2,
198,
2... | 2.926959 | 10,323 |
def two_largest(values):
    """Return (second largest, largest) of *values*.

    Mirrors the original index arithmetic: with n == len(values) the
    results are sorted(values)[n - 2] and sorted(values)[n - 1], so a
    single-element list yields that element twice (n - 2 wraps to -1).

    :param values: non-empty sequence of comparable items
    :return: tuple (second largest, largest)
    """
    ordered = sorted(values)
    n = len(values)
    return ordered[n - 2], ordered[n - 1]


def main():
    """Read n, then n integers (one per line); print the two largest.

    Fixes the original script, which also printed its intermediate
    debugging state ("nums.append ...", "sorted(nums) ...") and thereby
    corrupted the intended two-line output.
    """
    n = int(input())
    nums = [int(input()) for _ in range(n)]
    second_largest, largest = two_largest(nums)
    print(second_largest)
    print(largest)


# Guarded so the module can be imported without consuming stdin.
if __name__ == "__main__":
    main()
| [
77,
796,
493,
7,
15414,
28955,
198,
77,
5700,
796,
17635,
198,
77,
5700,
17,
796,
17635,
198,
1640,
1312,
287,
2837,
7,
77,
2599,
198,
220,
220,
220,
997,
82,
13,
33295,
7,
600,
7,
15414,
3419,
4008,
198,
220,
220,
220,
3601,
72... | 2.10101 | 99 |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 5 17:01:13 2018
@author: Pavel
"""
"""
This function will take the whole of SUN database and flaten it into a single
folder while resizing and cropping all images into given square shape.
If the file is smaller than that, it will be ignored.
"""
import os
from PIL import Image
from resizeimage import resizeimage
# Root of the source images to flatten/resize.  NOTE(review): the original
# comment said "alphabet letter folders", but the path points at the SUN
# (LabelMeToolbox) image set — presumably a stale copy-paste; confirm.
SUN_images_dir = "E:/LabelMeToolbox/real_data/images"
# Destination folder for the resized images (flat structure).
resized_address = "D:/old_files/aaaaa/Anglie/imperial/2017-2018/group_project/OcadoLobster/data/resized_background/new_test/"
# Alternative destinations used by the commented-out indoor/outdoor split
# inside find_all_files below.
indoor_address = "D:/old_files/aaaaa/Anglie/imperial/2017-2018/group_project/OcadoLobster/data/resized_background/indoor/"
outdoor_address = "D:/old_files/aaaaa/Anglie/imperial/2017-2018/group_project/OcadoLobster/data/resized_background/outdoor/"
def resize_and_crop(image_address, output_address, f_widht, f_height):
    """Resize-and-crop one image to exactly f_widht x f_height pixels.

    The source image must be at least as large as the target size in both
    dimensions; otherwise it is skipped (a message is printed and nothing
    is written).

    Args:
        image_address (string): Image to be resized
        output_address (string): Final destination of the resized image
        f_widht (int): Final desired width in pixels
        f_height (int): Final desired height in pixels

    Returns:
        Nothing
    """
    with open(image_address, 'r+b') as source_file, Image.open(source_file) as image:
        current_width, current_height = image.size
        if current_width < f_widht or current_height < f_height:
            print("Image too small to be resized")
            return
        cropped = resizeimage.resize_cover(image, [f_widht, f_height])
        cropped.save(output_address, image.format)
def find_all_files(min_pixels, origin_folder, target_folder):
    """Walk *origin_folder* recursively and flatten usable images into
    *target_folder*.

    Every non-hidden file encountered is assumed to be an image file; no
    type check is performed, so non-image files will raise errors.  Files
    whose width and height are both at least *min_pixels* are resized and
    cropped to a min_pixels x min_pixels square and saved as JPEG into
    *target_folder* (flat structure, original file name kept).  Smaller
    files are skipped.

    Args:
        min_pixels (int): side length, in pixels, of the square output
        origin_folder (string): folder searched recursively for images
        target_folder (string): destination folder for the resized images

    Returns:
        root (string): the last directory visited by the walk
    """
    for root, dirs, files in os.walk(origin_folder):
        visible_files = [name for name in files if name[0] != '.']
        for image_name in visible_files:
            with Image.open(root + "/" + image_name) as candidate:
                width, height = candidate.size
                if width >= min_pixels and height >= min_pixels:
                    square = resizeimage.resize_cover(candidate, [min_pixels, min_pixels])
                    square.convert('RGB').save(target_folder + image_name, 'JPEG')
    return root
# Script entry point: flatten the SUN images into 300x300 squares.
# NOTE(review): find_all_files returns a single directory path, so the
# plural name `roots` is misleading — verify intent before relying on it.
if __name__ == "__main__":
    roots= find_all_files(300,SUN_images_dir, resized_address)
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
3158,
220,
642,
1596,
25,
486,
25,
1485,
2864,
198,
198,
31,
9800,
25,
49612,
198,
37811,
198,
198,
37811,
198,
1212,
2163,
481,
1011,
26... | 2.459476 | 1,678 |
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import Dataset
from Hyperparameters import sep, unet_width
from Util import get_spectrogram
class UNetDataset(Dataset):
    """
    Dataset pairing autoencoder outputs with their clean ("ground truth")
    mel spectrograms, used to train the post-processing U-Net.
    """

    def __init__(self, root_dir, gt_dir, transform=None):
        """
        Initialise the dataset.

        :param root_dir: The path to the data points (autoencoder outputs)
        :param gt_dir: The path to the ground truth versions of the data points
        :param transform: Transformation to apply to the data points
        """
        self.root_dir = root_dir
        self.transform = transform
        # The input for this dataset is the output from the autoencoder
        input_mel_npys = Path(root_dir).rglob("*_output.npy")
        # The U-Net is trained to minimise the error between the autoencoder
        # output and the clean ("ground truth") versions of the synthesised files
        gt_mel_npys = Path(gt_dir).rglob("*_synth_mel.npy")
        self.input_mel_filenames = [str(npy) for npy in input_mel_npys]
        self.gt_mel_filenames = [str(npy) for npy in gt_mel_npys]
        # Map each input file to its ground-truth counterpart by matching
        # the basename stem, so the ordering of the two globs is irrelevant
        self.input_to_gt = {}
        len_suffix = len("_output.npy")
        for input_path in self.input_mel_filenames:
            input_filename = input_path.split(sep)[-1][:-len_suffix]
            for gt_path in self.gt_mel_filenames:
                if input_filename in gt_path:
                    self.input_to_gt[input_path] = gt_path
        self.length = len(self.input_mel_filenames)

    def __len__(self):
        """Number of data points.

        Required by the torch Dataset protocol (e.g. for DataLoader); the
        original class computed self.length but never exposed it.
        """
        return self.length
class ToTensor(object):
"""
Transformation used to convert ndarrays in sample to PyTorch tensors.
"""
| [
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
198,
198,
6738,
15079,
17143,
7307,
1330,
41767,
11,
555,
316,
62,
10394,
198,
6738... | 2.427984 | 729 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Gilbert Voican, Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import (HTML, HBox, VBox, Checkbox, Layout, widgets)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
770,
2393,
318,
636,
286,
327,
65,
44,
357,
5450,
1378,
12567,
13,
785,
14,
721,
12,
73,
6015,
14,
66... | 2.771654 | 127 |
{
"targets": [{
"target_name": "catch.cc",
"type": "none",
"direct_dependent_settings": {
"include_dirs": [
"../single_include"
],
},
"sources": [
"../include/catch_with_main.hpp"
],
}]
}
| [
90,
198,
220,
366,
83,
853,
1039,
1298,
685,
90,
198,
220,
220,
220,
366,
16793,
62,
3672,
1298,
366,
40198,
13,
535,
1600,
198,
220,
220,
220,
366,
4906,
1298,
366,
23108,
1600,
198,
220,
220,
220,
366,
12942,
62,
21186,
62,
3369... | 1.936 | 125 |
import numpy
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from keras.preprocessing.text import Tokenizer
from keras.optimizers import SGD
# Hyper-parameters for the cross-validated baseline run below.  They are
# named once so the reported results line cannot drift out of sync with the
# actual run (the old code printed batch_size=25 while running with 250).
EPOCHS = 500
BATCH_SIZE = 250
# NOTE(review): the unused `opt = SGD(lr=100)` optimizer was removed; the
# model is compiled with 'adagrad' inside create_baseline below.

# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)

# load dataset: one row per match, 'score' holds the binary outcome label
my_data = pd.read_csv("NoBlanksAndScoreAsDummy.csv")
score = my_data["score"]
my_data = my_data.drop("score", axis=1)
# one-hot encode every pick/ban column (T1P1..T1P18, T1G, T2P1..T2P18, T2G)
my_dummies = pd.get_dummies(my_data, prefix=['T1P1', 'T1P2', 'T1P3', 'T1P4', 'T1P5', 'T1P6', 'T1P7', 'T1P8', 'T1P9', 'T1P10', 'T1P11', 'T1P12', 'T1P13', 'T1P14', 'T1P15', 'T1P16', 'T1P17', 'T1P18', 'T1G', 'T2P1', 'T2P2', 'T2P3', 'T2P4', 'T2P5', 'T2P6', 'T2P7', 'T2P8', 'T2P9', 'T2P10', 'T2P11', 'T2P12', 'T2P13', 'T2P14', 'T2P15', 'T2P16', 'T2P17', 'T2P18', 'T2G'])
my_dummies["result"] = score
print(my_dummies)

dataset = my_dummies.values
# first 12993 columns are the one-hot features, the last column is the label
X = dataset[:, 0:12993]
Y = dataset[:, 12993]

# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)


def create_baseline():
    """Build the 3-hidden-layer baseline binary classifier.

    NOTE(review): this function was referenced by KerasClassifier but never
    defined (NameError at runtime).  Reconstructed from the commented-out
    3-hidden-layer variant below, with the input width corrected to match
    the 12993 one-hot feature columns of X — confirm intended architecture.

    :returns: a compiled keras Sequential model with a single sigmoid output
    """
    model = Sequential()
    model.add(Dense(38, input_dim=12993, kernel_initializer='normal', activation='relu'))
    model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
    model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
    return model


print("Number,results,epochs,batch_size,number of layers")
# evaluate the model with 10-fold stratified cross-validation
estimator = KerasClassifier(build_fn=create_baseline, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=0)
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# report mean/std accuracy together with the hyper-parameters actually used
print("1, %.2f%% , (%.2f%%) ,%d,%d,3" % (results.mean()*100, results.std()*100, EPOCHS, BATCH_SIZE))
# # second model
# def create_baseline_one():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_one, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,1" % (results.mean()*100, results.std()*100))
#
#
#
# # third model
# def create_baseline_two():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_two, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,2" % (results.mean()*100, results.std()*100))
#
#
# # fourth model
# def create_baseline_three():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_three, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,4" % (results.mean()*100, results.std()*100))
#
#
#
#
# # fifth model
# def create_baseline_four():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_four, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,5" % (results.mean()*100, results.std()*100))
#
#
#
#
# # sixth model
# def create_baseline_five():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_five, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,6" % (results.mean()*100, results.std()*100))
#
#
#
#
#
# # seventh model
# def create_baseline_six():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_six, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,8" % (results.mean()*100, results.std()*100))
#
#
#
# # eighth model
# def create_baseline_seven():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_seven, epochs=500, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,500,25,10" % (results.mean()*100, results.std()*100))
#
#
#
# #
# #
# #
# #
# #
# #
# #
# # These next models are the same but they have more 1000 epochs
# #
# #
# #
# #
# #
# #
# #
# #
# #
#
# def create_baseline():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,3" % (results.mean()*100, results.std()*100))
#
#
#
# # second model
# def create_baseline_one():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_one, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,1" % (results.mean()*100, results.std()*100))
#
#
#
# # third model
# def create_baseline_two():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_two, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,2" % (results.mean()*100, results.std()*100))
#
#
# # fourth model
# def create_baseline_three():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_three, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,4" % (results.mean()*100, results.std()*100))
#
#
#
#
# # fifth model
# def create_baseline_four():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_four, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,5" % (results.mean()*100, results.std()*100))
#
#
#
#
# # sixth model
# def create_baseline_five():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_five, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,6" % (results.mean()*100, results.std()*100))
#
#
#
#
#
# # seventh model
# def create_baseline_six():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_six, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,8" % (results.mean()*100, results.std()*100))
#
#
#
# # eighth model
# def create_baseline_seven():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_seven, epochs=1000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,1000,25,10" % (results.mean()*100, results.std()*100))
#
#
# #
# #
# #
# #
# #
# # these next ones have 2000 epochs
# #
# #
# #
# #
# #
# #
# #
# #
# #
# #
#
# def create_baseline():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,3" % (results.mean()*100, results.std()*100))
#
#
#
# # second model
# def create_baseline_one():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_one, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,1" % (results.mean()*100, results.std()*100))
#
#
#
# # third model
# def create_baseline_two():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_two, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,2" % (results.mean()*100, results.std()*100))
#
#
# # fourth model
# def create_baseline_three():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_three, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,4" % (results.mean()*100, results.std()*100))
#
#
#
#
# # fifth model
# def create_baseline_four():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_four, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,5" % (results.mean()*100, results.std()*100))
#
#
#
#
# # sixth model
# def create_baseline_five():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_five, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,6" % (results.mean()*100, results.std()*100))
#
#
#
#
#
# # seventh model
# def create_baseline_six():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_six, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,8" % (results.mean()*100, results.std()*100))
#
#
#
# # eighth model
# def create_baseline_seven():
# # create model
# model = Sequential()
# model.add(Dense(38, input_dim=38, kernel_initializer='normal', activation='relu'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(50, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# # Compile model
# model.compile(loss='binary_crossentropy', optimizer='adagrad', metrics=['accuracy'])
# return model
#
# # evaluate model with standardized dataset
# estimator = KerasClassifier(build_fn=create_baseline_seven, epochs=2000, batch_size=25, verbose=0)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# print("1, %.2f%% , (%.2f%%) ,2000,25,10" % (results.mean()*100, results.std()*100))
| [
11748,
299,
32152,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
198,
6738,
41927,
292,
13,
29988,
11799,
13,
36216,
15813,
62,
35720,
... | 2.646545 | 10,420 |
from .stylegan_encoder_network import *
from .encoder import *
from .encoding_dataset import *
from .tensorboard import * | [
6738,
764,
7635,
1030,
62,
12685,
12342,
62,
27349,
1330,
1635,
198,
6738,
764,
12685,
12342,
1330,
1635,
198,
6738,
764,
12685,
7656,
62,
19608,
292,
316,
1330,
1635,
198,
6738,
764,
83,
22854,
3526,
1330,
1635
] | 3.27027 | 37 |
# -*- coding: utf-8 -*-
"""
s1acker.s1acker
~~~~~~~~~~~~~~
This module provides functions that deal with s1 search interface.
:copyright: (c) 2017 by quinoa42.
:license: MIT, see LICENSE for more details.
"""
import logging
import os.path as op
import re
import time
from itertools import chain
from os import makedirs
import requests
from bs4 import BeautifulSoup
flaten = chain.from_iterable
_SEARCH_URL = "http://bbs.saraba1st.com/2b/search.php?mod=forum"
_SEARCH_ADV_URL = "http://bbs.saraba1st.com/2b/search.php?mod=forum&adv=yes"
_TOPIC_URL = "http://bbs.saraba1st.com/2b/thread-{0}-1-1.html"
_HOST = "bbs.saraba1st.com"
_USER_AGENT = (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:54.0) "
"Gecko/20100101 Firefox/54.0"
)
_TIME_OUT = 10
_SLEEP_TIME = 1
logger = logging.getLogger(__name__)
def _wait():
    """Throttle helper: pause for the configured interval between requests."""
    delay = _SLEEP_TIME
    logger.info("wait for %s seconds.", delay)
    time.sleep(delay)
class S1ack(object):
    """Search client for the saraba1st ("S1") forum.

    Posts a query to the forum's advanced-search form, then walks every
    result page -> topic -> topic page and collects the image URLs found
    in the posts.
    """

    def __init__(self, srchtxt, srchuname=None):
        """Construct a S1ack object with given srchtxt and optional srchuname.

        :srchtxt: str that you want to search
        :srchuname: optional str that limits search topics to posts by this
            user
        """
        self._session = requests.Session()
        self._session.headers.update({
            'User-Agent': _USER_AGENT,
            'Host': _HOST
        })
        # Load the advanced-search form once to obtain the anti-CSRF
        # "formhash" token the forum requires on every search POST.
        r = self._session.get(
            _SEARCH_ADV_URL,
            timeout=_TIME_OUT,
            headers={'Referer': _SEARCH_URL}
        )
        # fail fast on HTTP errors, consistent with the other requests below
        r.raise_for_status()
        soup = BeautifulSoup(r.text, 'lxml')
        formhash = soup.find("input", attrs={"name": "formhash"})['value']
        self._input_data = {
            "ascdesc": "desc",
            "before": "",
            "formhash": formhash,
            "orderby": "lastpost",
            "searchsubmit": "yes",
            "srchfid[]": "all",
            "srchfilter": "all",
            "srchfrom": 0,
            "srchtxt": srchtxt,
            "srchuname": srchuname if srchuname is not None else ""
        }
        logger.debug("input data: %r", self._input_data)

    def search(self):
        """Return the search result.

        :returns: list of Img object
        """
        # pipeline: search-result pages -> first page of every matching
        # topic -> every page of every topic -> every image on every page
        first_pages = flaten(
            map(self._get_first_page, self._get_search_urls()))
        topic_pages = flaten(map(self._get_pages, first_pages))
        imgs = flaten(map(self._get_imgs, topic_pages))
        # NOTE(review): Img defines no __eq__/__hash__, so this set() dedups
        # by object identity only; real per-page dedup happens in _get_imgs.
        result = list(set(imgs))
        logger.debug("final result: %r", result)
        logger.info("find %d pictures", len(result))
        return result

    def _get_search_urls(self):
        """Return the urls of the pages of the searching result.

        :returns: a list of str, where one str represents the url of one page
        """
        logger.info("trying to search")
        r = self._session.post(
            _SEARCH_URL,
            timeout=_TIME_OUT,
            headers={"Referer": _SEARCH_ADV_URL},
            data=self._input_data
        )
        r.raise_for_status()
        # the "pg" div is the pager; its second-to-last <a> holds the number
        # of the last result page (absent when there is a single page)
        result = BeautifulSoup(r.text, 'lxml').find("div", class_="pg")
        num = int(result.find_all("a")[-2].string) if result else 1
        # strip the trailing &kw=... so the url can serve as a page template
        url = re.sub(r"&kw=.+$", "", r.url, 1)
        urls = [r.url] + [url + "&page=" + str(i) for i in range(2, num + 1)]
        logger.debug("search result: %r", urls)
        return urls

    def _get_first_page(self, url):
        """Return the first pages of the topics in the given search result page.

        :url: str, the url of a search result page
        :returns: list of str, the urls of the topics' first pages
        """
        _wait()
        logger.info("trying to get the topics in %s", url)
        r = self._session.get(url, timeout=_TIME_OUT)
        r.raise_for_status()
        s = BeautifulSoup(r.text, "lxml")
        # each result is an <h3 class="xs3"> whose link carries tid=<topic id>
        topics = [
            re.findall(r"tid=([0-9]{1,7})", topic.a['href'])[0]
            for topic in s.find_all("h3", class_="xs3")
        ]
        urls = [_TOPIC_URL.format(topic) for topic in topics]
        logger.debug("topics in %s: %r", url, urls)
        return urls

    def _get_pages(self, url):
        """Return the urls of all pages of a topic.

        :url: str, the url of a topic's first page
        :returns: list of str, the urls of all of the topic's pages
        """
        _wait()
        logger.info("trying to get the pages of %s", url)
        # timeout added for consistency with the other requests in this class
        r = self._session.get(url, timeout=_TIME_OUT)
        r.raise_for_status()
        soup = BeautifulSoup(r.text, 'lxml')
        multipage = soup.find('div', class_="pg")
        num = int(multipage.find_all("a")[-2].string) if multipage else 1
        # rewrite the "<page>-1.html" tail of the topic url for each page
        urls = [
            re.sub(r"[0-9]{1,3}-1.html", str(page) + "-1.html", url)
            for page in range(1, num + 1)
        ]
        logger.debug("all pages of %s: %r", url, urls)
        return urls

    def _get_imgs(self, url):
        """Get list of imgs from the url.

        :url: str, represent the url wish to explore
        :returns: a list of Img object, represent the search result
        """
        _wait()
        logger.info("trying to get imgs on the page %s", url)
        # timeout added for consistency with the other requests in this class
        r = self._session.get(url, timeout=_TIME_OUT)
        r.raise_for_status()
        soup = BeautifulSoup(r.text, 'lxml')
        # candidate URLs from every <img> inside a post body: attachments
        # carry the real URL in 'file', inline images in 'src'
        candidates = [
            img.attrs.get('file') or img.attrs.get('src')
            for post in
            soup.find_all("td", id=re.compile(r"postmessage_[0-9]{1,8}"))
            for img in post.find_all("img")
        ]
        # keep png/jpg only; drop forum smilies and photobucket placeholders
        imgs = [
            u
            for u in candidates
            if not re.match("http://static.saraba1st.com/image/smiley/", u)
            and re.search(r"\.(png|jpg)$", u)
            and not re.search(r"\.photobucket\.", u)
        ]
        result = [
            Img(img, str(index), url) for index, img in enumerate(set(imgs))
        ]
        logger.debug("Imgs in %s: %r", url, result)
        return result
class Img(object):
    """A single downloadable image discovered in a forum topic.

    Holds the image url, the base file name to save under, and the topic
    url it was found on (sent as the HTTP Referer when downloading).
    """

    def __init__(self, url, name, origin=""):
        """Construct an Img object with url, name, and optional origin.

        :url: str, the url of the Img; must end in ".jpg" or ".png"
        :name: str, the name given to this Img when downloading (no extension)
        :origin: str, the origin of the Img, i.e. the url of the topic
        """
        self._url = url
        self._origin = origin
        # Numeric topic id parsed from the origin url; it groups downloads
        # into one sub-directory per topic. Empty when no origin is known.
        self._topic = re.findall(r"thread-([0-9]{1,9})",
                                 origin)[0] if origin else ""
        self._name = name
        # Keep the original file extension (raw string avoids the invalid
        # "\." escape warning of the original pattern).
        self._fmt = re.findall(r"(\.jpg|\.png)$", url)[0]

    def download(self, dest):
        """Download this Img to the dest directory.

        Network failures are logged and swallowed so that one bad image
        does not abort a batch download.

        :dest: str, root directory the topic sub-directory is created in
        :returns: None
        """
        _wait()
        logger.info("trying to get img at %s", self._url)
        try:
            img = requests.get(
                self._url,
                headers={"User-Agent": _USER_AGENT,
                         "Referer": self._origin},
                timeout=_TIME_OUT
            )
            img.raise_for_status()
        except Exception as e:
            logger.error("Failed when trying to get %s : %s", self._url, e)
        else:
            dir_path = op.join(dest, self._topic)
            if not op.exists(dir_path):
                logger.info("%s not exist, making the directory", dir_path)
                try:
                    makedirs(dir_path)
                except OSError:
                    # A concurrent download may create the directory between
                    # the exists() check and makedirs(); only re-raise when
                    # the directory is genuinely still missing.
                    if not op.isdir(dir_path):
                        raise
            path = op.join(dir_path, self._name + self._fmt)
            logger.info("downloading img to %s", path)
            with open(path, 'wb') as f:
                f.write(img.content)
__str__ = __unicode__ = __repr__
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
264,
16,
10735,
13,
82,
16,
10735,
198,
220,
220,
220,
220,
15116,
8728,
4907,
628,
220,
220,
220,
770,
8265,
3769,
5499,
326,
1730,
351,
... | 1.98672 | 3,991 |
#!/usr/bin/env python
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
"""
A simple program that parses untranslated.ts files
current directory *must* be the top level qtcreator source directory
Usage:
scripts/uichanges.py old_untranslated.ts qtcreator_untranslated.ts
IN TOP LEVEL QTC SOURCE DIRECTORY!
"""
import os, sys, string
import subprocess
from xml.sax import saxutils, handler, make_parser
baseDir = os.getcwd()
transDir = os.path.join(baseDir, 'share/qtcreator/translations')
unchangedContexts = 0
# --- The ContentHandler
# Generate a tree consisting of hash of context names.
# Each context name value contains a hash of messages
# Each message value contains a the file name (or '<unknown>')
# ContentHandler methods
# --- The main program
def _diff_and_count(context_names, old_tree, new_tree):
    """Print the diff report for every context name; return how many were
    unchanged (i.e. produced an empty report).

    Contexts missing from one tree are diffed against an empty message dict,
    which covers removed ({} on the new side) and added ({} on the old side)
    contexts as well as retained ones.
    """
    unchanged = 0
    for c in sorted(context_names):
        report = diffContext(c, old_tree.get(c, {}), new_tree.get(c, {}))
        if report:
            print(report.encode('utf-8'))
        else:
            unchanged += 1
    return unchanged


# Parse both .ts files into {context: {message: filename}} trees.
oldGenerator = Generator()
oldParser = make_parser()
oldParser.setContentHandler(oldGenerator)
oldParser.parse(sys.argv[1])
oldTree = oldGenerator.tree()
newGenerator = Generator()
newParser = make_parser()
newParser.setContentHandler(newGenerator)
newParser.parse(sys.argv[2])
newTree = newGenerator.tree()
oldContextSet = set(oldTree.keys())
newContextSet = set(newTree.keys())
# Report removed, added, then retained contexts (same order as before the
# three loops were folded into _diff_and_count).
unchangedContexts += _diff_and_count(
    oldContextSet.difference(newContextSet), oldTree, newTree)
unchangedContexts += _diff_and_count(
    newContextSet.difference(oldContextSet), oldTree, newTree)
unchangedContexts += _diff_and_count(
    newContextSet.intersection(oldContextSet), oldTree, newTree)
print(u'{0} unchanged contexts'.format(unchangedContexts))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
29113,
29113,
7804,
4242,
198,
2,
198,
2,
15069,
357,
34,
8,
1584,
383,
33734,
5834,
12052,
13,
198,
2,
14039,
25,
3740,
1378,
2503,
13,
39568,
13,
952,
14,
677,
26426,
14,
19... | 3.337868 | 882 |
# -*- coding: utf-8 -*-
# flake8: noqa: E501
from __future__ import unicode_literals
from datetime import datetime
from kinopoisk.movie import Movie
from .base import BaseTest
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
781,
539,
23,
25,
645,
20402,
25,
412,
33548,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
19... | 2.887097 | 62 |
import os
import sys
from argparse import ArgumentParser
from azureml.core import Dataset, Datastore, Workspace
from azureml.data.dataset_factory import DataType
from azureml.datadrift import DataDriftDetector
# Column in the target dataset that timestamps each row; presumably used when
# registering the dataset for DataDriftDetector -- confirm against main().
target_dataset_timestamp_column = "datetime"
# Local layout of the job inputs: schema definition and data directory.
input_schema_dir = os.path.join("input", "schema")
data_dir = "data"
input_schema_file = "schema.csv"
# Script entry point; main() is defined elsewhere in this file.
if __name__ == "__main__":
    main()
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
6738,
35560,
495,
4029,
13,
7295,
1330,
16092,
292,
316,
11,
16092,
459,
382,
11,
10933,
10223,
198,
6738,
35560,
495,
4029,
13,
7890,
13,
19608,
292... | 2.823944 | 142 |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import os
from plot_constants import *
plt.rcParams.update(params)
plt.rc('font', family='serif')
if __name__ == "__main__":
    # Four panels in one row: [0] EQ vs noise ratio, [1] SS vs noise ratio,
    # [2] EQ vs calibration-set size, [3] SS vs calibration-set size.
    fig, axs = plt.subplots(1, 4)
    fig.set_size_inches(28, 6)
    from params_exp_noise import *
    # Algorithms to compare; "css" is appended last (after the UMB variants)
    # so that its legend handle can be moved to the front while plotting.
    algorithms = ["ucss", "iso_reg_ss", "platt_scal_ss"]
    # Flags which algorithms carry a guarantee; only those are drawn in the
    # shortlist-size (SS) panels below.
    algorithm_df_guarantee = {
        "css": True,
        "ucss": False,
        "iso_reg_ss": False,
        "platt_scal_ss": False
    }
    algorithm_labels = {
        "css": "CSS",
        "ucss": "Uncalibrated",
        "iso_reg_ss": "Isotonic",
        "platt_scal_ss": "Platt"
    }
    algorithm_colors = {
        "css": "tab:blue",
        "ucss": "tab:red",
        "iso_reg_ss": "tab:purple",
        "platt_scal_ss": "tab:cyan"
    }
    # Marker values are matplotlib marker specs: style strings or integer
    # marker codes.
    algorithm_markers = {
        "css": "s",
        "ucss": 9,
        "iso_reg_ss": 10,
        "platt_scal_ss": 11
    }
    # Register one UMB variant per bin count from the experiment parameters.
    for umb_num_bin in umb_num_bins:
        algorithms.append("umb_" + str(umb_num_bin))
        algorithm_labels["umb_" + str(umb_num_bin)] = "UMB {} Bins".format(umb_num_bin)
        algorithm_colors["umb_" + str(umb_num_bin)] = umb_colors[umb_num_bin]
        algorithm_df_guarantee["umb_" + str(umb_num_bin)] = True
        algorithm_markers["umb_" + str(umb_num_bin)] = umb_markers[umb_num_bin]
    algorithms.append("css")
    metrics = ["num_selected", "num_qualified", "num_unqualified", "constraint_satisfied"]
    # results[noise_ratio][algorithm][metric]["values"] accumulates one entry
    # per run; "mean"/"std" summaries are filled in afterwards.
    results = {}
    for noise_ratio in noise_ratios:
        results[noise_ratio] = {}
        for algorithm in algorithms:
            results[noise_ratio][algorithm] = {}
            for metric in metrics:
                results[noise_ratio][algorithm][metric] = {}
                results[noise_ratio][algorithm][metric]["values"] = []
    # Load each run's pickled result file and accumulate the raw values.
    for noise_ratio in noise_ratios:
        for run in runs:
            exp_identity_string = "_".join([str(n_train), str(noise_ratio), str(n_cal), lbd, str(run)])
            for algorithm in algorithms:
                result_path = os.path.join(exp_dir, exp_identity_string + "_{}_result.pkl".format(algorithm))
                collect_results_normal_exp(result_path, noise_ratio, algorithm, results)
    # Summarise raw values into mean and sample standard deviation (ddof=1).
    for noise_ratio in noise_ratios:
        for algorithm in algorithms:
            for metric in metrics:
                results[noise_ratio][algorithm][metric]["mean"] = np.mean(results[noise_ratio][algorithm][metric]["values"])
                results[noise_ratio][algorithm][metric]["std"] = np.std(results[noise_ratio][algorithm][metric]["values"],
                                                                        ddof=1)
    # plotting whether constraint is satisfied
    handles = []
    for algorithm in algorithms:
        mean_algorithm = np.array([results[noise_ratio][algorithm]["constraint_satisfied"]["mean"]
                                   for noise_ratio in noise_ratios])
        # Standard error of the mean over the n_runs repetitions.
        std_err_algorithm = np.array(
            [results[noise_ratio][algorithm]["constraint_satisfied"]["std"] / np.sqrt(n_runs) for noise_ratio in noise_ratios])
        line = axs[0].plot(noise_ratios_label, mean_algorithm, color=algorithm_colors[algorithm],
                           marker=algorithm_markers[algorithm], linewidth=line_width,
                           label=algorithm_labels[algorithm])
        # Keep the CSS handle first in the shared legend ordering.
        if algorithm == "css":
            handles = [line[0]] + handles
        else:
            handles.append(line[0])
        axs[0].errorbar(noise_ratios_label, mean_algorithm, std_err_algorithm, color=algorithm_colors[algorithm],
                        marker=algorithm_markers[algorithm], linewidth=line_width,
                        label=algorithm_labels[algorithm], capthick=capthick)
    axs[0].yaxis.set_major_locator(ticker.MultipleLocator(0.5))
    axs[0].set_xlabel("$r_{\mathrm{noise}}$", fontsize=font_size)
    axs[0].set_ylabel("EQ", fontsize=font_size)
    # plotting the number of selected applicants
    for algorithm in algorithms:
        # Only algorithms with a guarantee appear in the SS panel.
        if not algorithm_df_guarantee[algorithm]:
            continue
        mean_algorithm = np.array([results[noise_ratio][algorithm]["num_selected"]["mean"] for noise_ratio
                                   in noise_ratios])
        std_algorithm = np.array([results[noise_ratio][algorithm]["num_selected"]["std"] for noise_ratio
                                  in noise_ratios])
        axs[1].plot(noise_ratios_label, mean_algorithm, linewidth=line_width, color=algorithm_colors[algorithm], marker=algorithm_markers[algorithm]
                    , label=algorithm_labels[algorithm])
        # Shaded band: +/- one standard deviation across runs.
        axs[1].fill_between(noise_ratios_label, mean_algorithm - std_algorithm,
                            mean_algorithm + std_algorithm, alpha=transparency,
                            color=algorithm_colors[algorithm])
    axs[1].set_xlabel("$r_{\mathrm{noise}}$", fontsize=font_size)
    axs[1].set_ylabel("SS", fontsize=font_size)
    axs[1].set_ylim(top=35)
    axs[1].set_ylim(bottom=5)
    # Second experiment: sweep the calibration-set size n_cal.
    from params_exp_cal_size import *
    results = {}
    for n_cal in n_cals:
        results[n_cal] = {}
        for algorithm in algorithms:
            results[n_cal][algorithm] = {}
            for metric in metrics:
                results[n_cal][algorithm][metric] = {}
                results[n_cal][algorithm][metric]["values"] = []
    for n_cal in n_cals:
        for run in runs:
            # NOTE(review): noise_ratio here is either a constant re-imported
            # from params_exp_cal_size or the leftover loop variable from the
            # sweep above -- confirm which is intended.
            exp_identity_string = "_".join([str(n_train), str(noise_ratio), str(n_cal), lbd, str(run)])
            for algorithm in algorithms:
                result_path = os.path.join(exp_dir, exp_identity_string + "_{}_result.pkl".format(algorithm))
                collect_results_normal_exp(result_path, n_cal, algorithm, results)
    for n_cal in n_cals:
        for algorithm in algorithms:
            for metric in metrics:
                results[n_cal][algorithm][metric]["mean"] = np.mean(results[n_cal][algorithm][metric]["values"])
                results[n_cal][algorithm][metric]["std"] = np.std(results[n_cal][algorithm][metric]["values"],
                                                                  ddof=1)
    # plotting whether constraint is satisfied
    for algorithm in algorithms:
        # if algorithm_df_guarantee[algorithm] and algorithm != "css":
        #     continue
        mean_algorithm = np.array([results[n_cal][algorithm]["constraint_satisfied"]["mean"]
                                   for n_cal in n_cals])
        std_err_algorithm = np.array(
            [results[n_cal][algorithm]["constraint_satisfied"]["std"] / np.sqrt(n_runs) for n_cal in n_cals])
        axs[2].errorbar(n_cals_label, mean_algorithm, std_err_algorithm, color=algorithm_colors[algorithm],
                        linewidth=line_width, label=algorithm_labels[algorithm], marker=algorithm_markers[algorithm],
                        capthick=capthick)
    axs[2].yaxis.set_major_locator(ticker.MultipleLocator(0.5))
    axs[2].set_xlabel("$n$", fontsize=font_size)
    axs[2].set_ylabel("EQ", fontsize=font_size)
    # plotting the number of selected applicants
    for algorithm in algorithms:
        if not algorithm_df_guarantee[algorithm]:
            continue
        mean_algorithm = np.array([results[n_cal][algorithm]["num_selected"]["mean"] for n_cal
                                   in n_cals])
        std_algorithm = np.array([results[n_cal][algorithm]["num_selected"]["std"] for n_cal
                                  in n_cals])
        axs[3].plot(n_cals_label, mean_algorithm, linewidth=line_width, color=algorithm_colors[algorithm], marker=algorithm_markers[algorithm]
                    , label=algorithm_labels[algorithm])
        axs[3].fill_between(n_cals_label, mean_algorithm - std_algorithm,
                            mean_algorithm + std_algorithm, alpha=transparency,
                            color=algorithm_colors[algorithm])
    axs[3].set_xlabel("$n$", fontsize=font_size)
    axs[3].set_ylabel("SS", fontsize=font_size)
    axs[3].set_ylim(top=35)
    axs[3].set_ylim(bottom=5)
    # One shared legend above all four panels; tight_layout leaves headroom.
    fig.legend(handles=handles, bbox_to_anchor=(0.5, 1.02), loc="upper center", ncol=5)
    plt.tight_layout(rect=[0, 0, 1, 0.78])
    fig.savefig("./plots/exp_normal.pdf", format="pdf")
| [
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
83,
15799,
355,
4378,
263,
198,
11748,
299,
32152,
355,
45... | 2.035994 | 4,084 |
from typing import List, Type, Union
from pydantic import BaseModel
from tortoise import Model, fields
from fast_tmp.utils.password import make_password, verify_password
| [
6738,
19720,
1330,
7343,
11,
5994,
11,
4479,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
198,
6738,
7619,
25678,
1330,
9104,
11,
7032,
198,
198,
6738,
3049,
62,
22065,
13,
26791,
13,
28712,
1330,
787,
62,
28712,
11,
11767,
62,... | 3.826087 | 46 |
import numpy as np
import matplotlib as mpl
# import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe
from brightness_temperature import *
from plot_searchlight import get_intensity, font_size, iunits
#plt.rc('text.latex', preamble=r'\usepackage{cmbright}')
#plt.rc('text', usetex=False)
# Rest wavelength of the modelled line in nm (value matches Lyman-alpha).
lambda0 = 121.56841096386111 # nm
# Non-uniform 50-point wavelength grid, densest around line centre.
wavelength = np.array([120.85647513019845, 121.04863120292787, 121.18861407155109,
                       121.29060823835265, 121.36494181786958, 121.41913498583673,
                       121.45866338283498, 121.48751396885412, 121.50858975582949,
                       121.52400450419977, 121.53529729933265, 121.54358879034517,
                       121.54969495175679, 121.55420991638432, 121.55756628818231,
                       121.56007905763141, 121.56197757770408, 121.5634288464184,
                       121.56455445948667, 121.56544295399095, 121.56615879614279,
                       121.56674892551068, 121.56724752004678, 121.56767946562972,
                       121.56806288233183, 121.56841096386111, 121.56875904539042,
                       121.56914246209253, 121.56957440767549, 121.57007300221157,
                       121.57066313157947, 121.57137897373131, 121.5722674682356,
                       121.57339308130386, 121.57484435001817, 121.57674287009084,
                       121.57925563953995, 121.58261201133794, 121.58712697596546,
                       121.5932331373771, 121.6015246283896, 121.6128174235225,
                       121.62823217189276, 121.64930795886815, 121.67815854488727,
                       121.71768694188553, 121.77188010985269, 121.84621368936962,
                       121.94820785617118, 122.0881907247944, 122.2803467975238]) # nm
# Spectral indices: grid sample closest to lambda0 is the line centre, the
# wings sit 11 samples to either side, and the reddest sample is used as
# the continuum point.
center = np.argmin(np.abs(wavelength - lambda0))
blue_wing = center - 11
red_wing = center + 11
continuum = np.argmax(wavelength)
plt.rcParams['text.usetex'] = True
# Input directory for the precomputed intensity arrays.
PATH = "./linedata/"
# Shared colour-scale limits and colormaps for the image panels.
CMAX = 100
CMIN = 0
CMAX_wing = 80
CMIN_wing = 0
CMAP = "gist_gray_r"
CMAP_CONT = "gist_gray_r"
# Colorbar label padding used by several panels below.
lpad = 8
if __name__ == "__main__":
intensity_half = get_intensity("half_res_ul7n12_disk_centre_1.npy", PATH)
intensity_third = get_intensity("regular_third_disk_centre.npy", PATH)
intensity_quarter = get_intensity("regular_quarter_disk_centre.npy", PATH)
intensity_cont_ext_5e5 = get_intensity("voronoi_5e5_disk_centre.npy", PATH)
intensity_cont_ext_5e5_1dot5 = get_intensity("voronoi_ul7n12_5e5_disk_centre_1dot5.npy", PATH)
intensity_cont_ext_1e6 = get_intensity("voronoi_ul7n12_1e6_disk_centre_1.npy", PATH)
intensity_cont_ext_1e6_1dot5 = get_intensity("voronoi_ul7n12_1e6_disk_centre_1dot5.npy", PATH)
intensity_cont_ext_2e6 = get_intensity("voronoi_ul7n12_2e6_disk_centre_1.npy", PATH)
intensity_cont_ext_3e6 = get_intensity("voronoi_ul7n12_3e6_disk_centre_1.npy", PATH)
intensity_tot_ext_5e5 = get_intensity("total_ext_5e5_disk_centre_1.npy", PATH)
intensity_tot_ext_1e6 = get_intensity("total_ext_1e6_disk_centre_1.npy", PATH)
intensity_tot_ext_2e6 = get_intensity("total_ext_2e6_disk_centre_1.npy", PATH)
intensity_tot_ext_3e6 = get_intensity("total_ext_3e6_disk_centre_1.npy", PATH)
intensity_destruction_5e5 = get_intensity("destruction_5e5_disk_centre_1.npy", PATH)
intensity_destruction_1e6 = get_intensity("destruction_1e6_disk_centre_1.npy", PATH)
intensity_density_5e5 = get_intensity("density_5e5_disk_centre_1.npy", PATH)
intensity_ionised_5e5 = get_intensity("ionised_hydrogen_5e5_disk_centre_1.npy", PATH)
intensity_ionised_1e6 = get_intensity("ionised_hydrogen_1e6_disk_centre_1.npy", PATH)
intensity_ionised_2e6 = get_intensity("ionised_hydrogen_2e6_disk_centre_1.npy", PATH)
intensity_uniform_1e6 = get_intensity("uniform_1e6_disk_centre_1.npy", PATH)
convergence_quarter = np.load(PATH+"regular_ul7n12_quarter.npy")
convergence_half = np.load(PATH+"regular_ul7n12_half.npy")
convergence_third = np.load(PATH+"regular_ul7n12_third.npy")
convergence_cont_5e5 = np.load("./convergence/voronoi_ul7n12_5e5_convergence.npy")
convergence_cont_1e6 = np.load("./convergence/voronoi_ul7n12_1e6_convergence.npy")
convergence_cont_2e6 = np.load("./convergence/voronoi_ul7n12_2e6_convergence.npy")
convergence_cont_3e6 = np.load("./convergence/voronoi_ul7n12_3e6_convergence.npy")
convergence_ionised_5e5 = np.load("./convergence/ionised_hydrogen_5e5_convergence.npy")
convergence_ionised_1e6 = np.load("./convergence/ionised_hydrogen_1e6_convergence.npy")
convergence_ionised_2e6 = np.load("./convergence/ionised_hydrogen_2000000_convergence.npy")
convergence_density_5e5 = np.load("./convergence/density_5e5_convergence.npy")
convergence_destruction_5e5 = np.load("./convergence/destruction_5e5_convergence.npy")
convergence_destruction_1e6 = np.load("./convergence/destruction_1e6_convergence.npy")
convergence_tot_ext_5e5 = np.load("./convergence/total_ext_5e5_convergence.npy")
convergence_tot_ext_1e6 = np.load("./convergence/total_ext_1e6_convergence.npy")
convergence_tot_ext_2e6 = np.load("./convergence/total_ext_2e6_convergence.npy")
convergence_tot_ext_3e6 = np.load("./convergence/total_ext_3e6_convergence.npy")
convergence_uniform_1e6 = np.load("./convergence/uniform_1e6_convergence.npy")
velocity = ((wavelength - lambda0)/lambda0*constants.c).to("km s-1")
print("Velocity at blue wing: %.3f" %(velocity[blue_wing].value))
print("Velocity at continuum: %.3f" %(velocity[continuum].value))
CMAX_continuum = intensity_half[continuum, :, :].max()
CMIN_continuum = intensity_half[continuum, :, :].min()
font_size()
# compare sampling methods
fig, ax = plt.subplots(1, 2, figsize=(7.5,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
ax[0].imshow(intensity_cont_ext_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[0].axis(False)
ax[0].set_title(r"$\alpha^c~\textrm{sampling}$")
im = ax[1].imshow(intensity_uniform_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
ax[1].set_title(r"$U~\textrm{sampling}$")
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.colorbar(im, fraction=0.043, pad=0.04, label=iunits)
fig.suptitle(r"$\textbf{Disk-centre intensity at line centre, irregular grid}$")
plt.savefig("../img/compare_line/quick_compare.pdf")
plt.close()
################################################################################
################################################################################
################################################################################
# compare sampling methods
fig, ax = plt.subplots(1, 4, figsize=(14.5,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
ax[0].imshow(intensity_cont_ext_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[0].axis(False)
ax[0].set_title(r"$\alpha^c~\textrm{sampling}$")
ax[1].imshow(intensity_ionised_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
ax[1].set_title(r"$N_\textrm{\small{H\,II}}^\textrm{\small{LTE}}~\textrm{sampling}$")
ax[2].imshow(intensity_tot_ext_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[2].axis(False)
ax[2].set_title(r"$\alpha^\textrm{tot}~\textrm{sampling}$")
im = ax[3].imshow(intensity_destruction_1e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[3].axis(False)
ax[3].set_title(r"$\varepsilon~\textrm{sampling}$")
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[2].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[3].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[3].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.colorbar(im, fraction=0.043, pad=0.04, label=iunits)
fig.suptitle(r"$\textbf{Disk-centre intensity at line centre, irregular grid}$")
# plt.show()
plt.savefig("../img/compare_line/compare_sites.pdf")
################################################################################
################################################################################
################################################################################
# Plot images over line wing, centre, and continuum, regular grid
fig, ax = plt.subplots(1, 3, figsize=(13,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
im = ax[0].imshow(intensity_half[blue_wing, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX_wing,
vmin=CMIN_wing)
ax[0].axis(False)
wl = wavelength[blue_wing]
ax[0].set_title(r"$\textrm{Blue wing}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[0], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[1].imshow(intensity_half[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
wl = wavelength[center]
ax[1].set_title(r"$\textrm{Line centre}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[1], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[2].imshow(intensity_half[continuum, :, :],
cmap=CMAP_CONT,
origin="lower",
vmax=CMAX_continuum,
vmin=CMIN_continuum)
ax[2].axis(False)
wl = wavelength[continuum]
ax[2].set_title(r"$\textrm{Continuum}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[2], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=lpad)
x = np.load("../data/LTE/x_regular_half.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(20+1/pix2Mm)/2, ymin=7, ymax=9, lw=1/pix2Mm, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm, foreground="black"),pe.Normal()])
# Text:
ax[0].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[1].vlines(x=(20+1/pix2Mm)/2, ymin=7, ymax=9, lw=1/pix2Mm, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm, foreground="black"),pe.Normal()])
# Text:
ax[1].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[2].vlines(x=(20+1/pix2Mm)/2, ymin=7, ymax=9, lw=1/pix2Mm, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm, foreground="black"),pe.Normal()])
# Text:
ax[2].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.suptitle(r"$\textbf{Disk-centre intensity, regular Grid}$")
plt.savefig("../img/compare_line/regular_disk_centre.pdf")
################################################################################
################################################################################
################################################################################
# Plot images over line wing, centre, and continuum, irregular grid
fig, ax = plt.subplots(1, 3, figsize=(13,4), constrained_layout=True)
# plot disk-centre intensity in wings and centre, and continuum
im = ax[0].imshow(intensity_cont_ext_3e6[blue_wing, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX_wing,
vmin=CMIN_wing)
ax[0].axis(False)
wl = wavelength[blue_wing]
ax[0].set_title(r"$\textrm{Blue wing}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[0], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[1].imshow(intensity_cont_ext_3e6[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
wl = wavelength[center]
ax[1].set_title(r"$\textrm{Line centre}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[1], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=0)
im = ax[2].imshow(intensity_cont_ext_3e6[continuum, :, :],
cmap=CMAP_CONT,
origin="lower",
vmax=CMAX_continuum,
vmin=CMIN_continuum)
ax[2].axis(False)
wl = wavelength[continuum]
ax[2].set_title(r"$\textrm{Continuum}~%.3f\,\textrm{nm}$" %wl)
cbar = plt.colorbar(im, ax=ax[2], fraction=0.046, pad=0.04)
cbar.set_label(iunits, rotation=90, labelpad=lpad)
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[0].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[1].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# ax[2].vlines(x=(40+1/pix2Mm)/2, ymin=14, ymax=18, lw=1/pix2Mm-8.25, color='w',
# path_effects=[pe.Stroke(linewidth=1/pix2Mm-6.25, foreground="black"),pe.Normal()])
# Text:
ax[2].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.suptitle(r"$\textbf{Disk-centre intensity, irregular grid}$")
plt.savefig("../img/compare_line/irregular_disk_centre.pdf")
################################################################################
################################################################################
################################################################################
# compare regular resolutions
fig, ax = plt.subplots(1, 3, figsize=(11.75,4), constrained_layout=True)
ax[0].imshow(intensity_quarter[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[0].axis(False)
ax[0].set_title(r"$\textrm{Quarter resolution}$")
ax[1].imshow(intensity_third[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[1].axis(False)
ax[1].set_title(r"$\textrm{One-third resolution}$")
im = ax[2].imshow(intensity_half[center, :, :],
cmap=CMAP,
origin="lower",
vmax=CMAX,
vmin=CMIN)
ax[2].axis(False)
ax[2].set_title(r"$\textrm{Half resolution}$")
# Line:
x = np.load("../data/LTE/x_regular_quarter.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
ax[0].hlines(y=8/2, xmin=10/2, xmax=10/2 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[0].text(9/2, 10/2, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
x = np.load("../data/LTE/x_regular_third.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
ax[1].hlines(y=8*2/3, xmin=10*2/3, xmax=10*2/3 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(9*2/3, 10*2/3, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
x = np.load("../data/LTE/x_regular_half.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
ax[2].hlines(y=8, xmin=10, xmax=10 + 1/pix2Mm, lw=3, color='w',
path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[2].text(9, 10, r"\textbf{1 Mm}", color='w', fontsize=14,
path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# fig.suptitle(r"$\textbf{Disk-centre Intensity line centre, Regular Grid}$")
fig.colorbar(im, fraction=0.043, pad=0.04, label=iunits)
plt.savefig("../img/compare_line/regular_resolutions.pdf")
################################################################################
################################################################################
################################################################################
# plot convergence
fig, ax = plt.subplots(1, 3, figsize=(14, 5.5), sharey=True)
ax[0].plot(convergence_quarter, label=r"$\textrm{regular (1/4 res.)}$", color="k", ls="solid")
ax[0].plot(convergence_ionised_5e5, label=r"$N_\textrm{H\,\small{II}}$", color="red", ls="solid")
ax[0].plot(convergence_cont_5e5, label=r"$\alpha^\textrm{cont}$", color="blue", ls="dashed")
ax[0].plot(convergence_tot_ext_5e5, label=r"$\alpha^\textrm{tot}$", color="gold", ls="solid")
ax[0].plot(convergence_density_5e5, label=r"$\rho$", color="gray", ls="dashdot")
ax[0].plot(convergence_destruction_5e5, label=r"$\varepsilon$", color="cyan", ls="solid")
ax[1].plot(convergence_third, label=r"$\textrm{regular (1/3 res.)}$", color="k", ls="solid")
ax[1].plot(convergence_destruction_1e6, label=r"$\varepsilon$", color="cyan", ls="solid")
ax[1].plot(convergence_cont_1e6, label=r"$\alpha^\textrm{cont}$", color="blue", ls="dashed")
ax[1].plot(convergence_ionised_1e6, label=r"$N_\textrm{H\,\small{II}}$", color="red", ls="solid")
ax[1].plot(convergence_uniform_1e6, label=r"$U~\textrm{(uniform)}$", color="pink", ls="solid")
ax[2].plot(convergence_half, label=r"$\textrm{regular (1/2 res.)}$", color="k", ls="solid")
ax[2].plot(convergence_cont_3e6, label=r"$\alpha^\textrm{cont}$", color="blue", ls="dashed")
# ax.plot(convergence_cont_2e6, label=r"$\alpha^\textrm{cont}~2\cdot 10^6~\textrm{sites}$", color="b", ls="dashdot")
# ax.plot(convergence_tot_ext_2e6, label=r"$\alpha^\textrm{tot}~2\cdot 10^6~\textrm{sites}$", color="g", ls="dashdot")
# ax.plot(convergence_tot_ext_1e6, label=r"$\alpha^\textrm{tot}~1\cdot 10^6~\textrm{sites}$", color="g", ls="dashed")
ax[0].set_ylabel(r"$\textrm{Max rel. change,}~\max\left(1 - S_\textrm{new}/S_\textrm{old}\right)$")
ax[0].set_yscale("log")
ax[0].legend()
ax[1].legend()
ax[2].legend()
ax[0].set_xlabel(r"$\textrm{Iterations}$")
ax[1].set_xlabel(r"$\textrm{Iterations}$")
ax[2].set_xlabel(r"$\textrm{Iterations}$")
ax[0].set_title(r"$\sim 5\cdot 10^5~\textrm{points}$")
ax[1].set_title(r"$\sim 10^6~\textrm{points}$")
ax[2].set_title(r"$\sim 3\cdot10^6~\textrm{points}$")
#ax.set_title(r"$\textrm{Convergence}$")
fig.tight_layout()
plt.savefig("../img/compare_line/convergence.pdf")
################################################################################
################################################################################
################################################################################
# resolution irregular grid
# Three-panel figure: disk-centre intensity at three irregular-grid resolutions.
# NOTE(review): `intensity_cont_ext_*`, `center`, `CMAP`, `CMAX`, `CMIN`, `pe`
# and `iunits` are defined earlier in the file (outside this chunk) — confirm.
fig, ax = plt.subplots(1, 3, figsize=(11.75,4), constrained_layout=True)
ax[0].imshow(intensity_cont_ext_5e5[center, :, :],
             cmap=CMAP,
             origin="lower",
             vmax=CMAX,
             vmin=CMIN)
ax[0].set_title(r"$5\cdot 10^5~\textrm{sites}$")
ax[0].axis(False)
ax[1].imshow(intensity_cont_ext_2e6[center, :, :],
             cmap=CMAP,
             origin="lower",
             vmax=CMAX,
             vmin=CMIN)
ax[1].set_title(r"$2 \cdot 10^6~\textrm{sites}$")
ax[1].axis(False)
# Keep the handle of the last image for the shared colorbar below.
im = ax[2].imshow(intensity_cont_ext_3e6[center, :, :],
                  cmap=CMAP,
                  origin="lower",
                  vmax=CMAX,
                  vmin=CMIN)
ax[2].set_title(r"$3 \cdot 10^6~\textrm{sites}$")
ax[2].axis(False)
# Physical scale: Mm per pixel derived from the regular-grid x axis; used to
# size the 1 Mm scale bar drawn on each panel.
x = np.load("../data/LTE/x_regular_full.npy")
pix2Mm = (x.max() - x.min())*1e-6/len(x)
# Line:
ax[0].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
             path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[0].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
           path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[1].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
             path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[1].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
           path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
# Line:
ax[2].hlines(y=16, xmin=20, xmax=20 + 1/pix2Mm, lw=3, color='w',
             path_effects=[pe.Stroke(linewidth=5, foreground="black"),pe.Normal()])
# Text:
ax[2].text(18, 20, r"\textbf{1 Mm}", color='w', fontsize=14,
           path_effects=[pe.Stroke(linewidth=2, foreground="black"),pe.Normal()])
fig.colorbar(im, fraction=0.046, pad=0.04, label=iunits)
# fig.suptitle(r"$\textbf{Disk-Centre~Intensity~\textit{I}}_{\lambda_0}$")
# plt.show()
plt.savefig("../img/compare_line/disk_centre_irregular_resolution.pdf")
################################################################################
################################################################################
################################################################################
# plot all lines to highlight differences
# Two-panel figure: per-column brightness-temperature line profiles around the
# line core, regular grid (left) vs irregular grid (right), plus the spatial
# average in red.
# NOTE(review): `intensity_half`, `wavelength`, `T_b`, `units`, `center`,
# `lambda0` and `blue_wing` come from earlier in the file — confirm.
fig, ax = plt.subplots(1, 2, figsize=(10, 5.5), constrained_layout=True, sharey=True)
I_regular = intensity_half.reshape(len(wavelength), -1)
I_regular *= units.kW*units.m**(-2)*units.nm**(-1)
I_irregular = intensity_cont_ext_3e6.reshape(len(wavelength), -1)
I_irregular *= units.kW*units.m**(-2)*units.nm**(-1)
Tb_regular = T_b(wavelength[:, np.newaxis]*units.nm, I_regular)
Tb_irregular = T_b(wavelength[:, np.newaxis]*units.nm, I_irregular)
# Thin black curves: every 4th column; rasterized to keep the PDF small.
ax[0].plot(wavelength[center-17:center+18],
           Tb_regular[center-17:center+18, ::4].value,
           color='k',
           lw=0.03,
           alpha=0.5,
           rasterized=True)
ax[0].plot(wavelength[center-17:center+18],
           np.mean(Tb_regular[center-17:center+18], axis=1).value,
           color="crimson", label=r"$\textrm{spatial average}$")
ax[0].axvline(lambda0, ls="dashed", color="royalblue", lw=0.75)
ax[0].axvline(wavelength[blue_wing], ls="dashed", color="deepskyblue", lw=0.75)
ax[0].set_xlabel(r"$\textrm{Wavelength [nm]}$")
ax[0].set_ylabel(r"$\textrm{Brightness temperature [K]}$")
ax[0].set_title(r"$\textrm{Regular grid}$")
ax[0].legend(loc="upper right")
ax[0].text(x=lambda0+0.001, y=6150, s=r"$\lambda_0$", color="royalblue")
ax[0].text(x=wavelength[blue_wing]-0.006, y=6150,
           s=r"$\textrm{wing}$", color="deepskyblue", rotation="vertical")
# ax[0].set_xticks(list(ax[0].get_xticks()) + [lambda0])
# ax[0].set_xticklabels([r"$%.2f$" %x for x in list(ax[0].get_xticks())[:-1]] + [r"$\lambda_0$"])
# Irregular grid has more columns, hence the coarser ::16 subsampling.
ax[1].plot(wavelength[center-17:center+18],
           Tb_irregular[center-17:center+18, ::16].value,
           color='k',
           lw=0.03,
           alpha=0.5,
           rasterized=True)
ax[1].plot(wavelength[center-17:center+18],
           np.mean(Tb_irregular[center-17:center+18], axis=1).value,
           color="crimson", label=r"$\textrm{spatial average}$")
ax[1].axvline(lambda0, ls="dashed", color="royalblue", lw=0.75)
ax[1].axvline(wavelength[blue_wing], ls="dashed", color="deepskyblue", lw=0.75)
ax[1].set_xlabel(r"$\textrm{Wavelength [nm]}$")
ax[1].set_ylim(6000,12000)
ax[1].set_title(r"$\textrm{Irregular grid}$")
ax[1].legend(loc="upper right")
ax[1].text(x=lambda0+0.001, y=6150, s=r"$\lambda_0$", color="royalblue")
ax[1].text(x=wavelength[blue_wing]-0.006, y=6150,
           s=r"$\textrm{wing}$", color="deepskyblue", rotation="vertical")
# ax[1].set_xticklabels([r"$%.2f$" %x for x in list(ax[0].get_xticks())[:-1]] + [r"$\lambda_0$"])
# ax[1].set_xticks(list(ax[1].get_xticks()) + [lambda0])
# fig.suptitle(r"$\textrm{Disk-Centre Intensity}$")
plt.savefig("../img/compare_line/lines.pdf", dpi=300)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
2,
1330,
2603,
29487,
8019,
13,
11215,
355,
12067,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
8071,
... | 2.032543 | 14,166 |
import numpy as np
import matplotlib.pylab as plt
from Gradient_2D import numerical_gradient
# Gradient-descent demo: run 30 steps from (-4, 5) and scatter the trajectory.
# NOTE(review): `gradient_descent` and `function` are not defined in this
# chunk — presumably defined earlier in the file; confirm before running.
init_x = np.array([-4.0, 5.0])
lr = 0.1          # learning rate
step_num = 30     # number of descent steps
x, x_history = gradient_descent(function, init_x, lr=lr, step_num=step_num)
# Dashed blue axes through the origin for reference.
plt.plot( [-6, 6], [0,0], '--b')
plt.plot( [0,0], [-6, 6], '--b')
plt.plot(x_history[:,0], x_history[:,1], 'o')
plt.xlim(-4.5, 4.5)
plt.ylim(-5.5, 5.5)
plt.xlabel("X0")
plt.ylabel("X1")
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
79,
2645,
397,
355,
458,
83,
198,
6738,
17701,
1153,
62,
17,
35,
1330,
29052,
62,
49607,
628,
628,
198,
15003,
62,
87,
796,
45941,
13,
18747,
26933,
12,
19,
13,
15,
... | 1.995327 | 214 |
import cv2
import numpy as np
# Track the largest orange-ish blob from the default webcam, drawing the
# enclosing circle of every detected contour until 'q' is pressed.
cap=cv2.VideoCapture(0)
# Largest radius (and its centre) seen over the whole session — note this is
# never reset per frame, so it accumulates a global maximum.
max_radius=0
max_center=(0,0)
# HSV colour band to segment; tuned constants — presumably an orange ball.
lower=np.array([7,137,132])
upper=np.array([25,255,255])
while True:
    ret, frame = cap.read()
    if frame is None:
        break
    hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
    out=cv2.inRange(hsv,lower,upper)
    # Erode then dilate (morphological opening) to drop speckle noise.
    erosion=cv2.erode(out,None,iterations=1)
    dilate=cv2.dilate(erosion,None,iterations=2)
    # NOTE(review): this 2-value unpacking matches OpenCV 4.x; OpenCV 3.x
    # returned 3 values from findContours — confirm the target version.
    cnts,_=cv2.findContours(dilate,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    for c in cnts:
        (x, y),r=cv2.minEnclosingCircle(c)
        center=(int(x),int(y))
        r=int(r)
        if r>max_radius:
            max_radius=r
            max_center=center
        cv2.circle(frame,center,r,(0,255,0),2)
    cv2.imshow("image",frame)
    if cv2.waitKey(30)==ord('q'):
        break
cv2.destroyAllWindows()
cap.release()
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11128,
28,
33967,
17,
13,
10798,
49630,
7,
15,
8,
198,
198,
9806,
62,
42172,
28,
15,
198,
9806,
62,
16159,
16193,
15,
11,
15,
8,
198,
198,
21037,
28,
37659,
13,
1... | 1.90205 | 439 |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" DisplayManager
This module provides basic "state" for the visual representation associated
with this Mycroft instance. The current states are:
ActiveSkill - The skill that last interacted with the display via the
Enclosure API.
Currently, a wakeword sets the ActiveSkill to "wakeword", which will auto
clear after 10 seconds.
A skill is set to Active when it matches an intent, outputs audio, or
changes the display via the EnclosureAPI()
A skill is automatically cleared from Active two seconds after audio
output is spoken, or 2 seconds after resetting the display.
So it is common to have '' as the active skill.
"""
import json
from threading import Thread, Timer
import os
from mycroft.messagebus.client import MessageBusClient
from mycroft.util import get_ipc_directory
from mycroft.util.log import LOG
def _write_data(dictionary: dict):
    """ Writes the dictionary of state data to the IPC directory.

    Existing keys in the 'disp_info' file are preserved; the given
    dictionary is merged on top and the file rewritten in place.

    Args:
        dictionary (dict): information to place in the 'disp_info' file
    """
    managerIPCDir = os.path.join(get_ipc_directory(), "managers")
    # change read/write permissions based on if file exists or not
    path = os.path.join(managerIPCDir, "disp_info")
    permission = "r+" if os.path.isfile(path) else "w+"
    if permission == "w+" and os.path.isdir(managerIPCDir) is False:
        os.makedirs(managerIPCDir)
        # World-writable so other Mycroft processes can update the state too.
        os.chmod(managerIPCDir, 0o777)
    try:
        with open(path, permission) as dispFile:
            # check if file is empty
            if os.stat(str(dispFile.name)).st_size != 0:
                data = json.load(dispFile)
            else:
                data = {}
                LOG.info("Display Manager is creating " + dispFile.name)
            # Merge the new keys over the stored state, then rewrite in place.
            for key in dictionary:
                data[key] = dictionary[key]
            dispFile.seek(0)
            dispFile.write(json.dumps(data))
            dispFile.truncate()
        os.chmod(path, 0o777)
    except Exception as e:
        LOG.error(e)
        LOG.error("Error found in display manager file, deleting...")
        # Corrupt state file: delete it and retry once with a fresh file.
        # NOTE(review): if the failure is not cured by deletion this recurses
        # indefinitely — confirm whether a retry cap is wanted.
        os.remove(path)
        _write_data(dictionary)
def _read_data() -> dict:
    """Reads the dictionary of state data from the IPC directory.

    If the file is missing it (and its directory) are created empty; if the
    stored JSON is corrupt the file is deleted and the read retried once
    with a clean slate.

    Returns:
        dict: loaded state information (empty if nothing is stored)
    """
    managerIPCDir = os.path.join(get_ipc_directory(), "managers")
    path = os.path.join(managerIPCDir, "disp_info")
    permission = "r" if os.path.isfile(path) else "w+"
    if permission == "w+" and os.path.isdir(managerIPCDir) is False:
        os.makedirs(managerIPCDir)
    data = {}
    try:
        with open(path, permission) as dispFile:
            # Only parse non-empty files; a freshly created file stays {}.
            if os.stat(str(dispFile.name)).st_size != 0:
                data = json.load(dispFile)
    except Exception as e:
        LOG.error(e)
        os.remove(path)
        # BUG FIX: the original called _read_data() here but discarded the
        # result and returned the stale `data`; return the retry's result.
        return _read_data()
    return data
return data
class DisplayManager:
    """ The Display manager handles the basic state of the display,
    be it a mark-1 or a mark-2 or even a future Mark-3.
    """

    def __init__(self, name=None):
        """
        Args:
            name (str): default skill name used by set_active() when no
                explicit name is supplied.
        """
        # BUG FIX: set_active() reads self.name, but no __init__ defined it,
        # so set_active() with no argument raised AttributeError.
        self.name = name or ""

    def set_active(self, skill_name=None):
        """ Sets skill name as active in the display Manager

        Args:
            skill_name (str): name to record; defaults to self.name
        """
        name = skill_name if skill_name is not None else self.name
        _write_data({"active_skill": name})

    def get_active(self):
        """ Get the currently active skill from the display manager

        Returns:
            string: The active skill's name ('' when none is active)
        """
        data = _read_data()
        active_skill = ""
        if "active_skill" in data:
            active_skill = data["active_skill"]
        return active_skill

    def remove_active(self):
        """ Clears the active skill """
        LOG.debug("Removing active skill...")
        _write_data({"active_skill": ""})
def init_display_manager_bus_connection():
    """ Connects the display manager to the messagebus """
    LOG.info("Connecting display manager to messagebus")

    # Should remove needs to be an object so it can be referenced in functions
    # [https://stackoverflow.com/questions/986006/how-do-i-pass-a-variable-by-reference]
    # NOTE(review): the handlers registered below (set_delay, set_remove_flag,
    # set_wakeword_skill) and `connect` are not defined in this chunk, and
    # `display_manager` / `should_remove` are unused here — presumably they
    # are closed over by those handlers elsewhere in the file; confirm.
    display_manager = DisplayManager()
    should_remove = [True]
    bus = MessageBusClient()
    bus.on('recognizer_loop:audio_output_end', set_delay)
    bus.on('recognizer_loop:audio_output_start', set_remove_flag)
    bus.on('recognizer_loop:record_begin', set_wakeword_skill)
    # Daemon thread so the bus connection never blocks process shutdown.
    event_thread = Thread(target=connect)
    event_thread.setDaemon(True)
    event_thread.start()
| [
2,
15069,
2177,
2011,
36714,
9552,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2... | 2.670719 | 1,892 |
from typing import Any, Tuple
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
def keyGen(key_size: int) -> Tuple[bytes, bytes, Any]:
    """Generates a key pair for the RSA algorithm.

    Parameters
    ----------
    key_size : int
        Key size (bits) passed to `RSA.generate`

    Returns
    -------
    Tuple[bytes, bytes, Any]
        Exported private key, exported public key, and the RSA key-pair
        object itself
    """
    key_pair = RSA.generate(key_size)
    private_key = key_pair.exportKey()
    public_key = key_pair.publickey()
    public_key = public_key.exportKey()
    return private_key, public_key, key_pair
def encrypt(plain_text: bytes, key_size: int) -> Tuple[Any, bytes, bytes]:
    """Encrypts plain text using RSA with PKCS#1 OAEP padding.

    A fresh key pair is generated on every call.

    Parameters
    ----------
    plain_text : bytes
        Message to encrypt
    key_size : int
        Key size (bits) forwarded to `keyGen`

    Returns
    -------
    Tuple[Any, bytes, bytes]
        Key-pair object (needed later by `decrypt`), the original plain
        text, and the encrypted cipher text
    """
    # private_key is intentionally unused here: key_pair already carries it.
    private_key, public_key, key_pair = keyGen(key_size)
    key = RSA.importKey(public_key)
    encryptor = PKCS1_OAEP.new(key)
    cipher_text = encryptor.encrypt(plain_text)
    return key_pair, plain_text, cipher_text
def decrypt(cipher_text: bytes, key_pair: Any) -> bytes:
    """Decrypts RSA/PKCS#1-OAEP encrypted text.

    Parameters
    ----------
    cipher_text : bytes
        Encrypted message, as produced by `encrypt`
    key_pair : Any
        RSA key-pair object holding the private key

    Returns
    -------
    bytes
        The recovered plain text
    """
    return PKCS1_OAEP.new(key_pair).decrypt(cipher_text)
| [
6738,
19720,
1330,
4377,
11,
309,
29291,
198,
198,
6738,
36579,
13,
34,
10803,
1330,
29673,
7902,
16,
62,
23621,
8905,
198,
6738,
36579,
13,
15202,
9218,
1330,
42319,
628,
198,
4299,
1994,
13746,
7,
2539,
62,
7857,
25,
493,
8,
4613,
... | 2.523381 | 556 |
import sys
import requests
import getopt
import json
import calendar
import math
from datetime import datetime, timezone, timedelta
from veracode_api_signing.plugin_requests import RequestsAuthPluginVeracodeHMAC
api_base = "https://api.veracode.com/was/configservice/v1/"
headers = {
"User-Agent": "Dynamic Analysis API Example Client",
"Content-Type": "application/json"
}
analysis_update_template= r'''{
"schedule": {
"duration": {
"length": {duration_length},
"unit": "{duration_unit}"
},
"end_date": "",
"now": false,
"scan_recurrence_schedule": {
"day_of_week": "{day_of_week}",
"recurrence_interval": {recurrence_interval},
"recurrence_type": "{recurrence_type}",
"schedule_end_after": {schedule_end_after},
"week_of_month": "{week_of_month}"
},
"schedule_status": "ACTIVE",
"start_date": "{start_date}"
}
}'''
weekly_update_template= r'''{
"schedule": {
"duration": {
"length": {duration_length},
"unit": "{duration_unit}"
},
"end_date": "",
"now": false,
"scan_recurrence_schedule": {
"day_of_week": "{day_of_week}",
"recurrence_interval": {recurrence_interval},
"recurrence_type": "{recurrence_type}",
"schedule_end_after": {schedule_end_after}
},
"schedule_status": "ACTIVE",
"start_date": "{start_date}"
}
}'''
monthly_update_template= r'''{
"schedule": {
"duration": {
"length": {duration_length},
"unit": "{duration_unit}"
},
"end_date": "",
"now": false,
"scan_recurrence_schedule": {
"day_of_week": "{day_of_week}",
"recurrence_interval": {recurrence_interval},
"recurrence_type": "{recurrence_type}",
"schedule_end_after": {schedule_end_after},
"week_of_month": "{week_of_month}"
},
"schedule_status": "ACTIVE",
"start_date": "{start_date}"
}
}'''
scan_update_hold = r'''{
"name": "{name}"
}
'''
cmdsettings = CommandSettings()
def print_help():
    """Prints command line options and exits"""
    print("""veracode-da-reset-scheduler.py [-h] [-d] [-v] -x
        Updates all Dynamic Analysis Recurring Scheduled scans that have expired with recurrences for one year.
        Passing of the -x or --execute is required to run program
        Options:
            -h --help           shows this help menu
            -d --dry-run        performs a dry run for updating content without committing changes
            -v --verbose        turns on the verbose debug logging for the program
            -x --execute        performs a live update to content
""")
    # Exits the whole process unconditionally after printing usage.
    sys.exit()
def main(argv):
    """Parse command-line flags and run the scheduler update.

    Args:
        argv (list[str]): argument vector without the program name
            (i.e. sys.argv[1:]).

    Flags:
        -h/--help     print usage and exit
        -v/--verbose  enable verbose logging
        -d/--dry-run  report changes without committing them
        -x/--execute  perform the live update (required to do real work)
    """
    try:
        # The short-option string keeps the historical 'i', 'a:' and 'u:'
        # entries so previously accepted flags do not start erroring.
        options, args = getopt.getopt(argv, "hvdixa:u:",
                                      ["help", "verbose", "dry-run", "execute"])
        for opt, _arg in options:
            if opt in ('-h', '--help'):
                # BUG FIX: '--help' previously fell through unhandled.
                print_help()
            elif opt in ('-v', '--verbose'):
                # BUG FIX: '--verbose' previously fell through unhandled.
                cmdsettings.verbose = True
            elif opt in ('-d', '--dry-run'):
                cmdsettings.dry_run = True
            elif opt in ('-x', '--execute'):
                # BUG FIX: this previously set cmdsettings.dry_run, so the
                # execute flag could never enable a live run.
                cmdsettings.execute = True
        if cmdsettings.execute or cmdsettings.dry_run:
            execution_process()
        else:
            print_help()
    except getopt.GetoptError:
        # Unknown flag or missing argument: show usage instead of a traceback.
        print_help()
    except requests.RequestException as e:
        print("An error occurred!")
        print(e)
        sys.exit(1)


if __name__ == "__main__":
    main(sys.argv[1:])
| [
11748,
25064,
198,
11748,
7007,
198,
11748,
651,
8738,
198,
11748,
33918,
198,
11748,
11845,
198,
11748,
10688,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11340,
11,
28805,
12514,
198,
6738,
3326,
330,
1098,
62,
15042,
62,
1... | 2.065421 | 2,247 |
from django.contrib import admin
from django.urls import path, include
from .views import *
# REST-style routes for the staff / room / guest / schedule CRUD views.
# NOTE(review): every '<entity>/delete/<int:pk>' route is wired to the same
# *RetrieveView* as the plain detail route — presumably a Destroy/Delete view
# was intended; confirm against the views module before changing.
urlpatterns = [
    path('staff/all', StaffListView.as_view()),
    path('staff/<int:pk>', StaffRetrieveView.as_view()),
    path('staff/update/<int:pk>', StaffUpdateView.as_view()),
    path('staff/new', StaffCreateView.as_view()),
    path('staff/delete/<int:pk>', StaffRetrieveView.as_view()),

    path('room/all', RoomListView.as_view()),
    path('room/<int:pk>', RoomRetrieveView.as_view()),
    path('room/update/<int:pk>', RoomUpdateView.as_view()),
    path('room/new', RoomCreateView.as_view()),
    path('room/delete/<int:pk>', RoomRetrieveView.as_view()),

    path('guest/all', GuestListView.as_view()),
    path('guest/<int:pk>', GuestRetrieveView.as_view()),
    path('guest/update/<int:pk>', GuestUpdateView.as_view()),
    path('guest/new', GuestCreateView.as_view()),
    path('guest/delete/<int:pk>', GuestRetrieveView.as_view()),

    path('schedule/all', ScheduleListView.as_view()),
    path('schedule/<int:pk>', ScheduleRetrieveView.as_view()),
    path('schedule/update/<int:pk>', ScheduleUpdateView.as_view()),
    path('schedule/new', ScheduleCreateView.as_view()),
    path('schedule/delete/<int:pk>', ScheduleRetrieveView.as_view()),
]
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
201,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
201,
198,
6738,
764,
33571,
1330,
1635,
201,
198,
201,
198,
201,
198,
6371,
33279,
82,
796,
685,
201,
198,
220,
220,
2... | 2.363803 | 547 |
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
import torch
"""
# https://zhuanlan.zhihu.com/p/93806755
class res50(torch.nn.Module):
def __init__(self, num_classes):
super(res50, self).__init__()
resnet = resnet50(pretrained=True)
self.backbone = torch.nn.Sequential(
resnet.conv1,
resnet.bn1,
resnet.relu,
resnet.layer1,
resnet.layer2,
resnet.layer3,
resnet.layer4
)
self.pool = torch.nn.AdaptiveMaxPool2d(1)
self.bnneck = nn.BatchNorm1d(2048)
self.bnneck.bias.requires_grad_(False) # no shift
self.classifier = nn.Linear(2048, num_classes, bias=False)
def forward(self, x):
x = self.backbone(x)
x = self.pool(x)
feat = x.view(x.shape[0], -1)
feat = self.bnneck(feat)
if not self.training:
return nn.functional.normalize(feat, dim=1, p=2)
x = self.classifier(feat)
return x
""" | [
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
6738,
28034,
10178,
1330,
4981,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
11748,
28034,
201,
198,
201,
198,
201,
198,
37811,
201,
198,
2,
3740,
1378,
23548,
7258... | 1.790447 | 649 |
# Demo: print the maximum of a sample list.
# NOTE(review): `find_max` is not defined in this chunk — presumably defined
# earlier in the file; confirm before running standalone.
v = [17, 92, 18, 33, 58, 7, 33, 42, 79, 37]
print(find_max(v))
| [
201,
198,
85,
796,
685,
1558,
11,
10190,
11,
1248,
11,
4747,
11,
7618,
11,
767,
11,
4747,
11,
5433,
11,
9225,
11,
5214,
60,
201,
198,
4798,
7,
19796,
62,
9806,
7,
85,
4008,
201,
198
] | 1.810811 | 37 |
from flask_app.database.db import *
from flask_app.database.models.User import *
| [
6738,
42903,
62,
1324,
13,
48806,
13,
9945,
1330,
1635,
198,
198,
6738,
42903,
62,
1324,
13,
48806,
13,
27530,
13,
12982,
1330,
1635,
198
] | 3.28 | 25 |
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow.python import keras
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import layers as tfl
from tensorflow_probability.python.internal import \
distribution_util as dist_util
from tensorflow_probability.python.layers import DistributionLambda
from tensorflow_probability.python.layers.distribution_layer import _event_size
from odin.backend import parse_activation
from odin.backend.maths import softplus1
from odin.bay.distributions import NegativeBinomialDisp, ZeroInflated
# Public API of this module.
__all__ = [
    'PoissonLayer',
    'NegativeBinomialDispLayer',
    'NegativeBinomialLayer',
    'ZINegativeBinomialDispLayer',
    'ZINegativeBinomialLayer',
    'ZIPoissonLayer',
    'MultinomialLayer',
    'DirichletMultinomialLayer',
    'BinomialLayer',
]

# Thin alias: an independent Poisson layer straight from tfp.layers.
PoissonLayer = tfl.IndependentPoisson
# ===========================================================================
# Negative binomial
# ===========================================================================
class NegativeBinomialLayer(DistributionLambda):
  r"""An independent NegativeBinomial Keras layer.

  Arguments:
    event_shape: integer vector `Tensor` representing the shape of single
      draw from this distribution.
    count_activation: activation function return non-negative floating-point,
      i.e. the `total_count` of failures
    dispersion : {'full', 'share', 'single'}
      - 'full' creates a dispersion value for each individual data point,
      - 'share' creates a single vector of dispersion for all examples, and
      - 'single' uses a single value as dispersion for all data points.
      Note: the dispersion in this case is the probability of success.
    convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
      instance and returns a `tf.Tensor`-like object.
      Default value: `tfd.Distribution.sample`.
    validate_args: Python `bool`, default `False`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
      Default value: `False`.
    **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
  """

  @staticmethod
  def new(params,
          event_shape=(),
          count_activation=tf.exp,
          validate_args=False,
          name="NegativeBinomialLayer",
          disp=None):
    r"""Create the distribution instance from a `params` vector."""
    params = tf.convert_to_tensor(value=params, name='params')
    event_shape = dist_util.expand_to_vector(
        tf.convert_to_tensor(value=event_shape,
                             name='event_shape',
                             dtype=tf.int32),
        tensor_name='event_shape',
    )
    # Batch dims of `params` followed by the event shape.
    output_shape = tf.concat(
        [tf.shape(input=params)[:-1], event_shape],
        axis=0,
    )
    if disp is None:
      # Last axis of `params` packs (total_count, logits): split in half.
      total_count, logits = tf.split(params, 2, axis=-1)
      logits = tf.reshape(logits, output_shape)
    else:
      # Externally shared dispersion: all of `params` is the count.
      # NOTE(review): `disp` is used as-is — its shape must broadcast against
      # `output_shape`; confirm with callers.
      total_count = params
      logits = disp
    total_count = tf.reshape(total_count, output_shape)
    # Map the raw parameters to a non-negative count.
    total_count = count_activation(total_count)
    return tfd.Independent(
        tfd.NegativeBinomial(total_count=total_count,
                             logits=logits,
                             validate_args=validate_args),
        reinterpreted_batch_ndims=tf.size(input=event_shape),
        name=name,
    )

  @staticmethod
  def params_size(event_shape=(),
                  dispersion='full',
                  name="NegativeBinomialLayer_params_size"):
    r"""The number of `params` needed to create a single distribution."""
    # 'full' needs (count, logits) per event element; otherwise only counts.
    if dispersion == 'full':
      return 2 * _event_size(event_shape, name=name)
    return _event_size(event_shape, name=name)
class NegativeBinomialDispLayer(DistributionLambda):
  r"""An alternative parameterization of the NegativeBinomial Keras layer.

  The order of input parameters are: mean, dispersion

  Arguments:
    event_shape: integer vector `Tensor` representing the shape of single
      draw from this distribution.
    mean_activation : activation for the non-negative mean
    disp_activation : activation for the non-negative dispersion
    dispersion : {'full', 'share', 'single'}
      'full' creates a dispersion value for each individual data point,
      'share' creates a single dispersion vector of `event_shape` for all examples,
      and 'single' uses a single value as dispersion for all data points.
    convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
      instance and returns a `tf.Tensor`-like object.
      Default value: `tfd.Distribution.sample`.
    validate_args: Python `bool`, default `False`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
      Default value: `False`.
    **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
  """

  @staticmethod
  def new(params,
          event_shape=(),
          mean_activation=tf.nn.softplus,
          disp_activation=softplus1,
          validate_args=False,
          name="NegativeBinomialDispLayer",
          disp=None):
    r""" Create the distribution instance from a `params` vector. """
    params = tf.convert_to_tensor(value=params, name='params')
    event_shape = dist_util.expand_to_vector(
        tf.convert_to_tensor(value=event_shape,
                             name='event_shape',
                             dtype=tf.int32),
        tensor_name='event_shape',
    )
    # Batch dims of `params` followed by the event shape.
    output_shape = tf.concat(
        [tf.shape(input=params)[:-1], event_shape],
        axis=0,
    )
    if disp is None:
      # Last axis of `params` packs (loc, dispersion): split in half.
      loc, disp = tf.split(params, 2, axis=-1)
      disp = tf.reshape(disp, output_shape)
    else:
      # Externally shared dispersion: all of `params` is the mean.
      loc = params
    loc = tf.reshape(loc, output_shape)
    # Both parameters must be non-negative.
    loc = mean_activation(loc)
    disp = disp_activation(disp)
    return tfd.Independent(
        NegativeBinomialDisp(loc=loc, disp=disp, validate_args=validate_args),
        reinterpreted_batch_ndims=tf.size(input=event_shape),
        name=name,
    )

  @staticmethod
  def params_size(event_shape=(),
                  dispersion='full',
                  name="NegativeBinomialDispLayer_params_size"):
    r"""The number of `params` needed to create a single distribution."""
    # 'full' needs (loc, disp) per event element; otherwise only loc.
    if dispersion == 'full':
      return 2 * _event_size(event_shape, name=name)
    return _event_size(event_shape, name=name)
# ===========================================================================
# Zero inflated
# ===========================================================================
class ZIPoissonLayer(DistributionLambda):
  r"""A Independent zero-inflated Poisson keras layer """

  @staticmethod
  def new(params,
          event_shape=(),
          activation=tf.identity,
          validate_args=False,
          name="ZIPoissonLayer"):
    """Create the distribution instance from a `params` vector."""
    params = tf.convert_to_tensor(value=params, name='params')
    event_shape = dist_util.expand_to_vector(
        tf.convert_to_tensor(value=event_shape,
                             name='event_shape',
                             dtype=tf.int32),
        tensor_name='event_shape',
    )
    # Batch dims of `params` followed by the event shape.
    output_shape = tf.concat(
        [tf.shape(input=params)[:-1], event_shape],
        axis=0,
    )
    # Last axis packs (log_rate, inflation logits): split in half.
    (log_rate_params, logits_params) = tf.split(params, 2, axis=-1)
    return tfd.Independent(
        ZeroInflated(count_distribution=tfd.Poisson(
            log_rate=activation(tf.reshape(log_rate_params, output_shape)),
            validate_args=validate_args),
                     logits=tf.reshape(logits_params, output_shape),
                     validate_args=validate_args),
        reinterpreted_batch_ndims=tf.size(input=event_shape),
        name=name,
    )

  @staticmethod
  def params_size(event_shape=(), name="ZeroInflatedPoisson_params_size"):
    r"""The number of `params` needed to create a single distribution."""
    # (log_rate + inflation logits) per event element.
    return 2 * _event_size(event_shape, name=name)
class ZINegativeBinomialLayer(DistributionLambda):
  r"""A Independent zero-inflated negative binomial keras layer

  Arguments:
    event_shape: integer vector `Tensor` representing the shape of single
      draw from this distribution.
    count_activation: activation function return non-negative floating-point,
      i.e. the `total_count` of failures
    dispersion, inflation : {'full', 'share', 'single'}
      'full' creates a dispersion value for each individual data point,
      'share' creates a single vector of dispersion for all examples, and
      'single' uses a single value as dispersion for all data points.
    convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
      instance and returns a `tf.Tensor`-like object.
      Default value: `tfd.Distribution.sample`.
    validate_args: Python `bool`, default `False`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
      Default value: `False`.
    **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
  """

  @staticmethod
  def new(params,
          event_shape=(),
          count_activation=tf.exp,
          validate_args=False,
          name="ZINegativeBinomialLayer",
          disp=None,
          rate=None):
    r"""Create the distribution instance from a `params` vector."""
    params = tf.convert_to_tensor(value=params, name='params')
    event_shape = dist_util.expand_to_vector(
        tf.convert_to_tensor(value=event_shape,
                             name='event_shape',
                             dtype=tf.int32),
        tensor_name='event_shape',
    )
    # Batch dims of `params` followed by the event shape.
    output_shape = tf.concat((tf.shape(input=params)[:-1], event_shape), axis=0)
    # `params` packs up to (total_count, logits, rate) along the last axis;
    # externally supplied `disp`/`rate` shrink how much is packed.
    if disp is None:  # full dispersion
      if rate is None:
        total_count, logits, rate = tf.split(params, 3, axis=-1)
        rate = tf.reshape(rate, output_shape)
      else:
        total_count, logits = tf.split(params, 2, axis=-1)
      logits = tf.reshape(logits, output_shape)
    else:  # share dispersion
      if rate is None:
        total_count, rate = tf.split(params, 2, axis=-1)
        rate = tf.reshape(rate, output_shape)
      else:
        total_count = params
      # NOTE(review): shared `disp`/`rate` are used as-is — their shapes must
      # broadcast against `output_shape`; confirm with callers.
      logits = disp
    total_count = tf.reshape(total_count, output_shape)
    # Map the raw parameters to a non-negative count.
    total_count = count_activation(total_count)
    nb = tfd.NegativeBinomial(total_count=total_count,
                              logits=logits,
                              validate_args=validate_args)
    zinb = ZeroInflated(count_distribution=nb,
                        logits=rate,
                        validate_args=validate_args)
    return tfd.Independent(zinb,
                           reinterpreted_batch_ndims=tf.size(input=event_shape),
                           name=name)

  @staticmethod
  def params_size(event_shape=(),
                  dispersion='full',
                  inflation='full',
                  name="ZeroInflatedNegativeBinomial_params_size"):
    r"""The number of `params` needed to create a single distribution."""
    size = _event_size(event_shape, name=name)
    # Start from (count, dispersion, inflation); drop one slot for each
    # parameter that is shared rather than 'full'.
    total = 3 * size
    if dispersion != 'full':
      total -= size
    if inflation != 'full':
      total -= size
    return total
class ZINegativeBinomialDispLayer(DistributionLambda):
  r"""A Independent zero-inflated negative binomial (alternative
  parameterization) keras layer.

  The order of input parameters are: mean, dispersion, dropout rate

  Arguments:
    event_shape: integer vector `Tensor` representing the shape of single
      draw from this distribution.
    mean_activation : activation for the non-negative mean
    disp_activation : activation for the non-negative dispersion
    dispersion, inflation : {'full', 'share', 'single'}
      'full' creates a dispersion value for each individual data point,
      'share' creates a single dispersion vector of `event_shape` for all examples,
      and 'single' uses a single value as dispersion for all data points.
    convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`
      instance and returns a `tf.Tensor`-like object.
      Default value: `tfd.Distribution.sample`.
    validate_args: Python `bool`, default `False`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
      Default value: `False`.
    **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.
  """

  @staticmethod
  def new(params,
          event_shape=(),
          mean_activation=tf.nn.softplus,
          disp_activation=softplus1,
          validate_args=False,
          name="ZINegativeBinomialDispLayer",
          disp=None,
          rate=None):
    r"""Create the distribution instance from a `params` vector."""
    params = tf.convert_to_tensor(value=params, name='params')
    event_shape = dist_util.expand_to_vector(
        tf.convert_to_tensor(value=event_shape,
                             name='event_shape',
                             dtype=tf.int32),
        tensor_name='event_shape',
    )
    # Batch dims of `params` followed by the event shape.
    output_shape = tf.concat((tf.shape(input=params)[:-1], event_shape), axis=0)
    ### splitting the parameters
    # `params` packs up to (loc, disp, rate) along the last axis; externally
    # supplied `disp`/`rate` shrink how much is packed.
    if disp is None:  # full dispersion
      if rate is None:
        loc, disp, rate = tf.split(params, 3, axis=-1)
        rate = tf.reshape(rate, output_shape)
      else:
        loc, disp = tf.split(params, 2, axis=-1)
      disp = tf.reshape(disp, output_shape)
    else:  # share dispersion
      if rate is None:
        loc, rate = tf.split(params, 2, axis=-1)
        rate = tf.reshape(rate, output_shape)
      else:
        loc = params
    # as count value, do exp if necessary
    loc = tf.reshape(loc, output_shape)
    loc = mean_activation(loc)
    disp = disp_activation(disp)
    # create the distribution
    nb = NegativeBinomialDisp(loc=loc, disp=disp, validate_args=validate_args)
    zinb = ZeroInflated(count_distribution=nb,
                        logits=rate,
                        validate_args=validate_args)
    return tfd.Independent(zinb,
                           reinterpreted_batch_ndims=tf.size(input=event_shape),
                           name=name)

  @staticmethod
  def params_size(event_shape=(),
                  dispersion='full',
                  inflation='full',
                  name="ZINegativeBinomialDisp_params_size"):
    r"""The number of `params` needed to create a single distribution."""
    size = _event_size(event_shape, name=name)
    # Start from (loc, disp, rate); drop one slot for each parameter that is
    # shared rather than 'full'.
    total = 3 * size
    if dispersion != 'full':
      total -= size
    if inflation != 'full':
      total -= size
    return total
# ===========================================================================
# Binomial Multinomial layer
# ===========================================================================
class MultinomialLayer(tfl.DistributionLambda):
  r"""Multinomial distribution layer.

  Parameterization of the flat `params` vector:
  - total_count : `[batch_size, 1]`
  - logits : `[batch_size, ndim]`
  - sample : `[batch_size, ndim]` with `sum(x, axis=1) = total_count`
  """

  @staticmethod
  def new(params,
          event_shape=(),
          count_activation=tf.nn.softplus,
          validate_args=False,
          name='MultinomialLayer'):
    r"""Create the distribution instance from a `params` vector."""
    params = tf.convert_to_tensor(value=params, name='params')
    # Channel 0 parameterizes the (positive) total count; the remaining
    # channels are the unnormalized log-probabilities.
    to_count = parse_activation(count_activation, 'tf')
    return tfd.Multinomial(total_count=to_count(params[..., 0]),
                           logits=params[..., 1:],
                           validate_args=validate_args,
                           name=name)

  @staticmethod
  def params_size(event_shape=(), name='MultinomialLayer_params_size'):
    r"""The number of `params` needed to create a single distribution."""
    # One extra unit for the total-count channel.
    return 1. + _event_size(event_shape, name=name)
class DirichletMultinomialLayer(tfl.DistributionLambda):
  r"""Dirichlet-Multinomial compound distribution layer.

  With K=2 this reduces to the Beta-Binomial distribution.
  """

  @staticmethod
  def new(params,
          event_shape=(),
          count_activation=tf.nn.softplus,
          concentration_activation=softplus1,
          clip_for_stable=True,
          validate_args=False,
          name='DirichletMultinomialLayer'):
    r"""Create the distribution instance from a `params` vector."""
    params = tf.convert_to_tensor(value=params, name='params')
    # Channel 0 -> positive total count; remaining channels -> Dirichlet
    # concentration parameters.
    total_count = parse_activation(count_activation, 'tf')(params[..., 0])
    concentration = parse_activation(concentration_activation,
                                     'tf')(params[..., 1:])
    if clip_for_stable:
      # Keep the concentration away from the numerically fragile extremes.
      concentration = tf.clip_by_value(concentration, 1e-3, 1e3)
    return tfd.DirichletMultinomial(total_count=total_count,
                                    concentration=concentration,
                                    validate_args=validate_args,
                                    name=name)

  @staticmethod
  def params_size(event_shape=(), name='DirichletMultinomialLayer_params_size'):
    r"""The number of `params` needed to create a single distribution."""
    # One extra unit for the total-count channel.
    return 1. + _event_size(event_shape, name=name)
class BinomialLayer(tfl.DistributionLambda):
  r"""Binomial distribution layer: each entry is the outcome of flipping
  a coin `total_count` times."""

  @staticmethod
  def new(params,
          event_shape=(),
          count_activation=tf.nn.softplus,
          validate_args=False,
          name='BinomialLayer'):
    r"""Create the distribution instance from a `params` vector."""
    to_count = parse_activation(count_activation, 'tf')
    params = tf.convert_to_tensor(value=params, name='params')
    event_shape = dist_util.expand_to_vector(
        tf.convert_to_tensor(value=event_shape,
                             name='event_shape',
                             dtype=tf.int32),
        tensor_name='event_shape',
    )
    # Restore the `[batch..., event...]` layout for both parameter halves.
    full_shape = tf.concat((tf.shape(params)[:-1], event_shape), axis=0)
    counts, logits = tf.split(params, 2, axis=-1)
    counts = tf.reshape(counts, full_shape)
    logits = tf.reshape(logits, full_shape)
    return tfd.Independent(
        tfd.Binomial(total_count=to_count(counts),
                     logits=logits,
                     validate_args=validate_args),
        reinterpreted_batch_ndims=tf.size(event_shape),
        name=name,
    )

  @staticmethod
  def params_size(event_shape=(), name='BinomialLayer_params_size'):
    r"""The number of `params` needed to create a single distribution."""
    # Two per-unit parameter halves: total counts and logits.
    return 2 * _event_size(event_shape, name=name)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
29412,
1330,
41927,
292,
198,
6738,
1119... | 2.510845 | 7,607 |
# -*- coding: utf-8 -*-
##
# \file pe_absorbing_layer.py
# \title Definition of an absorbing layer for the parabolic equation.
# \author Pierre Chobeau
# \version 0.1
# \license BSD 3-Clause License
# \inst UMRAE (Ifsttar Nantes), LAUM (Le Mans Université)
# \date 2017, 20 Nov.
##
import numpy as np
def abs_lay_top(p_ij, dy, Ny, y_start_abs):
    """
    Absorbing layer for the parabolic equation defined following the
    vertical direction - i.e. along the y axis.
    For more details see **[chevret_phd1994, Eq.(4.38), p.59]**.

    Fixes: the damping-profile array no longer shadows the function name,
    and the layer start index is computed once instead of per iteration.

    :param p_ij: pressure at the discrete location i,j ~ (x, y) (Pa).
    :type p_ij: 2D numpy arrays of complexes
    :param dy: spatial step for the y directions (m).
    :type dy: float
    :param Ny: length of the domain in number of nodes following the y dir.
    :type Ny: int
    :param y_start_abs: y coordinate of the layer starting position (m).
    :type y_start_abs: float
    :return: the pressure array including the absorption of the layer (Pa).
    :rtype: 2D numpy arrays of complexes
    """
    # Empirical tuning constants of the damping profile,
    # see [chevret_phd1994, Eq.(4.38)].
    a_empirical = 4.5
    b_empirical = int(round(1.4 / dy))
    # First node inside the absorbing layer.
    j_start = int(y_start_abs / dy)
    # Damping profile: 1 below the layer, smoothly decaying inside it.
    profile = np.ones((Ny + 1), dtype=np.float64)
    for j in range(j_start + 1, Ny + 1):
        profile[j] = np.exp(-((j - j_start) /
                              (a_empirical * (Ny + b_empirical - j))) ** 2)
        # Damp the pressure row in place.
        p_ij[j] = p_ij[j] * profile[j]
    # Debug helper: flip to True to visualise the damping profile.
    plot_abs_profil = False
    if plot_abs_profil:
        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.plot(range(j_start + 1, Ny + 1),
                 profile[j_start + 1: Ny + 1])
        plt.show()
    return p_ij
def abs_lay_bottom_top(p_ij, dy, Ny, y_start_abs):
    """
    Absorbing layer for the ground (low part) of the parabolic equation, in
    order to simulate free field propagation.
    The absorbing layer is defined following the vertical direction -
    i.e. along the y axis.
    For more details see **[chevret_phd1994, Eq.(4.38), p.59]**.
    :param p_ij: pressure at the discrete location i,j ~ (x, y) (Pa).
    :type p_ij: 2D numpy arrays of complexes
    :param dy: spatial step for the y directions (m).
    :type dy: float
    :param Ny: length of the domain in number of nodes following the y dir.
    :type Ny: int
    :param y_start_abs: y coordinate of the layer starting position (m).
    :type y_start_abs: float
    :return: the pressure array including the absorption of the layer (Pa).
    :rtype: 2D numpy arrays of complexes
    """
    # Empirical tuning constants of the damping profile,
    # see [chevret_phd1994, Eq.(4.38)].
    a_empirical = 4.5
    b_empirical = int(round(1.4/dy))
    # Per-row damping profiles: top layer, mirrored bottom layer, and their
    # elementwise product that is applied to the pressure field.
    abs_lay_top = np.ones((Ny + 1), dtype=np.float64)
    abs_lay_total = np.ones((Ny + 1), dtype=np.float64)
    abs_lay_bottom = np.ones((Ny + 1), dtype=np.float64)
    for j in range(int(y_start_abs / dy) + 1, Ny + 1):
        abs_lay_top[j] = np.exp(-((j - int(y_start_abs / dy)) /
                                  (a_empirical * (Ny + b_empirical - j)))**2)
        # NOTE(review): when j == Ny the index `Ny - j - 1` equals -1 and
        # wraps to the *last* element of abs_lay_bottom -- verify this is
        # intended and not an off-by-one.
        abs_lay_bottom[Ny - j - 1] = np.exp(-((j + 1 - int(y_start_abs / dy)) /
                                              (a_empirical *
                                               (- Ny - b_empirical + j))) ** 2)
    for j in range(Ny + 1):
        # Combine both profiles and damp the pressure row in place.
        abs_lay_total[j] = abs_lay_bottom[j] * abs_lay_top[j]
        p_ij[j] = p_ij[j] * abs_lay_total[j]
    # Debug helper: flip to True to visualise the damping profiles.
    plot_abs_profil = False
    if plot_abs_profil:
        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.plot(range(int(y_start_abs / dy) + 1, Ny + 1),
                 abs_lay_top[int(y_start_abs / dy) + 1: Ny + 1])
        plt.figure(2)
        plt.plot(range(Ny - int(y_start_abs / dy)),
                 abs_lay_bottom[: Ny - int(y_start_abs / dy)])
        plt.show()
    return p_ij
def abs_lay_top_1(k, dy, Ny, y_start_abs):
    """
    Absorbing layer for the parabolic equation defined following the
    vertical direction - It has to be applied on the wavenumber directly.
    :param k: wavenumber values, damped in place inside the layer.
    :type k: numpy array of complexes (indexed along the y axis)
    :param dy: spatial step for the y directions (m).
    :type dy: float
    :param Ny: length of the domain in number of nodes following the y dir.
    :type Ny: int
    :param y_start_abs: y coordinate of the layer starting position (m).
    :type y_start_abs: float
    :return: the wavenumber.
    :rtype: 2D numpy arrays of complexes
    """
    abs_lay_top = np.ones((Ny + 1), dtype=np.float64)
    A = np.ones((Ny + 1), dtype=np.float64)
    for j in range(int(y_start_abs / dy) + 1, Ny + 1):
        # The amplitude and the profile are written at the mirrored index
        # Ny + 1 - j + j_start (top of the domain first).
        A[Ny + 1 - j + int(y_start_abs / dy)] = np.exp(-4. *
                        ((j * dy) - y_start_abs) / ((Ny + 1) * dy - y_start_abs))
        # NOTE(review): `A[j]` is read here although the assignment above
        # wrote the *mirrored* index, so for most j it still holds the
        # initial value 1.0; likewise `k[j]` below reads abs_lay_top[j]
        # before the mirrored write reaches that slot.  This looks
        # suspicious -- confirm against the reference formulation.
        abs_lay_top[Ny + 1 - j + int(y_start_abs / dy)] = A[j] * \
                        ((j * dy - y_start_abs) /
                         ((Ny + 1) * dy - y_start_abs)) ** 2
        k[j] = k[j] * abs_lay_top[j]
    return k
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2235,
198,
2,
3467,
7753,
220,
220,
220,
220,
613,
62,
46303,
4623,
62,
29289,
13,
9078,
198,
2,
3467,
7839,
220,
220,
220,
30396,
286,
281,
34418,
7679,
329,
262,
1... | 2.156469 | 2,288 |
#
# Copyright (C) 2005, Giovanni Bajo
#
# Based on previous work under copyright (c) 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
import sys, string, os, imp, marshal, dircache, glob
try:
# zipimport is supported starting with Python 2.3
import zipimport
except ImportError:
zipimport = None
try:
# if ctypes is present, we can enable specific dependency discovery
import ctypes
from ctypes.util import find_library
except ImportError:
ctypes = None
import suffixes
try:
STRINGTYPE = basestring
except NameError:
STRINGTYPE = type("")
if not os.environ.has_key('PYTHONCASEOK') and sys.version_info >= (2, 1):
else:
def pyco():
    """
    Return the compiled-module extension suffix: 'c' when assertions are
    enabled (normal runs, `__debug__` true), 'o' under optimized mode (-O).
    """
    return 'c' if __debug__ else 'o'
#=======================Owners==========================#
# An Owner does imports from a particular piece of turf
# That is, there's an Owner for each thing on sys.path
# There are owners for directories and .pyz files.
# There could be owners for zip files, or even URLs.
# Note that they replace the string in sys.path,
# but str(sys.path[n]) should yield the original string.
ZipOwner = None
if zipimport:
# We cannot use zipimporter here because it has a stupid bug:
#
# >>> z.find_module("setuptools.setuptools.setuptools.setuptools.setuptools") is not None
# True
#
# So mf will go into infinite recursion.
# Instead, we'll reuse the BaseDirOwner logic, simply changing
# the template methods.
_globalownertypes = filter(None, [
DirOwner,
ZipOwner,
PYZOwner,
Owner,
])
#===================Import Directors====================================#
# ImportDirectors live on the metapath
# There's one for builtins, one for frozen modules, and one for sys.path
# Windows gets one for modules gotten from the Registry
# There should be one for Frozen modules
# Mac would have them for PY_RESOURCE modules etc.
# A generalization of Owner - their concept of "turf" is broader
# for Windows only
#=================Import Tracker============================#
# This one doesn't really import, just analyzes
# If it *were* importing, it would be the one-and-only ImportManager
# ie, the builtin import
UNTRIED = -1
imptyps = ['top-level', 'conditional', 'delayed', 'delayed, conditional']
import hooks
if __debug__:
import sys
import UserDict
else:
LogDict = dict
# really the equivalent of builtin import
#====================Modules============================#
# All we're doing here is tracking, not importing
# If we were importing, these would be hooked to the real module objects
#======================== Utility ================================#
# Scan the code object for imports, __all__ and weird stuff
# ---------------------------------------------------------------------------
# Bytecode opcodes used by the scanner, resolved by *name* so the numeric
# values match the running CPython version.  Opcodes that do not exist in
# this interpreter are mapped to 999 -- an impossible opcode value, so
# comparisons against them simply never match.
# ---------------------------------------------------------------------------
import dis
IMPORT_NAME = dis.opname.index('IMPORT_NAME')
IMPORT_FROM = dis.opname.index('IMPORT_FROM')
try:
    IMPORT_STAR = dis.opname.index('IMPORT_STAR')
except:
    IMPORT_STAR = 999
STORE_NAME = dis.opname.index('STORE_NAME')
STORE_FAST = dis.opname.index('STORE_FAST')
STORE_GLOBAL = dis.opname.index('STORE_GLOBAL')
try:
    STORE_MAP = dis.opname.index('STORE_MAP')
except:
    STORE_MAP = 999
LOAD_GLOBAL = dis.opname.index('LOAD_GLOBAL')
LOAD_ATTR = dis.opname.index('LOAD_ATTR')
LOAD_NAME = dis.opname.index('LOAD_NAME')
EXEC_STMT = dis.opname.index('EXEC_STMT')
try:
    SET_LINENO = dis.opname.index('SET_LINENO')
except ValueError:
    SET_LINENO = 999
BUILD_LIST = dis.opname.index('BUILD_LIST')
LOAD_CONST = dis.opname.index('LOAD_CONST')
# On Python > 2.5 a LOAD_CONST preceding IMPORT_NAME presumably carries the
# relative-import level; older versions disable that check via the sentinel.
if getattr(sys, 'version_info', (0,0,0)) > (2,5,0):
    LOAD_CONST_level = LOAD_CONST
else:
    LOAD_CONST_level = 999
# Conditional-jump opcodes were renamed/split in Python 2.7.
if getattr(sys, 'version_info', (0,0,0)) >= (2,7,0):
    COND_OPS = [dis.opname.index('POP_JUMP_IF_TRUE'),
                dis.opname.index('POP_JUMP_IF_FALSE'),
                dis.opname.index('JUMP_IF_TRUE_OR_POP'),
                dis.opname.index('JUMP_IF_FALSE_OR_POP'),
                ]
else:
    COND_OPS = [dis.opname.index('JUMP_IF_FALSE'),
                dis.opname.index('JUMP_IF_TRUE'),
                ]
JUMP_FORWARD = dis.opname.index('JUMP_FORWARD')
try:
    STORE_DEREF = dis.opname.index('STORE_DEREF')
except ValueError:
    STORE_DEREF = 999
# Every opcode that binds a name -- used to detect assignments of imports.
STORE_OPS = [STORE_NAME, STORE_FAST, STORE_GLOBAL, STORE_DEREF, STORE_MAP]
#IMPORT_STAR -> IMPORT_NAME mod ; IMPORT_STAR
#JUMP_IF_FALSE / JUMP_IF_TRUE / JUMP_FORWARD
def scan_code_for_ctypes(co, instrs, i):
    """Detects ctypes dependencies, using reasonable heuristics that should
    cover most common ctypes usages; returns a tuple of two lists, one
    containing names of binaries detected as dependencies, the other containing
    warnings.

    `co` is the code object being scanned, `instrs` its disassembled
    instructions as (opcode, oparg, conditional, curline) tuples, and `i`
    the index of the instruction to inspect.
    """
    def _libFromConst(i):
        """Extracts library name from an expected LOAD_CONST instruction and
        appends it to local binaries list.

        NOTE: `b`, `co` and `instrs` are closed over from the enclosing
        scope.
        """
        op, oparg, conditional, curline = instrs[i]
        if op == LOAD_CONST:
            soname = co.co_consts[oparg]
            b.append(soname)
    b = []  # detected binary (shared-library) names
    op, oparg, conditional, curline = instrs[i]
    if op in (LOAD_GLOBAL, LOAD_NAME):
        name = co.co_names[oparg]
        if name in ("CDLL", "WinDLL"):
            # Guesses ctypes imports of this type: CDLL("library.so")
            # LOAD_GLOBAL 0 (CDLL) <--- we "are" here right now
            # LOAD_CONST 1 ('library.so')
            _libFromConst(i+1)
        elif name == "ctypes":
            # Guesses ctypes imports of this type: ctypes.DLL("library.so")
            # LOAD_GLOBAL 0 (ctypes) <--- we "are" here right now
            # LOAD_ATTR 1 (CDLL)
            # LOAD_CONST 1 ('library.so')
            op2, oparg2, conditional2, curline2 = instrs[i+1]
            if op2 == LOAD_ATTR:
                if co.co_names[oparg2] in ("CDLL", "WinDLL"):
                    # Fetch next, and finally get the library name
                    _libFromConst(i+2)
        elif name in ("cdll", "windll"):
            # Guesses ctypes imports of these types:
            # * cdll.library (only valid on Windows)
            #   LOAD_GLOBAL 0 (cdll) <--- we "are" here right now
            #   LOAD_ATTR 1 (library)
            # * cdll.LoadLibrary("library.so")
            #   LOAD_GLOBAL 0 (cdll) <--- we "are" here right now
            #   LOAD_ATTR 1 (LoadLibrary)
            #   LOAD_CONST 1 ('library.so')
            op2, oparg2, conditional2, curline2 = instrs[i+1]
            if op2 == LOAD_ATTR:
                if co.co_names[oparg2] != "LoadLibrary":
                    # First type: the attribute name *is* the library name.
                    soname = co.co_names[oparg2] + ".dll"
                    b.append(soname)
                else:
                    # Second type, needs to fetch one more instruction
                    _libFromConst(i+2)
    # If any of the libraries has been requested with anything different from
    # the bare filename, drop that entry and warn the user - pyinstaller would
    # need to patch the compiled pyc file to make it work correctly!
    w = []
    for bin in list(b):
        if bin != os.path.basename(bin):
            b.remove(bin)
            w.append("W: ignoring %s - ctypes imports only supported using bare filenames" % (bin,))
    return b, w
def _resolveCtypesImports(cbinaries):
    """Completes ctypes BINARY entries for modules with their full path.

    `cbinaries` is a list of bare shared-library filenames; returns a list
    of (name, fullpath, "BINARY") tuples for those that could be located.
    NOTE: Python 2 code (print statement below).
    """
    # NOTE(review): `envvar` is assigned but never used in this function --
    # presumably a leftover from an earlier implementation.
    if sys.platform.startswith("linux"):
        envvar = "LD_LIBRARY_PATH"
    elif sys.platform.startswith("darwin"):
        envvar = "DYLD_LIBRARY_PATH"
    else:
        envvar = "PATH"
    ret = []
    # Try to locate the shared library on disk. This is done by
    # executing ctypes.util.find_library prepending ImportTracker's
    # local paths to library search paths, then replaces original values.
    # (_savePaths/_restorePaths are defined elsewhere in this file.)
    old = _savePaths()
    for cbin in cbinaries:
        ext = os.path.splitext(cbin)[1]
        # On Windows, only .dll files can be loaded.
        if os.name == "nt" and ext.lower() in [".so", ".dylib"]:
            continue
        cpath = find_library(os.path.splitext(cbin)[0])
        if sys.platform == "linux2":
            # CAVEAT: find_library() is not the correct function. Ctype's
            # documentation says that it is meant to resolve only the filename
            # (as a *compiler* does) not the full path. Anyway, it works well
            # enough on Windows and Mac. On Linux, we need to implement
            # more code to find out the full path.
            if cpath is None:
                cpath = cbin
            # "man ld.so" says that we should first search LD_LIBRARY_PATH
            # and then the ldcache.
            # NOTE(review): raises KeyError if LD_LIBRARY_PATH is unset.
            for d in os.environ["LD_LIBRARY_PATH"].split(":"):
                if os.path.isfile(d + "/" + cpath):
                    cpath = d + "/" + cpath
                    break
            else:
                # Fall back to the dynamic-linker cache listing.
                for L in os.popen("ldconfig -p").read().splitlines():
                    if cpath in L:
                        cpath = L.split("=>", 1)[1].strip()
                        assert os.path.isfile(cpath)
                        break
                else:
                    cpath = None
        if cpath is None:
            print "W: library %s required via ctypes not found" % (cbin,)
        else:
            ret.append((cbin, cpath, "BINARY"))
    _restorePaths(old)
    return ret
| [
2,
198,
2,
15069,
357,
34,
8,
5075,
11,
50191,
347,
34944,
198,
2,
198,
2,
13403,
319,
2180,
670,
739,
6634,
357,
66,
8,
6244,
15359,
359,
272,
41253,
11,
3457,
13,
198,
2,
198,
2,
770,
1430,
318,
1479,
3788,
26,
345,
460,
176... | 2.380062 | 4,223 |
import os
import numpy as np
import dolfin as df
import matplotlib.pyplot as plt
from finmag import Simulation
from finmag.energies import Demag, Exchange
# Directory of this script; the output figure is written next to it.
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
IMAGE = os.path.join(MODULE_DIR, 'precession.png')
# Time grid for the plots: np.linspace default of 50 samples over 0.3 ns.
ts = np.linspace(0, 3e-10)
subfigures = ("without precession", "with precession")
figure, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
for i, subfigure_name in enumerate(subfigures):
    # NOTE(review): `run_simulation` is defined elsewhere in this file;
    # presumably it returns per-timestep (mx, my, mz) tuples -- confirm.
    # `zip(*...)` transposes them to one sequence per component (Python 2:
    # zip returns a list, so m[dim] indexing works).
    m = zip(* run_simulation(bool(i)))
    for dim in xrange(3):
        # chr(120+dim) -> 'x', 'y', 'z' labels.
        axes[i].plot(ts, m[dim], label="m{}".format(chr(120+dim)))
    axes[i].legend()
    axes[i].set_title(subfigure_name)
    axes[i].set_xlabel("time (s)")
    axes[i].set_ylabel("unit magnetisation")
    axes[i].set_ylim([-0.1, 1.0])
figure.savefig(IMAGE)
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
288,
4024,
259,
355,
47764,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
957,
19726,
1330,
41798,
198,
6738,
957,
19726,
13,
877,
70,
444,
1330,
... | 2.366366 | 333 |
#!/usr/bin/env python3
import sys
import matplotlib.pyplot as plt
import numpy as np
from typex.encryptor import Encryptor
# Reset matplotlib to its default style before plotting.
plt.rcdefaults()
# Read the whole text to analyse from stdin.
input_text = sys.stdin.read()
# Count, case-insensitively, how often each alphabet letter occurs;
# characters outside Encryptor.ALPHABET are ignored.
letter_appearances = {}
for char in Encryptor.ALPHABET:
    letter_appearances[char] = 0
for char in input_text:
    if char.upper() in Encryptor.ALPHABET:
        letter_appearances[char.upper()] += 1
# Letters ordered by descending frequency, with the matching counts
# (both sequences are sorted by count, so position i pairs correctly).
sorted_letters = sorted(
    letter_appearances.keys(),
    key=lambda letter: letter_appearances[letter], reverse=True)
sorted_appearances = sorted(letter_appearances.values(), reverse=True)
# Bar chart: one bar per letter, most frequent first.
y_pos = np.arange(len(sorted_appearances))
plt.bar(y_pos, sorted_appearances, align='center', alpha=1)
plt.xticks(y_pos, sorted_letters)
plt.ylabel('Number of occurances')
plt.xlabel('Letter')
plt.title('Letter Frequency')
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
25064,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2099,
87,
13,
12685,
6012,
273,
1330,
14711,
6012,
273,
1... | 2.623794 | 311 |
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import pecan
from pecan import rest
from inventory.api.controllers.v1 import base
from inventory.api.controllers.v1 import cpu
from inventory.api.controllers.v1 import ethernet_port
from inventory.api.controllers.v1 import host
from inventory.api.controllers.v1 import link
from inventory.api.controllers.v1 import lldp_agent
from inventory.api.controllers.v1 import lldp_neighbour
from inventory.api.controllers.v1 import memory
from inventory.api.controllers.v1 import node
from inventory.api.controllers.v1 import pci_device
from inventory.api.controllers.v1 import port
from inventory.api.controllers.v1 import sensor
from inventory.api.controllers.v1 import sensorgroup
from inventory.api.controllers.v1 import system
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
class MediaType(base.APIBase):
    """A media type representation."""
    # Base media type string.
    base = wtypes.text
    # Specific media type string.  NOTE(review): exact semantics depend on
    # how the version document populates it -- confirm against callers.
    type = wtypes.text
class V1(base.APIBase):
"""The representation of the version 1 of the API."""
id = wtypes.text
"The ID of the version, also acts as the release number"
media_types = [MediaType]
"An array of supported media types for this version"
links = [link.Link]
"Links that point to a specific URL for this version and documentation"
systems = [link.Link]
"Links to the system resource"
hosts = [link.Link]
"Links to the host resource"
lldp_agents = [link.Link]
"Links to the lldp agents resource"
lldp_neighbours = [link.Link]
"Links to the lldp neighbours resource"
@classmethod
class Controller(rest.RestController):
"""Version 1 API controller root."""
systems = system.SystemController()
hosts = host.HostController()
nodes = node.NodeController()
cpus = cpu.CPUController()
memorys = memory.MemoryController()
ports = port.PortController()
ethernet_ports = ethernet_port.EthernetPortController()
lldp_agents = lldp_agent.LLDPAgentController()
lldp_neighbours = lldp_neighbour.LLDPNeighbourController()
pci_devices = pci_device.PCIDeviceController()
sensors = sensor.SensorController()
sensorgroups = sensorgroup.SensorGroupController()
@wsme_pecan.wsexpose(V1)
__all__ = ('Controller',)
| [
2,
198,
2,
15069,
357,
66,
8,
2864,
3086,
5866,
11998,
11,
3457,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
628,
198,
11748,
613,
5171,
198,
6738,
613,
5171,
1330,
1334,
198,
... | 3.051248 | 761 |
import pytest
from thumbor_extras.detectors.dnn_face_detector import Detector
@pytest.mark.parametrize('image_context_arg', [
'face_image_context', 'gray_face_image_context', 'cmyk_face_image_context'
])
| [
11748,
12972,
9288,
198,
6738,
15683,
273,
62,
2302,
8847,
13,
15255,
478,
669,
13,
67,
20471,
62,
2550,
62,
15255,
9250,
1330,
4614,
9250,
198,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
10786,
9060,
62,
22866,
62,
8... | 2.64557 | 79 |
import time
# Hall-effect sensor on input channel 1; poll it forever and show the most
# recent reading on the display.
hall1 = Sensor(1)
hall1.enable()
# set start times to show display is working (sentinel before the first
# real measurement arrives)
t1 = 999
while True:
    td1 = hall1.getDiff()
    # getDiff() returns None while no new measurement is available; keep
    # displaying the last known value in that case.  (`is not None`
    # replaces the non-idiomatic `!= None` comparison.)
    if td1 is not None:
        t1 = td1
    printToDisplay(t1)
| [
11748,
640,
198,
198,
18323,
16,
796,
35367,
7,
16,
8,
198,
18323,
16,
13,
21633,
3419,
198,
198,
2,
900,
923,
1661,
284,
905,
3359,
318,
1762,
198,
83,
16,
796,
36006,
198,
198,
4514,
6407,
25,
198,
197,
8671,
16,
796,
6899,
16... | 2.410256 | 78 |
"""
By David Oswald, d.f.oswald@cs.bham.ac.uk
26 August 2015
Some of this code is based on information or code from
- Sam Kerr: http://samuelkerr.com/?p=431
- Eli Bendersky: http://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python/
- http://cr.yp.to/highspeed/naclcrypto-20090310.pdf, page 7
The code of Eli is in the public domain:
"Some of the blog posts contain code; unless otherwise stated, all of it is
in the public domain"
=======================================================================
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
=======================================================================
If this software is useful to you, I'd appreciate an attribution,
contribution (e.g. bug fixes, improvements, ...), or a beer.
"""
from smartcard.Exceptions import NoCardException
from smartcard.System import *
from smartcard.util import toHexString
from struct import *
from timeit import default_timer as timer
if __name__ == '__main__':
main()
| [
37811,
198,
2750,
3271,
34374,
11,
288,
13,
69,
13,
418,
21667,
31,
6359,
13,
65,
2763,
13,
330,
13,
2724,
198,
2608,
2932,
1853,
198,
220,
198,
2773,
286,
428,
2438,
318,
1912,
319,
1321,
393,
2438,
422,
198,
220,
198,
532,
3409,... | 3.621053 | 475 |
#!/usr/bin/env python3
import math
import time
if __name__ == "__main__":
    # Use time.perf_counter() rather than time.time() to measure elapsed
    # time: it is monotonic and high-resolution, so the reported duration
    # cannot be skewed by wall-clock adjustments (e.g. NTP).
    start = time.perf_counter()
    main()
    end = time.perf_counter()
    print("Duration: {0:0.6f}s".format(end - start))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
10688,
198,
11748,
640,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
923,
796,
640,
13,
2435,
3419,
198,
220,
220,
220,... | 2.3625 | 80 |
#!/bin/python
# coding=utf-8
from optparse import OptionParser
import urllib
import re
osmap = {
'mac' : 'Mac',
'osx' : 'Mac',
'win' : 'Windows',
'win64' : 'Windows64',
'win32' : 'Windows32',
}
# -------------- main --------------
if __name__ == '__main__':
    # Command-line interface: pick Unity installer download URLs filtered
    # by version and OS.  NOTE: Python 2 code (print statements below).
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option(
        '-v', '--version', dest='version',
        help='filter unity version, beta last latest 5 5.3[+|-] 5.3.5')
    parser.add_option(
        '-o', '--os', dest='os',
        help='filter os, win[64|32] or osx or mac')
    parser.add_option(
        '-l', '--list', dest='list', action='store_true', default=False,
        help='show a list')
    (opts, args) = parser.parse_args()
    # Beta builds are scraped from a separate source.
    # (getBetaUrlTuples/getUrlTuples/filterUrlTuples are defined elsewhere
    # in this file.)
    if opts.version == 'beta':
        urlTuples = getBetaUrlTuples()
    else:
        urlTuples = getUrlTuples()
    urlTuples = filterUrlTuples(urlTuples, opts.os, opts.version)
    # With -l print every matching URL, otherwise only the first match.
    if opts.list:
        for urlTuple in urlTuples:
            print urlTuple[0]
    elif len(urlTuples) > 0:
        print urlTuples[0][0]
| [
2,
48443,
8800,
14,
29412,
198,
2,
19617,
28,
40477,
12,
23,
628,
198,
6738,
2172,
29572,
1330,
16018,
46677,
198,
11748,
2956,
297,
571,
198,
11748,
302,
198,
198,
418,
8899,
796,
1391,
198,
220,
220,
220,
705,
20285,
6,
220,
220,
... | 2.195219 | 502 |
dicts={
0xe88f9c:
[0x00,0x00,0x00,0x00,0x00,0x3F,0x00,0x00,0x00,0x00,0x07,0x00,0x02,0x01,0x00,0x00,0x00,0x00,0x7F,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x0C,0x30,0x00,0x00,
0x00,0x00,0x1C,0x18,0x18,0xFF,0x18,0x18,0x10,0x00,0xFF,0x02,0x01,0x81,0xC0,0xC0,0x41,0x01,0xFF,0x07,0x0D,0x19,0x31,0x61,0xC1,0x01,0x01,0x01,0x01,0x00,
0x00,0x00,0x38,0x30,0x30,0xFF,0x30,0x30,0x07,0xFF,0x00,0x01,0x81,0xC3,0xC2,0x86,0xCC,0x88,0xFF,0xA0,0xA0,0x90,0x8C,0x86,0x83,0x81,0x80,0x80,0x00,0x00,
0x00,0x00,0x00,0x00,0x38,0xFC,0x00,0x80,0xC0,0xC0,0x00,0x80,0xC0,0x00,0x00,0x00,0x00,0x18,0xFC,0x00,0x00,0x00,0x00,0x00,0x80,0xF0,0x7C,0x10,0x00,0x00],#/*"菜",0*/
0xe58d95:
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x3F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x40,0x20,0x38,0x18,0x0C,0x08,0xFF,0x01,0x01,0x01,0x01,0xFF,0x01,0x01,0x01,0x01,0xFF,0x01,0x01,0x01,0xFF,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x00,
0x00,0x00,0x0C,0x0C,0x18,0x10,0x20,0xFF,0x80,0x80,0x80,0x80,0xFF,0x80,0x80,0x80,0x80,0xFF,0x80,0x80,0x80,0xFF,0x80,0x80,0x80,0x80,0x80,0x80,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x00,0x00,0x38,0xFC,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"单",1*/
0xe697b6:
[0x00,0x00,0x00,0x00,0x00,0x10,0x1F,0x18,0x18,0x18,0x18,0x18,0x18,0x1F,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x1F,0x18,0x18,0x10,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x30,0xF0,0x30,0x30,0x3F,0x30,0x30,0x30,0xF3,0x31,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0xF0,0x30,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0xFF,0x03,0x03,0x03,0x03,0x83,0xC3,0xE3,0x63,0x43,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x3F,0x06,0x04,0x00,
0x00,0x00,0x00,0x80,0x00,0x00,0x00,0x00,0x18,0xFC,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"时",0*/
0xe9929f:
[0x00,0x00,0x03,0x03,0x03,0x06,0x06,0x05,0x0C,0x08,0x08,0x1F,0x13,0x23,0x43,0x03,0x03,0x3F,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x18,0xE0,0x01,0x01,0x11,0xF9,0x01,0x01,0x01,0x01,0x19,0xE1,0x01,0x00,0x00,0x08,0x10,0x60,0xC0,0x80,0x00,0x00,0x00,0x00,
0x00,0x00,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0xFF,0x86,0x86,0x86,0x86,0x86,0x86,0xFF,0x86,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x06,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0xF8,0x10,0x10,0x10,0x10,0x10,0x10,0xF0,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"钟",1*/
0xe997b9:
[0x00,0x00,0x01,0x00,0x00,0x00,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x0C,0x08,0x00,
0x00,0x00,0x00,0xC0,0xE7,0x60,0x42,0x03,0x01,0x00,0xFF,0x01,0x01,0x01,0x7F,0x61,0x61,0x61,0x61,0x61,0x61,0x61,0x61,0x01,0x01,0x01,0x01,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x80,0x80,0x82,0xFF,0x80,0x80,0x80,0xFE,0x86,0x86,0x86,0x86,0x86,0x86,0xBC,0x8C,0x80,0x80,0x80,0x03,0x00,0x00,0x00,
0x00,0x00,0x00,0x30,0xF0,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0xE0,0xE0,0x00,0x00],#/*"闹",0*/
'30':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x03,0x07,0x0F,0x0F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x0F,0x0F,0x07,0x03,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x07,0x7C,0xF0,0xC0,0x80,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0x80,0xC0,0xE0,0x7C,0x07,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0x1E,0x07,0x03,0x01,0x01,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x01,0x03,0x07,0x1E,0xE0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xE0,0xF0,0xF0,0xF8,0xF8,0xF8,0xF8,0xF8,0xF0,0xF0,0xE0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"0",0*/
'31':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x01,0xFF,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x07,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x40,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xC0,0xF0,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0x00,0x00,0x00,0x00],#/*"1",1*/
'32':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,0x07,0x0F,0x0F,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0C,0x1F,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0x80,0x00,0x00,0xC0,0xC0,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x1C,0x70,0xC0,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xF0,0x1F,0x03,0x01,0x01,0x01,0x01,0x03,0x07,0x0F,0x1C,0x70,0xC0,0x00,0x00,0x00,0x00,0x00,0x01,0xFF,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xE0,0xE0,0xE0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x30,0x60,0xE0,0xE0,0xE0,0x00,0x00,0x00,0x00],#/*"2",2*/
'33':
[0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x07,0x07,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x0F,0x0F,0x07,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x1F,0xE0,0x80,0x80,0x80,0x80,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x80,0x80,0x80,0x80,0xE0,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0x3E,0x0F,0x07,0x03,0x03,0x03,0x07,0x1E,0xE0,0x7C,0x07,0x03,0x01,0x01,0x01,0x01,0x01,0x03,0x1E,0xE0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xC0,0xC0,0xC0,0xC0,0x80,0x00,0x00,0x00,0x80,0xE0,0xE0,0xF0,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00],#/*"3",3*/
'34':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0C,0x18,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0C,0x18,0x70,0xC0,0x80,0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0E,0x1E,0x7E,0xDE,0x9E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0x1E,0xFF,0x1E,0x1E,0x1E,0x1E,0x3F,0xFF,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF8,0x00,0x00,0x00,0x00,0x00,0xF8,0x00,0x00,0x00,0x00],#/*"4",4*/
'35':
[0x00,0x00,0x00,0x00,0x00,0x01,0x01,0x01,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00,0x0F,0x0F,0x0F,0x07,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x3F,0xE0,0x80,0x00,0x00,0x00,0x00,0x80,0x80,0x00,0x00,0xE0,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0xFE,0x07,0x03,0x01,0x00,0x00,0x00,0x00,0x01,0x01,0x07,0x1F,0xF0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0xE0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xE0,0xE0,0xF0,0xF0,0xF0,0xF0,0xE0,0xE0,0x80,0x00,0x00,0x00,0x00,0x00,0x00],#/*"5",5*/
'36':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x03,0x07,0x0F,0x0F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x1F,0x0F,0x07,0x07,0x01,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x01,0x3E,0xE0,0xC0,0x80,0x80,0x00,0x00,0x0F,0x78,0xC0,0x80,0x00,0x00,0x00,0x00,0x80,0xC0,0xE0,0x78,0x07,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFC,0x07,0x07,0x07,0x00,0x00,0x00,0x00,0xFF,0x07,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0xF8,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xC0,0xC0,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xF0,0xF8,0xF8,0xF8,0xF8,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00],#/*"6",6*/
'37':
[0x00,0x00,0x00,0x00,0x00,0x03,0x07,0x07,0x06,0x0C,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x03,0x07,0x07,0x0F,0x0F,0x1F,0x1F,0x1F,0x07,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x01,0x03,0x06,0x0C,0x18,0x30,0x70,0xE0,0xC0,0xC0,0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"7",7*/
'38':
[0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0F,0x0E,0x0F,0x0F,0x07,0x01,0x00,0x00,0x03,0x07,0x0E,0x1C,0x1C,0x1C,0x0E,0x07,0x01,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0x80,0x00,0x00,0x00,0x80,0xE0,0xFC,0x3F,0x77,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE0,0x1F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xF0,0x0F,0x01,0x00,0x00,0x00,0x00,0x01,0x07,0xF8,0xFC,0x3F,0x07,0x01,0x00,0x00,0x00,0x00,0x01,0x0F,0xF0,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xC0,0xE0,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x80,0xC0,0xE0,0xF0,0xF0,0xF0,0xE0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00],#/*"8",8*/
'39':
[0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07,0x0F,0x1E,0x1E,0x1E,0x1E,0x1E,0x1F,0x0F,0x03,0x00,0x00,0x00,0x00,0x00,0x07,0x07,0x03,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x1F,0xE0,0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xE0,0xFF,0x00,0x00,0x00,0x00,0xC0,0xC0,0xE0,0x7F,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0xE0,0x1E,0x07,0x03,0x01,0x00,0x00,0x00,0x01,0x03,0x06,0x38,0xE1,0x01,0x01,0x03,0x03,0x07,0x1E,0x78,0x80,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,0xC0,0xE0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xF0,0xE0,0xC0,0xC0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*"9",9*/
'20':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00],#/*" ",1*/
'3a':
[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x40,0x40,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x40,0x40,0x00,0x00,0x00,0x00]#/*":",1*/
} | [
11600,
82,
34758,
198,
15,
27705,
3459,
69,
24,
66,
25,
198,
197,
58,
15,
87,
405,
11,
15,
87,
405,
11,
15,
87,
405,
11,
15,
87,
405,
11,
15,
87,
405,
11,
15,
87,
18,
37,
11,
15,
87,
405,
11,
15,
87,
405,
11,
15,
87,
4... | 1.202212 | 8,501 |
import tensorflow as tf
import numpy as np
# computes VGG loss or content loss
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
628,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
1303,
552,
1769,
569,
11190,
2994,
393,
2695,
2994,
628,
220,
220,
220,
220
] | 2.512821 | 39 |
from ..utils import Object
class GetSuitableDiscussionChats(Object):
"""
Returns a list of basic group and supergroup chats, which can be used as a discussion group for a channel. Basic group chats need to be first upgraded to supergroups before they can be set as a discussion group
Attributes:
ID (:obj:`str`): ``GetSuitableDiscussionChats``
No parameters required.
Returns:
Chats
Raises:
:class:`telegram.Error`
"""
ID = "getSuitableDiscussionChats"
@staticmethod
| [
198,
198,
6738,
11485,
26791,
1330,
9515,
628,
198,
4871,
3497,
5606,
4674,
34255,
1925,
1381,
7,
10267,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
16409,
257,
1351,
286,
4096,
1448,
290,
2208,
8094,
40815,
11,
543,
460,
307... | 3.028249 | 177 |