hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb495600a41947b1d94bf9580b821c4073762c83 | 1,431 | py | Python | builder/wheel.py | allenporter/home-assistant-wheels | c7ae5629b7045691d495cf40e9ae123946ba9c76 | [
"Apache-2.0"
] | 14 | 2021-01-17T23:31:10.000Z | 2022-02-25T18:23:53.000Z | builder/wheel.py | allenporter/home-assistant-wheels | c7ae5629b7045691d495cf40e9ae123946ba9c76 | [
"Apache-2.0"
] | 140 | 2020-02-03T11:12:29.000Z | 2022-03-29T07:24:34.000Z | builder/wheel.py | allenporter/home-assistant-wheels | c7ae5629b7045691d495cf40e9ae123946ba9c76 | [
"Apache-2.0"
] | 14 | 2020-07-05T07:40:57.000Z | 2022-03-10T23:26:14.000Z | """Utils for wheel."""
from contextlib import suppress
from pathlib import Path
import re
import shutil
from .utils import run_command, build_arch
RE_WHEEL_PLATFORM = re.compile(r"^(?P<name>.*-)cp\d{2}m?-linux_\w+\.whl$")
ARCH_PLAT = {
"amd64": "linux_x86_64",
"i386": "linux_i686",
"aarch64": "linux_aarch64",
"armhf": "linux_armv7l",
"armv7": "linux_armv7l",
}
def fix_wheels_name(wheels_folder: Path) -> None:
"""Remove platform tag from filename."""
for package in wheels_folder.glob("*.whl"):
match = RE_WHEEL_PLATFORM.match(package.name)
if not match:
continue
package.rename(Path(package.parent, f"{match.group('name')}none-any.whl"))
def copy_wheels_from_cache(cache_folder: Path, wheels_folder: Path) -> None:
"""Preserve wheels from cache on timeout error."""
for wheel_file in cache_folder.glob("**/*.whl"):
with suppress(OSError):
shutil.copy(wheel_file, wheels_folder)
def run_auditwheel(wheels_folder: Path) -> None:
"""Run auditwheel to include shared library."""
platform = ARCH_PLAT[build_arch()]
for wheel_file in wheels_folder.glob("*.whl"):
if not RE_WHEEL_PLATFORM.match(wheel_file.name):
continue
run_command(
f"auditwheel repair --plat {platform} --no-update-tags -w {wheels_folder} {wheel_file}"
)
| 30.446809 | 100 | 0.639413 | """Utils for wheel."""
from contextlib import suppress
from pathlib import Path
import re
import shutil
from .utils import run_command, build_arch
RE_WHEEL_PLATFORM = re.compile(r"^(?P<name>.*-)cp\d{2}m?-linux_\w+\.whl$")
ARCH_PLAT = {
"amd64": "linux_x86_64",
"i386": "linux_i686",
"aarch64": "linux_aarch64",
"armhf": "linux_armv7l",
"armv7": "linux_armv7l",
}
def fix_wheels_name(wheels_folder: Path) -> None:
"""Remove platform tag from filename."""
for package in wheels_folder.glob("*.whl"):
match = RE_WHEEL_PLATFORM.match(package.name)
if not match:
continue
package.rename(Path(package.parent, f"{match.group('name')}none-any.whl"))
def copy_wheels_from_cache(cache_folder: Path, wheels_folder: Path) -> None:
"""Preserve wheels from cache on timeout error."""
for wheel_file in cache_folder.glob("**/*.whl"):
with suppress(OSError):
shutil.copy(wheel_file, wheels_folder)
def run_auditwheel(wheels_folder: Path) -> None:
"""Run auditwheel to include shared library."""
platform = ARCH_PLAT[build_arch()]
for wheel_file in wheels_folder.glob("*.whl"):
if not RE_WHEEL_PLATFORM.match(wheel_file.name):
continue
run_command(
f"auditwheel repair --plat {platform} --no-update-tags -w {wheels_folder} {wheel_file}"
)
| 0 | 0 | 0 |
1cb684d2bf98f35c08eef045efcf405bf30ab2ea | 1,135 | py | Python | packnet/interface.py | c0mplh4cks/packnet | 8a4fa4f398e59a9c0e35d35ad8194d3ed8b56cb7 | [
"MIT"
] | 2 | 2021-01-08T12:26:51.000Z | 2021-09-17T05:16:33.000Z | packnet/interface.py | c0mplh4cks/packnet | 8a4fa4f398e59a9c0e35d35ad8194d3ed8b56cb7 | [
"MIT"
] | null | null | null | packnet/interface.py | c0mplh4cks/packnet | 8a4fa4f398e59a9c0e35d35ad8194d3ed8b56cb7 | [
"MIT"
] | 2 | 2020-12-14T21:18:48.000Z | 2021-09-17T11:04:36.000Z | """
PACKNET - c0mplh4cks
INTERFACE
"""
# === Importing Dependencies === #
import socket
from time import time
from .standards import encode, decode
from . import ADDR, MAC
# === Interface === #
| 19.568966 | 92 | 0.574449 | """
PACKNET - c0mplh4cks
INTERFACE
"""
# === Importing Dependencies === #
import socket
from time import time
from .standards import encode, decode
from . import ADDR, MAC
# === Interface === #
class Interface():
def __init__(self, card=None, port=0, passive=False, timeout=64):
self.passive = passive
self.sock = socket.socket( socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0003) )
self.sock.settimeout(timeout)
if not card:
self.card = [ i[1] for i in socket.if_nameindex() ][-1]
else:
self.card = card
if not passive:
s = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
s.setsockopt( socket.SOL_SOCKET, 25, f"{self.card}".encode() )
s.connect( ("1.1.1.1", 80) )
ip = s.getsockname()[0]
self.sock.bind( (self.card, 0) )
mac = MAC.decode( self.sock.getsockname()[4] )[1]
self.addr = ADDR(ip, port, mac)
def send(self, packet):
self.sock.send(packet)
def recv(self, length=2048):
return self.sock.recvfrom(length)
| 819 | -3 | 102 |
95d66a14992e28b3c373cbfacf4f6d039c4b0e91 | 359 | py | Python | smp_manifold_learning/scripts/create_dir_if_not_exist.py | gsutanto/smp_manifold_learning | 60ef8278942c784c8d3bcd0a09031475f80d96fb | [
"MIT"
] | 11 | 2020-09-26T12:13:01.000Z | 2022-03-23T07:34:14.000Z | smp_manifold_learning/scripts/create_dir_if_not_exist.py | gsutanto/smp_manifold_learning | 60ef8278942c784c8d3bcd0a09031475f80d96fb | [
"MIT"
] | 1 | 2021-04-10T10:42:28.000Z | 2021-04-16T07:04:26.000Z | smp_manifold_learning/scripts/create_dir_if_not_exist.py | gsutanto/smp_manifold_learning | 60ef8278942c784c8d3bcd0a09031475f80d96fb | [
"MIT"
] | 5 | 2020-09-24T18:52:46.000Z | 2022-03-23T07:26:15.000Z | import argparse
from smp_manifold_learning.differentiable_models.utils import create_dir_if_not_exist
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("-d", "--dir_path", default='../plot/ecmnn/', type=str)
if __name__ == '__main__':
args = parser.parse_args()
dir_path = args.dir_path
create_dir_if_not_exist(dir_path) | 29.916667 | 85 | 0.774373 | import argparse
from smp_manifold_learning.differentiable_models.utils import create_dir_if_not_exist
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("-d", "--dir_path", default='../plot/ecmnn/', type=str)
if __name__ == '__main__':
args = parser.parse_args()
dir_path = args.dir_path
create_dir_if_not_exist(dir_path) | 0 | 0 | 0 |
9f4fe7c4c5b6a35be415fce1b8b7462a20ce88df | 432 | py | Python | dashboard/migrations/0010_antenna_ant_name.py | HERA-Team/heranow | 1bc827459a7a92f600cefbd0c8a08f629a211cda | [
"BSD-3-Clause"
] | null | null | null | dashboard/migrations/0010_antenna_ant_name.py | HERA-Team/heranow | 1bc827459a7a92f600cefbd0c8a08f629a211cda | [
"BSD-3-Clause"
] | 6 | 2020-09-10T05:33:17.000Z | 2021-03-16T20:36:47.000Z | dashboard/migrations/0010_antenna_ant_name.py | HERA-Team/heranow | 1bc827459a7a92f600cefbd0c8a08f629a211cda | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.0.3 on 2020-08-13 18:04
from django.db import migrations, models
| 21.6 | 64 | 0.592593 | # Generated by Django 3.0.3 on 2020-08-13 18:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("dashboard", "0009_hookupnotes"),
]
operations = [
migrations.AddField(
model_name="antenna",
name="ant_name",
field=models.CharField(default="N/A", max_length=5),
preserve_default=False,
),
]
| 0 | 318 | 23 |
55f3a6b88a946898726d384c6d0d5e2eb62cc48f | 828 | py | Python | website-backend/ava/common/uuid_utils.py | kbladin/ava-capture | 2fc24f4a3712f721c3a229b499631e00697209a5 | [
"BSD-3-Clause"
] | null | null | null | website-backend/ava/common/uuid_utils.py | kbladin/ava-capture | 2fc24f4a3712f721c3a229b499631e00697209a5 | [
"BSD-3-Clause"
] | null | null | null | website-backend/ava/common/uuid_utils.py | kbladin/ava-capture | 2fc24f4a3712f721c3a229b499631e00697209a5 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (c) 2017 Electronic Arts Inc. All Rights Reserved
#
import uuid
# https://stackoverflow.com/questions/1181919/python-base-36-encoding/1181924
def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
"""Converts an integer to a base36 string."""
if not isinstance(number, (int, long)):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
| 25.090909 | 77 | 0.655797 | #
# Copyright (c) 2017 Electronic Arts Inc. All Rights Reserved
#
import uuid
# https://stackoverflow.com/questions/1181919/python-base-36-encoding/1181924
def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
"""Converts an integer to a base36 string."""
if not isinstance(number, (int, long)):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def uuid_node_base36():
# Returns the computer's unique hardware address, encoded as base36
return base36encode(uuid.getnode())
| 114 | 0 | 23 |
2c90a03c1e24538f9c99a9df8168f749aec70377 | 4,801 | py | Python | med_log.py | Zierman/medlog | ad0d932187d67b34895c7a74dc7deea8df1921c3 | [
"MIT"
] | null | null | null | med_log.py | Zierman/medlog | ad0d932187d67b34895c7a74dc7deea8df1921c3 | [
"MIT"
] | 2 | 2021-12-05T06:29:41.000Z | 2021-12-05T07:50:14.000Z | med_log.py | Zierman/medlog | ad0d932187d67b34895c7a74dc7deea8df1921c3 | [
"MIT"
] | null | null | null | from __future__ import annotations
from dataclasses import dataclass, Field
from datetime import datetime, timedelta
from typing import Union, Optional, Tuple, List
from parse import parse
from med import Med, MedRegistry, DOSAGE_PARSE_FORMAT
DEFAULT_LOG_FILE = 'logs/med.log'
DEFAULT_DATE_TIME_FORMAT = r'%m/%d/%Y %H:%M'
@dataclass
| 35.043796 | 113 | 0.656738 | from __future__ import annotations
from dataclasses import dataclass, Field
from datetime import datetime, timedelta
from typing import Union, Optional, Tuple, List
from parse import parse
from med import Med, MedRegistry, DOSAGE_PARSE_FORMAT
DEFAULT_LOG_FILE = 'logs/med.log'
DEFAULT_DATE_TIME_FORMAT = r'%m/%d/%Y %H:%M'
class NextDose:
def __init__(self, *, entry=None, med=None, t_override=None):
if isinstance(entry, Med):
med, entry = entry, None
if entry is None:
if med is None:
raise TypeError("NextDose requires either an entry or med argument")
self._d = {'time': datetime.now(), 'amount': f'{med.standard_dose_amount}{med.standard_dose_unit}'}
else:
t = max((entry.dose_administrated_date_time + entry.med.time_between_standard_doses), datetime.now())
self._d = {'time': t,
'amount': f'{entry.med.standard_dose_amount}{entry.med.standard_dose_unit}'}
if t_override is not None:
self._d['time'] = t_override
@property
def time(self) -> datetime:
return self._d['time']
@property
def amount(self) -> str:
return self._d['amount']
def __str__(self):
return f"next dose: {self.amount} at {self.time.strftime(DEFAULT_DATE_TIME_FORMAT)}"
@dataclass
class MedLogEntry:
med: Med
dose_administrated_amount: float
dose_administrated_unit: str
dose_administrated_date_time: datetime
def __str__(self):
t = self.dose_administrated_date_time.strftime(DEFAULT_DATE_TIME_FORMAT)
return f'{t} {self.med.name} {self.dose_administrated_amount}{self.dose_administrated_unit}'
@classmethod
def from_str(cls, s: str) -> MedLogEntry:
words = s.split(' ')
date, time = words[:2]
datetime_obj = datetime.strptime(f'{date} {time}', DEFAULT_DATE_TIME_FORMAT)
dose_amount, dose_unit = parse(DOSAGE_PARSE_FORMAT, ' '.join(words[-1:]))
med_name = ' '.join(words[2:-1])
return MedLogEntry(med=MedRegistry.get(med_name), dose_administrated_amount=dose_amount,
dose_administrated_unit=dose_unit, dose_administrated_date_time=datetime_obj)
@property
def next_dose(self):
return NextDose(entry=self)
def log(med,
dose_administrated_amount = None,
dose_administrated_unit = None,
dose_administrated_date_time=None,
log_file=None) -> MedLogEntry:
if not log_file:
log_file = DEFAULT_LOG_FILE
if dose_administrated_date_time is None:
dose_administrated_date_time = datetime.now()
if dose_administrated_unit is None:
dose_administrated_unit = med.standard_dose_unit
if dose_administrated_amount is None:
dose_administrated_amount = med.standard_dose_amount
entry = MedLogEntry(med=med,
dose_administrated_amount=dose_administrated_amount,
dose_administrated_unit=dose_administrated_unit,
dose_administrated_date_time=dose_administrated_date_time)
with open(log_file, 'a') as file:
file.write(f'{entry}\n')
return entry
def next_dose(med: Med,
log_file=None) -> NextDose:
if not log_file:
log_file = DEFAULT_LOG_FILE
name = med.name
matched_lines = []
with open(log_file, 'r') as file:
lines = file.readlines()
for line in lines:
entry = MedLogEntry.from_str(line)
if med == entry.med:
matched_lines.append(line)
max_per_24hr = med.max_standard_doses_per_day
if not matched_lines or not max_per_24hr:
return NextDose(med=med)
elif len(matched_lines) < max_per_24hr:
return MedLogEntry.from_str(matched_lines[-1]).next_dose
else:
t = MedLogEntry.from_str(matched_lines[-max_per_24hr]).dose_administrated_date_time + timedelta(hours=24)
last = MedLogEntry.from_str(matched_lines[-1])
if t > last.next_dose.time:
return NextDose(entry=last, t_override=t)
else:
return NextDose(entry=last)
def print_log(meds: Optional[Tuple[Med], List[Med]] = None,
log_file=None, ignore_case=False):
if not log_file:
log_file = DEFAULT_LOG_FILE
with open(log_file, 'r') as file:
lines = file.readlines()
is_filtered = bool(not meds)
for line in lines:
entry = MedLogEntry.from_str(line)
if ignore_case and is_filtered:
match_found = any(m.name.casefold() == entry.med.name.casefold() for m in meds)
elif is_filtered:
match_found = any(m == entry.med for m in meds)
else:
match_found = True
if match_found:
print(line, end='')
| 3,982 | 364 | 114 |
a9ab98b718aa838357f77aed9bea96e0588c0a11 | 10,896 | py | Python | ffta/analysis/filtering.py | lindat18/ffta | f510a2068b7626e2984e54afc1a577450e560e97 | [
"MIT"
] | null | null | null | ffta/analysis/filtering.py | lindat18/ffta | f510a2068b7626e2984e54afc1a577450e560e97 | [
"MIT"
] | null | null | null | ffta/analysis/filtering.py | lindat18/ffta | f510a2068b7626e2984e54afc1a577450e560e97 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 26 17:15:36 2018
@author: Raj
"""
import pycroscopy as px
from pycroscopy.processing.fft import FrequencyFilter
import pyUSID as usid
import numpy as np
from scipy import signal as sps
from ffta.load import get_utils
from ffta import pixel
from matplotlib import pyplot as plt
import warnings
'''
For filtering data using the pycroscopy filter command
To set up a filter, you can choose any of the following:
Harmonic Filter: pick a frequency and bandpass filters that + 2w + 3e etc
Bandpass Filter: pick a specific frequency and pass that
Lowpass Filter: pick a frequency and pass all below that
Noise Filter: pick frequencies to selectively remove (like electrical noise, etc)
# a harmonic filter center of 2000 points long at 100kHz and 2*100 kHz, with a 5000 Hz wide window, at 1 MHz sampling
>>> hbf = px.processing.fft.HarmonicPassFilter(2000, 10e6, 100e3, 5000, 2)
>>> ffta.hdf_utils.filtering.test_filter(h5_main, hbf) #will display the result before applying to the whole dataset
>>> ffta.hdf_utils.filtering.fft_filter(h5_main, hbf)
'''
def test_filter(hdf_file, freq_filts, parameters={}, pixelnum=[0, 0], noise_tolerance=5e-7,
show_plots=True, check_filter=True):
"""
Applies FFT Filter to the file at a specific line and displays the result
Parameters
----------
hdf_file : h5Py file or Nx1 NumPy array (preferred is NumPy array)
hdf_file to work on, e.g. hdf.file['/FF-raw'] if that's a Dataset
if ndarray, uses passed or default parameters
Use ndarray.flatten() to ensure correct dimensions
freq_filts : list of FrequencyFilter class objects
Contains the filters to apply to the test signal
parameters : dict, optional
Contains parameters in FF-raw file for constructing filters. Automatic if a Dataset/File
Must contain num_pts and samp_rate to be functional
pixelnum : int, optional
For extracting a specific pixel to do FFT Filtering on
show_plots : bool, optional
Turns on FFT plots from Pycroscopy
noise_tolerance : float 0 to 1
Amount of noise below which signal is set to 0
Returns
-------
filt_line : numpy.ndarray
Filtered signal of hdf_file
freq_filts : list
The filter parameters to be passed to SignalFilter
fig_filt, axes_filt: matplotlib controls
Only functional if show_plots is on
"""
reshape = False
ftype = str(type(hdf_file))
if ('h5py' in ftype) or ('Dataset' in ftype): # hdf file
parameters = get_utils.get_params(hdf_file)
hdf_file = get_utils.get_pixel(hdf_file, [pixelnum[0], pixelnum[1]], array_form=True, transpose=False)
hdf_file = hdf_file.flatten()
if len(hdf_file.shape) == 2:
reshape = True
hdf_file = hdf_file.flatten()
sh = hdf_file.shape
# Test filter on a single line:
filt_line, fig_filt, axes_filt = px.processing.gmode_utils.test_filter(hdf_file,
frequency_filters=freq_filts,
noise_threshold=noise_tolerance,
show_plots=show_plots)
# If need to reshape
if reshape:
filt_line = np.reshape(filt_line, sh)
# Test filter out in Pixel
if check_filter:
plt.figure()
plt.plot(hdf_file, 'b')
plt.plot(filt_line, 'k')
h5_px_filt = pixel.Pixel(filt_line, parameters)
h5_px_filt.clear_filter_flags()
h5_px_filt.analyze()
h5_px_filt.plot(newplot=True)
h5_px_raw = pixel.Pixel(hdf_file, parameters)
h5_px_raw.analyze()
h5_px_raw.plot(newplot=True)
# h5_px_raw_unfilt = pixel.Pixel(hdf_file, parameters)
# h5_px_raw_unfilt.clear_filter_flags()
# h5_px_raw_unfilt.analyze()
# h5_px_raw_unfilt.plot(newplot=False,c1='y', c2='c')
return filt_line, freq_filts, fig_filt, axes_filt
def fft_filter(h5_main, freq_filts, noise_tolerance=5e-7, make_new=False, verbose=False):
"""
Stub for applying filter above to the entire FF image set
Parameters
----------
h5_main : h5py.Dataset object
Dataset to work on, e.g. h5_main = px.hdf_utils.getDataSet(hdf.file, 'FF_raw')[0]
freq_filts : list
List of frequency filters usually generated in test_line above
noise_tolerance : float, optional
Level below which data are set to 0. Higher values = more noise (more tolerant)
make_new : bool, optional
Allows for re-filtering the data by creating a new folder
Returns
-------
h5_filt : Dataset
Filtered dataset within latest -FFT_Filtering Group
"""
h5_filt_grp = usid.hdf_utils.check_for_old(h5_main, 'FFT_Filtering')
if make_new == True or not any(h5_filt_grp):
sig_filt = px.processing.SignalFilter(h5_main, frequency_filters=freq_filts,
noise_threshold=noise_tolerance,
write_filtered=True, write_condensed=False,
num_pix=1, verbose=verbose, cores=2, max_mem_mb=512)
h5_filt_grp = sig_filt.compute()
else:
print('Taking previously computed results')
h5_filt = h5_filt_grp[0]['Filtered_Data']
h5_filt = h5_filt_grp['Filtered_Data']
usid.hdf_utils.copy_attributes(h5_main.parent, h5_filt)
usid.hdf_utils.copy_attributes(h5_main.parent, h5_filt.parent)
return h5_filt
def lowpass(hdf_file, parameters={}, pixelnum=[0, 0], f_cutoff=None):
'''
Interfaces to px.pycroscopy.fft.LowPassFilter
:param hdf_file:
:param parameters:
:param pixelnum:
See test_filter below
:param f_cutoff: int
frequency to cut off. Defaults to 2*drive frequency rounded to nearest 100 kHz
'''
hdf_file, num_pts, drive, samp_rate = _get_pixel_for_filtering(hdf_file, parameters, pixelnum)
if not f_cutoff:
lpf_cutoff = np.round(drive / 1e5, decimals=0) * 2 * 1e5 # 2times the drive frequency, round up
lpf = px.processing.fft.LowPassFilter(num_pts, samp_rate, lpf_cutoff)
return lpf
def bandpass(hdf_file, parameters={}, pixelnum=[0, 0], f_center=None, f_width=10e3, harmonic=None, fir=False):
'''
Interfaces to pycroscopy.processing.fft.BandPassFilter
Note that this is effectively a Harmonic Filter of number_harmonics 1, but with finite impulse response option
:param hdf_file:
:param parameters:
:param pixelnum:
See test_filter below
:param f_center: int
center frequency for the specific band to pass
:param f_width: int
width of frequency to pass
:param harmonic: int
if specified, sets the band to this specific multiple of the drive frequency
:param fir: bool
uses an Finite Impulse Response filter instead of a normal boxcar
'''
hdf_file, num_pts, drive, samp_rate = _get_pixel_for_filtering(hdf_file, parameters, pixelnum)
# default is the 2*w signal (second harmonic for KPFM)
if not f_center:
if not harmonic:
f_center = drive * 2
else:
f_center = int(drive * harmonic)
bpf = px.processing.fft.BandPassFilter(num_pts, samp_rate, f_center, f_width, fir=fir)
return bpf
def harmonic(hdf_file, parameters={}, pixelnum=[0, 0], first_harm=1, bandwidth=None, num_harmonics=5):
'''
Interfaces with px.processing.fft.HarmonicFilter
Parameters
----------
hdf_file, parameters, pixelnum : see comments in test_filter below
first_harm : int
The first harmonic based on the drive frequency to use
For G-KPFM this should be explicitly set to 2
bandwidth : int
bandwidth for filtering. For computational purposes this is hard-set to 2500 (2.5 kHz)
num_harmonics : int
The number of harmonics to use (omega, 2*omega, 3*omega, etc)
'''
hdf_file, num_pts, drive, samp_rate = _get_pixel_for_filtering(hdf_file, parameters, pixelnum)
if not bandwidth:
bandwidth = 2500
elif bandwidth > 2500:
warnings.warn('Bandwidth of that level might cause errors')
bandwidth = 2500
first_harm = drive * first_harm
hbf = px.processing.fft.HarmonicPassFilter(num_pts, samp_rate, first_harm, bandwidth, num_harmonics)
return hbf
def noise_filter(hdf_file, parameters={}, pixelnum=[0, 0],
centers=[10E3, 50E3, 100E3, 150E3, 200E3],
widths=[20E3, 1E3, 1E3, 1E3, 1E3]):
'''
Interfaces with pycroscopy.processing.fft.NoiseBandFilter
:param hdf_file:
:param parameters:
:param pixelnum:
See test_filter
:param centers: list
List of Frequencies to filter out
:param widths:
List of frequency widths for each filter. e,g. in default case (10 kHz center, 20 kHz width) is from 0 to 20 kHz
'''
hdf_file, num_pts, drive, samp_rate = _get_pixel_for_filtering(hdf_file, parameters, pixelnum)
nf = px.processing.fft.NoiseBandFilter(num_pts, samp_rate, centers, widths)
return nf
# placeholder until accepted in pull request
| 28.978723 | 117 | 0.73192 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 26 17:15:36 2018
@author: Raj
"""
import pycroscopy as px
from pycroscopy.processing.fft import FrequencyFilter
import pyUSID as usid
import numpy as np
from scipy import signal as sps
from ffta.load import get_utils
from ffta import pixel
from matplotlib import pyplot as plt
import warnings
'''
For filtering data using the pycroscopy filter command
To set up a filter, you can choose any of the following:
Harmonic Filter: pick a frequency and bandpass filters that + 2w + 3e etc
Bandpass Filter: pick a specific frequency and pass that
Lowpass Filter: pick a frequency and pass all below that
Noise Filter: pick frequencies to selectively remove (like electrical noise, etc)
# a harmonic filter center of 2000 points long at 100kHz and 2*100 kHz, with a 5000 Hz wide window, at 1 MHz sampling
>>> hbf = px.processing.fft.HarmonicPassFilter(2000, 10e6, 100e3, 5000, 2)
>>> ffta.hdf_utils.filtering.test_filter(h5_main, hbf) #will display the result before applying to the whole dataset
>>> ffta.hdf_utils.filtering.fft_filter(h5_main, hbf)
'''
def test_filter(hdf_file, freq_filts, parameters={}, pixelnum=[0, 0], noise_tolerance=5e-7,
show_plots=True, check_filter=True):
"""
Applies FFT Filter to the file at a specific line and displays the result
Parameters
----------
hdf_file : h5Py file or Nx1 NumPy array (preferred is NumPy array)
hdf_file to work on, e.g. hdf.file['/FF-raw'] if that's a Dataset
if ndarray, uses passed or default parameters
Use ndarray.flatten() to ensure correct dimensions
freq_filts : list of FrequencyFilter class objects
Contains the filters to apply to the test signal
parameters : dict, optional
Contains parameters in FF-raw file for constructing filters. Automatic if a Dataset/File
Must contain num_pts and samp_rate to be functional
pixelnum : int, optional
For extracting a specific pixel to do FFT Filtering on
show_plots : bool, optional
Turns on FFT plots from Pycroscopy
noise_tolerance : float 0 to 1
Amount of noise below which signal is set to 0
Returns
-------
filt_line : numpy.ndarray
Filtered signal of hdf_file
freq_filts : list
The filter parameters to be passed to SignalFilter
fig_filt, axes_filt: matplotlib controls
Only functional if show_plots is on
"""
reshape = False
ftype = str(type(hdf_file))
if ('h5py' in ftype) or ('Dataset' in ftype): # hdf file
parameters = get_utils.get_params(hdf_file)
hdf_file = get_utils.get_pixel(hdf_file, [pixelnum[0], pixelnum[1]], array_form=True, transpose=False)
hdf_file = hdf_file.flatten()
if len(hdf_file.shape) == 2:
reshape = True
hdf_file = hdf_file.flatten()
sh = hdf_file.shape
# Test filter on a single line:
filt_line, fig_filt, axes_filt = px.processing.gmode_utils.test_filter(hdf_file,
frequency_filters=freq_filts,
noise_threshold=noise_tolerance,
show_plots=show_plots)
# If need to reshape
if reshape:
filt_line = np.reshape(filt_line, sh)
# Test filter out in Pixel
if check_filter:
plt.figure()
plt.plot(hdf_file, 'b')
plt.plot(filt_line, 'k')
h5_px_filt = pixel.Pixel(filt_line, parameters)
h5_px_filt.clear_filter_flags()
h5_px_filt.analyze()
h5_px_filt.plot(newplot=True)
h5_px_raw = pixel.Pixel(hdf_file, parameters)
h5_px_raw.analyze()
h5_px_raw.plot(newplot=True)
# h5_px_raw_unfilt = pixel.Pixel(hdf_file, parameters)
# h5_px_raw_unfilt.clear_filter_flags()
# h5_px_raw_unfilt.analyze()
# h5_px_raw_unfilt.plot(newplot=False,c1='y', c2='c')
return filt_line, freq_filts, fig_filt, axes_filt
def fft_filter(h5_main, freq_filts, noise_tolerance=5e-7, make_new=False, verbose=False):
"""
Stub for applying filter above to the entire FF image set
Parameters
----------
h5_main : h5py.Dataset object
Dataset to work on, e.g. h5_main = px.hdf_utils.getDataSet(hdf.file, 'FF_raw')[0]
freq_filts : list
List of frequency filters usually generated in test_line above
noise_tolerance : float, optional
Level below which data are set to 0. Higher values = more noise (more tolerant)
make_new : bool, optional
Allows for re-filtering the data by creating a new folder
Returns
-------
h5_filt : Dataset
Filtered dataset within latest -FFT_Filtering Group
"""
h5_filt_grp = usid.hdf_utils.check_for_old(h5_main, 'FFT_Filtering')
if make_new == True or not any(h5_filt_grp):
sig_filt = px.processing.SignalFilter(h5_main, frequency_filters=freq_filts,
noise_threshold=noise_tolerance,
write_filtered=True, write_condensed=False,
num_pix=1, verbose=verbose, cores=2, max_mem_mb=512)
h5_filt_grp = sig_filt.compute()
else:
print('Taking previously computed results')
h5_filt = h5_filt_grp[0]['Filtered_Data']
h5_filt = h5_filt_grp['Filtered_Data']
usid.hdf_utils.copy_attributes(h5_main.parent, h5_filt)
usid.hdf_utils.copy_attributes(h5_main.parent, h5_filt.parent)
return h5_filt
def lowpass(hdf_file, parameters={}, pixelnum=[0, 0], f_cutoff=None):
'''
Interfaces to px.pycroscopy.fft.LowPassFilter
:param hdf_file:
:param parameters:
:param pixelnum:
See test_filter below
:param f_cutoff: int
frequency to cut off. Defaults to 2*drive frequency rounded to nearest 100 kHz
'''
hdf_file, num_pts, drive, samp_rate = _get_pixel_for_filtering(hdf_file, parameters, pixelnum)
if not f_cutoff:
lpf_cutoff = np.round(drive / 1e5, decimals=0) * 2 * 1e5 # 2times the drive frequency, round up
lpf = px.processing.fft.LowPassFilter(num_pts, samp_rate, lpf_cutoff)
return lpf
def bandpass(hdf_file, parameters={}, pixelnum=[0, 0], f_center=None, f_width=10e3, harmonic=None, fir=False):
'''
Interfaces to pycroscopy.processing.fft.BandPassFilter
Note that this is effectively a Harmonic Filter of number_harmonics 1, but with finite impulse response option
:param hdf_file:
:param parameters:
:param pixelnum:
See test_filter below
:param f_center: int
center frequency for the specific band to pass
:param f_width: int
width of frequency to pass
:param harmonic: int
if specified, sets the band to this specific multiple of the drive frequency
:param fir: bool
uses an Finite Impulse Response filter instead of a normal boxcar
'''
hdf_file, num_pts, drive, samp_rate = _get_pixel_for_filtering(hdf_file, parameters, pixelnum)
# default is the 2*w signal (second harmonic for KPFM)
if not f_center:
if not harmonic:
f_center = drive * 2
else:
f_center = int(drive * harmonic)
bpf = px.processing.fft.BandPassFilter(num_pts, samp_rate, f_center, f_width, fir=fir)
return bpf
def harmonic(hdf_file, parameters={}, pixelnum=[0, 0], first_harm=1, bandwidth=None, num_harmonics=5):
'''
Interfaces with px.processing.fft.HarmonicFilter
Parameters
----------
hdf_file, parameters, pixelnum : see comments in test_filter below
first_harm : int
The first harmonic based on the drive frequency to use
For G-KPFM this should be explicitly set to 2
bandwidth : int
bandwidth for filtering. For computational purposes this is hard-set to 2500 (2.5 kHz)
num_harmonics : int
The number of harmonics to use (omega, 2*omega, 3*omega, etc)
'''
hdf_file, num_pts, drive, samp_rate = _get_pixel_for_filtering(hdf_file, parameters, pixelnum)
if not bandwidth:
bandwidth = 2500
elif bandwidth > 2500:
warnings.warn('Bandwidth of that level might cause errors')
bandwidth = 2500
first_harm = drive * first_harm
hbf = px.processing.fft.HarmonicPassFilter(num_pts, samp_rate, first_harm, bandwidth, num_harmonics)
return hbf
def noise_filter(hdf_file, parameters={}, pixelnum=[0, 0],
centers=[10E3, 50E3, 100E3, 150E3, 200E3],
widths=[20E3, 1E3, 1E3, 1E3, 1E3]):
'''
Interfaces with pycroscopy.processing.fft.NoiseBandFilter
:param hdf_file:
:param parameters:
:param pixelnum:
See test_filter
:param centers: list
List of Frequencies to filter out
:param widths:
List of frequency widths for each filter. e,g. in default case (10 kHz center, 20 kHz width) is from 0 to 20 kHz
'''
hdf_file, num_pts, drive, samp_rate = _get_pixel_for_filtering(hdf_file, parameters, pixelnum)
nf = px.processing.fft.NoiseBandFilter(num_pts, samp_rate, centers, widths)
return nf
def _get_pixel_for_filtering(hdf_file, parameters={}, pixelnum=[0, 0]):
ftype = str(type(hdf_file))
if ('h5py' in ftype) or ('Dataset' in ftype): # hdf file
parameters = usid.hdf_utils.get_attributes(hdf_file)
hdf_file = get_utils.get_pixel(hdf_file, [pixelnum[0], pixelnum[1]], array_form=True, transpose=False)
hdf_file = hdf_file.flatten()
if len(hdf_file.shape) == 2:
hdf_file = hdf_file.flatten()
num_pts = hdf_file.shape[0]
drive = parameters['drive_freq']
samp_rate = parameters['sampling_rate']
return hdf_file, num_pts, drive, samp_rate
# placeholder until accepted in pull request
class BandPassFilter(FrequencyFilter):
    """Frequency-space band-pass filter centered on +/- f_center (boxcar or FIR)."""

    def __init__(self, signal_length, samp_rate, f_center, f_width,
                 fir=False, fir_taps=1999):
        """
        Builds a bandpass filter

        Parameters
        ----------
        signal_length : unsigned int
            Points in the FFT. Assuming Signal in frequency space (ie - after FFT shifting)
        samp_rate : unsigned integer
            Sampling rate
        f_center : unsigned integer
            Center frequency for filter
        f_width : unsigned integer
            Frequency width of the pass band
        fir : bool, optional
            True uses a finite impulse response (FIR) response instead of a standard boxcar. FIR is causal
        fir_taps : int
            Number of taps (length of filter) for finite impulse response filter
        """
        if f_center >= 0.5 * samp_rate:
            raise ValueError('Filter cutoff exceeds Nyquist rate')
        self.f_center = f_center
        self.f_width = f_width
        super(BandPassFilter, self).__init__(signal_length, samp_rate)
        # Bin indices on the fftshift'ed frequency axis.
        dc_bin = int(round(0.5 * signal_length))                      # DC sits at the center
        center_off = int(round(signal_length * (f_center / samp_rate)))  # bins from DC to f_center
        half_band = int(round(dc_bin * f_width / samp_rate))             # half pass-band width in bins
        if fir:
            # Causal FIR design: Blackman-windowed band-pass taps,
            # evaluated on the FFT grid and shifted to match the boxcar layout.
            band = [(f_center - f_width) / (0.5 * samp_rate),
                    (f_center + f_width) / (0.5 * samp_rate)]
            taps = sps.firwin(int(fir_taps), band, pass_zero=False,
                              window='blackman')
            response = np.abs(np.fft.fftshift(np.fft.fft(taps, n=signal_length)))
        else:
            # Simple boxcar: unity gain in symmetric windows around +/- f_center.
            response = np.zeros(signal_length, dtype=np.float32)
            response[dc_bin - center_off - half_band:dc_bin - center_off + half_band + 1] = 1
            response[dc_bin + center_off - half_band:dc_bin + center_off + half_band + 1] = 1
        self.value = response

    def get_parms(self):
        """Return this filter's parameters merged with the base-class parameters."""
        merged = {'band_pass_start_freq': self.f_center,
                  'band_pass_band_width': self.f_width}
        merged.update(super(BandPassFilter, self).get_parms())
        return merged
| 767 | 1,692 | 45 |
52823e59cd073588f3ba138cace3b20df0d5b5b5 | 3,395 | py | Python | calvin/runtime/north/plugins/port/queue/test/test_collect_tagged.py | gabrielcercel/calvin-base | c0315f100643230d65aed1745e1c22df3e7a7c2c | [
"Apache-2.0"
] | 334 | 2015-06-04T15:14:28.000Z | 2022-02-09T11:14:17.000Z | calvin/runtime/north/plugins/port/queue/test/test_collect_tagged.py | gabrielcercel/calvin-base | c0315f100643230d65aed1745e1c22df3e7a7c2c | [
"Apache-2.0"
] | 89 | 2015-06-13T19:15:35.000Z | 2019-12-03T19:23:20.000Z | calvin/runtime/north/plugins/port/queue/test/test_collect_tagged.py | gabrielcercel/calvin-base | c0315f100643230d65aed1745e1c22df3e7a7c2c | [
"Apache-2.0"
] | 112 | 2015-06-06T19:16:54.000Z | 2020-10-19T01:27:55.000Z | import pytest
from calvin.runtime.north.plugins.port import queue
from calvin.runtime.north.calvin_token import Token
from calvin.runtime.north.plugins.port.queue.common import QueueEmpty
from calvin.runtime.north.plugins.port.queue.test.test_collect_unordered import TestCollectUnorderedFIFO
pytest_unittest = pytest.mark.unittest
@pytest_unittest
| 34.642857 | 104 | 0.554345 | import pytest
from calvin.runtime.north.plugins.port import queue
from calvin.runtime.north.calvin_token import Token
from calvin.runtime.north.plugins.port.queue.common import QueueEmpty
from calvin.runtime.north.plugins.port.queue.test.test_collect_unordered import TestCollectUnorderedFIFO
pytest_unittest = pytest.mark.unittest
class DummyPort(object):
    """Minimal stand-in for a port object; tests attach attributes ad hoc."""
    pass
def create_port(routing="collect-unordered"):
    """Build a dummy in-port with the given collect routing and return its queue."""
    dummy = DummyPort()
    dummy.properties = {'routing': routing, "direction": "in"}
    return queue.get(dummy)
def unwrap(data):
    """Return the single (tag, value) pair carried by a collect-tagged token.

    Uses next(iter(...)) rather than items()[0]: dict.items() returns a
    non-subscriptable view on Python 3, so indexing it raises TypeError.
    """
    return next(iter(data.value.items()))
@pytest_unittest
class TestCollectUnorderedFIFOTagged(TestCollectUnorderedFIFO):
    """Tests for the 'collect-tagged' queue variant.

    Same FIFO semantics as the unordered collect queue, but every token
    read back is wrapped as a single {tag: value} pair, where the tag is
    the writer id. Each test writes two tokens per writer ("data-i" and
    "data-(i+3)" for writer-i) and checks what peek/cancel/commit expose.
    """
    def setUp(self):
        self.inport = create_port(routing="collect-tagged")
    def testType(self):
        # The queue must report the tagged collect type.
        queue_type = self.inport.queue_type
        self.assertEqual(queue_type, "collect:tagged")
    def testSerialize(self):
        self.setup_writers(3)
        for i in [1,2,3]:
            for j in [0,3]:
                self.inport.write(Token("data-%d" % (i+j)), "writer-%d" % i)
        # Consume the first token from each writer, then serialize.
        for i in [1,2,3]:
            self.inport.peek(None)
        state = self.inport._state()
        # A fresh port restored from that state must resume at the
        # second token of each writer.
        port = create_port(routing="collect-tagged")
        port._set_state(state)
        for i in [1,2,3]:
            tag, data = unwrap(port.peek(None))
            self.assertEqual(tag, "writer-%d" % i)
            self.assertEqual(data, "data-%d" % (i+3))
    def testPeek_Normal(self):
        self.setup_writers(3)
        for i in [1,2,3]:
            for j in [0,3]:
                self.inport.write(Token("data-%d" % (i+j)), "writer-%d" % i)
        # First round of peeks yields each writer's first token, tagged.
        for i in [1,2,3]:
            tag, data = unwrap(self.inport.peek(None))
            self.assertEqual(tag, "writer-%d" % i)
            self.assertEqual(data, "data-%d" % i)
        # Second round yields each writer's second token.
        for i in [1,2,3]:
            tag, data = unwrap(self.inport.peek(None))
            self.assertEqual(tag, "writer-%d" % i)
            self.assertEqual(data, "data-%d" % (i+3))
    def testCancel(self):
        self.setup_writers(3)
        for i in [1,2,3]:
            for j in [0,3]:
                self.inport.write(Token("data-%d" % (i+j)), "writer-%d" % i)
        # Peek one token per writer, grouped by tag.
        data_1 = {}
        for i in [1,2,3]:
            tag, data = unwrap(self.inport.peek(None))
            data_1.setdefault(tag, []).append(data)
        # Cancel must rewind: draining afterwards re-delivers the peeked
        # tokens first, in the same per-writer order.
        self.inport.cancel(None)
        data_2 = {}
        try :
            while True:
                tag, data = unwrap(self.inport.peek(None))
                data_2.setdefault(tag, []).append(data)
        except QueueEmpty:
            pass
        for tag, data in data_1.items():
            self.assertEqual(data, data_2[tag][:len(data)])
    def testCommit(self):
        self.setup_writers(3)
        for i in [1,2,3]:
            for j in [0,3]:
                self.inport.write(Token("data-%d" % (i+j)), "writer-%d" % i)
        # Peek everything, then commit: the queue must be empty afterwards.
        values = {}
        for i in [1,2,3,4,5,6]:
            tag, data = unwrap(self.inport.peek(None))
            values.setdefault(tag, []).append(data)
        self.inport.commit(None)
        # should be empty now
        with self.assertRaises(QueueEmpty):
            self.inport.peek(None)
        # Each writer's tokens must have arrived in write order.
        for tag, data in values.items():
            for i in [1,2,3]:
                if tag == "writer-%d" % i:
                    self.assertEqual(["data-%d" % d for d in [i, i+3]], data)
92c65123a6c522968afa5a515527ab9b438ba792 | 1,706 | py | Python | python/odml/nix_demo.py | mpsonntag/snippets | fc3cc42ea49b885c1f29c0aef1379055a931a978 | [
"BSD-3-Clause"
] | null | null | null | python/odml/nix_demo.py | mpsonntag/snippets | fc3cc42ea49b885c1f29c0aef1379055a931a978 | [
"BSD-3-Clause"
] | null | null | null | python/odml/nix_demo.py | mpsonntag/snippets | fc3cc42ea49b885c1f29c0aef1379055a931a978 | [
"BSD-3-Clause"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import nixio as nix
# Input log file (calcium-imaging trial) and output NIX file locations.
fnbase = Path.joinpath(Path.home(), 'Chaos', 'work')
fnraw = str(Path.joinpath(fnbase,
                          '_Lab_Zimmer/calcium_imaging/results/N2/urx/shift210421/20120705Pflp178GCaMP5kshift210421W7URXx2.log'))
nixfn = str(Path.joinpath(fnbase, 'ginrepos', 'demo', 'elegans_oxygen.nix'))
# Read the trial log row-wise as CSV.
data = pd.read_csv(fnraw)
# Transpose so each original column becomes a row of tdata.values.
tdata = data.transpose()
# Extract the frame-index and dF/F columns as arrays.
# NOTE(review): indices 0 and 5 assume a fixed column layout of the log
# (frames in column 0, dF/F in column 5) -- confirm against the log format.
steps = tdata.values[0]
dff = tdata.values[5]
# Load the two columns into a new NIX file (overwriting any existing one).
nixfile = nix.File.open(nixfn, nix.FileMode.Overwrite)
b = nixfile.create_block(name="oxygen_shift_trials", type_="calcium_imaging")
# use a group to structure the individual trials within a block
g = b.create_group(name="N2_URX_shift_210421_20120705", type_="trial.datacollection")
# add steps column
da = b.create_data_array(name="20120705_frames", array_type="trial.column", data=steps)
da.label = "frames"
# add dF/F column; note `da` is rebound, so the dimension added below
# applies to the dF/F array, not the frames array.
da = b.create_data_array(name="20120705_df_over_f", array_type="trial.column", data=dff)
da.label = "dF/F"
# Add a sampled dimension using the frame step size as sampling interval.
dim = da.append_sampled_dimension(steps[1] - steps[0])
dim.label = "frames"
# Attach both data arrays to the trial group.
g.data_arrays.append(b.data_arrays["20120705_frames"])
g.data_arrays.append(b.data_arrays["20120705_df_over_f"])
# Plot the dF/F trace read back from the NIX file.
# (plt comes from the module-level matplotlib import.)
fig, ax = plt.subplots()
ax.plot(b.data_arrays["20120705_df_over_f"][:])
ax.set(xlabel=b.data_arrays["20120705_df_over_f"].dimensions[0].label,
       ylabel=b.data_arrays["20120705_df_over_f"].label,
       title="URX oxygen shift trial (21-04-21)")
plt.show()
nixfile.close()
| 28.915254 | 129 | 0.743259 | import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import nixio as nix
# Input log file (calcium-imaging trial) and output NIX file locations.
fnbase = Path.joinpath(Path.home(), 'Chaos', 'work')
fnraw = str(Path.joinpath(fnbase,
                          '_Lab_Zimmer/calcium_imaging/results/N2/urx/shift210421/20120705Pflp178GCaMP5kshift210421W7URXx2.log'))
nixfn = str(Path.joinpath(fnbase, 'ginrepos', 'demo', 'elegans_oxygen.nix'))
# Read the trial log row-wise as CSV.
data = pd.read_csv(fnraw)
# Transpose so each original column becomes a row of tdata.values.
tdata = data.transpose()
# Extract the frame-index and dF/F columns as arrays.
# NOTE(review): indices 0 and 5 assume a fixed column layout of the log
# (frames in column 0, dF/F in column 5) -- confirm against the log format.
steps = tdata.values[0]
dff = tdata.values[5]
# Load the two columns into a new NIX file (overwriting any existing one).
nixfile = nix.File.open(nixfn, nix.FileMode.Overwrite)
b = nixfile.create_block(name="oxygen_shift_trials", type_="calcium_imaging")
# use a group to structure the individual trials within a block
g = b.create_group(name="N2_URX_shift_210421_20120705", type_="trial.datacollection")
# add steps column
da = b.create_data_array(name="20120705_frames", array_type="trial.column", data=steps)
da.label = "frames"
# add dF/F column; note `da` is rebound, so the dimension added below
# applies to the dF/F array, not the frames array.
da = b.create_data_array(name="20120705_df_over_f", array_type="trial.column", data=dff)
da.label = "dF/F"
# Add a sampled dimension using the frame step size as sampling interval.
dim = da.append_sampled_dimension(steps[1] - steps[0])
dim.label = "frames"
# Attach both data arrays to the trial group.
g.data_arrays.append(b.data_arrays["20120705_frames"])
g.data_arrays.append(b.data_arrays["20120705_df_over_f"])
# Plot the dF/F trace read back from the NIX file.
# (plt comes from the module-level matplotlib import.)
fig, ax = plt.subplots()
ax.plot(b.data_arrays["20120705_df_over_f"][:])
ax.set(xlabel=b.data_arrays["20120705_df_over_f"].dimensions[0].label,
       ylabel=b.data_arrays["20120705_df_over_f"].label,
       title="URX oxygen shift trial (21-04-21)")
plt.show()
nixfile.close()
| 0 | 0 | 0 |
1e91a5afa6e064ba7a41a2c596e40f3890588e36 | 12,082 | py | Python | jupyros/ros_widgets.py | ihuicatl/jupyter-ros | ea9c9e89f0ebf82108ac4d96897b2c64e6f74563 | [
"BSD-3-Clause"
] | null | null | null | jupyros/ros_widgets.py | ihuicatl/jupyter-ros | ea9c9e89f0ebf82108ac4d96897b2c64e6f74563 | [
"BSD-3-Clause"
] | null | null | null | jupyros/ros_widgets.py | ihuicatl/jupyter-ros | ea9c9e89f0ebf82108ac4d96897b2c64e6f74563 | [
"BSD-3-Clause"
] | null | null | null | #############################################################################
# Copyright (c) Wolf Vollprecht, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
try:
import rospy
except:
print("The rospy package is not found in your $PYTHONPATH. Subscribe and publish are not going to work.")
print("Do you need to activate your ROS environment?")
try:
from cv_bridge import CvBridge, CvBridgeError
import cv2
bridge = CvBridge()
except:
pass
import bqplot as bq
import ipywidgets as widgets
import numpy as np
import threading
import subprocess, yaml, os
def add_widgets(msg_instance, widget_dict, widget_list, prefix=''):
    """
    Recursively build input widgets for each slot of a ROS message.

    @param msg_instance An instance of the message type to build a form for
    @param widget_dict Dict mapping slot names to their widgets (nested dicts for sub-messages)
    @param widget_list Flat list of display rows, appended to in place
    @param prefix Name of the enclosing slot when recursing (not otherwise used)
    @return widget_dict and widget_list
    """
    # import only here so non ros env doesn't block installation
    from genpy import Message
    # Special case: an Image message is entered as a single file path.
    if msg_instance._type.split('/')[-1] == 'Image':
        w = widgets.Text()
        widget_dict['img'] = w
        w_box = widgets.HBox([widgets.Label(value='Image path:'), w])
        widget_list.append(w_box)
        return widget_dict, widget_list
    for idx, slot in enumerate(msg_instance.__slots__):
        attr = getattr(msg_instance, slot)
        s_t = msg_instance._slot_types[idx]
        w = None
        # Map primitive slot types to matching input widgets.
        if s_t in ['float32', 'float64']:
            w = widgets.FloatText()
        if s_t in ['int8', 'uint8', 'int32', 'uint32', 'int64', 'uint64']:
            w = widgets.IntText()
        if s_t in ['string']:
            w = widgets.Text()
        # Nested message: recurse, keeping its widgets in a sub-dict.
        if isinstance(attr, Message):
            widget_list.append(widgets.Label(value=slot))
            widget_dict[slot] = {}
            add_widgets(attr, widget_dict[slot], widget_list, slot)
        if w:
            widget_dict[slot] = w
            w_box = widgets.HBox([widgets.Label(value=slot, layout=widgets.Layout(width="100px")), w])
            widget_list.append(w_box)
    return widget_dict, widget_list
thread_map = {}
def publish(topic, msg_type):
    """
    Create a form widget for message type msg_type.
    This function analyzes the fields of msg_type and creates
    an appropriate widget.
    A publisher is automatically created which publishes to the
    topic given as topic parameter. This allows pressing the
    "Send Message" button to send the message to ROS.
    @param msg_type The message type
    @param topic The topic name to publish to
    @return jupyter widget for display
    """
    publisher = rospy.Publisher(topic, msg_type, queue_size=10)
    widget_list = []
    widget_dict = {}
    latch_check = widgets.Checkbox(description="Latch Message")
    rate_field = widgets.IntText(description="Rate", value=5)
    stop_btn = widgets.Button(description="Start")
    # NOTE(review): `latch_value_change`, `send_msg` and `start_thread` are
    # referenced below but not defined in this copy of the file -- the
    # callback bodies appear to have been stripped. As written, this
    # function raises NameError at runtime.
    latch_check.observe(latch_value_change, 'value')
    # Build one input widget per message field.
    add_widgets(msg_type(), widget_dict, widget_list)
    send_btn = widgets.Button(description="Send Message")
    send_btn.on_click(send_msg)
    # Per-topic flag controlling the periodic publishing thread.
    thread_map[topic] = False
    stop_btn.on_click(start_thread)
    btm_box = widgets.HBox((send_btn, latch_check, rate_field, stop_btn))
    widget_list.append(btm_box)
    vbox = widgets.VBox(children=widget_list)
    return vbox
def bag_player(bagfile=''):
    """
    Create a form widget for playing ROS bags.
    This function takes the bag file path, extracts the bag summary
    and play the bag with the given arguments.
    @param bagfile The ROS bag file path
    @return jupyter widget for display
    """
    widget_list = []
    # Handle to the `rosbag play` subprocess, stored on the function object.
    bag_player.sp = None
    ###### Fields #########################################################
    bgpath_txt = widgets.Text()
    bgpath_box = widgets.HBox([widgets.Label("Bag file path:"), bgpath_txt])
    bgpath_txt.value = bagfile
    play_btn = widgets.Button(description="Play", icon='play')
    pause_btn = widgets.Button(description="Pause", icon='pause', disabled=True)
    step_btn = widgets.Button(description="Step", icon='step-forward', disabled=True)
    ibox = widgets.Checkbox(description="Immediate")
    lbox = widgets.Checkbox(description="Loop")
    clockbox = widgets.Checkbox(description="Clock")
    dzbox = widgets.Checkbox(description="Duration")
    kabox = widgets.Checkbox(description="Keep alive")
    start_float = widgets.FloatText(value=0)
    start_box = widgets.HBox([widgets.Label("Start time:"), start_float])
    que_int = widgets.IntText(value=100)
    que_box = widgets.HBox([widgets.Label("Queue size:"), que_int])
    factor_float = widgets.FloatText(value=1)
    factor_box = widgets.HBox([widgets.Label("Multiply the publish rate by:"), factor_float])
    delay_float = widgets.FloatText(value=0)
    delay_box = widgets.HBox([widgets.Label("Delay after every advertise call:"), delay_float])
    duration_float = widgets.FloatText(value=0)
    duration_box = widgets.HBox([dzbox, widgets.Label("Duration in secs:"), duration_float])
    out_box = widgets.Output(layout={'border': '1px solid black'})
    # NOTE(review): `ply_clk`, `pause_clk` and `step_clk` are referenced
    # below but not defined in this copy of the file -- the callback bodies
    # appear to have been stripped. As written, this raises NameError.
    ######## Play Button ##################################################
    play_btn.on_click(ply_clk)
    ###################### Pause Button #########################
    pause_btn.on_click(pause_clk)
    ################## step Button ###############################
    step_btn.on_click(step_clk)
    options_hbox = widgets.HBox([ibox, lbox, clockbox, kabox])
    buttons_hbox = widgets.HBox([play_btn, pause_btn, step_btn])
    btm_box = widgets.VBox(
        [bgpath_box, options_hbox, duration_box, start_box, que_box, factor_box, delay_box, buttons_hbox, out_box])
    widget_list.append(btm_box)
    vbox = widgets.VBox(children=widget_list)
    return vbox
def client(srv_name, srv_type):
    """
    Create a form widget for message type srv_type.
    This function analyzes the fields of srv_type and creates
    an appropriate widget.
    @param srv_type The service message type
    @param srv_name The service name to call
    @return jupyter widget for display
    """
    # Fail early if the service is not up within 5 seconds.
    rospy.wait_for_service(srv_name, timeout=5)
    widget_list = []
    widget_dict = {}
    add_widgets(srv_type._request_class(), widget_dict, widget_list)
    call_btn = widgets.Button(description="Call Service")
    # NOTE(review): `call_srv` is referenced below but not defined in this
    # copy of the file -- the callback body appears to have been stripped.
    # As written, this raises NameError at runtime.
    call_btn.on_click(call_srv)
    widget_list.append(call_btn)
    vbox = widgets.VBox(children=widget_list)
    return vbox
| 35.02029 | 131 | 0.595514 | #############################################################################
# Copyright (c) Wolf Vollprecht, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
try:
import rospy
except:
print("The rospy package is not found in your $PYTHONPATH. Subscribe and publish are not going to work.")
print("Do you need to activate your ROS environment?")
try:
from cv_bridge import CvBridge, CvBridgeError
import cv2
bridge = CvBridge()
except:
pass
import bqplot as bq
import ipywidgets as widgets
import numpy as np
import threading
import subprocess, yaml, os
def add_widgets(msg_instance, widget_dict, widget_list, prefix=''):
    """
    Recursively build input widgets for each slot of a ROS message.

    @param msg_instance An instance of the message type to build a form for
    @param widget_dict Dict mapping slot names to their widgets (nested dicts for sub-messages)
    @param widget_list Flat list of display rows, appended to in place
    @param prefix Name of the enclosing slot when recursing (not otherwise used)
    @return widget_dict and widget_list
    """
    # import only here so a non-ROS env doesn't block installation
    from genpy import Message
    # Special case: an Image message is entered as a single file path.
    if msg_instance._type.split('/')[-1] == 'Image':
        path_field = widgets.Text()
        widget_dict['img'] = path_field
        widget_list.append(widgets.HBox([widgets.Label(value='Image path:'), path_field]))
        return widget_dict, widget_list
    float_types = ('float32', 'float64')
    int_types = ('int8', 'uint8', 'int32', 'uint32', 'int64', 'uint64')
    for idx, slot in enumerate(msg_instance.__slots__):
        attr = getattr(msg_instance, slot)
        slot_type = msg_instance._slot_types[idx]
        # Map primitive slot types to matching input widgets.
        if slot_type in float_types:
            field = widgets.FloatText()
        elif slot_type in int_types:
            field = widgets.IntText()
        elif slot_type == 'string':
            field = widgets.Text()
        else:
            field = None
        # Nested message: recurse, keeping its widgets in a sub-dict.
        if isinstance(attr, Message):
            widget_list.append(widgets.Label(value=slot))
            widget_dict[slot] = {}
            add_widgets(attr, widget_dict[slot], widget_list, slot)
        if field:
            widget_dict[slot] = field
            label = widgets.Label(value=slot, layout=widgets.Layout(width="100px"))
            widget_list.append(widgets.HBox([label, field]))
    return widget_dict, widget_list
def widget_dict_to_msg(msg_instance, d):
    """Copy the current form values from a widget dict back onto a message.

    Mirrors the structure produced by add_widgets: plain widgets map to
    message attributes, nested dicts map to sub-messages, and the special
    'img' key loads an image file and copies all its slots.
    """
    for key, entry in d.items():
        if not isinstance(entry, widgets.Widget):
            # Nested sub-message: recurse into the matching attribute.
            widget_dict_to_msg(getattr(msg_instance, key), entry)
            continue
        if key == 'img':
            # Image form: load the file and copy every slot of the
            # resulting Image message, then stop (the form has no
            # further fields).
            img_msg = img_to_msg(entry.value)
            for slot in img_msg.__slots__:
                setattr(msg_instance, slot, getattr(img_msg, slot))
            return
        setattr(msg_instance, key, entry.value)
thread_map = {}
def img_to_msg(imgpath):
    """Load an image from disk and convert it to a ROS Image message.

    @param imgpath Path to the image file readable by OpenCV
    @return sensor_msgs/Image produced by cv_bridge
    @raises RuntimeError if cv_bridge/cv2 are not installed
    @raises FileNotFoundError if the image cannot be read
    """
    # The module-level try/except leaves cv2/CvBridge/bridge UNDEFINED when
    # the import fails, so the original `if not cv2 or not CvBridge` raised
    # NameError instead of the intended RuntimeError. Check for the names'
    # presence instead of their truthiness.
    if globals().get('bridge') is None:
        raise RuntimeError(
            "CV Bridge is not installed, please install it to publish Images\nsudo apt-get install ros-$(rosversion -d)-cv-bridge")
    img = cv2.imread(imgpath)
    if img is None:
        raise FileNotFoundError('Image File Not Found')
    else:
        imgmsg = bridge.cv2_to_imgmsg(img)
        return imgmsg
def publish(topic, msg_type):
    """
    Create a form widget for message type msg_type.
    This function analyzes the fields of msg_type and creates
    an appropriate widget.
    A publisher is automatically created which publishes to the
    topic given as topic parameter. This allows pressing the
    "Send Message" button to send the message to ROS.
    @param msg_type The message type
    @param topic The topic name to publish to
    @return jupyter widget for display
    """
    publisher = rospy.Publisher(topic, msg_type, queue_size=10)
    widget_list = []
    widget_dict = {}
    latch_check = widgets.Checkbox(description="Latch Message")
    rate_field = widgets.IntText(description="Rate", value=5)
    stop_btn = widgets.Button(description="Start")
    # Toggle latching on the underlying publisher.
    # NOTE(review): pokes the private `impl` attribute of rospy.Publisher --
    # confirm this survives rospy upgrades.
    def latch_value_change(arg):
        publisher.impl.is_latch = arg['new']
    latch_check.observe(latch_value_change, 'value')
    # Build one input widget per message field.
    add_widgets(msg_type(), widget_dict, widget_list)
    send_btn = widgets.Button(description="Send Message")
    # Collect the form values into a fresh message and publish it once.
    def send_msg(arg):
        msg_to_send = msg_type()
        widget_dict_to_msg(msg_to_send, widget_dict)
        publisher.publish(msg_to_send)
    send_btn.on_click(send_msg)
    # Per-topic flag controlling the periodic publishing thread below.
    thread_map[topic] = False
    # Publish repeatedly at the requested rate while the flag is set.
    def thread_target():
        rate = rospy.Rate(rate_field.value)
        while thread_map[topic]:
            send_msg(None)
            rate.sleep()
    # Start/stop the background publishing thread on button press.
    def start_thread(click_args):
        thread_map[topic] = not thread_map[topic]
        if thread_map[topic]:
            local_thread = threading.Thread(target=thread_target)
            local_thread.start()
            stop_btn.description = "Stop"
        else:
            stop_btn.description = "Start"
    stop_btn.on_click(start_thread)
    btm_box = widgets.HBox((send_btn, latch_check, rate_field, stop_btn))
    widget_list.append(btm_box)
    vbox = widgets.VBox(children=widget_list)
    return vbox
def live_plot(plot_string, topic_type, history=100, title=None):
    """
    Subscribe to a topic and live-plot selected message fields with bqplot.

    @param plot_string "<topic>:<field1>:<field2>..." — topic plus field names
    @param topic_type The ROS message type of the topic
    @param history Number of most recent samples to display
    @param title Figure title; defaults to the topic name
    @return bqplot Figure updated in place by the subscriber callback
    """
    # NOTE(review): the -1 drops the last character before the colon; this
    # presumably matches the expected plot_string format -- confirm.
    topic = plot_string[:plot_string.find(':') - 1]
    title = title if title else topic
    fields = plot_string.split(':')[1:]
    x_sc = bq.LinearScale()
    y_sc = bq.LinearScale()
    ax_x = bq.Axis(label='X', scale=x_sc, grid_lines='solid')
    ax_y = bq.Axis(label='Y', scale=y_sc, orientation='vertical', grid_lines='solid')
    lines = bq.Lines(x=np.array([]), y=np.array([]), scales={'x': x_sc, 'y': y_sc})
    fig = bq.Figure(axes=[ax_x, ax_y], marks=[lines], labels=fields, display_legend=True, title=title)
    data = []
    def cb(msg, data=data):
        # Append the newest sample and trim IN PLACE. The original
        # `data = data[-history:]` only rebound the local name, so the
        # shared buffer grew without bound on a long-running topic.
        data.append([getattr(msg, f) for f in fields])
        if len(data) > history:
            del data[:len(data) - history]
        ndat = np.asarray(data).T
        if lines:
            lines.y = ndat
            lines.x = np.arange(len(data))
    rospy.Subscriber(topic, topic_type, cb)
    return fig
def bag_player(bagfile=''):
    """
    Create a form widget for playing ROS bags.
    This function takes the bag file path, extracts the bag summary
    and play the bag with the given arguments.
    @param bagfile The ROS bag file path
    @return jupyter widget for display
    """
    widget_list = []
    # Handle to the `rosbag play` subprocess, stored on the function object.
    bag_player.sp = None
    ###### Fields #########################################################
    bgpath_txt = widgets.Text()
    bgpath_box = widgets.HBox([widgets.Label("Bag file path:"), bgpath_txt])
    bgpath_txt.value = bagfile
    play_btn = widgets.Button(description="Play", icon='play')
    pause_btn = widgets.Button(description="Pause", icon='pause', disabled=True)
    step_btn = widgets.Button(description="Step", icon='step-forward', disabled=True)
    ibox = widgets.Checkbox(description="Immediate")
    lbox = widgets.Checkbox(description="Loop")
    clockbox = widgets.Checkbox(description="Clock")
    dzbox = widgets.Checkbox(description="Duration")
    kabox = widgets.Checkbox(description="Keep alive")
    start_float = widgets.FloatText(value=0)
    start_box = widgets.HBox([widgets.Label("Start time:"), start_float])
    que_int = widgets.IntText(value=100)
    que_box = widgets.HBox([widgets.Label("Queue size:"), que_int])
    factor_float = widgets.FloatText(value=1)
    factor_box = widgets.HBox([widgets.Label("Multiply the publish rate by:"), factor_float])
    delay_float = widgets.FloatText(value=0)
    delay_box = widgets.HBox([widgets.Label("Delay after every advertise call:"), delay_float])
    duration_float = widgets.FloatText(value=0)
    duration_box = widgets.HBox([dzbox, widgets.Label("Duration in secs:"), duration_float])
    out_box = widgets.Output(layout={'border': '1px solid black'})
    ######## Play Button ##################################################
    # Toggles between starting `rosbag play` (with the options collected
    # from the form) and stopping the running playback.
    def ply_clk(arg):
        if play_btn.description == "Play":
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated/unsafe in newer PyYAML -- confirm intended.
            info_dict = yaml.load(subprocess.Popen(['rosbag', 'info', '--yaml', bgpath_txt.value],
                                                   stdout=subprocess.PIPE).communicate()[0])
            if info_dict is None:
                raise FileNotFoundError("Bag file not found!")
            else:
                # Assemble the rosbag play command line from the form state.
                cmd = ['rosbag', 'play', bgpath_txt.value]
                if ibox.value:
                    cmd.append('-i')
                if lbox.value:
                    cmd.append('-l')
                if kabox.value:
                    cmd.append('-k')
                if clockbox.value:
                    cmd.append('--clock')
                if dzbox.value:
                    cmd.append("--duration={}".format(max(0, duration_float.value)))
                cmd.append("--rate={}".format(max(0, factor_float.value)))
                cmd.append("--start={}".format(max(0, start_float.value)))
                cmd.append("--queue={}".format(max(0, que_int.value)))
                cmd.append("--delay={}".format(max(0, delay_float.value)))
                play_btn.description = "Stop"
                play_btn.icon = 'stop'
                pause_btn.disabled = False
                # stdin pipe is used by the pause/step buttons below.
                bag_player.sp = subprocess.Popen(cmd, stdin=subprocess.PIPE)
                with out_box:
                    print("Bag summary:")
                    for key, val in info_dict.items():
                        print(key, ":", val)
        else:
            # Stop playback with SIGINT.
            # NOTE(review): the child is spawned without its own session,
            # so killpg(getpgid(child)) targets the shared process group --
            # confirm this does not signal the notebook process itself.
            try:
                os.killpg(os.getpgid(bag_player.sp.pid), subprocess.signal.SIGINT)
            except KeyboardInterrupt:
                pass
            play_btn.description = "Play"
            play_btn.icon = 'play'
            pause_btn.disabled = True
            pause_btn.description = 'Pause'
            pause_btn.icon = 'pause'
            step_btn.disabled = True
    play_btn.on_click(ply_clk)
    ###################### Pause Button #########################
    # rosbag play toggles pause when it receives ' ' on stdin.
    def pause_clk(arg):
        bag_player.sp.stdin.write(b' \n')
        bag_player.sp.stdin.flush()
        if pause_btn.description == 'Pause':
            pause_btn.description = 'Continue'
            pause_btn.icon = 'play'
            step_btn.disabled = False
        else:
            pause_btn.description = 'Pause'
            pause_btn.icon = 'pause'
            step_btn.disabled = True
    pause_btn.on_click(pause_clk)
    ################## step Button ###############################
    # rosbag play advances one message when it receives 's' on stdin.
    def step_clk(arg):
        bag_player.sp.stdin.write(b's\n')
        bag_player.sp.stdin.flush()
    step_btn.on_click(step_clk)
    options_hbox = widgets.HBox([ibox, lbox, clockbox, kabox])
    buttons_hbox = widgets.HBox([play_btn, pause_btn, step_btn])
    btm_box = widgets.VBox(
        [bgpath_box, options_hbox, duration_box, start_box, que_box, factor_box, delay_box, buttons_hbox, out_box])
    widget_list.append(btm_box)
    vbox = widgets.VBox(children=widget_list)
    return vbox
def client(srv_name, srv_type):
    """
    Create a form widget for message type srv_type.

    Builds an input form for the service request plus a button that
    invokes the service with the current form values.

    @param srv_type The service message type
    @param srv_name The service name to call
    @return jupyter widget for display
    """
    # Fail early if the service is not up within 5 seconds.
    rospy.wait_for_service(srv_name, timeout=5)
    rows = []
    form_state = {}
    add_widgets(srv_type._request_class(), form_state, rows)
    call_btn = widgets.Button(description="Call Service")

    def call_srv(arg):
        # Build a fresh request from the form and call the service.
        request = srv_type._request_class()
        widget_dict_to_msg(request, form_state)
        try:
            service = rospy.ServiceProxy(srv_name, srv_type)
            return service(request)
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)

    call_btn.on_click(call_srv)
    rows.append(call_btn)
    return widgets.VBox(children=rows)
| 5,035 | 0 | 282 |
a292b4b0b721e7945377bc0b2da0181e22768ebd | 4,369 | bzl | Python | nixpkgs/repositories.bzl | seewpx/rules_nixpkgs | 1c02fe914763448398739bb295b858362bb2810b | [
"Apache-2.0"
] | null | null | null | nixpkgs/repositories.bzl | seewpx/rules_nixpkgs | 1c02fe914763448398739bb295b858362bb2810b | [
"Apache-2.0"
] | null | null | null | nixpkgs/repositories.bzl | seewpx/rules_nixpkgs | 1c02fe914763448398739bb295b858362bb2810b | [
"Apache-2.0"
] | null | null | null | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
def rules_nixpkgs_dependencies(rules_nixpkgs_name = "io_tweag_rules_nixpkgs"):
    """Load repositories required by rules_nixpkgs.

    Args:
        rules_nixpkgs_name: name under which this repository is known in your workspace
    """
    # Third-party prerequisites; `maybe` is a no-op when the workspace
    # already defines a repository with that name.
    maybe(
        http_archive,
        "platforms",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/0.0.4/platforms-0.0.4.tar.gz",
            "https://github.com/bazelbuild/platforms/releases/download/0.0.4/platforms-0.0.4.tar.gz",
        ],
        sha256 = "079945598e4b6cc075846f7fd6a9d0857c33a7afc0de868c2ccb96405225135d",
    )
    maybe(
        http_archive,
        "bazel_skylib",
        urls = [
            "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
        ],
        sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
    )
    maybe(
        http_archive,
        "rules_java",
        url = "https://github.com/bazelbuild/rules_java/releases/download/4.0.0/rules_java-4.0.0.tar.gz",
        sha256 = "34b41ec683e67253043ab1a3d1e8b7c61e4e8edefbcad485381328c934d072fe",
    )
    # the following complication is due to migrating to `bzlmod`.
    # fetch extracted submodules as external repositories from an existing source tree, based on the import type.
    rules_nixpkgs = native.existing_rule(rules_nixpkgs_name)
    if not rules_nixpkgs:
        errormsg = [
            "External repository `rules_nixpkgs` not found as `{}`.".format(rules_nixpkgs_name),
            "Specify `rules_nixpkgs_dependencies(rules_nixpkgs_name=<name>)`",
            "with `<name>` as used for importing `rules_nixpkgs`.",
        ]
        fail("\n".join(errormsg))
    kind = rules_nixpkgs.get("kind")
    # Submodule prefixes below are relative to the repo root; append "/"
    # so they concatenate cleanly onto any existing strip_prefix.
    strip_prefix = rules_nixpkgs.get("strip_prefix", "")
    if strip_prefix:
        strip_prefix += "/"
    # One sub-repository per extracted toolchain module.
    for name, prefix in [
        ("rules_nixpkgs_core", "core"),
        ("rules_nixpkgs_cc", "toolchains/cc"),
        ("rules_nixpkgs_java", "toolchains/java"),
        ("rules_nixpkgs_python", "toolchains/python"),
        ("rules_nixpkgs_go", "toolchains/go"),
        ("rules_nixpkgs_rust", "toolchains/rust"),
        ("rules_nixpkgs_posix", "toolchains/posix"),
    ]:
        # case analysis in inner loop to reduce code duplication
        if kind == "local_repository":
            path = rules_nixpkgs.get("path")
            maybe(native.local_repository, name, path = "{}/{}".format(path, prefix))
        elif kind == "http_archive":
            maybe(
                http_archive,
                name,
                strip_prefix = strip_prefix + prefix,
                # there may be more attributes needed. please submit a pull request to support your use case.
                url = rules_nixpkgs.get("url"),
                urls = rules_nixpkgs.get("urls"),
                sha256 = rules_nixpkgs.get("sha256"),
            )
        elif kind == "git_repository":
            maybe(
                git_repository,
                name,
                strip_prefix = strip_prefix + prefix,
                # there may be more attributes needed. please submit a pull request to support your use case.
                remote = rules_nixpkgs.get("remote"),
                commit = rules_nixpkgs.get("commit"),
                branch = rules_nixpkgs.get("branch"),
                tag = rules_nixpkgs.get("tag"),
                shallow_since = rules_nixpkgs.get("shallow_since"),
            )
        else:
            errormsg = [
                "Could not find any import type for `rules_nixpkgs`.",
                "This should not happen. If you encounter this using the latest release",
                "of `rules_nixpkgs`, please file an issue describing your use case:",
                "https://github.com/tweag/rules_nixpkgs/issues",
                "or submit a pull request with corrections:",
                "https://github.com/tweag/rules_nixpkgs/pulls",
            ]
            fail("\n".join(errormsg))
| 45.041237 | 126 | 0.613642 | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
def rules_nixpkgs_dependencies(rules_nixpkgs_name = "io_tweag_rules_nixpkgs"):
    """Load repositories required by rules_nixpkgs.

    Declares the `platforms`, `bazel_skylib` and `rules_java` repositories
    (skipped when the workspace already defines them), then re-declares the
    extracted `rules_nixpkgs_*` submodules from wherever `rules_nixpkgs`
    itself was imported, mirroring the original import kind
    (`local_repository`, `http_archive` or `git_repository`).

    Args:
        rules_nixpkgs_name: name under which this repository is known in your workspace
    """

    # Common third-party dependencies; `maybe` keeps any user-provided version.
    maybe(
        http_archive,
        "platforms",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/0.0.4/platforms-0.0.4.tar.gz",
            "https://github.com/bazelbuild/platforms/releases/download/0.0.4/platforms-0.0.4.tar.gz",
        ],
        sha256 = "079945598e4b6cc075846f7fd6a9d0857c33a7afc0de868c2ccb96405225135d",
    )
    maybe(
        http_archive,
        "bazel_skylib",
        urls = [
            "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
        ],
        sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
    )
    maybe(
        http_archive,
        "rules_java",
        url = "https://github.com/bazelbuild/rules_java/releases/download/4.0.0/rules_java-4.0.0.tar.gz",
        sha256 = "34b41ec683e67253043ab1a3d1e8b7c61e4e8edefbcad485381328c934d072fe",
    )
    # the following complication is due to migrating to `bzlmod`.
    # fetch extracted submodules as external repositories from an existing source tree, based on the import type.
    rules_nixpkgs = native.existing_rule(rules_nixpkgs_name)
    if not rules_nixpkgs:
        errormsg = [
            "External repository `rules_nixpkgs` not found as `{}`.".format(rules_nixpkgs_name),
            "Specify `rules_nixpkgs_dependencies(rules_nixpkgs_name=<name>)`",
            "with `<name>` as used for importing `rules_nixpkgs`.",
        ]
        fail("\n".join(errormsg))
    # `kind` records how `rules_nixpkgs` itself was declared; each submodule
    # below is declared the same way, rooted at its `prefix` subdirectory.
    kind = rules_nixpkgs.get("kind")
    strip_prefix = rules_nixpkgs.get("strip_prefix", "")
    if strip_prefix:
        strip_prefix += "/"
    for name, prefix in [
        ("rules_nixpkgs_core", "core"),
        ("rules_nixpkgs_cc", "toolchains/cc"),
        ("rules_nixpkgs_java", "toolchains/java"),
        ("rules_nixpkgs_python", "toolchains/python"),
        ("rules_nixpkgs_go", "toolchains/go"),
        ("rules_nixpkgs_rust", "toolchains/rust"),
        ("rules_nixpkgs_posix", "toolchains/posix"),
    ]:
        # case analysis in inner loop to reduce code duplication
        if kind == "local_repository":
            path = rules_nixpkgs.get("path")
            maybe(native.local_repository, name, path = "{}/{}".format(path, prefix))
        elif kind == "http_archive":
            maybe(
                http_archive,
                name,
                strip_prefix = strip_prefix + prefix,
                # there may be more attributes needed. please submit a pull request to support your use case.
                url = rules_nixpkgs.get("url"),
                urls = rules_nixpkgs.get("urls"),
                sha256 = rules_nixpkgs.get("sha256"),
            )
        elif kind == "git_repository":
            maybe(
                git_repository,
                name,
                strip_prefix = strip_prefix + prefix,
                # there may be more attributes needed. please submit a pull request to support your use case.
                remote = rules_nixpkgs.get("remote"),
                commit = rules_nixpkgs.get("commit"),
                branch = rules_nixpkgs.get("branch"),
                tag = rules_nixpkgs.get("tag"),
                shallow_since = rules_nixpkgs.get("shallow_since"),
            )
        else:
            errormsg = [
                "Could not find any import type for `rules_nixpkgs`.",
                "This should not happen. If you encounter this using the latest release",
                "of `rules_nixpkgs`, please file an issue describing your use case:",
                "https://github.com/tweag/rules_nixpkgs/issues",
                "or submit a pull request with corrections:",
                "https://github.com/tweag/rules_nixpkgs/pulls",
            ]
            fail("\n".join(errormsg))
| 0 | 0 | 0 |
8d09485db8efd21615a8b003ba88ca0a4332df60 | 2,041 | py | Python | src/_dependencies/injectable.py | dry-python/dependencies | 1a8bba41ab42d0b5249b36471f5300d9faba81e7 | [
"BSD-2-Clause"
] | 175 | 2018-07-21T13:04:44.000Z | 2020-05-27T15:31:06.000Z | src/_dependencies/injectable.py | proofit404/dependencies | 204e0cfadca801d64857f24aa4c74e7939ed9af0 | [
"BSD-2-Clause"
] | 325 | 2016-05-16T11:16:11.000Z | 2022-03-04T00:45:57.000Z | src/_dependencies/injectable.py | dry-python/dependencies | 1a8bba41ab42d0b5249b36471f5300d9faba81e7 | [
"BSD-2-Clause"
] | 18 | 2018-06-17T09:33:16.000Z | 2020-05-20T18:12:30.000Z | from inspect import isclass
from inspect import signature
from _dependencies.exceptions import DependencyError
# Messages.
default_class_value_template = """
{owner} has a default value of {argument!r} argument set to {value!r} class.
You should either change the name of the argument into '{argument}_class'
or set the default value to an instance of that class.
""".strip()
| 30.014706 | 76 | 0.680059 | from inspect import isclass
from inspect import signature
from _dependencies.exceptions import DependencyError
def _function_args(func, funcname, owner):
    """Inspect a plain function and classify all of its arguments."""
    return _separate(_args(func, funcname, owner))
def _method_args(func, funcname, owner):
    """Inspect a method and classify its arguments, skipping the first (self/cls)."""
    return _separate(_args(func, funcname, owner)[1:])
def _args(func, funcname, owner):
args = []
for name, param in signature(func).parameters.items():
have_default = param.default is not param.empty
args.append((name, have_default))
if have_default:
_check_argument_default(name, param.default, owner)
if param.kind is param.VAR_POSITIONAL:
raise DependencyError(
f"{funcname!r} have variable-length positional arguments"
)
if param.kind is param.VAR_KEYWORD:
raise DependencyError(
f"{funcname!r} have variable-length keyword arguments"
)
return args
def _separate(arguments):
args = {}
required = set()
optional = set()
for name, have_default in arguments:
args[name] = have_default
target = optional if have_default else required
target.add(name)
return args, required, optional
def _check_argument_default(argument, value, owner):
expect_class = argument.endswith("_class")
is_class = isclass(value)
if expect_class and not is_class:
message = "{0!r} default value should be a class"
raise DependencyError(message.format(argument))
if not expect_class and is_class:
message = default_class_value_template.format(
owner=owner, argument=argument, value=value.__name__
)
raise DependencyError(message)
# Messages.
# Error text used by _check_argument_default when a non-`*_class` argument
# defaults to a class object; `.strip()` drops the leading/trailing newlines
# of the triple-quoted literal.
default_class_value_template = """
{owner} has a default value of {argument!r} argument set to {value!r} class.
You should either change the name of the argument into '{argument}_class'
or set the default value to an instance of that class.
""".strip()
| 1,539 | 0 | 115 |
a027763e2428f3baf3eb8729ecd4250edf35d58d | 347 | py | Python | 4_Applied ML with Python/Review/plot1.py | syedmeesamali/CourseraPlus | 0e729d10938ecb55fde69433c6b02cb02b8e6d10 | [
"MIT"
] | null | null | null | 4_Applied ML with Python/Review/plot1.py | syedmeesamali/CourseraPlus | 0e729d10938ecb55fde69433c6b02cb02b8e6d10 | [
"MIT"
] | null | null | null | 4_Applied ML with Python/Review/plot1.py | syedmeesamali/CourseraPlus | 0e729d10938ecb55fde69433c6b02cb02b8e6d10 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
| 24.785714 | 62 | 0.855908 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
| 0 | 0 | 0 |
925fda38891dd3f10a9708584698f2913670ab94 | 3,267 | py | Python | sstubs_miner/analysers/DataAnalyser.py | louislefevre/sstubs-research | 95755b7adcc49149f9b13ee6a8d05a95b356570b | [
"Apache-2.0"
] | 1 | 2020-12-06T18:49:10.000Z | 2020-12-06T18:49:10.000Z | sstubs_miner/analysers/DataAnalyser.py | louislefevre/sstubs-miner | 95755b7adcc49149f9b13ee6a8d05a95b356570b | [
"Apache-2.0"
] | null | null | null | sstubs_miner/analysers/DataAnalyser.py | louislefevre/sstubs-miner | 95755b7adcc49149f9b13ee6a8d05a95b356570b | [
"Apache-2.0"
] | null | null | null | from datetime import datetime, timedelta
from math import inf as infinity
from statistics import mean
| 32.029412 | 96 | 0.607591 | from datetime import datetime, timedelta
from math import inf as infinity
from statistics import mean
class DataAnalyser:
def __init__(self, sstubs):
self._sstubs = sstubs
self._builds = self._initialise_builds(sstubs)
def sstub_count(self):
return len(self._sstubs)
def project_count(self):
return len(self._get_projects(self._sstubs))
def project_sstub_count(self):
projects = dict.fromkeys(self._get_projects(self._sstubs), 0)
for sstub in self._sstubs:
projects[sstub.project_name] += 1
return dict(sorted(projects.items(), key=lambda item: item[1]))
def build_sstub_count(self):
build_sstubs = {}
for build, sstubs in self._builds.items():
build_sstubs[build] = len(sstubs)
return build_sstubs
def build_project_count(self):
build_projects = {}
for build, sstubs in self._builds.items():
projects = self._get_projects(sstubs)
build_projects[build] = len(projects)
return build_projects
def loc_range_count(self):
ranges = self._get_loc_ranges(self._sstubs)
range_counts = {}
for limit, sstubs in ranges.items():
range_counts[limit] = len(sstubs)
return range_counts
def average_time(self):
return self._get_mean_time(self._sstubs)
def average_build_time(self):
build_times = {}
for build, sstubs in self._builds.items():
mean_time = self._get_mean_time(sstubs)
build_times[build] = mean_time
return build_times
def average_loc_time(self):
ranges = self._get_loc_ranges(self._sstubs)
range_times = {}
for limit, sstubs in ranges.items():
range_times[limit] = self._get_mean_time(sstubs)
return range_times
def _get_mean_time(self, sstubs):
time_differences = []
for sstub in sstubs:
difference = self._get_time_difference(sstub)
time_differences.append(difference)
mean_time = int(mean(time_differences))
return str(timedelta(seconds=mean_time))
@staticmethod
def _initialise_builds(sstubs):
builds = {}
for sstub in sstubs:
build = sstub.build_system
if build in builds:
builds[build].append(sstub)
else:
builds[build] = [sstub]
return builds
@staticmethod
def _get_projects(sstubs):
projects = set()
for sstub in sstubs:
projects.add(sstub.project_name)
return projects
@staticmethod
def _get_loc_ranges(sstubs):
ranges = {50: [], 100: [], 250: [], 500: [], 1000: [], 2500: [], 5000: [], infinity: []}
for sstub in sstubs:
loc = int(sstub.loc)
for limit in ranges.keys():
if loc <= limit:
ranges[limit].append(sstub)
break
return ranges
@staticmethod
def _get_time_difference(sstub):
bug_date = datetime.strptime(sstub.bug_date, '%Y-%m-%d %H:%M:%S')
fix_date = datetime.strptime(sstub.fix_date, '%Y-%m-%d %H:%M:%S')
return (fix_date - bug_date).total_seconds()
| 2,667 | 474 | 23 |
b70fdc8bf52039d2c2d5c4f1ddb9c314fa9b21cc | 7,140 | py | Python | src/datasource.py | Muhazerin/auto-star-planner | 4a4a2054dacdfd614b82c300eb8760fddc0a7260 | [
"MIT"
] | 1 | 2021-06-05T17:13:47.000Z | 2021-06-05T17:13:47.000Z | src/datasource.py | Muhazerin/auto-star-planner | 4a4a2054dacdfd614b82c300eb8760fddc0a7260 | [
"MIT"
] | null | null | null | src/datasource.py | Muhazerin/auto-star-planner | 4a4a2054dacdfd614b82c300eb8760fddc0a7260 | [
"MIT"
] | 1 | 2020-12-03T02:32:13.000Z | 2020-12-03T02:32:13.000Z | import requests
import re
from bs4 import BeautifulSoup
import indexParser
source = DataSource()
| 46.666667 | 117 | 0.594818 | import requests
import re
from bs4 import BeautifulSoup
import indexParser
class DataSource:
    """Scraper/cache for the NTU Class Schedule website (wish.wis.ntu.edu.sg).

    Each ``load_*`` method performs an HTTP request, parses the returned HTML
    and stores the result on the instance.  On any failure the corresponding
    attribute is set to an error string instead of the parsed data, so callers
    should check the attribute type before using it.
    """

    def __init__(self):
        self.__acad_sem_dict = None  # Dict: form value -> displayed semester name
        self.__chosen_acad_sem = None  # String: displayed semester picked by the user
        self.__course_year_value = None  # List: displayed course/year strings
        self.__course_year_key = None  # List: matching form values
        self.__modules = None  # List: "COURSE_CODE: Course Name" strings
        self.__indexes = None  # List of list of dict --> [[{}, {}, {}], [{}, {}]]

    @property
    def acad_sem_dict(self):
        return self.__acad_sem_dict

    @property
    def chosen_acad_sem(self):
        return self.__chosen_acad_sem

    @chosen_acad_sem.setter
    def chosen_acad_sem(self, new_chosen_acad_sem):
        self.__chosen_acad_sem = new_chosen_acad_sem

    @property
    def course_year_value(self):
        return self.__course_year_value

    @property
    def course_year_key(self):
        return self.__course_year_key

    @property
    def modules(self):
        return self.__modules

    @property
    def indexes(self):
        return self.__indexes

    # Visit NTU Class Schedule website and try to save the acadSem to self.__acad_sem
    def load_acad_sem(self):
        """Fetch the academic-semester options into `acad_sem_dict`."""
        try:
            # Create a GET request to this website
            response = requests.get('https://wish.wis.ntu.edu.sg/webexe/owa/aus_schedule.main')
            if response.status_code != 200:
                self.__acad_sem_dict = f'DataSource load_acad_sem Error!\nStatus Code: {response.status_code}'
            else:
                # Parse the request to soup and get the first select tag's children
                acad_sem_list = BeautifulSoup(response.text, 'html.parser').find('select').contents
                # Remove the /n in the list
                acad_sem_list = [elem for elem in acad_sem_list if elem != '\n']
                # Get the value(key) and string(value) of every element in the list and store it as a dictionary
                acad_sem_dict = {elem['value']: elem.string for elem in acad_sem_list}
                self.__acad_sem_dict = acad_sem_dict
        except Exception as err:
            self.__acad_sem_dict = f'DataSource load_acad_sem Exception Error!\nError msg: {err}'

    # Visit NTU Class Schedule website and try to save the courseYear to self.__course_year
    def load_course_year(self, acad_sem):
        """Fetch the course/year options for *acad_sem* into the two course_year lists."""
        try:
            # The data to be POSTed
            data = {
                'acadsem': acad_sem,
                'r_subj_code': 'Enter Keywords or Course Code',
                'r_search_type': 'F',
                'boption': 'x',
                'staff_access': 'false'
            }
            # Create a POST requests to this website with the data
            response = requests.post('https://wish.wis.ntu.edu.sg/webexe/owa/AUS_SCHEDULE.main_display', data=data)
            if response.status_code != 200:
                self.__course_year_value = f'DataSource load_course_year Error!\nStatus Code: {response.status_code}'
                self.__course_year_key = f'DataSource load_course_year Error!\nStatus Code: {response.status_code}'
            else:
                # Parse the request to soup and find all the select tags
                selects = BeautifulSoup(response.text, 'html.parser').find_all('select')
                # The info that I'm looking for is the content of the second select tag
                course_year_list = selects[1].contents
                # Remove the /n in the list
                course_year_list = [elem for elem in course_year_list if elem != '\n']
                # Separate the list into 2 lists (key, value)
                self.__course_year_value = [elem.string for elem in course_year_list]
                self.__course_year_key = [elem['value'] for elem in course_year_list]
        except Exception as err:
            self.__course_year_value = f'DataSource load_course_year Exception Error!\nError msg: {err}'
            self.__course_year_key = f'DataSource load_course_year Exception Error!\nError msg: {err}'

    # Visit NTU Class Schedule website and try to load the modules using the course_year_key
    def load_modules(self, course_year_key):
        """Fetch module names into `modules` and their timetables into `indexes`."""
        try:
            # The data to be POSTed
            data = {
                'acadsem': list(self.__acad_sem_dict.keys())[
                    list(self.__acad_sem_dict.values()).index(self.__chosen_acad_sem)],
                'r_course_yr': course_year_key,
                'r_subj_code': 'Enter Keywords or Course Code',
                'r_search_type': 'F',
                'boption': 'CLoad',
                'staff_access': 'false'
            }
            # Create a POST request to this website with this data
            response = requests.post('https://wish.wis.ntu.edu.sg/webexe/owa/AUS_SCHEDULE.main_display1', data=data)
            if response.status_code != 200:
                self.__modules = f'DataSource load_modules Error!\nStatus Code: {response.status_code}'
            else:
                # Initialize self.__modules to a list
                self.__modules = []
                # Parse the request to soup and find all the table tags
                tables = BeautifulSoup(response.text, 'html.parser').find_all('table')
                # Compile the regex to be used later.
                # Raw string: avoids the invalid "\s" escape warning a plain
                # string literal would trigger on modern Python.
                regex = re.compile(r'[^a-zA-Z0-9\s]')
                # The table consists of 2 sections (course info, course timetable info)
                # Course info section
                for i in range(0, len(tables), 2):
                    # The data is stored in font tag
                    fonts = tables[i].find_all('font')
                    # Sometimes the course name has special chars(*,~), I want those removed
                    course_name = regex.split(fonts[1].string)[0]
                    # Append the information to self.__modules
                    self.__modules.append(f'{fonts[0].string}: {course_name}')
                # Course timetable info section
                # Initialize the indexes list
                self.__indexes = []
                # NOTE(review): assumes tables come in (info, timetable) pairs,
                # i.e. len(tables) is even — confirm against the live page.
                for i in range(1, len(tables) + 1, 2):
                    # Split the info into indexes
                    self.retrieve_indexes(tables[i])
        except Exception as err:
            self.__modules = f'DataSource load_modules Exception Error\nError msg: {err}'

    # Split the info into indexes
    def retrieve_indexes(self, table):
        """Parse one timetable table into a new per-module list inside `indexes`."""
        # Append a new list to self.__indexes
        self.__indexes.append([])
        current_list = self.__indexes[len(self.__indexes) - 1]
        # Get all the rows in the table
        trs = table.find_all('tr')
        # Row 0 is the header row, so start at 1.
        for i in range(1, len(trs)):
            # Get all the columns in a row. There are exactly 7 columns
            tds = trs[i].find_all('td')
            # If first column is a string, increment index and add dict to list and parse
            if tds[0].string:
                current_list.append({})
                indexParser.parse_new(tds, len(current_list) - 1, current_list)
            else:
                indexParser.parse(tds, len(current_list) - 1, current_list)
source = DataSource()
| 6,281 | 736 | 23 |
5424c7e93680618774daa851b9a17447ac5087bb | 421 | py | Python | Library/app/migrations/0003_user_slug.py | kumarnishu/BOOKS-LIBRARY | 28690850248acbe90bb47a37700f6152e047829c | [
"Apache-2.0"
] | 1 | 2020-03-30T16:39:42.000Z | 2020-03-30T16:39:42.000Z | Library/app/migrations/0003_user_slug.py | kumarnishu/BOOKS-LIBRARY | 28690850248acbe90bb47a37700f6152e047829c | [
"Apache-2.0"
] | 8 | 2021-03-19T01:21:01.000Z | 2022-03-12T00:21:57.000Z | Library/app/migrations/0003_user_slug.py | kumarnishu/BOOKS-LIBRARY | 28690850248acbe90bb47a37700f6152e047829c | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.2 on 2020-02-02 05:09
from django.db import migrations, models
| 21.05 | 59 | 0.589074 | # Generated by Django 3.0.2 on 2020-02-02 05:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20200201_1054'),
]
operations = [
migrations.AddField(
model_name='user',
name='slug',
field=models.SlugField(default=1, unique=True),
preserve_default=False,
),
]
| 0 | 307 | 23 |
e6b9b1217c4b0f3170afd80044dc33127aaf422c | 4,210 | py | Python | eval_shapes.py | VLL-HD/HINT | b26c5026bd486bb392a9416430cb1dfebe5aa06f | [
"MIT"
] | 14 | 2020-03-23T20:31:27.000Z | 2022-03-02T21:12:23.000Z | eval_shapes.py | VLL-HD/HINT | b26c5026bd486bb392a9416430cb1dfebe5aa06f | [
"MIT"
] | 1 | 2020-04-02T17:44:26.000Z | 2020-05-04T21:50:35.000Z | eval_shapes.py | VLL-HD/HINT | b26c5026bd486bb392a9416430cb1dfebe5aa06f | [
"MIT"
] | 4 | 2020-04-02T17:45:40.000Z | 2021-06-24T19:12:49.000Z | import numpy as np
import traceback
import multiprocessing as mp
from tqdm import tqdm
from best_shape_fit import *
from data import PlusShapeModel
configs = [
# 'plus_shape.conditional_cinn_4',
# 'plus_shape.conditional_cinn_8',
# 'plus_shape.conditional_hint_4_full',
# 'plus_shape.conditional_hint_8_full',
# 'plus_shape.unconditional_inn_4_Q',
# 'plus_shape.unconditional_inn_8',
# 'plus_shape.unconditional_hint_4_full',
# 'plus_shape.unconditional_hint_8_full',
# 'plus_shape.unconditional_inn_16',
# 'plus_shape.unconditional_inn_32',
# 'plus_shape.unconditional_hint_4_1',
# 'plus_shape.unconditional_hint_8_1',
# 'plus_shape.unconditional_hint_16_1',
# 'plus_shape.unconditional_hint_4_2',
# 'plus_shape.unconditional_hint_8_2',
# 'plus_shape.unconditional_hint_4_3',
# 'plus_shape.unconditional_hint_4_0_small',
# 'plus_shape.unconditional_hint_8_0_small',
# 'plus_shape.unconditional_hint_16_0_small',
# 'plus_shape.unconditional_hint_32_0_small',
# 'plus_shape.unconditional_hint_4_1_small',
# 'plus_shape.unconditional_hint_8_1_small',
# 'plus_shape.unconditional_hint_16_1_small',
# 'plus_shape.unconditional_hint_4_2_small',
# 'plus_shape.unconditional_hint_8_2_small',
# 'plus_shape.unconditional_hint_4_3_small',
# 'plus_shape.unconditional_hint_4_0_big',
# 'plus_shape.unconditional_hint_8_0_big',
# 'plus_shape.unconditional_hint_16_0_big',
# 'plus_shape.unconditional_hint_32_0_big',
# 'plus_shape.unconditional_hint_4_1_big',
# 'plus_shape.unconditional_hint_8_1_big',
# 'plus_shape.unconditional_hint_16_1_big',
# 'plus_shape.unconditional_hint_4_2_big',
# 'plus_shape.unconditional_hint_8_2_big',
# 'plus_shape.unconditional_hint_4_3_big',
]
if __name__ == '__main__':
pass
evaluate_all()
collect_results()
| 31.41791 | 94 | 0.609026 | import numpy as np
import traceback
import multiprocessing as mp
from tqdm import tqdm
from best_shape_fit import *
from data import PlusShapeModel
configs = [
# 'plus_shape.conditional_cinn_4',
# 'plus_shape.conditional_cinn_8',
# 'plus_shape.conditional_hint_4_full',
# 'plus_shape.conditional_hint_8_full',
# 'plus_shape.unconditional_inn_4_Q',
# 'plus_shape.unconditional_inn_8',
# 'plus_shape.unconditional_hint_4_full',
# 'plus_shape.unconditional_hint_8_full',
# 'plus_shape.unconditional_inn_16',
# 'plus_shape.unconditional_inn_32',
# 'plus_shape.unconditional_hint_4_1',
# 'plus_shape.unconditional_hint_8_1',
# 'plus_shape.unconditional_hint_16_1',
# 'plus_shape.unconditional_hint_4_2',
# 'plus_shape.unconditional_hint_8_2',
# 'plus_shape.unconditional_hint_4_3',
# 'plus_shape.unconditional_hint_4_0_small',
# 'plus_shape.unconditional_hint_8_0_small',
# 'plus_shape.unconditional_hint_16_0_small',
# 'plus_shape.unconditional_hint_32_0_small',
# 'plus_shape.unconditional_hint_4_1_small',
# 'plus_shape.unconditional_hint_8_1_small',
# 'plus_shape.unconditional_hint_16_1_small',
# 'plus_shape.unconditional_hint_4_2_small',
# 'plus_shape.unconditional_hint_8_2_small',
# 'plus_shape.unconditional_hint_4_3_small',
# 'plus_shape.unconditional_hint_4_0_big',
# 'plus_shape.unconditional_hint_8_0_big',
# 'plus_shape.unconditional_hint_16_0_big',
# 'plus_shape.unconditional_hint_32_0_big',
# 'plus_shape.unconditional_hint_4_1_big',
# 'plus_shape.unconditional_hint_8_1_big',
# 'plus_shape.unconditional_hint_16_1_big',
# 'plus_shape.unconditional_hint_4_2_big',
# 'plus_shape.unconditional_hint_8_2_big',
# 'plus_shape.unconditional_hint_4_3_big',
]
def evaluate_all():
with mp.Pool(1) as p:
p.map(evaluate_config, configs)
def evaluate_config(config):
data_model = PlusShapeModel()
try:
results = {'IoU': [], 'DICE': [], 'max_h': [], 'avg_h': []}
sample = np.load(f'results/{config.replace(".", "-")}_0_sample.npy')
# print(config, sample.shape)
# return
curves = data_model.trace_fourier_curves(sample)
curves_dense = data_model.trace_fourier_curves(sample, n_points=1000)
for j in range(len(curves)):
points = torch.tensor(curves[j]).float().cpu()
params = fit_plus_shape_to_points(points)
iou, dice = iou_and_dice_plus_shape(params, points)
max_h, avg_h = max_and_avg_hausdorff_distance_plus_shape(params, curves_dense[j])
print(config, j, iou, dice, max_h, avg_h, flush=True)
results['IoU'].append(iou)
results['DICE'].append(dice)
results['max_h'].append(max_h)
results['avg_h'].append(avg_h)
iou = np.mean(results['IoU'])
dice = np.mean(results['DICE'])
max_h = np.mean(results['max_h'])
avg_h = np.mean(results['avg_h'])
print(iou, dice, max_h, avg_h)
np.save(f'results/{config.replace(".", "-")}_0', np.stack([iou, dice, max_h, avg_h]))
except Exception as e:
print(f'ERROR with config "{config}"')
print(e)
traceback.print_exc()
# return
def collect_results():
for config in configs:
results = np.array([np.load(f'results/{config.replace(".", "-")}_0.npy')])
means, stds = results.mean(axis=0), results.std(axis=0)
print(config)
# print(f'iou: {means[0]:.4f} | {stds[0]:.4f}')
# print(f'dice: {means[1]:.4f} | {stds[1]:.4f}')
# print(f'max_h: {means[2]:.4f} | {stds[2]:.4f}')
# print(f'avg_h: {means[3]:.4f} | {stds[3]:.4f}')
print(f'{means[0]:.3f}')
print(f'{means[3]:.3f}')
print()
if __name__ == '__main__':
pass
evaluate_all()
collect_results()
| 1,997 | 0 | 74 |
f7fca72e52232376356f35434e420a846dc84c45 | 850 | py | Python | backend/utils.py | DenX/pixyship | b8c75d18bfcaf86e1d1c4d7de5b58a51175b9110 | [
"MIT"
] | null | null | null | backend/utils.py | DenX/pixyship | b8c75d18bfcaf86e1d1c4d7de5b58a51175b9110 | [
"MIT"
] | null | null | null | backend/utils.py | DenX/pixyship | b8c75d18bfcaf86e1d1c4d7de5b58a51175b9110 | [
"MIT"
] | null | null | null | import time
from config import CONFIG
| 20.238095 | 81 | 0.635294 | import time
from config import CONFIG
def float_range(values, start_key, end_key):
start = 0
if values[start_key]:
start = float(values[start_key])
end = 0
if values[end_key]:
end = float(values[end_key])
return start, end
def int_range(values, start_key, end_key):
start = 0
if values[start_key]:
start = int(values[start_key])
end = 0
if values[end_key]:
end = int(values[end_key])
return start, end
def api_sleep(secs, force_sleep=False):
if not CONFIG['SAVY_PUBLIC_API_TOKEN'] or force_sleep:
time.sleep(secs)
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
| 667 | 48 | 92 |
9d9175a033fb44ad4b48daf8451547ae55922b48 | 6,298 | py | Python | botto/core/models/jmdict.py | MusicOnline/Tango-Legacy | 6c7b65924392a182fc01686cdd3a83638a64aaa4 | [
"MIT"
] | 2 | 2019-05-27T06:41:02.000Z | 2020-03-18T19:08:45.000Z | botto/core/models/jmdict.py | MusicOnline/Tango | 6c7b65924392a182fc01686cdd3a83638a64aaa4 | [
"MIT"
] | null | null | null | botto/core/models/jmdict.py | MusicOnline/Tango | 6c7b65924392a182fc01686cdd3a83638a64aaa4 | [
"MIT"
] | null | null | null | from botto.core.bot import Botto
db = Botto.db
# pylint: disable=no-member
| 34.415301 | 88 | 0.636075 | from botto.core.bot import Botto
db = Botto.db
# pylint: disable=no-member
class Entry(db.Model):  # type: ignore # <entry>
    """One JMdict dictionary entry; the other JMdict tables reference its id."""

    __tablename__ = "JMdict_Entry"
    id = db.Column(db.Integer, primary_key=True)  # <ent_seq>

    def __repr__(self) -> str:
        return "<Entry id={0.id}>".format(self)
class WritingElement(db.Model):  # type: ignore # <k_ele>
    """A kanji/writing form of an entry (XML <k_ele>), keyed by (entry, literal)."""

    __tablename__ = "JMdict_WritingElement"
    entry_id = db.Column(db.ForeignKey("JMdict_Entry.id"), primary_key=True)
    literal = db.Column(db.String, primary_key=True)  # <keb>
    priority = db.Column(db.ARRAY(db.String), nullable=False)  # <ke_pri>
    info = db.Column(db.ARRAY(db.String), nullable=False)  # <ke_inf>

    def __repr__(self) -> str:
        return (
            "<JMdict_WritingElement entry_id={0.entry_id} "
            "literal={0.literal!r}>".format(self)
        )
class ReadingElement(db.Model):  # type: ignore # <r_ele>
    """A kana reading of an entry (XML <r_ele>), keyed by (entry, literal)."""

    __tablename__ = "JMdict_ReadingElement"
    entry_id = db.Column(db.ForeignKey("JMdict_Entry.id"), primary_key=True)
    literal = db.Column(db.String, primary_key=True)  # <reb>
    priority = db.Column(db.ARRAY(db.String), nullable=False)  # <re_pri>
    info = db.Column(db.ARRAY(db.String), nullable=False)  # <re_inf>

    def __repr__(self) -> str:
        return (
            "<JMdict_ReadingElement entry_id={0.entry_id} "
            "literal={0.literal!r}>".format(self)
        )
class Sense(db.Model):  # type: ignore # <sense>
    """One sense (meaning) of an entry; `index` orders senses within the entry."""

    __tablename__ = "JMdict_Sense"
    entry_id = db.Column(db.ForeignKey("JMdict_Entry.id"), primary_key=True)
    index = db.Column(db.Integer, primary_key=True)  # incremental id
    references = db.Column(db.ARRAY(db.String), nullable=False)  # <xref>
    antonyms = db.Column(db.ARRAY(db.String), nullable=False)  # <ant>
    parts_of_speech = db.Column(db.ARRAY(db.String), nullable=False)  # <pos>
    fields = db.Column(db.ARRAY(db.String), nullable=False)  # <field>
    misc = db.Column(db.ARRAY(db.String), nullable=False)  # <misc>
    dialects = db.Column(db.ARRAY(db.String), nullable=False)  # <dial>
    info = db.Column(db.ARRAY(db.String), nullable=False)  # <s_inf>

    def __repr__(self) -> str:
        return "<JMdict_Sense entry_id={0.entry_id} index={0.index}>".format(self)
class Gloss(db.Model):  # type: ignore # <gloss> (within <sense>)
    """A translation/gloss of one sense, linked via a composite FK to Sense."""

    entry_id = db.Column("entry_id", db.Integer)
    sense_index = db.Column("sense_index", db.Integer)
    text = db.Column(db.String)  # text # null in <ent_seq> 1422200
    lang = db.Column(db.String, nullable=False)  # xml:lang attr (implied: eng)
    gender = db.Column(db.String)  # g_gend attr
    type = db.Column(db.String)  # g_type attr
    __table_args__ = (
        db.ForeignKeyConstraint([entry_id, sense_index], [Sense.entry_id, Sense.index]),
    )

    def __repr__(self) -> str:
        return (
            "<JMdict_Gloss entry_id={0.entry_id} "
            "sense_index={0.sense_index}>".format(self)
        )
class LSource(db.Model):  # type: ignore # <lsource> (within <sense>)
    """Source-language information for a loanword sense (XML <lsource>)."""

    entry_id = db.Column("entry_id", db.Integer)
    sense_index = db.Column("sense_index", db.Integer)
    text = db.Column(db.String)  # text
    lang = db.Column(db.String, nullable=False)  # xml:lang attr (implied: eng)
    type = db.Column(db.String, nullable=False)  # ls_type attr (implied: full)
    wasei = db.Column(db.String)  # ls_type attr
    __table_args__ = (
        db.ForeignKeyConstraint([entry_id, sense_index], [Sense.entry_id, Sense.index]),
    )

    def __repr__(self) -> str:
        return (
            "<JMdict_LSource entry_id={0.entry_id} "
            "sense_index={0.sense_index}>".format(self)
        )
class ReadingWriting(db.Model):  # type: ignore
    """Association table linking a reading element to a writing element.

    NOTE(review): `_entry_id` exists only so the second composite FK can be
    expressed; presumably it always equals `entry_id` — verify against the
    loader that populates this table.
    """

    __tablename__ = "JMdict_ReadingWriting"
    entry_id = db.Column("entry_id", db.Integer)
    reading_literal = db.Column("reading_literal", db.String)
    _entry_id = db.Column("_entry_id", db.Integer)
    writing_literal = db.Column("writing_literal", db.String)
    __table_args__ = (
        db.ForeignKeyConstraint(
            [entry_id, reading_literal],
            [ReadingElement.entry_id, ReadingElement.literal],
        ),
        db.ForeignKeyConstraint(
            [_entry_id, writing_literal],
            [WritingElement.entry_id, WritingElement.literal],
        ),
    )

    def __repr__(self) -> str:
        return (
            "<JMdict_ReadingWriting entry_id={0.entry_id} "
            "reading_literal={0.reading_literal!r} "
            "writing_literal={0.writing_literal!r}>".format(self)
        )
class ReadingSense(db.Model):  # type: ignore
    """Association table linking a reading element to a sense.

    NOTE(review): `_entry_id` mirrors `entry_id` to carry the second composite
    FK — confirm both are populated identically.
    """

    __tablename__ = "JMdict_ReadingSense"
    entry_id = db.Column("entry_id", db.Integer)
    reading_literal = db.Column("reading_literal", db.String)
    _entry_id = db.Column("_entry_id", db.Integer)
    sense_index = db.Column("sense_index", db.Integer)
    __table_args__ = (
        db.ForeignKeyConstraint(
            [entry_id, reading_literal],
            [ReadingElement.entry_id, ReadingElement.literal],
        ),
        db.ForeignKeyConstraint(
            [_entry_id, sense_index], [Sense.entry_id, Sense.index]
        ),
    )

    def __repr__(self) -> str:
        return (
            "<JMdict_ReadingSense entry_id={0.entry_id} "
            "reading_literal={0.reading_literal!r} "
            "sense_index={0.sense_index}>".format(self)
        )
class WritingSense(db.Model):  # type: ignore
    """Association table linking a writing element to a sense.

    NOTE(review): `_entry_id` mirrors `entry_id` to carry the second composite
    FK — confirm both are populated identically.
    """

    __tablename__ = "JMdict_WritingSense"
    entry_id = db.Column("entry_id", db.Integer)
    writing_literal = db.Column("writing_literal", db.String)
    _entry_id = db.Column("_entry_id", db.Integer)
    sense_index = db.Column("sense_index", db.Integer)
    __table_args__ = (
        db.ForeignKeyConstraint(
            [entry_id, writing_literal],
            [WritingElement.entry_id, WritingElement.literal],
        ),
        db.ForeignKeyConstraint(
            [_entry_id, sense_index], [Sense.entry_id, Sense.index]
        ),
    )

    def __repr__(self) -> str:
        return (
            "<JMdict_WritingSense entry_id={0.entry_id} "
            "writing_literal={0.writing_literal!r} "
            "sense_index={0.sense_index}>".format(self)
        )
| 1,314 | 4,691 | 207 |
06248ee30f4354aece78fbe7fb4ab9742f2eb407 | 1,161 | py | Python | utils/parse_arguments.py | xujinzh/csk | 136002da069d300fc2da9ae8530861818087ff40 | [
"BSD-2-Clause"
] | 4 | 2021-09-21T14:24:11.000Z | 2022-02-27T06:46:11.000Z | utils/parse_arguments.py | xujinzh/CSK | 136002da069d300fc2da9ae8530861818087ff40 | [
"BSD-2-Clause"
] | null | null | null | utils/parse_arguments.py | xujinzh/CSK | 136002da069d300fc2da9ae8530861818087ff40 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Jinzhong Xu
# @Contact : jinzhongxu@csu.ac.cn
# @Time : 10/12/2020 3:56 PM
# @File : parse_arguments.py
# @Software: PyCharm
import os
from optparse import OptionParser
| 32.25 | 88 | 0.587425 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Jinzhong Xu
# @Contact : jinzhongxu@csu.ac.cn
# @Time : 10/12/2020 3:56 PM
# @File : parse_arguments.py
# @Software: PyCharm
import os
from optparse import OptionParser
def parse_arguments():
    """Parse command-line options for the tracker.

    Requires -i/--input (an existing path); -s/--show defaults to "yes".
    Exits via parser.error() when the input option is missing or the path
    does not exist.

    Returns:
        The optparse options object with `video_path` and `show_result`.
    """
    opt_parser = OptionParser()
    opt_parser.description = (
        "This program will track objects "
        "on videos in the MILTrack paper format. "
        "See http://goo.gl/pSTo9r"
    )
    opt_parser.add_option(
        "-i", "--input", dest="video_path",
        metavar="PATH", type="string", default=None,
        help="path to a folder o a MILTrack video",
    )
    opt_parser.add_option(
        "-s", "--show", dest="show_result",
        type="string", default="yes",
        help="show tracking result or not",
    )
    options, _ = opt_parser.parse_args()
    # -i is mandatory and must point at an existing file/folder.
    if not options.video_path:
        opt_parser.error("'input' option is required to run this program")
    if not os.path.exists(options.video_path):
        opt_parser.error("Could not find the input file %s" % options.video_path)
    return options
| 889 | 0 | 25 |
fda6cb12c17fb53e321e374fd86da7176480a1f1 | 1,005 | py | Python | Python OOP/OOP/Static and Class Methods/Lab/Task03.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | 1 | 2022-03-16T10:23:04.000Z | 2022-03-16T10:23:04.000Z | Python OOP/OOP/Static and Class Methods/Lab/Task03.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | Python OOP/OOP/Static and Class Methods/Lab/Task03.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | import math
first_num = Integer(10)
print(first_num.value)
second_num = Integer.from_roman("IV")
print(second_num.value)
print(Integer.from_float("2.6"))
print(Integer.from_string(2.6))
| 25.125 | 72 | 0.579104 | import math
class Integer:
    """Wrapper around an int with alternate constructors.

    `from_float`, `from_roman` and `from_string` build instances from other
    representations; `from_float`/`from_string` return an error-message string
    (not an instance) when given the wrong type.
    """

    # Roman symbol values used by from_roman.
    _ROMAN = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}

    def __init__(self, value: int):
        self.value = value

    @classmethod
    def from_float(cls, float_value):
        """Build from a float, flooring it; otherwise return an error message."""
        if type(float_value) != float:
            return "value is not a float"
        return cls(math.floor(float_value))

    @classmethod
    def from_roman(cls, value):
        """Build from a Roman-numeral string (handles subtractive notation)."""
        total = 0
        previous = 0
        # Scan right-to-left: subtract a symbol when a strictly larger one
        # follows it (e.g. the I in IV), otherwise add it.
        for symbol in reversed(value):
            current = cls._ROMAN[symbol]
            if current < previous:
                total -= current
            else:
                total += current
            previous = current
        return cls(total)

    @classmethod
    def from_string(cls, value):
        """Build from a decimal string; otherwise return an error message."""
        if type(value) == str:
            return cls(int(value))
        return "wrong type"
first_num = Integer(10)
print(first_num.value)
second_num = Integer.from_roman("IV")
print(second_num.value)
print(Integer.from_float("2.6"))
print(Integer.from_string(2.6))
| 640 | 151 | 23 |
0bb36db59fbc86162f8a7314f21f36a272c8c5ba | 2,067 | py | Python | InvenTree/InvenTree/test_api.py | linucks/InvenTree | fa789036e0ae7d56ced3c9e1f2d2ff596983a365 | [
"MIT"
] | null | null | null | InvenTree/InvenTree/test_api.py | linucks/InvenTree | fa789036e0ae7d56ced3c9e1f2d2ff596983a365 | [
"MIT"
] | 8 | 2020-06-06T01:14:46.000Z | 2022-03-12T00:14:35.000Z | InvenTree/InvenTree/test_api.py | linucks/InvenTree | fa789036e0ae7d56ced3c9e1f2d2ff596983a365 | [
"MIT"
] | null | null | null | """ Low level tests for the InvenTree API """
from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from django.contrib.auth import get_user_model
class APITests(APITestCase):
""" Tests for the InvenTree API """
fixtures = [
'location',
'stock',
'part',
'category',
]
username = 'test_user'
password = 'test_pass'
def test_get_token_fail(self):
""" Ensure that an invalid user cannot get a token """
token_url = reverse('api-token')
response = self.client.post(token_url, format='json', data={'username': 'bad', 'password': 'also_bad'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse('token' in response.data)
def test_get_token_pass(self):
""" Ensure that a valid user can request an API token """
token_url = reverse('api-token')
# POST to retreive a token
response = self.client.post(token_url, format='json', data={'username': self.username, 'password': self.password})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue('token' in response.data)
self.assertTrue('pk' in response.data)
self.assertTrue(len(response.data['token']) > 0)
# Now, use the token to access other data
token = response.data['token']
part_url = reverse('api-part-list')
# Try to access without a token
response = self.client.get(part_url, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
# Now, with the token
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
response = self.client.get(part_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
| 30.397059 | 122 | 0.652637 | """ Low level tests for the InvenTree API """
from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from django.contrib.auth import get_user_model
class APITests(APITestCase):
    """Low-level authentication tests for the InvenTree REST API."""

    # Fixture data loaded before each test.
    fixtures = [
        'location',
        'stock',
        'part',
        'category',
    ]

    # Credentials of the test account created in setUp.
    username = 'test_user'
    password = 'test_pass'

    def setUp(self):
        # Create a user account, but deliberately do not log in -
        # token retrieval is what is under test.
        User = get_user_model()
        User.objects.create_user(self.username, 'user@email.com', self.password)

    def test_get_token_fail(self):
        """Invalid credentials must not yield an API token."""
        url = reverse('api-token')
        resp = self.client.post(url, format='json', data={'username': 'bad', 'password': 'also_bad'})
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse('token' in resp.data)

    def test_get_token_pass(self):
        """Valid credentials yield a token which grants API access."""
        url = reverse('api-token')

        # Request a token with the valid test account.
        resp = self.client.post(url, format='json', data={'username': self.username, 'password': self.password})
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertTrue('token' in resp.data)
        self.assertTrue('pk' in resp.data)
        self.assertTrue(len(resp.data['token']) > 0)

        api_token = resp.data['token']
        parts_url = reverse('api-part-list')

        # Without the token the endpoint must reject the request.
        resp = self.client.get(parts_url, format='json')
        self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)

        # The same request authenticated with the token must succeed.
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + api_token)
        resp = self.client.get(parts_url, format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
| 154 | 0 | 27 |
d88bfd8d597623db85bd482852228fa9b0e0d6c4 | 580 | py | Python | test/test_formatter.py | ta08/girepo | 73ce6dd108428590a69fbe19fe1ad8b337d4462f | [
"Apache-2.0"
] | null | null | null | test/test_formatter.py | ta08/girepo | 73ce6dd108428590a69fbe19fe1ad8b337d4462f | [
"Apache-2.0"
] | 27 | 2019-09-05T15:36:10.000Z | 2021-07-13T20:23:14.000Z | test/test_formatter.py | ta08/girepo | 73ce6dd108428590a69fbe19fe1ad8b337d4462f | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase
from girepo.formatter import beautify_text
| 29 | 57 | 0.55 | from unittest import TestCase
from girepo.formatter import beautify_text
class TestFormatter(TestCase):
def test_beautify_text(self):
test_data = [
("Hello World", "|", "Hello World"),
("Hello | World", "|", "Hello \| World"),
("Hello $ World", "$", "Hello \$ World"),
("Hello | World", "$", "Hello | World")
]
for target, separator, expected in test_data:
with self.subTest():
actual = beautify_text(target, separator)
self.assertEqual(expected, actual)
| 446 | 9 | 50 |
8fbe9592818a42e980cd5c36fa7ec5bfffa7c074 | 146 | py | Python | ex014.py | ChrysWillians/exercicios-python3 | 354c8684cbc9de0c734e6a40e76e2f613845de96 | [
"MIT"
] | 1 | 2021-09-26T01:08:36.000Z | 2021-09-26T01:08:36.000Z | ex014.py | ChrysWillians/exercicios-python3 | 354c8684cbc9de0c734e6a40e76e2f613845de96 | [
"MIT"
] | null | null | null | ex014.py | ChrysWillians/exercicios-python3 | 354c8684cbc9de0c734e6a40e76e2f613845de96 | [
"MIT"
def celsius_to_fahrenheit(celsius):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    return ((9 * celsius) / 5) + 32


if __name__ == '__main__':
    # Prompt for a Celsius temperature and print its Fahrenheit equivalent.
    c = float(input('Digite a temperatura para ser convertida: '))
    f = celsius_to_fahrenheit(c)
    print('A temperatura de {}ºC, é igual a {}ºF!'.format(c, f))
| 29.2 | 63 | 0.616438 | c = float(input('Digite a temperatura para ser convertida: '))
f = ((9*c)/5)+32
print('A temperatura de {}ºC, é igual a {}ºF!'.format(c, f))
| 0 | 0 | 0 |
9f25517180c05a14e99b59867617e829be8fc282 | 19,378 | py | Python | capsnet-arch/train.py | eaaskt/nlu | 77382be572ce59f15d8ea9c5cd653615c39891d1 | [
"MIT"
] | 3 | 2019-03-11T09:15:36.000Z | 2020-04-06T15:06:33.000Z | capsnet-arch/train.py | eaaskt/nlu | 77382be572ce59f15d8ea9c5cd653615c39891d1 | [
"MIT"
] | 24 | 2020-03-31T11:22:54.000Z | 2022-03-12T00:23:49.000Z | capsnet-arch/train.py | eaaskt/nlu | 77382be572ce59f15d8ea9c5cd653615c39891d1 | [
"MIT"
] | 5 | 2020-03-29T10:04:31.000Z | 2020-05-28T06:50:18.000Z | import math
import os
from random import *
import data_loader
import model_s2i
import util
import flags
import errno
import json
import numpy as np
import tensorflow as tf
from seqeval.metrics import accuracy_score
from seqeval.metrics import f1_score
from seqeval.metrics import precision_score
from seqeval.metrics import recall_score
from sklearn.metrics import accuracy_score as scikit_accuracy
from sklearn.metrics import f1_score as scikit_f1
from sklearn.model_selection import StratifiedKFold
a = Random()
a.seed(1)
def dump_flags(FLAGS):
    """ Dumps the TF app flags in a JSON file. Filename will be determined based on the model name.

    Args:
        FLAGS: App flags
    """
    # NOTE(review): iterates the global tf.flags.FLAGS rather than the FLAGS
    # argument; both normally refer to the same singleton — confirm.
    flags_dict = dict()
    for k, v in tf.flags.FLAGS.__flags.items():
        flags_dict[k] = v.value
    filename = FLAGS.scenario_num + '.json'
    filename = os.path.join(FLAGS.hyperparams_dir, filename)
    # exist_ok=True avoids the check-then-create race the previous
    # errno-based guard worked around.
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(flags_dict, f, indent=4)
def evaluate_validation(capsnet, val_data, FLAGS, sess, epoch, fold, log=False, calculate_learning_curves=False):
    """ Evaluates the model on the validation set

    Runs the validation set through the model in mini-batches (keep_prob=1.0,
    i.e. no dropout), collects argmax predictions for intents and slots, and
    prints accuracy/F1 for both tasks.

    Args:
        capsnet: CapsNet model
        val_data: validation data dict
        FLAGS: TensorFlow flags
        sess: TensorFlow session in which the training was run
        epoch: current epoch of training
        fold: current fold of K-fold cross-validation
        log: if True, write per-batch loss summaries to TensorBoard
        calculate_learning_curves: if True, log the last batch's loss
            summary once per fold (for learning-curve plots)
    Returns:
        f_score: intent detection F1 score
        scores['f1']: slot filling F1 score
    """
    x_te = val_data['x_val']
    sentences_length_te = val_data['sentences_len_val']
    y_intents_te = val_data['y_intents_val']
    y_slots_te = val_data['y_slots_val']
    one_hot_intents = val_data['one_hot_intents_val']
    one_hot_slots = val_data['one_hot_slots_val']
    slots_dict = val_data['slots_dict']
    intents_dict = val_data['intents_dict']
    # Define TensorBoard writer (the learning-curve writer, if requested,
    # overrides the per-fold one).
    if log:
        writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/' + FLAGS.scenario_num + '-validation-' + str(fold), sess.graph)
    if calculate_learning_curves:
        writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/' + FLAGS.scenario_num + '-validation-lc', sess.graph)
    total_intent_pred = []
    total_slots_pred = []
    num_samples = len(x_te)
    batch_size = FLAGS.batch_size
    test_batch = int(math.ceil(num_samples / float(batch_size)))
    loss_val = 1
    for i in range(test_batch):
        # NOTE(review): begin/end indices use the possibly-shrunk batch_size
        # from the previous iteration; harmless because only the final batch
        # can be partial, but confirm if batching logic is changed.
        begin_index = i * batch_size
        end_index = min((i + 1) * batch_size, num_samples)
        batch_te = x_te[begin_index: end_index]
        batch_sentences_len = sentences_length_te[begin_index: end_index]
        batch_intents_one_hot = one_hot_intents[begin_index: end_index]
        batch_slots_one_hot = one_hot_slots[begin_index: end_index]
        batch_size = end_index - begin_index
        # Get predictions for current validation batch
        feed_dict = {capsnet.input_x: batch_te, capsnet.sentences_length: batch_sentences_len,
                     capsnet.encoded_intents: batch_intents_one_hot, capsnet.encoded_slots: batch_slots_one_hot,
                     capsnet.keep_prob: 1.0}
        if FLAGS.use_attention:
            mask = util.calculate_mask(batch_sentences_len, FLAGS.max_sentence_length, batch_size, FLAGS.r)
            feed_dict[capsnet.attention_mask] = mask
        [intent_outputs, slots_outputs, slot_weights_c, cross_entropy_summary,
         margin_loss_summary, loss_summary] = sess.run([
            capsnet.intent_output_vectors, capsnet.slot_output_vectors, capsnet.slot_weights_c,
            capsnet.cross_entropy_val_summary,
            capsnet.margin_loss_val_summary, capsnet.loss_tr_summary],
            feed_dict=feed_dict)
        # Keep only the last batch's loss summary for the learning-curve log.
        loss_val = loss_summary
        # Add TensorBoard summaries to FileWriter
        if log:
            # writer.add_summary(cross_entropy_summary, epoch * test_batch + i)
            # writer.add_summary(margin_loss_summary, epoch * test_batch + i)
            # writer.add_summary(loss_summary, epoch * test_batch + i)
            writer.add_summary(cross_entropy_summary, epoch + i)
            writer.add_summary(margin_loss_summary, epoch + i)
            writer.add_summary(loss_summary, epoch + i)
        # Modify prediction vectors dimensions to prepare for argmax
        intent_outputs_reduced_dim = tf.squeeze(intent_outputs, axis=[1, 4])
        intent_outputs_norm = util.safe_norm(intent_outputs_reduced_dim)
        slot_weights_c_reduced_dim = tf.squeeze(slot_weights_c, axis=[3, 4])
        [intent_predictions, slot_predictions] = sess.run([intent_outputs_norm, slot_weights_c_reduced_dim])
        # Obtain intent prediction
        te_batch_intent_pred = np.argmax(intent_predictions, axis=1)
        total_intent_pred += np.ndarray.tolist(te_batch_intent_pred)
        # Obtain slots prediction
        te_batch_slots_pred = np.argmax(slot_predictions, axis=2)
        total_slots_pred += (np.ndarray.tolist(te_batch_slots_pred))
    if calculate_learning_curves:
        writer.add_summary(loss_val, fold)
    print(' VALIDATION SET PERFORMANCE ')
    print('Intent detection')
    intents_acc = scikit_accuracy(y_intents_te, total_intent_pred)
    y_intents_true = np.ndarray.tolist(y_intents_te)
    # Map numeric ids back to label strings before scoring.
    y_intent_labels_true = [intents_dict[i] for i in y_intents_true]
    y_intent_labels_pred = [intents_dict[i] for i in total_intent_pred]
    intents = sorted(list(set(y_intent_labels_true)))
    f_score = scikit_f1(y_intent_labels_true, y_intent_labels_pred, average='micro', labels=intents)
    # print(classification_report(y_intent_labels_true, y_intent_labels_pred, digits=4))
    print('Intent accuracy %lf' % intents_acc)
    print('F score %lf' % f_score)
    y_slots_te_true = np.ndarray.tolist(y_slots_te)
    y_slot_labels_true = [[slots_dict[slot_idx] for slot_idx in ex] for ex in y_slots_te_true]
    y_slot_labels_pred = [[slots_dict[slot_idx] for slot_idx in ex] for ex in total_slots_pred]
    scores = eval_seq_scores(y_slot_labels_true, y_slot_labels_pred)
    print('Slot filling')
    print('F1 score: %lf' % scores['f1'])
    print('Accuracy: %lf' % scores['accuracy'])
    # print('Precision: %lf' % scores['precision'])
    # print('Recall: %lf' % scores['recall'])
    return f_score, scores['f1']
def eval_seq_scores(y_true, y_pred):
    """ Performs sequence evaluation on slot labels

    Args:
        y_true: true slot labels
        y_pred: predicted slot labels
    Returns:
        scores: dict containing the evaluation scores: f1, accuracy, precision, recall
    """
    # All four metrics come from seqeval, which scores at the entity
    # (chunk) level rather than per token.
    return {
        'f1': f1_score(y_true, y_pred),
        'accuracy': accuracy_score(y_true, y_pred),
        'precision': precision_score(y_true, y_pred),
        'recall': recall_score(y_true, y_pred),
    }
def generate_batch(n, batch_size):
    """ Generates a set of batch indices

    Args:
        n: total number of samples in set
        batch_size: size of batch
    Returns:
        batch_index: a list of length batch_size containing randomly sampled indices
    """
    # Sample without replacement from the module-level seeded Random
    # instance so runs are reproducible.
    return a.sample(range(n), batch_size)
def assign_pretrained_word_embedding(sess, embedding, capsnet):
    """ Assigns word embeddings to the CapsNet model

    Args:
        sess: TensorFlow session
        embedding: array containing the word embeddings
        capsnet: CapsNet model
    """
    print('using pre-trained word emebedding.begin...')
    # Feed the matrix through a placeholder so the (possibly large) array is
    # not baked into the graph as a constant.
    embedding_ph = tf.placeholder(dtype=tf.float32, shape=embedding.shape)
    assign_op = capsnet.Embedding.assign(embedding_ph)
    sess.run(assign_op, {embedding_ph: embedding})
    print('using pre-trained word emebedding.ended...')
def train_cross_validation(model, train_data, val_data, embedding, FLAGS, fold, best_f_score, batches_rand=False, log=False,
                           calculate_learning_curves=False):
    """ Trains the model for one cross-validation fold

    Builds a fresh graph/session, trains for FLAGS.num_epochs, evaluates on
    the validation split after every epoch, and checkpoints the model
    whenever the mean of intent/slot F1 beats the best score seen so far
    across all folds.

    Args:
        model: model class/callable; invoked as model(FLAGS) to build the net
        train_data: training data dictionary
        val_data: validation data dictionary
        embedding: array containing pre-trained word embeddings
        FLAGS: TensorFlow application flags
        fold: current fold index
        best_f_score: best overall F1 score (across all folds so far)
        batches_rand: whether to random sample mini batches or not (shuffle + seq)
        log: toggle TensorBoard visualization on/off
        calculate_learning_curves: if True, additionally log one train/val
            loss point per fold for learning-curve plots
    Returns:
        best_f_score: best overall F1 score (across all folds so far, including after this one)
        best_f_score_mean_fold: best overall F1 score for this fold
        best_f_score_intent_fold: best intent F1 score for this fold
        best_f_score_slot_fold: best slot F1 score for this fold
    """
    # start
    x_train = train_data['x_tr']
    sentences_length_train = train_data['sentences_len_tr']
    one_hot_intents_train = train_data['one_hot_intents_tr']
    one_hot_slots_train = train_data['one_hot_slots_tr']
    best_f_score_mean_fold = 0.0
    best_f_score_intent_fold = 0.0
    best_f_score_slot_fold = 0.0
    # We must reset the graph to start a brand new training of the model
    tf.reset_default_graph()
    config = tf.ConfigProto()
    with tf.Session(config=config) as sess:
        # Instantiate Model
        capsnet = model(FLAGS)
        print('Initializing Variables')
        sess.run(tf.global_variables_initializer())
        if FLAGS.use_embedding:
            # load pre-trained word embedding
            assign_pretrained_word_embedding(sess, embedding, capsnet)
        # Initial evaluation on validation set (epoch 0, before training)
        intent_f_score, slot_f_score = evaluate_validation(capsnet, val_data, FLAGS, sess, epoch=0, fold=fold)
        f_score_mean = (intent_f_score + slot_f_score) / 2
        if f_score_mean > best_f_score:
            best_f_score = f_score_mean
        var_saver = tf.train.Saver()
        if f_score_mean > best_f_score_mean_fold:
            # best mean in this fold, save scores
            best_f_score_mean_fold = f_score_mean
            best_f_score_intent_fold = intent_f_score
            best_f_score_slot_fold = slot_f_score
        if log:
            train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/' + FLAGS.scenario_num + '-train-fold' + str(fold), sess.graph)
        # Training cycle
        train_sample_num = x_train.shape[0]
        batch_num = int(math.ceil(train_sample_num / FLAGS.batch_size))
        loss_train = 1
        for epoch in range(FLAGS.num_epochs):
            for batch in range(batch_num):
                if batches_rand:
                    # Random sampling without replacement for each mini-batch.
                    batch_index = generate_batch(train_sample_num, FLAGS.batch_size)
                    batch_x = x_train[batch_index]
                    batch_sentences_len = sentences_length_train[batch_index]
                    batch_intents_one_hot = one_hot_intents_train[batch_index]
                    batch_slots_one_hot = one_hot_slots_train[batch_index]
                    batch_size = FLAGS.batch_size
                else:
                    # Training samples are already shuffled in the file
                    begin_index = batch * FLAGS.batch_size
                    end_index = min((batch + 1) * FLAGS.batch_size, train_sample_num)
                    batch_x = x_train[begin_index: end_index]
                    batch_sentences_len = sentences_length_train[begin_index: end_index]
                    batch_intents_one_hot = one_hot_intents_train[begin_index: end_index]
                    batch_slots_one_hot = one_hot_slots_train[begin_index: end_index]
                    batch_size = end_index - begin_index
                feed_dict = {capsnet.input_x: batch_x,
                             capsnet.encoded_intents: batch_intents_one_hot,
                             capsnet.encoded_slots: batch_slots_one_hot,
                             capsnet.sentences_length: batch_sentences_len,
                             capsnet.keep_prob: FLAGS.keep_prob}
                if FLAGS.use_attention:
                    mask = util.calculate_mask(batch_sentences_len, FLAGS.max_sentence_length, batch_size, FLAGS.r)
                    feed_dict[capsnet.attention_mask] = mask
                [_, loss, _, _,
                 cross_entropy_summary, margin_loss_summary,
                 loss_summary] = sess.run([capsnet.train_op, capsnet.loss_val,
                                           capsnet.intent_output_vectors,
                                           capsnet.slot_output_vectors, capsnet.cross_entropy_tr_summary,
                                           capsnet.margin_loss_tr_summary, capsnet.loss_tr_summary],
                                          feed_dict=feed_dict)
                # Keep the last batch's loss summary for learning-curve logging.
                loss_train = loss_summary
                if log:
                    train_writer.add_summary(cross_entropy_summary, batch_num * epoch + batch)
                    train_writer.add_summary(margin_loss_summary, batch_num * epoch + batch)
                    train_writer.add_summary(loss_summary, batch_num * epoch + batch)
            print('------------------epoch : ', epoch, ' Loss: ', loss, '----------------------')
            # TODO: figure out a more permanent fix for correct epoch numbering (so that validation and training are
            # not shifted, and it still works for various train/validation splits
            intent_f_score, slot_f_score = evaluate_validation(capsnet, val_data, FLAGS,
                                                               # sess, epoch=epoch + 1, fold=fold, log=log)
                                                               sess, epoch=batch_num * epoch, fold=fold, log=log)
            f_score_mean = (intent_f_score + slot_f_score) / 2
            if f_score_mean > best_f_score:
                # best score overall -> save model
                best_f_score = f_score_mean
                if FLAGS.scenario_num != '':
                    ckpt_dir = FLAGS.ckpt_dir + 'scenario' + FLAGS.scenario_num + '/'
                    if not os.path.exists(ckpt_dir):
                        os.makedirs(ckpt_dir)
                else:
                    ckpt_dir = FLAGS.ckpt_dir
                var_saver.save(sess, os.path.join(ckpt_dir, 'model.ckpt'), 1)
            print('Current F score mean', f_score_mean)
            print('Best F score mean', best_f_score)
            if f_score_mean > best_f_score_mean_fold:
                # best mean in this fold, save scores
                best_f_score_mean_fold = f_score_mean
                best_f_score_intent_fold = intent_f_score
                best_f_score_slot_fold = slot_f_score
        if calculate_learning_curves:
            # Log one (train, validation) loss point for this fold.
            train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/' + FLAGS.scenario_num + '-train-lc', sess.graph)
            train_writer.add_summary(loss_train, fold)
            intent_f_score, slot_f_score = evaluate_validation(capsnet, val_data, FLAGS,
                                                               sess, epoch=epoch + 1, fold=fold, log=log,
                                                               calculate_learning_curves=True)
    return best_f_score, best_f_score_mean_fold, best_f_score_intent_fold, best_f_score_slot_fold
if __name__ == '__main__':
main()
| 45.275701 | 136 | 0.655537 | import math
import os
from random import *
import data_loader
import model_s2i
import util
import flags
import errno
import json
import numpy as np
import tensorflow as tf
from seqeval.metrics import accuracy_score
from seqeval.metrics import f1_score
from seqeval.metrics import precision_score
from seqeval.metrics import recall_score
from sklearn.metrics import accuracy_score as scikit_accuracy
from sklearn.metrics import f1_score as scikit_f1
from sklearn.model_selection import StratifiedKFold
a = Random()
a.seed(1)
def dump_flags(FLAGS):
""" Dumps the TF app flags in a JSON file. Filename will be determined based on the model name.
Args:
FLAGS: App flags
"""
flags_dict = dict()
for k, v in tf.flags.FLAGS.__flags.items():
flags_dict[k] = v.value
filename = FLAGS.scenario_num + '.json'
filename = os.path.join(FLAGS.hyperparams_dir, filename)
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(filename, 'w', encoding='utf-8') as f:
json.dump(flags_dict, f, indent=4)
def evaluate_validation(capsnet, val_data, FLAGS, sess, epoch, fold, log=False, calculate_learning_curves=False):
""" Evaluates the model on the validation set
Args:
capsnet: CapsNet model
val_data: validation data dict
FLAGS: TensorFlow flags
sess: TensorFlow session in which the training was run
epoch: current epoch of training
fold: current fold of K-fold cross-validation
Returns:
f_score: intent detection F1 score
scores['f1']: slot filling F1 score
"""
x_te = val_data['x_val']
sentences_length_te = val_data['sentences_len_val']
y_intents_te = val_data['y_intents_val']
y_slots_te = val_data['y_slots_val']
one_hot_intents = val_data['one_hot_intents_val']
one_hot_slots = val_data['one_hot_slots_val']
slots_dict = val_data['slots_dict']
intents_dict = val_data['intents_dict']
# Define TensorBoard writer
if log:
writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/' + FLAGS.scenario_num + '-validation-' + str(fold), sess.graph)
if calculate_learning_curves:
writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/' + FLAGS.scenario_num + '-validation-lc', sess.graph)
total_intent_pred = []
total_slots_pred = []
num_samples = len(x_te)
batch_size = FLAGS.batch_size
test_batch = int(math.ceil(num_samples / float(batch_size)))
loss_val = 1
for i in range(test_batch):
begin_index = i * batch_size
end_index = min((i + 1) * batch_size, num_samples)
batch_te = x_te[begin_index: end_index]
batch_sentences_len = sentences_length_te[begin_index: end_index]
batch_intents_one_hot = one_hot_intents[begin_index: end_index]
batch_slots_one_hot = one_hot_slots[begin_index: end_index]
batch_size = end_index - begin_index
# Get predictions for current validation batch
feed_dict = {capsnet.input_x: batch_te, capsnet.sentences_length: batch_sentences_len,
capsnet.encoded_intents: batch_intents_one_hot, capsnet.encoded_slots: batch_slots_one_hot,
capsnet.keep_prob: 1.0}
if FLAGS.use_attention:
mask = util.calculate_mask(batch_sentences_len, FLAGS.max_sentence_length, batch_size, FLAGS.r)
feed_dict[capsnet.attention_mask] = mask
[intent_outputs, slots_outputs, slot_weights_c, cross_entropy_summary,
margin_loss_summary, loss_summary] = sess.run([
capsnet.intent_output_vectors, capsnet.slot_output_vectors, capsnet.slot_weights_c,
capsnet.cross_entropy_val_summary,
capsnet.margin_loss_val_summary, capsnet.loss_tr_summary],
feed_dict=feed_dict)
loss_val = loss_summary
# Add TensorBoard summaries to FileWriter
if log:
# writer.add_summary(cross_entropy_summary, epoch * test_batch + i)
# writer.add_summary(margin_loss_summary, epoch * test_batch + i)
# writer.add_summary(loss_summary, epoch * test_batch + i)
writer.add_summary(cross_entropy_summary, epoch + i)
writer.add_summary(margin_loss_summary, epoch + i)
writer.add_summary(loss_summary, epoch + i)
# Modify prediction vectors dimensions to prepare for argmax
intent_outputs_reduced_dim = tf.squeeze(intent_outputs, axis=[1, 4])
intent_outputs_norm = util.safe_norm(intent_outputs_reduced_dim)
slot_weights_c_reduced_dim = tf.squeeze(slot_weights_c, axis=[3, 4])
[intent_predictions, slot_predictions] = sess.run([intent_outputs_norm, slot_weights_c_reduced_dim])
# Obtain intent prediction
te_batch_intent_pred = np.argmax(intent_predictions, axis=1)
total_intent_pred += np.ndarray.tolist(te_batch_intent_pred)
# Obtain slots prediction
te_batch_slots_pred = np.argmax(slot_predictions, axis=2)
total_slots_pred += (np.ndarray.tolist(te_batch_slots_pred))
if calculate_learning_curves:
writer.add_summary(loss_val, fold)
print(' VALIDATION SET PERFORMANCE ')
print('Intent detection')
intents_acc = scikit_accuracy(y_intents_te, total_intent_pred)
y_intents_true = np.ndarray.tolist(y_intents_te)
y_intent_labels_true = [intents_dict[i] for i in y_intents_true]
y_intent_labels_pred = [intents_dict[i] for i in total_intent_pred]
intents = sorted(list(set(y_intent_labels_true)))
f_score = scikit_f1(y_intent_labels_true, y_intent_labels_pred, average='micro', labels=intents)
# print(classification_report(y_intent_labels_true, y_intent_labels_pred, digits=4))
print('Intent accuracy %lf' % intents_acc)
print('F score %lf' % f_score)
y_slots_te_true = np.ndarray.tolist(y_slots_te)
y_slot_labels_true = [[slots_dict[slot_idx] for slot_idx in ex] for ex in y_slots_te_true]
y_slot_labels_pred = [[slots_dict[slot_idx] for slot_idx in ex] for ex in total_slots_pred]
scores = eval_seq_scores(y_slot_labels_true, y_slot_labels_pred)
print('Slot filling')
print('F1 score: %lf' % scores['f1'])
print('Accuracy: %lf' % scores['accuracy'])
# print('Precision: %lf' % scores['precision'])
# print('Recall: %lf' % scores['recall'])
return f_score, scores['f1']
def eval_seq_scores(y_true, y_pred):
""" Performs sequence evaluation on slot labels
Args:
y_true: true slot labels
y_pred: predicted slot labels
Returns:
scores: dict containing the evaluation scores: f1, accuracy, precision, recall
"""
scores = dict()
scores['f1'] = f1_score(y_true, y_pred)
scores['accuracy'] = accuracy_score(y_true, y_pred)
scores['precision'] = precision_score(y_true, y_pred)
scores['recall'] = recall_score(y_true, y_pred)
return scores
def generate_batch(n, batch_size):
""" Generates a set of batch indices
Args:
n: total number of samples in set
batch_size: size of batch
Returns:
batch_index: a list of length batch_size containing randomly sampled indices
"""
batch_index = a.sample(range(n), batch_size)
return batch_index
def assign_pretrained_word_embedding(sess, embedding, capsnet):
""" Assigns word embeddings to the CapsNet model
Args:
sess: TensorFlow session
embedding: array containing the word embeddings
capsnet: CapsNet model
"""
print('using pre-trained word emebedding.begin...')
word_embedding_placeholder = tf.placeholder(dtype=tf.float32, shape=embedding.shape)
sess.run(capsnet.Embedding.assign(word_embedding_placeholder), {word_embedding_placeholder: embedding})
print('using pre-trained word emebedding.ended...')
def train_cross_validation(model, train_data, val_data, embedding, FLAGS, fold, best_f_score, batches_rand=False, log=False,
calculate_learning_curves=False):
""" Trains the model for one cross-validation fold
Args:
train_data: training data dictionary
val_data: validation data dictionary
embedding: array containing pre-trained word embeddings
FLAGS: TensorFlow application flags
fold: current fold index
best_f_score: best overall F1 score (across all folds so far)
batches_rand: whether to random sample mini batches or not (shuffle + seq)
log: toggle TensorBoard visualization on/off
Returns:
best_f_score: best overall F1 score (across all folds so far, including after this one)
best_f_score_mean_fold: best overall F1 score for this fold
best_f_score_intent_fold: best intent F1 score for this fold
best_f_score_slot_fold: best slot F1 score for this fold
"""
# start
x_train = train_data['x_tr']
sentences_length_train = train_data['sentences_len_tr']
one_hot_intents_train = train_data['one_hot_intents_tr']
one_hot_slots_train = train_data['one_hot_slots_tr']
best_f_score_mean_fold = 0.0
best_f_score_intent_fold = 0.0
best_f_score_slot_fold = 0.0
# We must reset the graph to start a brand new training of the model
tf.reset_default_graph()
config = tf.ConfigProto()
with tf.Session(config=config) as sess:
# Instantiate Model
capsnet = model(FLAGS)
print('Initializing Variables')
sess.run(tf.global_variables_initializer())
if FLAGS.use_embedding:
# load pre-trained word embedding
assign_pretrained_word_embedding(sess, embedding, capsnet)
# Initial evaluation on validation set
intent_f_score, slot_f_score = evaluate_validation(capsnet, val_data, FLAGS, sess, epoch=0, fold=fold)
f_score_mean = (intent_f_score + slot_f_score) / 2
if f_score_mean > best_f_score:
best_f_score = f_score_mean
var_saver = tf.train.Saver()
if f_score_mean > best_f_score_mean_fold:
# best mean in this fold, save scores
best_f_score_mean_fold = f_score_mean
best_f_score_intent_fold = intent_f_score
best_f_score_slot_fold = slot_f_score
if log:
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/' + FLAGS.scenario_num + '-train-fold' + str(fold), sess.graph)
# Training cycle
train_sample_num = x_train.shape[0]
batch_num = int(math.ceil(train_sample_num / FLAGS.batch_size))
loss_train = 1
for epoch in range(FLAGS.num_epochs):
for batch in range(batch_num):
if batches_rand:
batch_index = generate_batch(train_sample_num, FLAGS.batch_size)
batch_x = x_train[batch_index]
batch_sentences_len = sentences_length_train[batch_index]
batch_intents_one_hot = one_hot_intents_train[batch_index]
batch_slots_one_hot = one_hot_slots_train[batch_index]
batch_size = FLAGS.batch_size
else:
# Training samples are already shuffled in the file
begin_index = batch * FLAGS.batch_size
end_index = min((batch + 1) * FLAGS.batch_size, train_sample_num)
batch_x = x_train[begin_index: end_index]
batch_sentences_len = sentences_length_train[begin_index: end_index]
batch_intents_one_hot = one_hot_intents_train[begin_index: end_index]
batch_slots_one_hot = one_hot_slots_train[begin_index: end_index]
batch_size = end_index - begin_index
feed_dict = {capsnet.input_x: batch_x,
capsnet.encoded_intents: batch_intents_one_hot,
capsnet.encoded_slots: batch_slots_one_hot,
capsnet.sentences_length: batch_sentences_len,
capsnet.keep_prob: FLAGS.keep_prob}
if FLAGS.use_attention:
mask = util.calculate_mask(batch_sentences_len, FLAGS.max_sentence_length, batch_size, FLAGS.r)
feed_dict[capsnet.attention_mask] = mask
[_, loss, _, _,
cross_entropy_summary, margin_loss_summary,
loss_summary] = sess.run([capsnet.train_op, capsnet.loss_val,
capsnet.intent_output_vectors,
capsnet.slot_output_vectors, capsnet.cross_entropy_tr_summary,
capsnet.margin_loss_tr_summary, capsnet.loss_tr_summary],
feed_dict=feed_dict)
loss_train = loss_summary
if log:
train_writer.add_summary(cross_entropy_summary, batch_num * epoch + batch)
train_writer.add_summary(margin_loss_summary, batch_num * epoch + batch)
train_writer.add_summary(loss_summary, batch_num * epoch + batch)
print('------------------epoch : ', epoch, ' Loss: ', loss, '----------------------')
# TODO: figure out a more permanent fix for correct epoch numbering (so that validation and training are
# not shifted, and it still works for various train/validation splits
intent_f_score, slot_f_score = evaluate_validation(capsnet, val_data, FLAGS,
# sess, epoch=epoch + 1, fold=fold, log=log)
sess, epoch=batch_num * epoch, fold=fold, log=log)
f_score_mean = (intent_f_score + slot_f_score) / 2
if f_score_mean > best_f_score:
# best score overall -> save model
best_f_score = f_score_mean
if FLAGS.scenario_num != '':
ckpt_dir = FLAGS.ckpt_dir + 'scenario' + FLAGS.scenario_num + '/'
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
else:
ckpt_dir = FLAGS.ckpt_dir
var_saver.save(sess, os.path.join(ckpt_dir, 'model.ckpt'), 1)
print('Current F score mean', f_score_mean)
print('Best F score mean', best_f_score)
if f_score_mean > best_f_score_mean_fold:
# best mean in this fold, save scores
best_f_score_mean_fold = f_score_mean
best_f_score_intent_fold = intent_f_score
best_f_score_slot_fold = slot_f_score
if calculate_learning_curves:
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/' + FLAGS.scenario_num + '-train-lc', sess.graph)
train_writer.add_summary(loss_train, fold)
intent_f_score, slot_f_score = evaluate_validation(capsnet, val_data, FLAGS,
sess, epoch=epoch + 1, fold=fold, log=log,
calculate_learning_curves=True)
return best_f_score, best_f_score_mean_fold, best_f_score_intent_fold, best_f_score_slot_fold
def train(model, data, FLAGS, batches_rand=False, log=False):
    """Run stratified k-fold cross-validation training.

    :param model: capsule network class handed to train_cross_validation
    :param data: dict of training arrays, label dictionaries and the embedding matrix
    :param FLAGS: app flags (uses FLAGS.n_splits here)
    :param batches_rand: forwarded to train_cross_validation
    :param log: forwarded to train_cross_validation (enables summary logging)
    """
    # Record the run configuration in the log file
    dump_flags(FLAGS)
    x_tr = data['x_tr']
    y_intents_tr = data['y_intents_tr']
    y_slots_tr = data['y_slots_tr']
    sentences_length_tr = data['sentences_len_tr']
    one_hot_intents_tr = data['encoded_intents_tr']
    one_hot_slots_tr = data['encoded_slots_tr']
    embedding = data['embedding']
    # Best per-fold scores are accumulated here and averaged at the end
    intent_scores = 0
    slot_scores = 0
    mean_scores = 0
    best_f_score = 0.0
    print('------------------start cross-validation-------------------')
    kfold = StratifiedKFold(FLAGS.n_splits)
    for fold, (train_index, val_index) in enumerate(kfold.split(x_tr, y_intents_tr), start=1):
        print('FOLD %d' % fold)
        # Slice every array into its train / validation part for this fold
        train_data = {
            'x_tr': x_tr[train_index],
            'y_intents_tr': y_intents_tr[train_index],
            'y_slots_tr': y_slots_tr[train_index],
            'sentences_len_tr': sentences_length_tr[train_index],
            'one_hot_intents_tr': one_hot_intents_tr[train_index],
            'one_hot_slots_tr': one_hot_slots_tr[train_index],
        }
        val_data = {
            'x_val': x_tr[val_index],
            'y_intents_val': y_intents_tr[val_index],
            'y_slots_val': y_slots_tr[val_index],
            'sentences_len_val': sentences_length_tr[val_index],
            'one_hot_intents_val': one_hot_intents_tr[val_index],
            'one_hot_slots_val': one_hot_slots_tr[val_index],
            'slots_dict': data['slots_dict'],
            'intents_dict': data['intents_dict'],
        }
        # Train on this split; best_f_score carries the best score seen over all folds
        (best_f_score, best_f_score_mean_fold, best_f_score_intent_fold,
         best_f_score_slot_fold) = train_cross_validation(
            model, train_data, val_data, embedding, FLAGS, fold, best_f_score,
            batches_rand=batches_rand, log=log)
        # Accumulate each fold's best scores for the final means
        intent_scores += best_f_score_intent_fold
        slot_scores += best_f_score_slot_fold
        mean_scores += best_f_score_mean_fold
    # Means over all folds
    mean_intent_score = intent_scores / FLAGS.n_splits
    mean_slot_score = slot_scores / FLAGS.n_splits
    mean_score = mean_scores / FLAGS.n_splits
    print('Mean intent F1 score %lf' % mean_intent_score)
    print('Mean slot F1 score %lf' % mean_slot_score)
    print('Mean F1 score %lf' % mean_score)
def main():
    """Entry point: configure flags, load embeddings and datasets, start training."""
    # Input locations: word2vec embeddings and the scenario's train/test splits
    w2v_path = '../../romanian_word_vecs/cleaned-vectors-diacritice-cc-100.vec'
    train_path = '../data-capsnets/diacritics/scenario1/train.txt'
    test_path = '../data-capsnets/diacritics/scenario1/test.txt'
    FLAGS = flags.define_app_flags('1-spikes-test')
    print('------------------load word2vec begin-------------------')
    w2v = data_loader.load_w2v(w2v_path)
    print('------------------load word2vec end---------------------')
    data = data_loader.read_datasets(w2v, train_path, test_path)
    # Data-dependent flags (vocabulary sizes etc.) are derived from the loaded data
    flags.set_data_flags(data)
    train(model_s2i.SemCapsNet, data, FLAGS, log=True)


if __name__ == '__main__':
    main()
| 3,650 | 0 | 46 |
fa35b3feb2f0012a2c86cb320ec2b287c6644569 | 2,132 | py | Python | dataset.py | youngsend/behavioral-cloning | 8d4e3aeb964005c0594b8b7468e3b5352b54fb90 | [
"MIT"
] | null | null | null | dataset.py | youngsend/behavioral-cloning | 8d4e3aeb964005c0594b8b7468e3b5352b54fb90 | [
"MIT"
] | null | null | null | dataset.py | youngsend/behavioral-cloning | 8d4e3aeb964005c0594b8b7468e3b5352b54fb90 | [
"MIT"
] | null | null | null | import os
import random
import torch
import pandas as pd
import torchvision.transforms.functional as T_F
from torch.utils.data import Dataset
from PIL import Image
class BehaviorCloneDataset(Dataset):
    """
    Behavioral cloning dataset.
    I referred to https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
    """
    def __init__(self, csv_file, root_dir, transform=None):
        """
        Read the csv here and leave the reading of images to __getitem__. This is memory efficient because all images
        are not stored in the memory at once but read as required.
        :param csv_file: path to the csv file with relative image paths and corresponding control commands, velocity.
        :param root_dir: directory with all the images.
        :param transform: optional transform to be applied on a sample.
        """
        self.driving_records = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform
        # steering offset applied to the labels of the side-camera images
        self.steer_correction = 0.1
| 34.95082 | 117 | 0.640713 | import os
import random
import torch
import pandas as pd
import torchvision.transforms.functional as T_F
from torch.utils.data import Dataset
from PIL import Image
class BehaviorCloneDataset(Dataset):
    """Driving-log dataset for behavioral cloning.

    Structure follows https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
    Each CSV record contributes three samples: center, left and right camera.
    """

    def __init__(self, csv_file, root_dir, transform=None):
        """Load the driving log only; images are opened lazily in __getitem__,
        which keeps memory usage low since images are read on demand.

        :param csv_file: path to the csv file with relative image paths and corresponding control commands, velocity.
        :param root_dir: directory with all the images.
        :param transform: optional transform to be applied on a sample.
        """
        self.driving_records = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform
        # steering offset applied to the side-camera labels
        self.steer_correction = 0.1

    def __len__(self):
        # three samples (center/left/right camera) per CSV record
        return 3 * len(self.driving_records)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # map the flat index onto (record row, camera column)
        row = int(idx / 3)
        cam = idx % 3
        img_path = os.path.join(self.root_dir,
                                self.driving_records.iloc[row, cam])
        image = Image.open(img_path)
        steer = torch.tensor(self.driving_records.iloc[row, 3], dtype=torch.float32)
        # side cameras: shift the steering label toward the lane center
        if cam == 1:
            # left camera
            steer = steer + self.steer_correction
        elif cam == 2:
            # right camera
            steer = steer - self.steer_correction
        # random horizontal flip; the steering label must flip sign with the image
        if random.random() < 0.5:
            image = T_F.hflip(image)
            steer = -1.0 * steer
        if self.transform:
            image = self.transform(image)
        return image, steer
| 1,070 | 0 | 54 |
5055c590ee27d86e094c6eb93148926211ad9ca4 | 415 | py | Python | zerver/migrations/0188_userprofile_enable_login_emails.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 17,004 | 2015-09-25T18:27:24.000Z | 2022-03-31T22:02:32.000Z | zerver/migrations/0188_userprofile_enable_login_emails.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 20,344 | 2015-09-25T19:02:42.000Z | 2022-03-31T23:54:40.000Z | zerver/migrations/0188_userprofile_enable_login_emails.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 7,271 | 2015-09-25T18:48:39.000Z | 2022-03-31T21:06:11.000Z | # Generated by Django 1.11.14 on 2018-08-22 09:57
from django.db import migrations, models
| 21.842105 | 56 | 0.626506 | # Generated by Django 1.11.14 on 2018-08-22 09:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the boolean notification setting `enable_login_emails`
    # (default True) to UserProfile.

    dependencies = [
        ("zerver", "0187_userprofile_is_billing_admin"),
    ]
    operations = [
        migrations.AddField(
            model_name="userprofile",
            name="enable_login_emails",
            field=models.BooleanField(default=True),
        ),
    ]
| 0 | 299 | 23 |
dd0482f8708b53c479a409a1122ba3e19eb16cdc | 1,061 | py | Python | nettest/utils.py | public0821/nettest | 0fed7949526640dd55dda51ba37a98e1ab7e9109 | [
"Apache-2.0"
] | 2 | 2016-06-29T08:01:58.000Z | 2020-11-25T02:51:29.000Z | nettest/utils.py | public0821/nettest | 0fed7949526640dd55dda51ba37a98e1ab7e9109 | [
"Apache-2.0"
] | null | null | null | nettest/utils.py | public0821/nettest | 0fed7949526640dd55dda51ba37a98e1ab7e9109 | [
"Apache-2.0"
] | 1 | 2020-11-24T07:05:24.000Z | 2020-11-24T07:05:24.000Z | import struct
import array
def mac2str(mac):
    """Render a packed MAC address as a readable hex string.

    Args:
        mac: 6 bytes mac address
    Returns:
        readable string such as '00:11:22:33:44:55'
    """
    # struct.unpack also validates that exactly 6 bytes were supplied
    octets = struct.unpack('BBBBBB', mac)
    return ':'.join('%02x' % int(octet) for octet in octets)
def str2mac(s):
    """Parse an 'xx:xx:xx:xx:xx:xx' string into a packed MAC address.

    Args:
        s: 'xx:xx:xx:xx:xx:xx' format string
    Returns:
        6 bytes mac address
    """
    octets = [int(part, 16) for part in s.split(":")]
    return struct.pack('BBBBBB',
                       octets[0], octets[1], octets[2],
                       octets[3], octets[4], octets[5])
def checksum(data):
    '''Calculate the RFC 1071 ones'-complement Internet checksum.

    more about checksum, see http://tools.ietf.org/html/rfc1071

    Args:
        data: packet payload as bytes (or str on Python 2)
    Returns:
        16-bit checksum as an int
    '''
    if len(data) & 1:
        # Pad odd-length input with a zero byte. The original `data + '\0'`
        # crashed on Python 3 bytes input; pad with the matching type instead.
        data = data + (b'\x00' if isinstance(data, bytes) else '\0')
    # Interpret the buffer as native-endian signed 16-bit words
    words = array.array('h', data)
    # Use a local name distinct from the function to avoid shadowing
    total = 0
    for word in words:
        total += (word & 0xffff)
    # Fold the carry bits back into the low 16 bits, then complement
    total = (total >> 16) + (total & 0xffff)
    total = total + (total >> 16)
    return (~total) & 0xffff
| 23.065217 | 94 | 0.531574 | import struct
import array
def mac2str(mac):
    """Convert a 6-byte packed MAC address to its colon-separated hex form.

    Args:
        mac: 6 bytes mac address
    Returns:
        readable string
    """
    b0, b1, b2, b3, b4, b5 = struct.unpack('BBBBBB', mac)
    return '{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}'.format(
        int(b0), int(b1), int(b2), int(b3), int(b4), int(b5))
def str2mac(s):
    """Convert a colon-separated hex string to a packed 6-byte MAC address.

    Args:
        s: 'xx:xx:xx:xx:xx:xx' format string
    Returns:
        6 bytes mac address
    """
    values = tuple(int(chunk, 16) for chunk in s.split(":"))
    return struct.pack('BBBBBB',
                       values[0], values[1], values[2],
                       values[3], values[4], values[5])
def checksum(data):
    '''Calculate checksum (RFC 1071 ones'-complement sum over 16-bit words).

    more about checksum, see http://tools.ietf.org/html/rfc1071

    Args:
        data: packet payload as bytes (or str on Python 2)
    Returns:
        16-bit checksum as an int
    '''
    if len(data) & 1:
        # Fix: on Python 3 the input is bytes and `data + '\0'` raises
        # TypeError for odd-length buffers; pad with the matching type.
        data = data + (b'\x00' if isinstance(data, bytes) else '\0')
    words = array.array('h', data)  # native-endian signed shorts
    total = 0  # renamed from `checksum` so the local no longer shadows the function
    for word in words:
        total += (word & 0xffff)
    # Fold carries back into 16 bits, then take the ones' complement
    total = (total >> 16) + (total & 0xffff)
    total = total + (total >> 16)
    return (~total) & 0xffff
| 0 | 0 | 0 |
343669861f664697fc3420138a36fe9eb392d316 | 21,927 | py | Python | script/panda_grasp.py | Hymwgk/gpg | 07bb0c3a357ba2ac21bc179ba57584fee9eda6d3 | [
"BSD-2-Clause"
] | null | null | null | script/panda_grasp.py | Hymwgk/gpg | 07bb0c3a357ba2ac21bc179ba57584fee9eda6d3 | [
"BSD-2-Clause"
] | null | null | null | script/panda_grasp.py | Hymwgk/gpg | 07bb0c3a357ba2ac21bc179ba57584fee9eda6d3 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
#coding=utf-8
"""
moveit_ik_demo.py - Version 0.1 2014-01-14
Use inverse kinemtatics to move the end effector to a specified pose
Created for the Pi Robot Project: http://www.pirobot.org
Copyleft (c) 2014 Patrick Goebel. All lefts reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.5
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
from sqlalchemy import false
import rospy, sys
import moveit_commander
import os
#print('Hello,'+os.environ.get('ROS_MASTER_URI')+'!')
#import moveit_commander
import tf
import argparse
import math
import numpy as np
from math import pi
import time
import copy
from moveit_msgs.msg import RobotTrajectory,DisplayTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
from threading import Lock, Event
from franka_msgs.srv import SetCartesianImpedance, \
SetCartesianImpedanceRequest, \
SetCartesianImpedanceResponse
from controller_manager_msgs.srv import SwitchController,SwitchControllerRequest,SwitchControllerResponse
import actionlib
from geometry_msgs.msg import PoseStamped, Pose
from tf.transformations import euler_from_quaternion, quaternion_from_euler,quaternion_multiply,quaternion_from_matrix,quaternion_matrix
#from autolab_core import RigidTransform,transformations
#from pyquaternion import Quaternion
from gpg.msg import GraspConfig,GraspConfigList
from franka_gripper.msg import GraspAction, GraspGoal
from franka_gripper.msg import GraspEpsilon
# Parse command-line arguments
parser = argparse.ArgumentParser(description='Panda go grasp')
parser.add_argument('--test',type=int, default=0) # original comment said "number of scenes processed at once", but the code uses test==1 as a test-mode switch -- TODO confirm
parameters,unknow =parser.parse_known_args()
# Entry point: constructing MoveItDemo runs the whole grasp loop in __init__
if __name__ == "__main__":
    try:
        MoveItDemo()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Arm tracker node terminated.")
| 38.333916 | 136 | 0.634879 | #!/usr/bin/env python
#coding=utf-8
"""
moveit_ik_demo.py - Version 0.1 2014-01-14
Use inverse kinemtatics to move the end effector to a specified pose
Created for the Pi Robot Project: http://www.pirobot.org
Copyleft (c) 2014 Patrick Goebel. All lefts reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.5
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
from sqlalchemy import false
import rospy, sys
import moveit_commander
import os
#print('Hello,'+os.environ.get('ROS_MASTER_URI')+'!')
#import moveit_commander
import tf
import argparse
import math
import numpy as np
from math import pi
import time
import copy
from moveit_msgs.msg import RobotTrajectory,DisplayTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
from threading import Lock, Event
from franka_msgs.srv import SetCartesianImpedance, \
SetCartesianImpedanceRequest, \
SetCartesianImpedanceResponse
from controller_manager_msgs.srv import SwitchController,SwitchControllerRequest,SwitchControllerResponse
import actionlib
from geometry_msgs.msg import PoseStamped, Pose
from tf.transformations import euler_from_quaternion, quaternion_from_euler,quaternion_multiply,quaternion_from_matrix,quaternion_matrix
#from autolab_core import RigidTransform,transformations
#from pyquaternion import Quaternion
from gpg.msg import GraspConfig,GraspConfigList
from franka_gripper.msg import GraspAction, GraspGoal
from franka_gripper.msg import GraspEpsilon
# Parse command-line arguments
parser = argparse.ArgumentParser(description='Panda go grasp')
parser.add_argument('--test',type=int, default=0) # original comment said "number of scenes processed at once", but the code uses test==1 as a test-mode switch -- TODO confirm
parameters,unknow =parser.parse_known_args()
class MoveItDemo:
    """Panda grasp demo node.

    On construction it configures the arm, moves to a ready pose, then loops:
    wait for a grasp pose (topic ``best_grasp``, or a hard-coded pose in test
    mode), move to the pre-grasp pose, approach in a straight line, close the
    gripper, retreat, and return to the ready pose.

    Frame-naming shorthand used below ("T" = transform, read "AtB" as
    "pose of B in frame A"): B = robot base (panda_link0),
    C = camera (kinect2_link), E = end effector (panda_EE),
    L = panda_link8, G = canonical grasp frame;
    suffix g = final grasp, p = pre-grasp.
    """
    def __init__(self):
        """Initialize MoveIt/ROS, set impedance, go to ready pose and run the grasp loop."""
        # Initialize the MoveIt commander API
        moveit_commander.roscpp_initialize(sys.argv)
        # Initialize the ROS node, named panda_grasp
        rospy.init_node('panda_grasp', anonymous=True)
        rospy.set_param("/robot_state", "Initializing")
        rospy.loginfo("Robot initializing")
        self.lock = Lock()
        #rospy.wait_for_service('/controller_manager/switch_controller')
        switcher = rospy.ServiceProxy('/controller_manager/switch_controller', SwitchController)
        cartesian_impedance_proxy = rospy.ServiceProxy('/franka_control/set_cartesian_impedance', SetCartesianImpedance)
        cartesian_impedance_proxy.wait_for_service()
        # NOTE(review): this assigns the request *class*, not an instance
        # (missing parentheses); it only works because the attribute is then
        # set on the class itself. SetCartesianImpedanceRequest() was likely
        # intended -- TODO confirm.
        request = SetCartesianImpedanceRequest
        # Cartesian stiffness [x, y, z, rot_x, rot_y, rot_z]
        request.cartesian_impedance = [2000,500,500,50,50,50]
        con = SwitchControllerRequest()
        #con.start_controllers = 'position_joint_trajectory_controller'
        con.strictness= con.STRICT
        con.start_controllers =[]
        con.stop_controllers = ['position_joint_trajectory_controller']
        # Stop the trajectory controller, apply the impedance setting, then restart it
        with self.lock:
            rospy.sleep(0.1)
            switcher(con)
            result = cartesian_impedance_proxy(request.cartesian_impedance)
            con.stop_controllers =[]
            con.start_controllers = ['position_joint_trajectory_controller']
            print(result)
            switcher(con)
        # Action client for the franka gripper grasp action
        self.gripper_client = actionlib.SimpleActionClient('/franka_gripper/grasp', GraspAction)
        self.gripper_client.wait_for_server()
        # Create the tf broadcaster (used to publish grasp frames for visualization)
        self.tf_broadcaster=tf.TransformBroadcaster()
        self.grasp_config=GraspConfig()
        # Create a general-purpose TF listener
        self.tf_listener = tf.TransformListener()
        # Flag: set once the camera->base transform has been read successfully
        get_transform=False
        # Wait until the correct tf transform is available
        while not get_transform:
            try:
                if parameters.test==1:
                    get_transform = True
                    #print(parameters.test)
                    rospy.loginfo("==================Test mode====================")
                else:
                    self.tf_listener.waitForTransform('/kinect2_link', '/panda_link0', rospy.Time(), rospy.Duration(5.0))
                    # Pose of the camera frame kinect2_link in the base frame panda_link0
                    self.btc_trans, self.btc_quater = self.tf_listener.lookupTransform('/panda_link0', '/kinect2_link', rospy.Time(0))
                    # Convert the translation/quaternion lists to ndarrays
                    self.btc_trans=np.array(self.btc_trans)
                    self.btc_quater= np.array(self.btc_quater)
                    get_transform = True
                    rospy.loginfo("got transform complete")
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                raise SystemError("got transform failed")
        # Initialize the planning scene object
        self.scene = moveit_commander.PlanningSceneInterface()
        # Add the table to the scene so the arm will not collide with the tabletop
        self.add_table()
        rospy.sleep(2)
        # Create the planning group object for the arm
        self.panda_arm = moveit_commander.MoveGroupCommander('panda_arm')
        # Create the planning group object for the hand
        self.panda_hand=moveit_commander.MoveGroupCommander('hand')
        #
        self.panda_arm.set_max_acceleration_scaling_factor(0.1)
        self.panda_arm.set_max_velocity_scaling_factor(0.5)
        # Planned trajectories are published for RViz via this publisher
        display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
                                                        DisplayTrajectory,
                                                        queue_size=20)
        # Get the end effector name
        self.end_effector_link = self.panda_arm.get_end_effector_link()
        rospy.loginfo("End effector detected {}".format(self.end_effector_link))
        # Allowed tolerance for the end-effector goal pose
        self.panda_arm.set_goal_position_tolerance(0.01)# 1 cm
        self.panda_arm.set_goal_orientation_tolerance(0.05)#
        # No replanning on failure; cap planning time at 5 s to avoid wasting time
        self.panda_arm.allow_replanning(False)
        self.panda_arm.set_planning_time(5)
        # Panda's initial pose and ready (pre-run) pose, as joint values
        self.initial_joints = [0.04, -0.70, 0.18, -2.80, 0.19, 2.13, 0.92]
        self.ready_joints = [1.544, -0.755, 0.190, -2.713, 0.149, 2.027, 0.799]
        # Move to the ready pose (log tag has a typo preserved from the original)
        self.move_to_joints(self.panda_arm,self.ready_joints,tag="reday pose")
        # Open the gripper
        self.set_gripper(0.078,epsilon=0.01)# open to ~8 cm
        rospy.set_param("/robot_state", "ready")
        rospy.loginfo("Ready to grasp, initial pose")
        ###################### start waiting for the gripper pose #########################
        rospy.loginfo("Waiting for gripper pose")
        self.callback_done=False
        if parameters.test==1:# test mode: synthesize a grasp instead of subscribing
            self.grasp_test()
        else:
            rospy.Subscriber('best_grasp', GraspConfig, self.Callback,queue_size=1)
        ####################### execute the grasp loop ####################################
        while not rospy.is_shutdown():
            # Wait until the callback has finished computing the target poses
            if self.callback_done:
                self.callback_done=False
                #rospy.set_param("/robot_state", "ready")
                #continue
            else:
                rospy.sleep(0.5)
                continue
            # Move to the pre-grasp pose
            rospy.set_param("/robot_state", "moving")
            rospy.loginfo('Move to pre_grasp pose')
            self.panda_arm.set_start_state_to_current_state() # plan from the current state
            success=self.panda_arm.go(self.pre_grasp_pose_link8,wait=True)
            self.panda_arm.stop()
            self.panda_arm.clear_pose_targets()
            if not success:
                rospy.loginfo('Failed to move to pre_grasp pose!')
                rospy.sleep(1)
                rospy.set_param("/robot_state", "ready")
                continue
            rospy.loginfo('Succeed')
            rospy.sleep(1)# let the arm settle
            # Re-set the current state as the planning start state
            self.panda_arm.set_start_state_to_current_state()
            #
            waypoints = []
            wpose=self.panda_arm.get_current_pose().pose
            wpose.position.x= self.grasp_pose_link8.position.x
            wpose.position.y= self.grasp_pose_link8.position.y
            wpose.position.z= self.grasp_pose_link8.position.z
            waypoints.append(copy.deepcopy(wpose))
            # Plan a straight-line approach from the current pose, keeping orientation,
            # to the final grasp position
            (plan, fraction) = self.panda_arm.compute_cartesian_path(
                waypoints,   # waypoints to follow
                0.01,        # eef_step
                0.0)         # jump_threshold
            ## Display the trajectory
            display_trajectory = DisplayTrajectory()
            display_trajectory.trajectory_start = self.panda_arm.get_current_state()
            display_trajectory.trajectory.append(plan)
            display_trajectory_publisher.publish(display_trajectory)
            # Execute (slowed down) and wait for the approach to finish
            new_plan=self.scale_trajectory_speed(plan,0.2)
            self.panda_arm.execute(new_plan,wait=True)
            # Perform the grasp
            rospy.loginfo("Start to grasp")
            self.set_gripper(0.01,epsilon=0.4)# close toward ~1 cm (original comment said 3 cm)
            rospy.sleep(1)
            #################### retreat after grasping ####################
            waypoints = []
            wpose=self.panda_arm.get_current_pose().pose
            wpose.position.x= self.pre_grasp_pose_link8.position.x
            wpose.position.y= self.pre_grasp_pose_link8.position.y
            wpose.position.z= self.pre_grasp_pose_link8.position.z
            waypoints.append(copy.deepcopy(wpose))
            # Plan a straight-line retreat back to the pre-grasp position, keeping orientation
            (plan, fraction) = self.panda_arm.compute_cartesian_path(
                waypoints,   # waypoints to follow
                0.01,        # eef_step
                0.0)         # jump_threshold
            # Display the trajectory
            display_trajectory = DisplayTrajectory()
            display_trajectory.trajectory_start = self.panda_arm.get_current_state()
            display_trajectory.trajectory.append(plan)
            display_trajectory_publisher.publish(display_trajectory)
            # Execute and wait for the retreat to finish
            new_plan=self.scale_trajectory_speed(plan,0.6)
            self.panda_arm.execute(new_plan,wait=True)
            ###################### move back to the ready pose ############################
            self.move_to_joints(self.panda_arm,self.ready_joints,tag="ready pose")
            self.set_gripper(0.08,epsilon=0.4)# open to 8 cm
            rospy.set_param("/robot_state", "ready")
            rospy.loginfo("Ready to grasp, ready pose")
            rospy.sleep(2)
            if parameters.test==1:# test mode: immediately re-trigger the same grasp
                self.callback_done=True
        rospy.set_param("/robot_state", "moving")
        self.move_to_joints(self.panda_arm,self.initial_joints)
        rospy.set_param("/robot_state", "ready")
        # Shut down MoveIt cleanly
        moveit_commander.roscpp_shutdown()
        # Exit MoveIt
        moveit_commander.os._exit(0)
    def lookupTransform(self,tf_listener, target, source):
        """Look up source->target and return it as a 4x4 homogeneous matrix."""
        tf_listener.waitForTransform(target, source, rospy.Time(), rospy.Duration(4.0)) # wait up to 4 seconds
        trans, rot = tf_listener.lookupTransform(target, source, rospy.Time())
        euler = tf.transformations.euler_from_quaternion(rot)
        source_target = tf.transformations.compose_matrix(translate = trans, angles = euler)
        return source_target
    def getTfFromMatrix(self,matrix):
        """Decompose a 4x4 matrix into (translation, quaternion, euler angles)."""
        scale, shear, angles, trans, persp = tf.transformations.decompose_matrix(matrix)
        return trans, tf.transformations.quaternion_from_euler(*angles), angles
    def quater_multi_vec(self,quater,vec):
        """Rotate a 3-vector by a quaternion: q * [v, 0] * q^-1, returning the vector part."""
        quater_=tf.transformations.quaternion_inverse(quater)
        vec_quater=np.c_[vec,[0]]
        temp=quaternion_multiply(quater,vec_quater)
        temp=quaternion_multiply(temp,quater_)
        return temp[:3]
    def move_to_joints(self,group,joints,tag="initial pose"):
        """Plan to a joint-space goal and execute it; raise if no trajectory is found."""
        # Plan toward the goal; case 2 means a trajectory was found
        case,plan = self.planJointGoal(group,joints)# truthy case means a trajectory exists
        if case==2:
            rospy.loginfo("Move to {}".format(tag))
            group.execute(plan,wait=True)
        elif case==1:
            rospy.loginfo("Already at {}".format(tag))
        else:
            raise SystemError("Home pose trajectory not found")
    def planJointGoal(self,movegroup,joint_goal,lable='Next'):
        """Plan to *joint_goal*. Returns (1, None) if already there,
        (2, plan) on success, (0, plan) if planning failed."""
        current_joint = movegroup.get_current_joint_values()
        dis_pose =np.linalg.norm(np.array(joint_goal)-np.array(current_joint))
        #print(current_joint)
        #print(joint_goal)
        if dis_pose<0.008:
            return 1,None # already in place
        else:
            movegroup.set_joint_value_target(joint_goal)
            plan = movegroup.plan()
            if not plan.joint_trajectory.points:
                return 0,plan
            else:# a plan was found; caller executes it
                return 2,plan
    def Callback(self,grasp):
        """From a received grasp pose, compute the final and pre-grasp poses of panda_link8.

        The received grasp is expressed in the camera frame by default and uses the
        canonical grasp frame convention (grasp center between the two fingertips).
        """
        rospy.set_param("/robot_state", "caculating")
        # The upstream topic publishes a single GraspConfig; when a GraspConfigList
        # is used, grasps[0] would be the best-ranked grasp configuration.
        self.grasp_config=grasp
        # Final grasp pose (for panda_link8), filled below
        self.grasp_pose_link8=Pose()
        # Approach/retreat offset distance along the approach axis (comment said 10 cm; value is 0.15 m)
        dis =0.15
        # LTE: pose of panda_EE (gripper) in the panda_link8 frame (fixed robot parameter)
        lte_trans=np.array([0.0000,0.0000,0.1034])
        lte_quater = np.array([0.0000,0.0000,-0.38268,0.92388])
        lte_rot = quaternion_matrix(lte_quater)#[4,4]
        # Pre-grasp pose (for panda_link8), filled below
        self.pre_grasp_pose_link8=Pose()
        # Read the grasp pose; note it is expressed in the camera frame here
        top_center = np.array([self.grasp_config.position.x,self.grasp_config.position.y,self.grasp_config.position.z])
        approach=np.array([self.grasp_config.approach.x,self.grasp_config.approach.y,self.grasp_config.approach.z])# approach axis
        binormal=np.array([self.grasp_config.binormal.x,self.grasp_config.binormal.y,self.grasp_config.binormal.z])# closing axis
        axis=np.array([self.grasp_config.axis.x,self.grasp_config.axis.y,self.grasp_config.axis.z])#
        # Normalize the direction vectors
        approach=approach/np.linalg.norm(approach)
        binormal=binormal/np.linalg.norm(binormal)
        axis=axis/np.linalg.norm(axis)
        # Pose of the canonical grasp frame in the camera frame
        ctg_rot_ = np.concatenate((approach,binormal,axis),axis=0).reshape(3,3).T#3*3
        # The panda_EE frame differs from the canonical grasp frame used by gpg;
        # convert here, which amounts to a 90-degree rotation about the gpg y axis
        gteg_rot_ = np.array([0,0,1,0,1,0,-1,0,0]).reshape(3,3)
        # Orientation of the panda_EE grasp frame relative to the kinect
        cteg_rot_ = ctg_rot_.dot(gteg_rot_)
        cteg_rot=np.identity(4)
        cteg_rot[0:3,0:3] = cteg_rot_
        cteg_quater = quaternion_from_matrix(cteg_rot)
        cteg_trans = top_center
        # Pose of the camera in the base frame
        btc_rot = quaternion_matrix(self.btc_quater)
        btc_trans = self.btc_trans
        # BTEg: pose of panda_EE relative to the base at the final grasp
        bteg_trans =btc_trans+btc_rot[:3,:3].dot(cteg_trans)
        bteg_rot = btc_rot.dot(cteg_rot)#4*4
        bteg_quater=quaternion_from_matrix(bteg_rot)
        # BTEp: pose of panda_EE relative to the base at the pre-grasp
        btep_trans = bteg_trans - bteg_rot[:3,2]*dis #[3,]
        btep_rot = bteg_rot
        # BTLg: pose of panda_link8 relative to the base at the final grasp
        btlg_rot = bteg_rot.dot(lte_rot.T)# orientation
        btlg_trans = bteg_trans - btlg_rot[:3,:3].dot(lte_trans)# position
        btlg_quater=quaternion_from_matrix(btlg_rot)
        self.grasp_pose_link8.position.x = btlg_trans[0]
        self.grasp_pose_link8.position.y = btlg_trans[1]
        self.grasp_pose_link8.position.z = btlg_trans[2]
        self.grasp_pose_link8.orientation.x = btlg_quater[0]
        self.grasp_pose_link8.orientation.y = btlg_quater[1]
        self.grasp_pose_link8.orientation.z = btlg_quater[2]
        self.grasp_pose_link8.orientation.w = btlg_quater[3]
        # BTLp: pose of panda_link8 relative to the base at the pre-grasp
        self.pre_grasp_pose_link8 = copy.deepcopy(self.grasp_pose_link8)
        btlp_rot = btlg_rot
        btlp_trans = btep_trans - btlp_rot[:3,:3].dot(lte_trans)
        self.pre_grasp_pose_link8.position.x = btlp_trans[0]
        self.pre_grasp_pose_link8.position.y = btlp_trans[1]
        self.pre_grasp_pose_link8.position.z = btlp_trans[2]
        # Broadcast the camera pose in the base frame (for visualization)
        self.tf_broadcaster.sendTransform(
            self.btc_trans,
            self.btc_quater,
            rospy.Time.now(),
            "kinect2",
            "panda_link0")
        # Broadcast BTEg: panda_EE pose relative to the base at the final grasp
        self.tf_broadcaster.sendTransform(
            bteg_trans,
            bteg_quater,
            rospy.Time.now(),
            "base2grasp",
            "panda_link0")
        # Broadcast BTEp: panda_EE pose relative to the base at the pre-grasp
        self.tf_broadcaster.sendTransform(
            btep_trans,
            bteg_quater,# same orientation as the grasp pose
            rospy.Time.now(),
            "base2pre",
            "panda_link0")
        # Mark the callback as finished; the main loop picks the poses up
        self.callback_done=True
    def grasp_test(self):
        """
        Given a hard-coded final grasp pose BTEg of the panda_EE frame in panda_link0,
        compute:
        BTLp, the pre-grasp pose of panda_link8 relative to panda_link0, and
        BTLg, the final grasp pose of panda_link8 relative to panda_link0.
        """
        # Approach/retreat offset distance (comment said 10 cm; value is 0.15 m)
        dis =0.15
        # BTLg: pose of panda_link8 relative to the base at the final grasp
        self.grasp_pose_link8=Pose()
        # BTLp: pose of panda_link8 relative to the base at the pre-grasp
        self.pre_grasp_pose_link8=Pose()
        # LTE: pose of panda_EE (gripper) in the panda_link8 frame (fixed robot parameter)
        lte_trans=np.array([0.0000,0.0000,0.1034])
        lte_quater = np.array([0.0000,0.0000,-0.38268,0.92388])
        lte_rot = quaternion_matrix(lte_quater)#[4,4]
        #lte_rot = lte_rot[:3,:3]
        # BTEg: pose of panda_EE relative to the base at the final grasp
        bteg_quater=np.array([0.98609,0.16538,0.01226,-0.011129])# orientation as a quaternion
        bteg_trans =np.array([0.55608,-0.04333,0.072476])
        bteg_rot = quaternion_matrix(bteg_quater)
        #bteg_rot = bteg_rot[:3,:3]# take the 3x3 rotation block
        # BTEp: pose of panda_EE relative to the base at the pre-grasp
        btep_trans = bteg_trans - bteg_rot[:3,2]*dis #[3,]
        btep_rot = bteg_rot
        # BTLg: pose of panda_link8 relative to the base at the final grasp
        btlg_rot = bteg_rot.dot(lte_rot.T)# orientation
        btlg_trans = bteg_trans - btlg_rot[:3,:3].dot(lte_trans)# position
        btlg_quater=quaternion_from_matrix(btlg_rot)
        self.grasp_pose_link8.position.x = btlg_trans[0]
        self.grasp_pose_link8.position.y = btlg_trans[1]
        self.grasp_pose_link8.position.z = btlg_trans[2]
        self.grasp_pose_link8.orientation.x = btlg_quater[0]
        self.grasp_pose_link8.orientation.y = btlg_quater[1]
        self.grasp_pose_link8.orientation.z = btlg_quater[2]
        self.grasp_pose_link8.orientation.w = btlg_quater[3]
        # BTLp: pose of panda_link8 relative to the base at the pre-grasp
        self.pre_grasp_pose_link8 = copy.deepcopy(self.grasp_pose_link8)
        btlp_rot = btlg_rot
        btlp_trans = btep_trans - btlp_rot[:3,:3].dot(lte_trans)
        self.pre_grasp_pose_link8.position.x = btlp_trans[0]
        self.pre_grasp_pose_link8.position.y = btlp_trans[1]
        self.pre_grasp_pose_link8.position.z = btlp_trans[2]
        # Broadcast BTEg: panda_EE pose relative to the base at the final grasp
        self.tf_broadcaster.sendTransform(
            bteg_trans,
            bteg_quater,
            rospy.Time.now(),
            "base2grasp",
            "panda_link0")
        # Broadcast BTEp: panda_EE pose relative to the base at the pre-grasp
        self.tf_broadcaster.sendTransform(
            btep_trans,
            bteg_quater,# same orientation as the grasp pose
            rospy.Time.now(),
            "base2pre",
            "panda_link0")
        # Mark the callback as finished; the main loop picks the poses up
        self.callback_done=True
    def scale_trajectory_speed(self,traj,spd=0.1):
        """Return a copy of *traj* with time stretched by 1/spd and
        velocities/accelerations scaled by spd (spd < 1 slows the motion)."""
        new_traj = RobotTrajectory()
        # NOTE(review): this rebinding makes new_traj alias traj, so the input
        # trajectory's points list is replaced below -- TODO confirm intended.
        new_traj = traj
        n_joints = len(traj.joint_trajectory.joint_names)
        n_points = len(traj.joint_trajectory.points)
        #spd = 3.0
        points = list(traj.joint_trajectory.points)
        for i in range(n_points):
            point = JointTrajectoryPoint()
            # Stretch time and scale each joint's velocity/acceleration
            point.time_from_start = traj.joint_trajectory.points[i].time_from_start / spd
            point.velocities = list(traj.joint_trajectory.points[i].velocities)
            point.accelerations = list(traj.joint_trajectory.points[i].accelerations)
            point.positions = traj.joint_trajectory.points[i].positions
            for j in range(n_joints):
                point.velocities[j] = point.velocities[j] * spd
                point.accelerations[j] = point.accelerations[j] * spd
            points[i] = point
        new_traj.joint_trajectory.points = points
        return new_traj
    def add_table(self):
        """Add the grasping tabletop to the scene so the arm avoids colliding with the table.
        """
        # Remove any leftover table object from a previous run
        self.scene.remove_world_object('table')
        # Tabletop dimensions x y z
        table_size = [0.6, 1.2, 0.01]
        # Table pose
        table_pose = PoseStamped()
        table_pose.header.frame_id = 'panda_link0'
        table_pose.pose.position.x = 0.55
        table_pose.pose.position.y = 0.0
        table_pose.pose.position.z = 0.025
        table_pose.pose.orientation.w = 1.0
        # Add the table to the scene
        self.scene.add_box('table', table_pose, table_size)
    def set_gripper(self,gripper_width,epsilon=0.0):
        """Set the panda gripper opening width.
        gripper_width must be at most 0.08 m.
        """
        if gripper_width>0.08 or gripper_width<0.0:
            raise Exception
        # Epsilon tolerance helps the gripper maintain its grip force
        grasp_epsilon = GraspEpsilon(epsilon,epsilon)
        goal = GraspGoal(width = gripper_width, speed = 0.08,epsilon=grasp_epsilon ,force=5.0)
        self.gripper_client.send_goal(goal )
        self.gripper_client.wait_for_result(rospy.Duration.from_sec(5.0))
        rospy.loginfo("Gripper action completed")
# Entry point: constructing MoveItDemo runs the whole grasp loop in __init__
if __name__ == "__main__":
    try:
        MoveItDemo()
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Arm tracker node terminated.")
| 12,325 | 9,540 | 23 |
3f86479e0b03f5d196f79e5392543c14b05df6c0 | 20,008 | py | Python | opennem/db/models/opennem.py | tourdownunder/opennem | deec3e2079db9d9d84171010fd0c239170d1e7ce | [
"MIT"
] | null | null | null | opennem/db/models/opennem.py | tourdownunder/opennem | deec3e2079db9d9d84171010fd0c239170d1e7ce | [
"MIT"
] | 1 | 2020-09-06T04:17:59.000Z | 2020-09-06T04:17:59.000Z | opennem/db/models/opennem.py | tourdownunder/opennem | deec3e2079db9d9d84171010fd0c239170d1e7ce | [
"MIT"
] | null | null | null | """
OpenNEM primary schema adapted to support multiple energy sources
Currently supported:
- NEM
- WEM
"""
from decimal import Decimal
from typing import Optional
from dictalchemy import DictableModel
from geoalchemy2 import Geometry
from shapely import wkb
from sqlalchemy import (
JSON,
Boolean,
Column,
Date,
DateTime,
Enum,
ForeignKey,
Index,
Integer,
LargeBinary,
Numeric,
Sequence,
Text,
func,
text,
)
from sqlalchemy.dialects.postgresql import TIMESTAMP
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from opennem.core.dispatch_type import DispatchType
from opennem.core.oid import get_ocode, get_oid
Base = declarative_base(cls=DictableModel)
metadata = Base.metadata
class BaseModel(object):
    """
    Base model for both NEM and WEM
    """

    # free-text identifier of who created the record
    created_by = Column(Text, nullable=True)
    # updated_by = Column(Text, nullable=True)
    # processed_at = Column(DateTime(timezone=True), nullable=True)
    # row timestamps maintained by the database / ORM
    created_at = Column(DateTime(timezone=True), server_default=func.now())
    updated_at = Column(DateTime(timezone=True), onupdate=func.now())
class FacilityScada(Base, BaseModel):
    """
    Facility Scada

    Per-interval SCADA readings for a facility on a network, keyed by
    (network_id, trading_interval, facility_code). Functional indexes below
    truncate the interval to year/month/day/hour for aggregate queries.
    """

    __tablename__ = "facility_scada"
    __table_args__ = (
        Index(
            "idx_facility_scada_trading_interval_year",
            text("date_trunc('year', trading_interval AT TIME ZONE 'UTC')"),
        ),
        Index(
            "idx_facility_scada_trading_interval_month",
            text("date_trunc('month', trading_interval AT TIME ZONE 'UTC')"),
        ),
        Index(
            "idx_facility_scada_trading_interval_day",
            text("date_trunc('day', trading_interval AT TIME ZONE 'UTC')"),
        ),
        Index(
            "idx_facility_scada_trading_interval_hour",
            text("date_trunc('hour', trading_interval AT TIME ZONE 'UTC')"),
        ),
        # new timezone based indicies
        # @NOTE: other indicies in migration files
    )
    network_id = Column(
        Text,
        # NOTE(review): constraint name says "balancing_summary" on the
        # facility_scada table -- looks copy-pasted; confirm before renaming.
        ForeignKey("network.code", name="fk_balancing_summary_network_code"),
        primary_key=True,
    )
    network = relationship("Network")
    trading_interval = Column(
        TIMESTAMP(timezone=True), index=True, primary_key=True
    )
    facility_code = Column(Text, nullable=False, primary_key=True, index=True)
    generated = Column(Numeric, nullable=True)
    eoi_quantity = Column(Numeric, nullable=True)
| 27.445816 | 141 | 0.636795 | """
OpenNEM primary schema adapted to support multiple energy sources
Currently supported:
- NEM
- WEM
"""
from decimal import Decimal
from typing import Optional
from dictalchemy import DictableModel
from geoalchemy2 import Geometry
from shapely import wkb
from sqlalchemy import (
JSON,
Boolean,
Column,
Date,
DateTime,
Enum,
ForeignKey,
Index,
Integer,
LargeBinary,
Numeric,
Sequence,
Text,
func,
text,
)
from sqlalchemy.dialects.postgresql import TIMESTAMP
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from opennem.core.dispatch_type import DispatchType
from opennem.core.oid import get_ocode, get_oid
Base = declarative_base(cls=DictableModel)
metadata = Base.metadata
class BaseModel(object):
    """
    Base model for both NEM and WEM
    """
    # Audit columns inherited by every concrete model.
    created_by = Column(Text, nullable=True)
    # updated_by = Column(Text, nullable=True)
    # processed_at = Column(DateTime(timezone=True), nullable=True)
    # created_at is filled in by the database on INSERT; updated_at on UPDATE.
    created_at = Column(DateTime(timezone=True), server_default=func.now())
    updated_at = Column(DateTime(timezone=True), onupdate=func.now())
class FuelTech(Base, BaseModel):
    """Lookup table of fuel technologies, keyed by code."""
    __tablename__ = "fueltech"
    code = Column(Text, primary_key=True)
    label = Column(Text, nullable=True)
    # True when the fuel technology is a renewable source.
    renewable = Column(Boolean, default=False)
    facilities = relationship("Facility")
class Network(Base, BaseModel):
    """An energy network (e.g. NEM or WEM) with its timezone and interval settings."""
    __tablename__ = "network"
    code = Column(Text, primary_key=True)
    country = Column(Text, nullable=False)
    label = Column(Text, nullable=True)
    timezone = Column(Text, nullable=False)
    timezone_database = Column(Text, nullable=True)
    # UTC offset — units not stated here (presumably minutes or hours; confirm).
    offset = Column(Integer, nullable=True)
    # Length of a trading/dispatch interval — presumably minutes; confirm.
    interval_size = Column(Integer, nullable=False)
class FacilityStatus(Base, BaseModel):
    """Lookup table of facility status codes (e.g. "operating", "committed")."""
    __tablename__ = "facility_status"
    code = Column(Text, primary_key=True)
    label = Column(Text)
class Participant(Base, BaseModel):
    """A market participant (company) with an approval workflow."""
    __tablename__ = "participant"
    id = Column(
        Integer,
        Sequence("seq_participant_id", start=1000, increment=1),
        primary_key=True,
    )
    code = Column(Text, unique=True, index=True)
    name = Column(Text)
    # Name/code as reported by the source network feed.
    network_name = Column(Text)
    network_code = Column(Text)
    country = Column(Text)
    # Australian Business Number — free-text, not validated here.
    abn = Column(Text)
    # Approval workflow fields.
    approved = Column(Boolean, default=False)
    approved_by = Column(Text)
    approved_at = Column(DateTime(timezone=True), nullable=True)
class Photo(Base):
    """A photo attached to a station, with processing and approval workflow."""
    __tablename__ = "photo"
    id = Column(
        Integer, Sequence("seq_photo_id", start=1000), primary_key=True
    )
    station_id = Column(
        Integer,
        ForeignKey("station.id", name="fk_photos_station_id"),
        nullable=True,
    )
    station = relationship("Station", back_populates="photos")
    name = Column(Text)
    mime_type = Column(Text)
    original_url = Column(Text, nullable=True)
    # Raw image bytes stored in the database.
    data = Column(LargeBinary, nullable=True)
    width = Column(Integer)
    height = Column(Integer)
    license_type = Column(Text, nullable=True)
    license_link = Column(Text, nullable=True)
    author = Column(Text, nullable=True)
    author_link = Column(Text, nullable=True)
    processed = Column(Boolean, default=False)
    processed_by = Column(Text)
    processed_at = Column(DateTime(timezone=True), nullable=True)
    approved = Column(Boolean, default=False)
    approved_by = Column(Text)
    approved_at = Column(DateTime(timezone=True), nullable=True)
    @hybrid_property
    def photo_url(self) -> Optional[str]:
        """Public URL on the photo CDN, or None when the photo has no name."""
        if self.name:
            return "https://photos.opennem.org.au/{}".format(self.name)
        return None
class BomStation(Base):
    """A BOM (presumably Australian Bureau of Meteorology) weather station."""
    __tablename__ = "bom_station"
    __table_args__ = (
        # Spatial index declared explicitly since the geom column disables
        # the auto-generated one.
        Index("idx_bom_station_geom", "geom", postgresql_using="gist"),
    )
    code = Column(Text, primary_key=True)
    state = Column(Text)
    name = Column(Text)
    name_alias = Column(Text, nullable=True)
    registered = Column(Date)
    # priority from 1-5
    priority = Column(Integer, default=5)
    is_capital = Column(Boolean, default=False)
    website_url = Column(Text, nullable=True)
    feed_url = Column(Text, nullable=True)
    observations = relationship("BomObservation")
    altitude = Column(Integer, nullable=True)
    geom = Column(Geometry("POINT", srid=4326, spatial_index=False))
    @hybrid_property
    def lat(self) -> Optional[float]:
        """Latitude decoded from the WKB point geometry, or None."""
        if self.geom:
            return wkb.loads(bytes(self.geom.data)).y
        return None
    @hybrid_property
    def lng(self) -> Optional[float]:
        """Longitude decoded from the WKB point geometry, or None."""
        if self.geom:
            return wkb.loads(bytes(self.geom.data)).x
        return None
class BomObservation(Base):
    """A single weather observation from a BOM station, keyed by (time, station)."""
    __tablename__ = "bom_observation"
    observation_time = Column(DateTime, primary_key=True)
    station_id = Column(
        Text,
        ForeignKey("bom_station.code", name="fk_bom_observation_station_code"),
        primary_key=True,
    )
    station = relationship("BomStation")
    # Observation readings; units follow the BOM feed (not validated here).
    temp_apparent = Column(Numeric)
    temp_air = Column(Numeric)
    press_qnh = Column(Numeric)
    wind_dir = Column(Text, nullable=True)
    wind_spd = Column(Numeric)
    wind_gust = Column(Numeric)
    humidity = Column(Numeric, nullable=True)
    cloud = Column(Text, nullable=True)
    cloud_type = Column(Text, nullable=True)
class Location(Base):
    """A geocoded physical location: address fields plus a point and boundary geometry."""
    __tablename__ = "location"
    __table_args__ = (
        Index("idx_location_geom", "geom", postgresql_using="gist"),
        Index("idx_location_boundary", "boundary", postgresql_using="gist"),
    )
    id = Column(
        Integer, Sequence("seq_location_id", start=1000), primary_key=True
    )
    # station_id = Column(Integer, ForeignKey("station.id"))
    # @TODO sort out this join based on this lateral query ..
    # select l.id, l.locality, l.state, closest_station.state, closest_station.code, closest_station.dist from location l
    # left join lateral (
    #  select code, state, ST_Distance(l.geom, bom_station.geom) / 1000 as dist from bom_station order by l.geom <-> bom_station.geom limit 1
    # ) AS closest_station on TRUE;
    # weather_station = relationship(
    #     "BomStation",
    #     primaryjoin="func.ST_ClosestPoint(remote(BomStation.geom), foreign(Location.geom))",
    #     viewonly=True,
    #     uselist=True,
    #     lazy="joined",
    # )
    address1 = Column(Text)
    address2 = Column(Text)
    locality = Column(Text)
    state = Column(Text)
    postcode = Column(Text, nullable=True)
    revisions = relationship("Revision", lazy="joined")
    # Geo fields
    place_id = Column(Text, nullable=True, index=True)
    geocode_approved = Column(Boolean, default=False)
    geocode_skip = Column(Boolean, default=False)
    geocode_processed_at = Column(DateTime, nullable=True)
    geocode_by = Column(Text, nullable=True)
    geom = Column(Geometry("POINT", srid=4326, spatial_index=False))
    boundary = Column(Geometry("MULTIPOLYGON", srid=4326, spatial_index=False))
    @hybrid_property
    def lat(self) -> Optional[float]:
        """Latitude decoded from the WKB point geometry, or None."""
        if self.geom:
            return wkb.loads(bytes(self.geom.data)).y
        return None
    @hybrid_property
    def lng(self) -> Optional[float]:
        """Longitude decoded from the WKB point geometry, or None."""
        if self.geom:
            return wkb.loads(bytes(self.geom.data)).x
        return None
class Station(Base, BaseModel):
    """A power station: owns a set of Facility units and rolls up their capacities."""
    __tablename__ = "station"
    # __table_args__ = (
    #     UniqueConstraint(
    #         "customer_id", "location_code", name="_customer_location_uc"
    #     ),
    # )
    def __str__(self):
        return "{} <{}>".format(self.name, self.code)
    def __repr__(self):
        return "{} {} <{}>".format(self.__class__, self.name, self.code)
    id = Column(
        Integer,
        Sequence("seq_station_id", start=1000, increment=1),
        primary_key=True,
    )
    participant_id = Column(
        Integer,
        ForeignKey("participant.id", name="fk_station_participant_id"),
        nullable=True,
    )
    participant = relationship("Participant")
    location_id = Column(
        Integer,
        ForeignKey("location.id", name="fk_station_location_id"),
        nullable=True,
    )
    location = relationship("Location", lazy="joined", innerjoin=True)
    facilities = relationship("Facility", lazy="joined", innerjoin=True)
    revisions = relationship("Revision")
    photos = relationship("Photo")
    code = Column(Text, index=True, nullable=True)
    name = Column(Text)
    # wikipedia links
    description = Column(Text, nullable=True)
    wikipedia_link = Column(Text, nullable=True)
    wikidata_id = Column(Text, nullable=True)
    # Original network fields
    network_code = Column(Text, index=True)
    network_name = Column(Text)
    approved = Column(Boolean, default=False)
    approved_by = Column(Text)
    approved_at = Column(DateTime(timezone=True), nullable=True)
    @hybrid_property
    def network(self) -> Optional[Network]:
        """
        Return the network from the facility

        Taken from the first facility; None when the station has none.
        """
        if not self.facilities or not len(self.facilities) > 0:
            return None
        return self.facilities[0].network
    @hybrid_property
    def capacity_registered(self) -> Optional[int]:
        """
        This is the sum of registered capacities for all units for
        this station

        Only active GENERATOR units in operating/committed/commissioning
        status contribute. Returns None when no unit qualifies.
        """
        cap_reg = None
        for fac in self.facilities:
            if (
                fac.capacity_registered
                and type(fac.capacity_registered) in [int, float, Decimal]
                and fac.status_id
                in ["operating", "committed", "commissioning"]
                and fac.dispatch_type == DispatchType.GENERATOR
                and fac.active
            ):
                if not cap_reg:
                    cap_reg = 0
                cap_reg += fac.capacity_registered
        if cap_reg:
            cap_reg = round(cap_reg, 2)
        return cap_reg
    @hybrid_property
    def capacity_aggregate(self) -> Optional[int]:
        """
        This is the sum of aggregate capacities for all units

        Same filtering rules as capacity_registered; None when no unit
        qualifies.
        """
        cap_agg = None
        for fac in self.facilities:
            if (
                fac.capacity_aggregate
                and type(fac.capacity_aggregate) in [int, float, Decimal]
                and fac.status_id
                in ["operating", "committed", "commissioning"]
                and fac.dispatch_type == DispatchType.GENERATOR
                and fac.active
            ):
                if not cap_agg:
                    cap_agg = 0
                cap_agg += fac.capacity_aggregate
        if cap_agg:
            cap_agg = round(cap_agg, 2)
        return cap_agg
    @hybrid_property
    def oid(self) -> str:
        """OpenNEM object id for this station (see opennem.core.oid)."""
        return get_oid(self)
    @hybrid_property
    def ocode(self) -> str:
        """OpenNEM object code for this station (see opennem.core.oid)."""
        return get_ocode(self)
class Facility(Base, BaseModel):
    """A generating/load unit within a station, tied to a network and fuel tech."""
    __tablename__ = "facility"
    def __str__(self):
        return "{} <{}>".format(self.code, self.fueltech_id)
    def __repr__(self):
        return "{} {} <{}>".format(self.__class__, self.code, self.fueltech_id)
    id = Column(
        Integer,
        Sequence("seq_facility_id", start=1000, increment=1),
        primary_key=True,
    )
    # NOTE(review): FK constraint name says "fk_station_network_code" —
    # looks copy-pasted; confirm before renaming (requires a migration).
    network_id = Column(
        Text,
        ForeignKey("network.code", name="fk_station_network_code"),
        nullable=False,
    )
    network = relationship("Network")
    fueltech_id = Column(
        Text,
        ForeignKey("fueltech.code", name="fk_facility_fueltech_id"),
        nullable=True,
    )
    fueltech = relationship(
        "FuelTech", back_populates="facilities", lazy="joined", innerjoin=True
    )
    status_id = Column(
        Text,
        ForeignKey("facility_status.code", name="fk_facility_status_code"),
    )
    status = relationship("FacilityStatus", lazy="joined", innerjoin=True)
    # NOTE(review): FK constraint name "fk_station_status_code" is misleading
    # for a station_id column; confirm before renaming.
    station_id = Column(
        Integer,
        ForeignKey("station.id", name="fk_station_status_code"),
        nullable=True,
    )
    # station = relationship("Station", back_populates="facilities")
    revisions = relationship("Revision")
    # DUID but modified by opennem as an identifier
    code = Column(Text, index=True)
    # Network details
    network_code = Column(Text, nullable=True, index=True)
    network_region = Column(Text, index=True)
    network_name = Column(Text)
    active = Column(Boolean, default=True)
    dispatch_type = Column(
        Enum(DispatchType), nullable=False, default=DispatchType.GENERATOR
    )
    # @TODO remove when ref count is 0
    capacity_registered = Column(Numeric, nullable=True)
    registered = Column(DateTime, nullable=True)
    deregistered = Column(DateTime, nullable=True)
    unit_id = Column(Integer, nullable=True)
    unit_number = Column(Integer, nullable=True)
    unit_alias = Column(Text, nullable=True)
    unit_capacity = Column(Numeric, nullable=True)
    # unit_number_max = Column(Numeric, nullable=True)
    approved = Column(Boolean, default=False)
    approved_by = Column(Text)
    approved_at = Column(DateTime(timezone=True), nullable=True)
    @hybrid_property
    def capacity_aggregate(self) -> Optional[int]:
        """
        This is unit_no * unit_capacity and can differ from registered

        Returns 0 for inactive facilities, None when unit_capacity is not
        a Decimal, otherwise the product rounded to 2 places.
        """
        num_units = 1
        cap_aggr = None
        if not self.active:
            return 0
        if self.unit_number and type(self.unit_number) is int:
            num_units = self.unit_number
        if self.unit_capacity and type(self.unit_capacity) is Decimal:
            cap_aggr = num_units * self.unit_capacity
        if type(cap_aggr) is Decimal:
            cap_aggr = round(cap_aggr, 2)
        return cap_aggr
    @hybrid_property
    def duid(self) -> str:
        """Dispatchable unit id: the original network code, falling back to code."""
        return self.network_code or self.code
    @hybrid_property
    def status_label(self) -> Optional[str]:
        """Human-readable status label, or None when no status is set."""
        return self.status.label if self.status else None
    @hybrid_property
    def fueltech_label(self) -> Optional[str]:
        """Human-readable fuel tech label, or None when no fuel tech is set."""
        return self.fueltech.label if self.fueltech else None
    @hybrid_property
    def oid(self) -> str:
        """OpenNEM object id for this facility (see opennem.core.oid)."""
        return get_oid(self)
    @hybrid_property
    def ocode(self) -> str:
        """OpenNEM object code for this facility (see opennem.core.oid)."""
        return get_ocode(self)
class Revision(Base, BaseModel):
    """A pending/applied change set against a station, facility or location.

    Exactly one of station_id / facility_id / location_id is expected to be
    set (not enforced at the schema level).
    """
    __tablename__ = "revisions"
    id = Column(
        Integer,
        Sequence("seq_revision_id", start=1000, increment=1),
        primary_key=True,
    )
    station_id = Column(
        Integer,
        ForeignKey("station.id", name="fk_revision_station_id"),
        nullable=True,
    )
    station = relationship(
        "Station", back_populates="revisions", lazy="joined"
    )
    facility_id = Column(
        Integer,
        ForeignKey("facility.id", name="fk_revision_facility_id"),
        nullable=True,
    )
    facility = relationship(
        "Facility", back_populates="revisions", lazy="joined"
    )
    location_id = Column(
        Integer,
        ForeignKey("location.id", name="fk_revision_location_id"),
        nullable=True,
    )
    location = relationship(
        "Location", back_populates="revisions", lazy="joined"
    )
    # JSON blobs of the proposed changes and the previous values.
    changes = Column(JSON, nullable=True)
    previous = Column(JSON, nullable=True)
    is_update = Column(Boolean, default=False)
    # Approval / discard workflow fields.
    approved = Column(Boolean, default=False)
    approved_by = Column(Text)
    approved_at = Column(DateTime(timezone=True), nullable=True)
    approved_comment = Column(Text, nullable=True)
    discarded = Column(Boolean, default=False)
    discarded_by = Column(Text)
    discarded_at = Column(DateTime(timezone=True), nullable=True)
    @hybrid_property
    def parent_id(self) -> Optional[int]:
        """Id of whichever parent record (station/facility/location) is set."""
        return self.station_id or self.facility_id or self.location_id
    @hybrid_property
    def parent_type(self) -> str:
        """Name of the parent record type, or "" when none is set."""
        if self.station_id:
            return "station"
        if self.facility_id:
            return "facility"
        if self.location_id:
            return "location"
        return ""
    @hybrid_property
    def station_owner_id(self) -> Optional[int]:
        """Id of the owning station, resolved through the parent; implicit None otherwise."""
        if self.station_id:
            return self.station_id
        if self.facility_id:
            return self.facility.station.id
        if self.location:
            return self.location.station.id
    @hybrid_property
    def station_owner_name(self) -> Optional[str]:
        """Name of the owning station, resolved through the parent; implicit None otherwise."""
        if self.station_id:
            return self.station.name
        if self.facility_id:
            return self.facility.station.name
        if self.location:
            return self.location.station.name
    @hybrid_property
    def station_owner_code(self) -> Optional[str]:
        """Code of the owning station, resolved through the parent; implicit None otherwise."""
        if self.station_id:
            return self.station.code
        if self.facility_id:
            return self.facility.station.code
        if self.location:
            return self.location.station.code
class FacilityScada(Base, BaseModel):
    """
    Facility Scada

    Per-interval SCADA readings for a facility, keyed by
    (network_id, trading_interval, facility_code).
    """
    __tablename__ = "facility_scada"
    __table_args__ = (
        # Functional indexes on date_trunc of the trading interval (in UTC)
        # to speed up year/month/day/hour aggregation queries.
        Index(
            "idx_facility_scada_trading_interval_year",
            text("date_trunc('year', trading_interval AT TIME ZONE 'UTC')"),
        ),
        Index(
            "idx_facility_scada_trading_interval_month",
            text("date_trunc('month', trading_interval AT TIME ZONE 'UTC')"),
        ),
        Index(
            "idx_facility_scada_trading_interval_day",
            text("date_trunc('day', trading_interval AT TIME ZONE 'UTC')"),
        ),
        Index(
            "idx_facility_scada_trading_interval_hour",
            text("date_trunc('hour', trading_interval AT TIME ZONE 'UTC')"),
        ),
        # new timezone based indicies
        # @NOTE: other indicies in migration files
    )
    def __str__(self) -> str:
        return "<{}: {} {} {}>".format(
            self.__class__,
            self.trading_interval,
            self.network_id,
            self.facility_code,
        )
    def __repr__(self) -> str:
        return "{}: {} {} {}".format(
            self.__class__,
            self.trading_interval,
            self.network_id,
            self.facility_code,
        )
    # NOTE(review): the FK constraint name below says "balancing_summary" —
    # looks copy-pasted from BalancingSummary; confirm before renaming.
    network_id = Column(
        Text,
        ForeignKey("network.code", name="fk_balancing_summary_network_code"),
        primary_key=True,
    )
    network = relationship("Network")
    trading_interval = Column(
        TIMESTAMP(timezone=True), index=True, primary_key=True
    )
    facility_code = Column(Text, nullable=False, primary_key=True, index=True)
    generated = Column(Numeric, nullable=True)
    eoi_quantity = Column(Numeric, nullable=True)
class BalancingSummary(Base, BaseModel):
    """Per-interval balancing summary per network region: load, generation and price."""
    __tablename__ = "balancing_summary"
    __table_args__ = (
        # Functional indexes on date_trunc of the trading interval (in UTC)
        # to speed up year/month/day/hour aggregation queries.
        Index(
            "idx_balancing_summary_trading_interval_year",
            text("date_trunc('year', trading_interval AT TIME ZONE 'UTC')"),
        ),
        Index(
            "idx_balancing_summary_trading_interval_month",
            text("date_trunc('month', trading_interval AT TIME ZONE 'UTC')"),
        ),
        Index(
            "idx_balancing_summary_trading_interval_day",
            text("date_trunc('day', trading_interval AT TIME ZONE 'UTC')"),
        ),
        Index(
            "idx_balancing_summary_trading_interval_hour",
            text("date_trunc('hour', trading_interval AT TIME ZONE 'UTC')"),
        ),
    )
    network_id = Column(
        Text,
        ForeignKey("network.code", name="fk_balancing_summary_network_code"),
        primary_key=True,
    )
    network = relationship("Network")
    trading_interval = Column(
        TIMESTAMP(timezone=True), index=True, primary_key=True
    )
    network_region = Column(Text, primary_key=True)
    forecast_load = Column(Numeric, nullable=True)
    generation_scheduled = Column(Numeric, nullable=True)
    generation_non_scheduled = Column(Numeric, nullable=True)
    generation_total = Column(Numeric, nullable=True)
    price = Column(Numeric, nullable=True)
| 2,471 | 14,606 | 330 |
1088673daa0097667a5fca816f207d5e564b7fe1 | 22 | py | Python | factest/data_service/__init__.py | zzb610/factest | 1e628f6fc885cd1975c2e68181caf40e2874dc08 | [
"MIT"
] | 4 | 2020-12-17T11:45:09.000Z | 2020-12-20T05:42:55.000Z | factest/data_service/__init__.py | zzb610/factest | 1e628f6fc885cd1975c2e68181caf40e2874dc08 | [
"MIT"
] | null | null | null | factest/data_service/__init__.py | zzb610/factest | 1e628f6fc885cd1975c2e68181caf40e2874dc08 | [
"MIT"
] | null | null | null | name = 'data_service'
| 11 | 21 | 0.727273 | name = 'data_service'
| 0 | 0 | 0 |
5d52583f6c7ba537e0737f4e7009f8e8792c8c36 | 6,037 | py | Python | wifi_direct/wfd.py | sabzo/PiDroid | 8f80f751424335cee89ff6455255e65c06f1ea10 | [
"Apache-2.0"
] | 16 | 2017-01-04T20:50:39.000Z | 2021-09-15T17:23:34.000Z | wifi_direct/wfd.py | sabzo/PiDroid | 8f80f751424335cee89ff6455255e65c06f1ea10 | [
"Apache-2.0"
] | null | null | null | wifi_direct/wfd.py | sabzo/PiDroid | 8f80f751424335cee89ff6455255e65c06f1ea10 | [
"Apache-2.0"
] | 8 | 2016-02-02T21:27:37.000Z | 2021-04-21T11:17:04.000Z | # -*- coding: UTF-8 -*-
# This file is forked from Piracast (July 2014): https://github.com/codemonkeyricky/piracast
#
import re
import time
from util import get_stdout
cmd_killall_wpa_spplicant = 'killall wpa_supplicant'
cmd_killall_hostapd = 'killall hostapd'
cmd_iwlist_wlan0_scan = 'iwlist wlan0 scan'
# -----------------------
# p2p_enable
# Enable wifi direct
# -----------------------
# -----------------------
# p2p_peer_devaddr_get
# Gets peer device address
# -----------------------
# -----------------------
# p2p_req_cm_get
# Gets supported authentication type
# -----------------------
| 23.490272 | 96 | 0.598807 | # -*- coding: UTF-8 -*-
# This file is forked from Piracast (July 2014): https://github.com/codemonkeyricky/piracast
#
import re
import time
from util import get_stdout
cmd_killall_wpa_spplicant = 'killall wpa_supplicant'
cmd_killall_hostapd = 'killall hostapd'
cmd_iwlist_wlan0_scan = 'iwlist wlan0 scan'
def peer_mac_get():
    """Return the peer's interface MAC address reported by the wlan0 driver."""
    output = get_stdout('iwpriv wlan0 p2p_get peer_ifa')
    # Driver output ends with "MAC <address>"; extract the address part.
    match = re.search(r'MAC (.*)$', output)
    return match.group(1)
def wpa_supplicant_start():
    """Launch wpa_supplicant on wlan0 in daemon mode (-B) and wait for it to settle."""
    print 'wpa_supplicant_start:'
    get_stdout(["./wpa_supplicant", "-i", "wlan0", "-c", "./wpa_0_8.conf", "-B"])
    time.sleep(1)
def wps_auth():
    """Trigger a single WPS push-button authentication for any peer via hostapd_cli."""
    print 'wps_auth:'
    output = get_stdout(["./hostapd_cli", "wps_pbc", "any"])
    print output
    time.sleep(1)
def wps_status_get():
    """Print the current wpa_cli status. Diagnostic only; returns nothing."""
    print 'wps_status_get:'
    output = get_stdout(["./wpa_cli", "status"])
    print output
def p2p_wpsinfo():
    """Notify the driver that WPS info was received (got_wpsinfo=3)."""
    print 'p2p_wpsinfo:'
    get_stdout('iwpriv wlan0 p2p_set got_wpsinfo=3')
def p2p_status_get():
    """Return the driver's P2P status as an int (codes handled in the callers)."""
    #print 'p2p_status_get:'
    output = get_stdout('iwpriv wlan0 p2p_get status')
    match = re.search(r'Status=(\d*)', output)
    return int(match.group(1))
def p2p_set_nego(mac):
    """Start P2P group-owner negotiation with the peer at `mac`.

    Polls the driver status every 0.5s until negotiation completes, then
    queries the negotiated role/peer address and switches to GO mode.
    """
    print 'p2p_set_nego:'
    print 'mac: %s' % mac
    get_stdout('iwpriv wlan0 p2p_set nego=%s' % mac)
    # Enter negotiation loop
    while 1:
        # Wait for result
        time.sleep(0.5)
        # Poll status
        peer_status = p2p_status_get()
        print 'peer_status: %d' % peer_status
        # For Windows 8.1 support, we consider 19 as negotiation completed
        if peer_status in [10, 19]:
            print 'Negotiation suceeded!'
            break
    # Get role
    role = p2p_role_get()
    print 'Role: %s' % role
    # TODO: doesn't seem to return anything
    #p2p_opch_get()
    # Get peer interface address
    peer_mac_get()
    p2p_go_mode_set()
# -----------------------
# p2p_enable
# Enable wifi direct
# -----------------------
def p2p_enable():
    """Enable Wi-Fi Direct on wlan0 and configure intent, channel, SSID and device name."""
    # Enable p2p
    get_stdout('iwpriv wlan0 p2p_set enable=1')
    # Set intent
    get_stdout('iwpriv wlan0 p2p_set intent=15')
    # Set operation channel
    get_stdout('iwpriv wlan0 p2p_set op_ch=%d' % 11)
    # Sleep for 50ms
    time.sleep(0.05)
    # Set ssid
    get_stdout('iwpriv wlan0 p2p_set ssid=DIRECT-RT')
    # Set DN
    get_stdout('iwpriv wlan0 p2p_set setDN=PiDroid')
    #print 'p2p_get role...'
    #get_stdout('iwpriv wlan0 p2p_get role')
    #print 'scan...'
    #get_stdout('iwlist wlan0 scan')
# -----------------------
# p2p_peer_devaddr_get
# Gets peer device address
# -----------------------
def p2p_peer_devaddr_get():
    """Return the peer's device address as a colon-separated MAC string."""
    print 'p2p_peer_devaddr_get:'
    output = get_stdout(["iwpriv", "wlan0", "p2p_get", "peer_deva"])
    # The address is the last line of the output, as 12 raw hex digits.
    match = re.search(r'\n(.*)$', output)
    # Re-insert a colon between every pair of hex digits.
    mac = ':'.join(re.findall('..', match.group(1)))
    #mac = match.group(1)[0] + match.group(1)[1] + ':' \
    #    + match.group(1)[2] + match.group(1)[3] + ':' \
    #    + match.group(1)[4] + match.group(1)[5] + ':' \
    #    + match.group(1)[6] + match.group(1)[7] + ':' \
    #    + match.group(1)[8] + match.group(1)[9] + ':' \
    #    + match.group(1)[10] + match.group(1)[11]
    return mac
# -----------------------
# p2p_req_cm_get
# Gets supported authentication type
# -----------------------
def p2p_req_cm_get():
    """Print the peer's requested config method (authentication type). Diagnostic only."""
    print 'p2p_req_cm_get:'
    print get_stdout('iwpriv wlan0 p2p_get req_cm')
def p2p_role_get():
    """Return the negotiated P2P role reported by the driver as an int."""
    print 'p2p_role_get:'
    output = get_stdout('iwpriv wlan0 p2p_get role')
    match = re.search(r'Role=(\d*)', output)
    role = int(match.group(1))
    return role
def p2p_go_mode_set():
    """Switch to group-owner mode: start hostapd, then run WPS push-button auth."""
    # Start hostAPd and wait for it to daemonize; ignore stdout
    get_stdout(["./hostapd", "-B", "p2p_hostapd.conf"])
    # Wait for initialization
    time.sleep(1)
    do_wps()
    # Wait for host apd interval
    time.sleep(1)
def do_wps():
    """Retry WPS push-button auth once a second until hostapd_cli reports OK."""
    while 1:
        print 'do_wps:'
        output = get_stdout(["./hostapd_cli", "wps_pbc", "any"])
        print output
        if 'OK' in output:
            print 'wps passed!'
            return
        time.sleep(1)
def p2p_disable():
    """Disable Wi-Fi Direct on wlan0."""
    get_stdout('iwpriv wlan0 p2p_set enable=0')
def p2p_peer_scan():
    """Scan for peers; return True on results, False after ~5 empty scans."""
    count = 0
    while 1:
        output = get_stdout(cmd_iwlist_wlan0_scan)
        print output
        if 'No scan results' not in output:
            return True
        # Give up after the retry budget is exhausted.
        if count > 3:
            return False
        count += 1
def wfd_connection_wait():
    """Reset Wi-Fi Direct and block until a peer connects and WPS completes.

    Kills any running wpa_supplicant/hostapd, re-enables P2P, then polls the
    driver status until a discovery request is seen and a peer scan finds
    the peer, after which negotiation (p2p_set_nego) is kicked off.
    """
    get_stdout(cmd_killall_wpa_spplicant)
    get_stdout(cmd_killall_hostapd)
    # Disable p2p
    p2p_disable()
    time.sleep(0.5)
    # Enable p2p
    p2p_enable()
    #p2p_peer_scan()
    print 'Waiting for incoming connection...'
    while 1:
        peer_status = p2p_status_get()
        print 'peer_status: %d' % peer_status
        if peer_status == 0:
            print 'p2p disabled! Re-enable p2p...'
            p2p_enable()
        #if peer_status == 11:
        #    print 'p2p request received! Scan for peer ...'
        #    p2p_peer_scan()
        # status 8 is the original Discovery Request
        # status 22 needs to be handled this way, or Nexus 4 4.4 won't always work
        # status 19 was added to try to implement windows 8.1 support
        if peer_status in [8, 19, 22]:
            # Discovery request or gonego fail
            print 'Discovery request received!'
            peer_found = p2p_peer_scan()
            if peer_found:
                break
            # Peer not found: reset p2p and keep waiting.
            p2p_disable()
        time.sleep(1)
    print 'Getting peer device address...'
    # Get peer device address
    mac = p2p_peer_devaddr_get()
    print 'peer_devaddr: %s' % mac
    # Notify received wps info
    p2p_wpsinfo()
    print 'Getting peer authentication type...'
    # Get request configuration
    p2p_req_cm_get()
    print 'Confirming peer authentication...'
    #print 'Getting status...'
    # Get status
    #peer_status = p2p_status_get()
    #print 'peer_status: ', peer_status
    # Set negotiation
    p2p_set_nego(mac)
| 5,038 | 0 | 365 |
dbd5a54d0bb2ef3bda2378f863fa670f59924a50 | 2,301 | py | Python | stock_portfolio/stock_portfolio/views/auth.py | tyler-fishbone/pyramid-stocks | 2d7c5a9aa74d5a9eec04005a99af25048a6666df | [
"MIT"
] | null | null | null | stock_portfolio/stock_portfolio/views/auth.py | tyler-fishbone/pyramid-stocks | 2d7c5a9aa74d5a9eec04005a99af25048a6666df | [
"MIT"
] | 3 | 2019-12-26T16:42:46.000Z | 2021-06-01T22:22:44.000Z | stock_portfolio/views/auth.py | tyler-fishbone/new_stock_portfolio | 05ec449b4a5798f197e1e147151a1c222cb26f9d | [
"MIT"
] | null | null | null | from pyramid.response import Response
from pyramid.view import view_config
# from ..sample_data import MOCK_DATA
from sqlalchemy.exc import DBAPIError, IntegrityError
from pyramid.httpexceptions import HTTPFound, HTTPNotFound, HTTPUnauthorized, HTTPBadRequest
from pyramid.security import NO_PERMISSION_REQUIRED, remember, forget
from pyramid.response import Response
from ..models import Account
from . import DB_ERR_MSG
import requests
@view_config(route_name='auth', renderer='../templates/auth.jinja2', permission=NO_PERMISSION_REQUIRED)
@view_config(route_name='logout') | 34.343284 | 103 | 0.646241 | from pyramid.response import Response
from pyramid.view import view_config
# from ..sample_data import MOCK_DATA
from sqlalchemy.exc import DBAPIError, IntegrityError
from pyramid.httpexceptions import HTTPFound, HTTPNotFound, HTTPUnauthorized, HTTPBadRequest
from pyramid.security import NO_PERMISSION_REQUIRED, remember, forget
from pyramid.response import Response
from ..models import Account
from . import DB_ERR_MSG
import requests
@view_config(route_name='auth', renderer='../templates/auth.jinja2', permission=NO_PERMISSION_REQUIRED)
def get_auth_view(request):
if request.method == 'POST':
try:
username = request.POST['username']
email = request.POST['email']
password = request.POST['password']
except KeyError:
return HTTPBadRequest()
try:
instance = Account(
username=username,
email=email,
password=password,
)
headers = remember(request, userid=instance.username)
request.dbsession.add(instance)
request.dbsession.flush()
return HTTPFound(location=request.route_url('portfolio'), headers=headers)
except (DBAPIError, IntegrityError):
return Response(DB_ERR_MSG, content_type='text/plain', status=500)
# print('User: {}, Pass: {}, Email: {}'.format(username, password, email))
# return HTTPNotFound()
if request.method == 'GET':
try:
username = request.GET['username']
password = request.GET['password']
# print('User: {}, Pass: {}'.format(username, password))
except KeyError:
return {}
is_authenticated = Account.check_credentials(request, username, password)
if is_authenticated[0] == True:
headers = remember(request, userid=username)
print(headers)
return HTTPFound(location=request.route_url('portfolio'), headers=headers)
else:
return HTTPUnauthorized()
return HTTPFound(location=request.route_url('home'))
@view_config(route_name='logout')
def logout(request):
    """Drop the auth session (forget) and redirect the user to the home page."""
    return HTTPFound(
        location=request.route_url('home'),
        headers=forget(request),
    )
fa4c82bd5e9d1c1ab8d0a807e0ed131f0e310820 | 4,310 | py | Python | src/datasets/samplers.py | achariso/gans-thesis | cde09fec4b617da47b74f5ecbf2ac2f9444c3634 | [
"MIT"
] | 2 | 2021-07-28T07:48:20.000Z | 2021-07-29T13:14:42.000Z | src/datasets/samplers.py | achariso/gans-thesis | cde09fec4b617da47b74f5ecbf2ac2f9444c3634 | [
"MIT"
] | 1 | 2021-10-31T21:15:39.000Z | 2021-10-31T21:15:39.000Z | src/datasets/samplers.py | achariso/gans-thesis | cde09fec4b617da47b74f5ecbf2ac2f9444c3634 | [
"MIT"
] | null | null | null | from typing import Union, Sized
import numpy as np
import torch
from torch.utils.data import Sampler
from utils.command_line_logger import CommandLineLogger
class InfiniteSampler(Sampler):
"""
InfiniteSampler Class:
Sampler for torch.utils.data.DataLoader that loops over the dataset indefinitely, shuffling items as it goes.
Source: https://github.com/NVlabs/stylegan2-ada-pytorch/blob/main/torch_utils/misc.py
"""
class ResumableRandomSampler(Sampler):
"""
ResumableRandomSampler Class:
Samples elements randomly. If without replacement, then sample from a shuffled dataset.
Original source: https://gist.github.com/usamec/1b3b4dcbafad2d58faa71a9633eea6a5
"""
def __init__(self, data_source: Sized, shuffle: bool = True, seed: int = 42,
logger: Union[CommandLineLogger, None] = None):
"""
ResumableRandomSampler class constructor.
generator (Generator): Generator used in sampling.
:param (Sized) data_source: torch.utils.data.Dataset or generally typings.Sized object of the dataset to draw
samples from
:param (int) seed: generator manual seed parameter
:param (optional) logger: CommandLineLogger instance
"""
super(ResumableRandomSampler, self).__init__(data_source=data_source)
self.n_samples = len(data_source)
self.generator = torch.Generator().manual_seed(seed)
self.shuffle = shuffle
self.perm_index = 0
if self.shuffle:
self.perm = None
self.reshuffle()
else:
self.perm = range(0, self.n_samples)
self.logger = logger
assert self.logger is not None, 'Please provide a logger instance for ResumableRandomSampler'
| 35.619835 | 118 | 0.617633 | from typing import Union, Sized
import numpy as np
import torch
from torch.utils.data import Sampler
from utils.command_line_logger import CommandLineLogger
class InfiniteSampler(Sampler):
    """
    InfiniteSampler Class:
    Sampler for torch.utils.data.DataLoader that loops over the dataset indefinitely, shuffling items as it goes.
    Source: https://github.com/NVlabs/stylegan2-ada-pytorch/blob/main/torch_utils/misc.py
    """
    def __init__(self, data_source: Sized, shuffle: bool = True, seed: int = 42, rank: int = 0, num_replicas: int = 1,
                 window_size=0.5, logger: Union[CommandLineLogger, None] = None):
        # rank / num_replicas shard the infinite stream across distributed
        # workers; window_size controls the fraction of the dataset used as
        # the local-swap shuffle window.
        self.data_len = len(data_source)
        assert self.data_len > 0
        assert num_replicas > 0
        assert 0 <= rank < num_replicas
        assert 0 <= window_size <= 1
        super(InfiniteSampler, self).__init__(data_source=data_source)
        self.rank = rank
        self.num_replicas = num_replicas
        self.shuffle = shuffle
        self.seed = seed
        self.window_size = window_size
        self.logger = logger
        assert self.logger is not None, 'Please provide a logger instance for InfiniteSampler'
    def __iter__(self):
        # Yields indices forever; every num_replicas-th index belongs to this
        # rank. After each step, the just-visited slot is swapped with a
        # random earlier slot inside the window, re-shuffling on the fly.
        order = np.arange(self.data_len)
        rnd = None
        window = 0
        if self.shuffle:
            rnd = np.random.RandomState(self.seed)
            rnd.shuffle(order)
            window = int(np.rint(order.size * self.window_size))
        idx = 0
        while True:
            i = idx % order.size
            if idx % self.num_replicas == self.rank:
                yield order[i]
            if window >= 2:
                j = (i - rnd.randint(window)) % order.size
                order[i], order[j] = order[j], order[i]
            idx += 1
    def __len__(self):
        # NOTE(review): np.inf is a float, so calling len() on this sampler
        # would raise TypeError; callers appear to rely on not calling len().
        return np.inf
class ResumableRandomSampler(Sampler):
    """
    ResumableRandomSampler Class:
    Samples elements randomly. If without replacement, then sample from a shuffled dataset.
    Original source: https://gist.github.com/usamec/1b3b4dcbafad2d58faa71a9633eea6a5
    """
    def __init__(self, data_source: Sized, shuffle: bool = True, seed: int = 42,
                 logger: Union[CommandLineLogger, None] = None):
        """
        ResumableRandomSampler class constructor.
        generator (Generator): Generator used in sampling.
        :param (Sized) data_source: torch.utils.data.Dataset or generally typings.Sized object of the dataset to draw
                                    samples from
        :param (int) seed: generator manual seed parameter
        :param (optional) logger: CommandLineLogger instance
        """
        super(ResumableRandomSampler, self).__init__(data_source=data_source)
        self.n_samples = len(data_source)
        self.generator = torch.Generator().manual_seed(seed)
        self.shuffle = shuffle
        self.perm_index = 0
        if not self.shuffle:
            # Fixed sequential order when shuffling is off.
            self.perm = range(0, self.n_samples)
        else:
            self.perm = None
            self.reshuffle()
        self.logger = logger
        assert self.logger is not None, 'Please provide a logger instance for ResumableRandomSampler'
    def reshuffle(self) -> None:
        """Reset the cursor and (when shuffling) draw a fresh permutation."""
        self.perm_index = 0
        if self.shuffle:
            self.perm = list(torch.randperm(self.n_samples, generator=self.generator).numpy())
    def __iter__(self):
        # Start a new epoch if the previous one was fully consumed.
        if self.perm_index >= len(self.perm):
            if self.logger:
                self.logger.debug(f'[SAMPLER] Reached end of epoch. Resetting state... (shuffle = {self.shuffle})')
            self.reshuffle()
        # Yield the remaining indices, advancing the resumable cursor as we go.
        while self.perm_index < len(self.perm):
            sample_index = self.perm[self.perm_index]
            self.perm_index += 1
            yield sample_index
    def __len__(self):
        return self.n_samples
    def get_state(self) -> dict:
        """Snapshot permutation, cursor and RNG state for checkpointing."""
        return dict(
            shuffle=self.shuffle,
            perm=self.perm,
            perm_index=self.perm_index,
            generator_state=self.generator.get_state(),
        )
    def set_state(self, state: dict) -> None:
        """Restore a snapshot previously produced by get_state()."""
        self.shuffle = bool(state.get("shuffle", True))
        self.perm = state["perm"]
        self.perm_index = state["perm_index"]
        self.generator.set_state(state["generator_state"])
| 2,299 | 0 | 216 |
cdf623d62d45366dbca8833e23fde4ea5943179d | 14,782 | py | Python | primehub/__init__.py | InfuseAI/primehub-python-sdk | edbdbcb3e41f0c99e4542245de1345a64f509fb4 | [
"Apache-2.0"
] | 10 | 2021-09-13T23:14:22.000Z | 2022-02-06T06:07:40.000Z | primehub/__init__.py | KellenJohn/primehub-python-sdk | edbdbcb3e41f0c99e4542245de1345a64f509fb4 | [
"Apache-2.0"
] | 4 | 2021-08-10T03:10:27.000Z | 2021-12-16T02:11:50.000Z | primehub/__init__.py | KellenJohn/primehub-python-sdk | edbdbcb3e41f0c99e4542245de1345a64f509fb4 | [
"Apache-2.0"
] | 1 | 2021-12-21T11:59:51.000Z | 2021-12-21T11:59:51.000Z | import abc
import importlib
import json
import os
import sys
from typing import Union, Callable, Any
from primehub.utils import group_required, create_logger, PrimeHubException
from primehub.utils.core import CommandContainer
from primehub.utils.decorators import cmd # noqa: F401
from primehub.utils.display import Display, HumanFriendlyDisplay, Displayable
from primehub.utils.http_client import Client
logger = create_logger('primehub-config')
__version__ = _get_version()
class PrimeHubConfig(object):
"""
PrimeHubConfig load the config from the default path ~/.primehub/config.json
The config.json looks like:
{
"endpoint": ""
"api-token": "",
"group": {
"id": "",
"name": "",
"displayName": "",
}
}
PrimeHubConfig allows changing setting from four ways:
* the default config path
* alternative path for the config file (config argument from constructor)
* environment variables: PRIMEHUB_API_TOKEN, PRIMEHUB_API_ENDPOINT and PRIMEHUB_GROUP
* set property for api_token, endpoint and group
PrimeHubConfig evaluates a property in the above order and the last updates take effect
"""
def save(self, path=None):
"""
The config.json looks like:
{
"endpoint": ""
"api-token": "",
"group": {
"id": "",
"name": "",
"displayName": "",
}
}
"""
output = dict()
output['endpoint'] = self.endpoint
output['api-token'] = self.api_token
if self.group_info and self.group_info.get('name', None) == self.group:
output['group'] = self.group_info
else:
output['group'] = dict(name=self.group)
output_path = os.path.expanduser(path or self.config_file)
if os.path.dirname(output_path):
os.makedirs(os.path.dirname(output_path), exist_ok=True)
with open(output_path, "w") as fh:
fh.write(json.dumps(output, indent=2, sort_keys=True))
@property
@group.setter
@property
@api_token.setter
@property
@endpoint.setter
@property
@current_group.setter
class Dummy(Helpful, Module):
"""
Dummy subcommand
"""
description = None
def has_data_from_stdin():
"""
Check if any data comes from stdin.
:return: True if there are data from stdin, otherwise False
"""
import sys
import select
if select.select([sys.stdin, ], [], [], 0.0)[0]:
return True
else:
return False
| 32.559471 | 114 | 0.636991 | import abc
import importlib
import json
import os
import sys
from typing import Union, Callable, Any
from primehub.utils import group_required, create_logger, PrimeHubException
from primehub.utils.core import CommandContainer
from primehub.utils.decorators import cmd # noqa: F401
from primehub.utils.display import Display, HumanFriendlyDisplay, Displayable
from primehub.utils.http_client import Client
logger = create_logger('primehub-config')
def _get_version():
    """Read the package version string from the VERSION file next to this module."""
    version_path = os.path.normpath(os.path.join(os.path.dirname(__file__), 'VERSION'))
    with open(version_path) as fh:
        return fh.read().strip()
__version__ = _get_version()
class PrimeHubConfig(object):
    """
    PrimeHubConfig load the config from the default path ~/.primehub/config.json

    The config.json looks like:
    {
      "endpoint": ""
      "api-token": "",
      "group": {
        "id": "",
        "name": "",
        "displayName": "",
      }
    }

    PrimeHubConfig allows changing setting from four ways:
    * the default config path
    * alternative path for the config file (config argument from constructor)
    * environment variables: PRIMEHUB_API_TOKEN, PRIMEHUB_API_ENDPOINT and PRIMEHUB_GROUP
    * set property for api_token, endpoint and group

    PrimeHubConfig evaluates a property in the above order and the last updates take effect
    """

    def __init__(self, **kwargs):
        """Load configuration from file, then environment, then keyword overrides."""
        self.config_file = kwargs.get('config', None)
        if not self.config_file:
            self.config_file = self.get_default_path()
        # PrimeHub SDK evaluate
        # Three setting layers; when a value exists in a later layer it wins.
        self.config_from_file = {}
        self.config_from_env = {}
        self.config_from_user_input = {}
        self.group_info = {}
        self.load_config()
        self.load_config_from_env()
        self.set_properties(**kwargs)

    def set_properties(self, **kwargs):
        """Apply explicit constructor overrides (group / token / endpoint)."""
        if kwargs.get('group', None):
            self.group = kwargs['group']
        if kwargs.get('token', None):
            self.api_token = kwargs['token']
        if kwargs.get('endpoint', None):
            self.endpoint = kwargs['endpoint']

    def load_config(self):
        """Read the JSON config file; a missing or unreadable file is silently ignored."""
        try:
            if not os.path.exists(os.path.expanduser(self.config_file)):
                return
            with open(self.config_file, "r") as fh:
                self.config_from_file = json.load(fh)
            if self.config_from_file and 'group' in self.config_from_file:
                self.group_info = self.config_from_file['group']
        except BaseException:
            # Best-effort load: any IO/parse problem leaves the defaults in place.
            pass

    def load_config_from_env(self):
        """Pick up settings from the PRIMEHUB_* environment variables."""
        # environment variables: PRIMEHUB_API_TOKEN, PRIMEHUB_API_ENDPOINT and PRIMEHUB_GROUP
        def set_env(key):
            if os.environ.get(key):
                self.config_from_env[key] = os.environ.get(key)
                # Keep the cached group info consistent with the env override.
                if key == 'PRIMEHUB_GROUP' and self.group_info:
                    self.group_info['name'] = os.environ.get(key)

        set_env('PRIMEHUB_API_TOKEN')
        set_env('PRIMEHUB_API_ENDPOINT')
        set_env('PRIMEHUB_GROUP')

    def get_default_path(self):
        """Default location of the PrimeHub SDK config file."""
        return os.path.expanduser("~/.primehub/config.json")

    def save(self, path=None):
        """
        The config.json looks like:
        {
          "endpoint": ""
          "api-token": "",
          "group": {
            "id": "",
            "name": "",
            "displayName": "",
          }
        }
        """
        output = dict()
        output['endpoint'] = self.endpoint
        output['api-token'] = self.api_token
        # Persist the detailed group info only when it matches the active group
        # name; otherwise store just the name.
        if self.group_info and self.group_info.get('name', None) == self.group:
            output['group'] = self.group_info
        else:
            output['group'] = dict(name=self.group)
        output_path = os.path.expanduser(path or self.config_file)
        if os.path.dirname(output_path):
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
        with open(output_path, "w") as fh:
            fh.write(json.dumps(output, indent=2, sort_keys=True))

    @property
    def group(self):
        """Active group name: user input wins over env, env over the config file."""
        if self.config_from_user_input.get('group', None):
            logger.debug('group config_from_user_input')
            return self.config_from_user_input['group']
        if self.config_from_env.get('PRIMEHUB_GROUP', None):
            logger.debug('group config_from_env')
            return self.config_from_env.get('PRIMEHUB_GROUP')
        if self.config_from_file.get('group', None) and self.config_from_file['group'].get('name', None):
            logger.debug('group config_from_file')
            return self.config_from_file['group']['name']

    @group.setter
    def group(self, group):
        if group:
            self.config_from_user_input['group'] = group
            if self.group_info:
                self.group_info['name'] = group

    @property
    def api_token(self):
        """API token: user input wins over env, env over the config file."""
        if self.config_from_user_input.get('api-token', None):
            return self.config_from_user_input['api-token']
        if self.config_from_env.get('PRIMEHUB_API_TOKEN', None):
            return self.config_from_env.get('PRIMEHUB_API_TOKEN')
        return self.config_from_file.get('api-token', None)

    @api_token.setter
    def api_token(self, api_token):
        if api_token:
            self.config_from_user_input['api-token'] = api_token

    @property
    def endpoint(self):
        """PrimeHub API endpoint: user input wins over env, env over the config file."""
        if self.config_from_user_input.get('endpoint', None):
            return self.config_from_user_input['endpoint']
        if self.config_from_env.get('PRIMEHUB_API_ENDPOINT', None):
            return self.config_from_env.get('PRIMEHUB_API_ENDPOINT')
        return self.config_from_file.get('endpoint', None)

    @endpoint.setter
    def endpoint(self, endpoint):
        if endpoint:
            self.config_from_user_input['endpoint'] = endpoint

    @property
    def current_group(self) -> dict:
        """Cached group info dict (id / name / displayName, per the class docstring)."""
        return self.group_info

    @current_group.setter
    def current_group(self, group_info):
        # Only accept group info that actually carries an id.
        if group_info and group_info.get('id', None):
            self.group_info = group_info
class Helpful(metaclass=abc.ABCMeta):
    """Interface for command modules that can describe themselves in help output."""

    @abc.abstractmethod
    def help_description(self):
        """one line description for all commands"""
        return NotImplemented
class PrimeHub(object):
    """Entry point of the SDK: holds the config and a registry of command modules.

    Registered commands are exposed as attributes through __getattr__, so
    ``primehub.jobs`` resolves to the registered ``Jobs`` module instance.
    """

    def __init__(self, config: PrimeHubConfig):
        self.primehub_config = config
        self.json_output = True
        self.usage_role = 'user'
        self.commands: CommandContainer = CommandContainer()
        self.admin_commands: CommandContainer = CommandContainer()
        self._stderr = sys.stderr
        self._stdout = sys.stdout

        # register commands
        self.register_command('config', 'Config')
        self.register_command('groups', 'Groups')
        self.register_command('images', 'Images')
        self.register_command('volumes', 'Volumes')
        self.register_command('instancetypes', 'InstanceTypes')
        self.register_command('jobs', 'Jobs')
        self.register_command('recurring_jobs', 'RecurringJobs', 'recurring-jobs')
        self.register_command('deployments', 'Deployments')
        self.register_command('notebooks', 'Notebooks')
        self.register_command('files', 'Files')
        self.register_command('me', 'Me')
        self.register_command('version', 'Version')
        self.register_command('apptemplates', 'AppTemplate')
        self.register_command('apps', 'Apps')
        self.register_command('models', 'Models')
        self.register_command('datasets', 'Datasets')
        self.register_command('secrets', 'Secrets')
        self.register_dummy_command('admin', 'Commands for system administrator')

        # register admin commands
        self.register_admin_command('admin_images', 'AdminImages', 'images')
        self.register_admin_command('admin_volumes', 'AdminVolumes', 'volumes')
        self.register_admin_command('admin_instancetypes', 'AdminInstanceTypes', 'instancetypes')
        self.register_admin_command('admin_users', 'AdminUsers', 'users')
        self.register_admin_command('admin_groups', 'AdminGroups', 'groups')
        self.register_admin_command('admin_secrets', 'AdminSecrets', 'secrets')

        # initial
        self._ensure_config_details(config)

    def _ensure_config_details(self, config: PrimeHubConfig):
        """Fill in the group id when only the group name is configured (best effort)."""
        try:
            group_id = config.current_group.get('id', None)
            group_name = config.current_group.get('name', None)
            if group_name is None:
                return
            if group_id is None:
                # ``self.config`` resolves via __getattr__ to the registered
                # 'config' command module.
                self.config.reconfigure_group(group_name)
        except BaseException:
            pass

    def request(self, variables: dict, query: str, error_handler: Callable = None):
        """Send a GraphQL-style request through the HTTP client."""
        return Client(self.primehub_config).request(variables, query, error_handler)

    def request_logs(self, endpint: str, follow: bool, tail: int):
        """Stream logs from the given endpoint."""
        return Client(self.primehub_config).request_logs(endpint, follow, tail)

    def request_file(self, endpint: str, dest: str):
        """Download a file from the given endpoint to *dest*."""
        return Client(self.primehub_config).request_file(endpint, dest)

    def upload_file(self, endpoint: str, src: str):
        """Upload the local file *src* to the given endpoint."""
        return Client(self.primehub_config).upload_file(endpoint, src)

    def _find_command_class(self, command_class, module_name):
        """Resolve a command class from its name (imported from primehub.<module>) or pass it through."""
        # create command instance
        if isinstance(command_class, str):
            clazz = importlib.import_module('primehub.' + module_name).__getattribute__(command_class)
        else:
            clazz = command_class
        return clazz

    def register_dummy_command(self, command_name, command_help):
        """Register a placeholder command that only contributes help text."""
        # register to the commands table
        self.commands[command_name] = Dummy(self, command_help)

    def register_command(self, module_name: str, command_class: Union[str, Callable], command_name=None):
        """Register a user-facing command module."""
        self._register_command(self.commands, module_name, command_class, command_name)

    def register_admin_command(self, module_name: str, command_class: Union[str, Callable], command_name=None):
        """Register an admin-only command module."""
        self._register_command(self.admin_commands, module_name, command_class, command_name)

    def _register_command(self, target, module_name: str, command_class: Union[str, Callable], command_name=None):
        clazz = self._find_command_class(command_class, module_name)
        # register to the commands table
        module_object = clazz(self)
        if module_name and command_name:
            # The ':module' alias keeps the instance reachable by module name
            # when the public command name differs.
            target[f':{module_name}'] = module_object
            target[command_name] = module_object
        else:
            target[module_name] = module_object

    def switch_admin_role(self):
        """Swap the active command table to the admin commands."""
        self.usage_role = 'admin'
        self.commands = self.admin_commands

    @property
    def admin(self):
        """A PrimeHub view whose commands are the admin commands."""
        admin_primehub = PrimeHub(self.primehub_config)
        admin_primehub.commands = self.admin_commands
        return admin_primehub

    def __getattr__(self, item):
        # Attribute access falls through to the registered command modules.
        if item in self.commands:
            return self.commands[item]
        raise AttributeError("Cannot find a command [{}]".format(item))

    @property
    def stderr(self):
        return self._stderr

    @stderr.setter
    def stderr(self, out):
        self._stderr = out

    @property
    def stdout(self):
        return self._stdout

    @stdout.setter
    def stdout(self, out):
        self._stdout = out

    def get_all_commands(self):
        """Sorted names of all registered commands."""
        return sorted(self.commands.keys())

    def is_ready(self):
        """True when a current group with an id is configured."""
        if self.primehub_config.current_group is None:
            return False
        if not self.primehub_config.current_group.get('id', None):
            return False
        return True

    @property
    def current_group(self) -> dict:
        """Current group info; raises through group_required() when unset."""
        g = self.primehub_config.current_group
        if not g:
            group_required()
        if not g.get('id', None):
            group_required()
        return g

    @property
    def group_id(self) -> str:
        return self.current_group['id']

    @property
    def group_name(self) -> str:
        return self.current_group['name']
class Module(object):
    """Base class for command modules; proxies requests and group info to PrimeHub."""

    def __init__(self, primehub: PrimeHub, **kwargs):
        self.primehub = primehub

        # attach request method
        self.request = primehub.request
        self.request_logs = primehub.request_logs
        self.request_file = primehub.request_file
        self.upload_file = primehub.upload_file

    @property
    def current_group(self) -> dict:
        return self.primehub.current_group

    @property
    def group_id(self) -> str:
        return self.primehub.group_id

    @property
    def group_name(self) -> str:
        return self.primehub.group_name

    @property
    def endpoint(self) -> str:
        return self.primehub.primehub_config.endpoint

    @property
    def primehub_config(self):
        # Deliberately blocked: modules must use the properties above instead of
        # reaching into the raw config object.
        raise ValueError(
            'The attribute [primehub_config] is access denied, '
            'please use props of the Module to get configurations')

    def get_display(self) -> Displayable:
        """Pick the JSON or human-friendly renderer based on the output mode."""
        if self.primehub.json_output:
            return Display()
        else:
            return HumanFriendlyDisplay()

    def display(self, action: dict, value: Any):
        """Render *value* for *action* to the PrimeHub stdout."""
        self.get_display().display(action, value, self.primehub.stdout)

    @staticmethod
    def output(result: dict, object_path: str):
        """
        Give a dict {'data': {'a': {'b': 'c'}}}
        we could get the c by the path a.b
        """
        if 'data' not in result:
            return result
        data = result.get('data')
        if not data:
            return result

        root: dict = dict()
        if isinstance(data, dict):
            root = data
        else:
            raise PrimeHubException('Unsupported data format')

        paths = object_path.split('.')
        for p in paths:
            if p not in root:
                raise PrimeHubException(f'Cannot access to the path {object_path}')
            root = root[p]
        return root
class Dummy(Helpful, Module):
    """
    Dummy subcommand
    """

    # One-line help text supplied at registration time.
    description = None

    def __init__(self, primehub: PrimeHub, description):
        super(Dummy, self).__init__(primehub)
        self.description = description

    def help_description(self):
        """one line description for all commands"""
        return self.description
def has_data_from_stdin():
    """
    Check if any data comes from stdin.

    :return: True if there are data from stdin, otherwise False
    """
    import sys
    import select

    readable, _, _ = select.select([sys.stdin], [], [], 0.0)
    return bool(readable)
def primehub_load_config(filename):
    """Load a JSON config from stdin when piped, else from *filename*; {} when neither exists."""
    if has_data_from_stdin():
        return json.loads(sys.stdin.read())

    if filename and os.path.exists(filename):
        with open(filename, 'r') as fh:
            return json.load(fh)

    return {}
| 9,755 | 1,900 | 512 |
ce9e0db3f30d2be75fb0fc0351d471e23abcab55 | 168 | py | Python | Werewolf/WP/__init__.py | HuangFuSL/Werewolf | 4791aa4b743acb810c86e1bad6b2442b38e2821c | [
"MIT"
] | 3 | 2020-10-24T09:04:59.000Z | 2020-11-23T03:49:27.000Z | Werewolf/WP/__init__.py | HuangFuSL/Werewolf | 4791aa4b743acb810c86e1bad6b2442b38e2821c | [
"MIT"
] | 8 | 2020-11-11T04:07:53.000Z | 2020-11-23T04:37:53.000Z | Werewolf/WP/__init__.py | HuangFuSL/Werewolf | 4791aa4b743acb810c86e1bad6b2442b38e2821c | [
"MIT"
] | 3 | 2020-10-18T13:23:55.000Z | 2020-11-08T16:27:48.000Z | """
The protocol for the werewolf game
"""
import os
import socket
import sys
from .api import ChunckedData, ReceiveThread, _recv, TimeLock, KillableThread, ReadInput
| 18.666667 | 88 | 0.785714 | """
The protocol for the werewolf game
"""
import os
import socket
import sys
from .api import ChunckedData, ReceiveThread, _recv, TimeLock, KillableThread, ReadInput
| 0 | 0 | 0 |
7f0e7f9590442369c6f34ff92071d31f95b824ae | 1,093 | py | Python | gunicorn_logstash_formatter/gunicorn_logstash_formatter.py | alphagov/gunicorn-logstash-formatter | 0787016e085b40856f2b1d07ce753523a05f8bdb | [
"MIT"
] | 1 | 2020-05-03T09:27:12.000Z | 2020-05-03T09:27:12.000Z | gunicorn_logstash_formatter/gunicorn_logstash_formatter.py | alphagov/gunicorn-logstash-formatter | 0787016e085b40856f2b1d07ce753523a05f8bdb | [
"MIT"
] | 2 | 2017-10-11T23:02:25.000Z | 2019-06-04T17:26:28.000Z | gunicorn_logstash_formatter/gunicorn_logstash_formatter.py | alphagov/gunicorn-logstash-formatter | 0787016e085b40856f2b1d07ce753523a05f8bdb | [
"MIT"
] | 3 | 2017-10-11T23:16:28.000Z | 2021-04-10T19:54:03.000Z | # -*- coding: utf-8 -*-
"""Main module."""
import json
import logging
# this only works with post-19.7.1 gunicorn to pull in commit 610596c9
# which logs separate format and args
| 33.121212 | 73 | 0.505032 | # -*- coding: utf-8 -*-
"""Main module."""
import json
import logging
# this only works with post-19.7.1 gunicorn to pull in commit 610596c9
# which logs separate format and args
class AccessFormatter(logging.Formatter):
    """Render gunicorn access-log records as single-line logstash v1 JSON.

    Requires post-19.7.1 gunicorn behaviour where the access logger passes the
    log atoms as a mapping in ``record.args``.
    """

    def __init__(self, *args, **kwargs):
        # Deliberately skip logging.Formatter.__init__: no format string is
        # needed because format() builds the document from record.args.
        pass

    def format(self, record):
        """Serialize *record* to a JSON string."""
        atoms = record.args
        access = {
            'method': atoms['m'],
            'http_version': atoms['H'].replace('HTTP/', ''),
            'response_code': atoms['s'],
            'url': atoms['{raw_uri}e'],
            'remote_ip': atoms['h'],
            'agent': atoms['a'],
            'body_sent': {'bytes': atoms['B']},
            'remote_user': atoms['u'],
            'referrer': atoms['f'],
            'elapsed_time': {'ms': atoms['D'] / 1000},
        }
        document = {
            '@version': 1,
            # FIXME convert to Z-timezone? fractions of a second?
            '@timestamp': self.formatTime(record, "%Y-%m-%dT%H:%M:%S%z"),
            'access': access,
        }
        return json.dumps(document)
| 816 | 20 | 75 |
c0f734da732f372e948a1ca81000809240a47e74 | 403 | py | Python | ee/urls.py | jessethegame/posthog | f6f6e2e91ef211df1d980e5aedbc8145d54a3dff | [
"MIT"
] | null | null | null | ee/urls.py | jessethegame/posthog | f6f6e2e91ef211df1d980e5aedbc8145d54a3dff | [
"MIT"
] | null | null | null | ee/urls.py | jessethegame/posthog | f6f6e2e91ef211df1d980e5aedbc8145d54a3dff | [
"MIT"
] | null | null | null | from rest_framework_extensions.routers import NestedRegistryItem
from posthog.api.routing import DefaultRouterPlusPlus
from .api import hooks, license
| 36.636364 | 98 | 0.82134 | from rest_framework_extensions.routers import NestedRegistryItem
from posthog.api.routing import DefaultRouterPlusPlus
from .api import hooks, license
def extend_api_router(root_router: DefaultRouterPlusPlus, *, projects_router: NestedRegistryItem):
    """Register the enterprise-edition endpoints on the root and per-project routers."""
    root_router.register(r"license", license.LicenseViewSet)
    # Hooks are nested under a project and keyed by team_id.
    projects_router.register(r"hooks", hooks.HookViewSet, "project_hooks", ["team_id"])
| 226 | 0 | 23 |
40710d8467979f15b42d1c45bd0a92775dacde11 | 9,907 | py | Python | src/rogerthat/bizz/news/influx/__init__.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/rogerthat/bizz/news/influx/__init__.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/rogerthat/bizz/news/influx/__init__.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import logging
import time
from collections import defaultdict
from datetime import datetime
from influxdb import InfluxDBClient
from influxdb.resultset import ResultSet
from typing import List, Dict
from rogerthat.consts import DEBUG, DAY
from rogerthat.models.news import NewsItemAction, NewsItem
from rogerthat.models.properties.news import NewsItemStatistics
from rogerthat.settings import get_server_settings
from rogerthat.to.news import NewsItemBasicStatisticsTO, NewsItemTimeStatisticsTO, NewsItemBasicStatisticTO, \
NewsItemTimeValueTO, NewsItemStatisticsPerApp, NewsItemStatisticApp
from rogerthat.utils import now
| 47.401914 | 119 | 0.62683 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import logging
import time
from collections import defaultdict
from datetime import datetime
from influxdb import InfluxDBClient
from influxdb.resultset import ResultSet
from typing import List, Dict
from rogerthat.consts import DEBUG, DAY
from rogerthat.models.news import NewsItemAction, NewsItem
from rogerthat.models.properties.news import NewsItemStatistics
from rogerthat.settings import get_server_settings
from rogerthat.to.news import NewsItemBasicStatisticsTO, NewsItemTimeStatisticsTO, NewsItemBasicStatisticTO, \
NewsItemTimeValueTO, NewsItemStatisticsPerApp, NewsItemStatisticApp
from rogerthat.utils import now
def get_influxdb_datetime(time_str):
    # type: (str) -> datetime
    """Parse an InfluxDB UTC timestamp, with or without fractional seconds.

    A 20-character string ('YYYY-MM-DDTHH:MM:SSZ') has no fractional part;
    anything longer is parsed with microseconds.
    """
    fmt = "%Y-%m-%dT%H:%M:%SZ" if len(time_str) == 20 else "%Y-%m-%dT%H:%M:%S.%fZ"
    return datetime.strptime(time_str, fmt)
def get_influxdb_client(database='news', params=None):
    """Build an InfluxDBClient from server settings, or from explicit *params*.

    Returns None when no influxdb host is configured in the server settings.
    """
    ss = get_server_settings()
    if not ss.news_statistics_influxdb_host:
        return None
    if params:
        # Explicit connection parameters fully override the server settings.
        return InfluxDBClient(host=params['host'],
                              port=params['port'],
                              ssl=params['ssl'],
                              verify_ssl=params['verify_ssl'],
                              database=params['database'],
                              username=params['username'],
                              password=params['password'])
    return InfluxDBClient(host=ss.news_statistics_influxdb_host,
                          port=ss.news_statistics_influxdb_port,
                          ssl=not DEBUG,
                          verify_ssl=not DEBUG,
                          database=database,
                          username=ss.news_statistics_influxdb_username,
                          password=ss.news_statistics_influxdb_password)
def get_age_field_key(age):
    """Build the influx field key for an age-bucket label, e.g. '10 - 20' -> 'age-10-20'."""
    return 'age-' + age.replace(' ', '')
def get_news_item_time_statistics(news_item):
    # type: (NewsItem) -> NewsItemTimeStatisticsTO
    """Hourly 'reached'/'action' counts for the first 30 days after publishing."""
    qry_start_time = time.time()
    start_time = news_item.published_timestamp
    start_time_plus_one_month = start_time + DAY * 30
    # Never query into the future.
    end_time = min(start_time_plus_one_month, now())
    qry = 'SELECT sum(total) as value ' \
          'FROM "item.%(news_id)s" ' \
          'WHERE time >= %(start_time)ss AND time <= %(end_time)ss ' \
          'GROUP BY "action", time(1h) fill(0);' % {
              'news_id': news_item.id,
              'start_time': start_time,
              'end_time': end_time,
          }
    try:
        result_set = get_influxdb_client().query(qry)
    except Exception as e:
        if DEBUG:
            # On dev environments return an empty result instead of failing.
            logging.error('Error while fetching statistics. Returning empty result', exc_info=True)
            return NewsItemTimeStatisticsTO(id=news_item.id, reached=[], action=[])
        raise e
    qry_end_time = time.time()
    logging.debug('Time statistics queries took %ss', qry_end_time - qry_start_time)
    reached = [NewsItemTimeValueTO(time=point['time'], value=point['value'])
               for point in result_set.get_points(tags={'action': NewsItemAction.REACHED})]
    action = [NewsItemTimeValueTO(time=point['time'], value=point['value'])
              for point in result_set.get_points(tags={'action': NewsItemAction.ACTION})]
    stats = NewsItemTimeStatisticsTO(id=news_item.id, reached=reached, action=action)
    logging.debug('Transforming resultset took %ss', time.time() - qry_end_time)
    return stats
def _get_first_point(result_set, tags):
    # type: (ResultSet, dict) -> dict
    """Return the first point matching *tags*, or None when there is none."""
    for point in result_set.get_points(tags=tags):
        return point
    return None
def _build_stats_qry(news_id, fields_to_select, group_by):
    """Build one InfluxQL statement summing *fields_to_select* for a news item,
    restricted to the 'reached'/'action' rows and grouped by the *group_by* tags."""
    fields = ['sum("%s") as "%s"' % (field, field) for field in fields_to_select]
    # NOTE(review): there is no space between the field list and FROM; this
    # appears to rely on the last alias being double-quoted so the lexer still
    # splits the tokens — confirm against InfluxQL before changing.
    return 'SELECT %(fields)s' \
           'FROM "item.%(news_id)s" ' \
           'WHERE ("action" = \'%(reached)s\' OR "action" = \'%(action)s\') ' \
           'GROUP BY %(group_by)s;' % {
               'fields': ', '.join(fields),
               'news_id': news_id,
               'reached': NewsItemAction.REACHED,
               'action': NewsItemAction.ACTION,
               'group_by': ','.join(group_by)
           }
def get_news_item_statistics_per_app(news_ids, total_only=False):
    # type: (List[long], bool) -> List[NewsItemStatisticsPerApp]
    """Aggregate reached/action statistics per app for each news item.

    Builds one InfluxQL statement per news id, sends them as a single batched
    query, and matches each result set back to its news id via statement_id.
    (Python 2 code: uses iteritems()/.next().)
    """
    stats_per_news_item_per_app = defaultdict(list)  # type: Dict[int, List[NewsItemStatisticApp]]
    statements = []
    qry = ''
    qry_start_time = time.time()
    for news_id in news_ids:
        fields_to_select = ['total']
        if not total_only:
            # Also break the counts down by gender and age bucket.
            fields_to_select += NewsItemStatistics.get_gender_labels() + [get_age_field_key(l) for l in
                                                                          NewsItemStatistics.get_age_labels()]
        qry += _build_stats_qry(news_id, fields_to_select, ['app', 'action'])
        statements.append(news_id)
    if qry:
        try:
            result_sets = get_influxdb_client().query(qry)
        except Exception as e:
            if DEBUG:
                logging.error('Error while fetching statistics. Returning empty result', exc_info=True)
                return []
            raise e
        qry_end_time = time.time()
        logging.debug('stats per app: %d statistics queries took %ss', len(statements), qry_end_time - qry_start_time)
        # In case there is only one result set the above method returns the resultset instead of a list
        if not isinstance(result_sets, list):
            result_sets = [result_sets]
        empty_stats = NewsItemBasicStatisticTO.from_point(None, total_only)
        for statement_id, news_id in enumerate(statements):
            for result_set in result_sets:  # type: ResultSet
                if result_set.raw['statement_id'] == statement_id:
                    item_stats = defaultdict(dict)  # type: Dict[str, Dict[str, NewsItemBasicStatisticsTO]]
                    for (measurement, tags), points in result_set.items():
                        app_id = tags['app']
                        action = tags['action']
                        point = points.next()  # points should only contain 1 item
                        item_stats[app_id][action] = NewsItemBasicStatisticTO.from_point(point, total_only)
                    for app_id, data in item_stats.iteritems():
                        app_stats = NewsItemStatisticApp(
                            app_id=app_id,
                            stats=NewsItemBasicStatisticsTO(reached=data.get(NewsItemAction.REACHED, empty_stats),
                                                            action=data.get(NewsItemAction.ACTION, empty_stats))
                        )
                        stats_per_news_item_per_app[news_id].append(app_stats)
        logging.debug('stats per app: Transforming resultset took %ss', time.time() - qry_end_time)
    return [NewsItemStatisticsPerApp(id=news_id, results=stats_per_news_item_per_app[news_id]) for news_id in news_ids]
def get_basic_news_item_statistics(news_ids):
    # type: (List[long]) -> List[NewsItemBasicStatisticsTO]
    """Overall reached/action statistics (not split per app) for each news item.

    One batched InfluxQL query; result sets are matched to news ids via
    statement_id, like get_news_item_statistics_per_app.
    """
    stats_per_news_item = {}
    statements = []
    qry = ''
    qry_start_time = time.time()
    for news_id in news_ids:
        fields_to_select = ['total'] + NewsItemStatistics.get_gender_labels() + [get_age_field_key(l) for l in
                                                                                 NewsItemStatistics.get_age_labels()]
        qry += _build_stats_qry(news_id, fields_to_select, ['action'])
        statements.append(news_id)
    if qry:
        try:
            result_sets = get_influxdb_client().query(qry)
        except Exception as e:
            if DEBUG:
                # On dev environments fall back to all-zero statistics.
                logging.error('Error while fetching statistics. Returning empty result', exc_info=True)
                zero_stats = NewsItemBasicStatisticTO(total=0, gender=[], age=[])
                return [NewsItemBasicStatisticsTO(id=news_id, reached=zero_stats, action=zero_stats)
                        for news_id in news_ids]
            raise e
        qry_end_time = time.time()
        logging.debug('basic: %d statistics queries took %ss', len(statements), qry_end_time - qry_start_time)
        # In case there is only one result set the above method returns the resultset instead of a list
        if not isinstance(result_sets, list):
            result_sets = [result_sets]
        for statement_id, news_id in enumerate(statements):
            for result_set in result_sets:  # type: ResultSet
                if result_set.raw['statement_id'] == statement_id:
                    reached_stats = _get_first_point(result_set, {'action': NewsItemAction.REACHED})
                    action_stats = _get_first_point(result_set, {'action': NewsItemAction.ACTION})
                    stats_per_news_item[news_id] = NewsItemBasicStatisticsTO(
                        id=news_id,
                        reached=NewsItemBasicStatisticTO.from_point(reached_stats),
                        action=NewsItemBasicStatisticTO.from_point(action_stats),
                    )
        logging.debug('basic: Transforming resultset took %ss', time.time() - qry_end_time)
    return [stats_per_news_item[news_id] for news_id in news_ids]
| 8,444 | 0 | 184 |
e8eef6e7da1adab3641ffa7a4ccbd1615df1d5c7 | 303 | py | Python | mindhome_alpha/erpnext/patches/v7_2/update_website_for_variant.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/patches/v7_2/update_website_for_variant.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/patches/v7_2/update_website_for_variant.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | from __future__ import unicode_literals
import frappe | 23.307692 | 40 | 0.726073 | from __future__ import unicode_literals
import frappe
def execute():
    """Patch: item variants become website-hidden but variant-visible."""
    # variant must have show_in_website = 0
    frappe.reload_doctype('Item')
    frappe.db.sql('''
        update tabItem set
            show_variant_in_website = 1,
            show_in_website = 0
        where
            show_in_website=1
            and ifnull(variant_of, "")!=""''')
3f80b87d12d499c59e0bbc921240b76b2fa3a219 | 3,381 | py | Python | SyslogIP.py | sschaefervmw/vshieldSyslog | a2b42b37fdb79e7e94febec22e7bc76a6c0d6c43 | [
"Unlicense"
] | 2 | 2015-03-12T09:53:27.000Z | 2015-03-13T21:25:02.000Z | SyslogIP.py | sschaefervmw/vshieldSyslog | a2b42b37fdb79e7e94febec22e7bc76a6c0d6c43 | [
"Unlicense"
] | null | null | null | SyslogIP.py | sschaefervmw/vshieldSyslog | a2b42b37fdb79e7e94febec22e7bc76a6c0d6c43 | [
"Unlicense"
] | null | null | null | # coding=utf-8
import requests
import getpass
from requests.auth import HTTPBasicAuth
import xml.etree.ElementTree as ET
API_URL = "Place vCD URL Here" #Cloud API URL ending in /api/
EDGE_NAME = 'Place Edge Name Here' #Edge Gateway Name
SYSLOG_IP = 'Place Syslog IP Here' #IP of syslog server
USERNAME = 'Place Username Here' #Username@orgname E.g: email@domain.com@org
PASSWORD = 'Place Password Here' #Password
if __name__ == '__main__':
main()
| 40.25 | 131 | 0.656906 | # coding=utf-8
import requests
import getpass
from requests.auth import HTTPBasicAuth
import xml.etree.ElementTree as ET
API_URL = "Place vCD URL Here" #Cloud API URL ending in /api/
EDGE_NAME = 'Place Edge Name Here' #Edge Gateway Name
SYSLOG_IP = 'Place Syslog IP Here' #IP of syslog server
USERNAME = 'Place Username Here' #Username@orgname E.g: email@domain.com@org
PASSWORD = 'Place Password Here' #Password
class SyslogServerSettings():
    """Pushes a tenant syslog-server IP to an Edge Gateway via the vCloud API."""

    # Request body template for the configureSyslogServerSettings action.
    xml_string = """
    <vmext:SyslogServerSettings xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5">
        <vmext:TenantSyslogServerSettings>
            <vmext:SyslogServerIp>{syslog_ip}</vmext:SyslogServerIp>
        </vmext:TenantSyslogServerSettings>
    </vmext:SyslogServerSettings>
    """

    def __init__(self, edge_url, auth_token, ip='0.0.0.0'):
        self.syslog_ip = ip
        self.edge_url = edge_url
        self.api_endpoint = '/action/configureSyslogServerSettings'
        self.headers = {'accept': 'application/*+xml;version=5.6',
                        'x-vcloud-authorization': auth_token}

    def submit_changes(self):
        """POST the rendered XML body to the edge's syslog-settings action URL."""
        print('Submitting syslog IP changes: IP', self.syslog_ip)
        xml_out = self.xml_string.format(syslog_ip=self.syslog_ip)
        submit_url = self.edge_url + self.api_endpoint
        result = requests.post(submit_url, headers=self.headers, data=xml_out)
        print('API Endpoint: ', result.request.url)
        print('Status: {} {}'.format(result.status_code, result.reason))
class Cloud():
    """Minimal vCloud Director API session: login, query, and edge lookup."""

    def __init__(self, api_url, username=None, password=None):
        self.token = None
        self.headers = {'accept': 'application/*+xml;version=5.7'}
        self._api_url = api_url
        self._username = username
        self._password = password

    def login(self):
        """Authenticate against /sessions and store the x-vcloud-authorization token."""
        # Fall back to interactive prompts when credentials were not supplied.
        username = self._username or input('Username: ')
        password = self._password or getpass.getpass(prompt='Password: ')
        api_url = self._api_url + 'sessions'
        print('Logging in to [%s]...' % api_url)
        r = requests.post(api_url, auth=HTTPBasicAuth(username, password),
                          headers=self.headers, proxies=None)
        print('Status:', r.status_code, r.reason)
        self.token = r.headers.get('x-vcloud-authorization')
        # All subsequent requests carry the session token.
        self.headers['x-vcloud-authorization'] = self.token
        print('Auth Token:', self.token)

    def query_edge_url(self, name):
        """Return the href of the edge gateway named *name*, or None when not found."""
        print('Querying edges for {}'.format(name))
        params = {'type': 'edgeGateway',
                  'format': 'references'}
        response = self.run_query(params)
        xml_tree = ET.fromstring(response.text)
        for item in xml_tree:
            if item.get('name') == name:
                return item.get('href')

    def run_query(self, query_parameters):
        """Issue a GET against the /query endpoint with the given parameters."""
        query_url = self._api_url + 'query'
        response = requests.get(query_url, headers=self.headers,
                                params=query_parameters)
        return response
def main():
    """Log in, locate the configured edge gateway, and push the syslog IP change."""
    cloud = Cloud(API_URL, username=USERNAME, password=PASSWORD)
    cloud.login()
    gateway_url = cloud.query_edge_url(EDGE_NAME)
    settings = SyslogServerSettings(gateway_url, cloud.token, SYSLOG_IP)
    settings.submit_changes()
# Script entry point: run the end-to-end syslog update only when executed directly.
if __name__ == '__main__':
    main()
| 2,235 | 515 | 177 |
cd21e79a44fe5ee27387f892bb515f4bb277f8fa | 5,318 | py | Python | redditscript.py | Azl-Eyekay/channel-keepalive | a3aaf4fa5e716484c7ede35be37aeeab26e024fd | [
"MIT"
] | null | null | null | redditscript.py | Azl-Eyekay/channel-keepalive | a3aaf4fa5e716484c7ede35be37aeeab26e024fd | [
"MIT"
] | null | null | null | redditscript.py | Azl-Eyekay/channel-keepalive | a3aaf4fa5e716484c7ede35be37aeeab26e024fd | [
"MIT"
] | null | null | null | # Installed python3-praw, geany and ffmpeg from debian
# and others from pip
# Made a Reddit account, set up an application and added its client id and secret
# made a white noise video named noise.mkv and downloaded 3 pieces of music as music{0-1}.mp3
import praw
from PIL import Image, ImageDraw, ImageFont
import urllib.request
import sys
import os
from io import BytesIO
import random
import subprocess
from gtts import gTTS
# Pipeline stage switches -- flip on exactly the stage(s) you want to (re)run.
DEBUG_IMAGES_CREATE=False  # stage 1: scrape + caption reddit images
DEBUG_AUDIOS_CREATE=False  # stage 2: synthesize narration audio with gTTS
DEBUG_VIDEO_CREATE_TRY2=False  # stage 3: render one clip per image/audio pair
DEBUG_VIDEO_COMPILATION_CREATE=False  # stage 4a: concat clips via the concat demuxer
DEBUG_VIDEO_MUSIC_ADD=False  # stage 5: mix background music into the compilation
DEBUG_VIDEO_COMPILATION_CREATE_2=True  # stage 4b: concat clips via the concat filter, in sets of 5
if DEBUG_IMAGES_CREATE==True:
    # Stage 1: scrape image posts from r/linuxmemes, caption each one, and
    # save the results as video<N>.png.
    file_path = 'reddits.txt'
    # Redirect stdout so every print() below lands in reddits.txt; stage 2
    # later reads this file back as the narration script.
    sys.stdout = open(file_path, "w")
    reddit = praw.Reddit(client_id='', client_secret='', user_agent='')
    acc=0 #FOR SAVED IMAGE NUMBERING
    best_posts_linuxmemes = reddit.subreddit('linuxmemes').hot(limit=60)
    for post in best_posts_linuxmemes:
        # Keep only un-stickied link posts that point directly at an image file.
        if post.selftext == '' and post.stickied==False and (post.url.endswith(".png") or post.url.endswith(".jpg") or post.url.endswith(".jpeg") or post.url.endswith(".gif")):
            urllib.request.urlretrieve(post.url, "tmpimage")
            try:
                img = Image.open("tmpimage")
            except:
                # Skip any download Pillow cannot decode.
                continue
            wid, hgt = img.size
            # Extend the canvas by 5% (hgt//20) at the bottom for a caption bar.
            newimg = Image.new(img.mode, (wid, hgt + (hgt//20)),(0,0,0))
            newimg.paste(img, (0,0))
            d1=ImageDraw.Draw(newimg)
            font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSans.ttf',hgt//40)
            # Strip quotes and double dashes so the title is safe to embed in
            # later shell commands and speech text.
            post.title=post.title.replace('"','')
            post.title=post.title.replace("'",'')
            post.title=post.title.replace('--','')
            print(post.title + ' by '+post.author.name)
            # Truncate long titles so the caption fits on one line.
            if len(post.title)>20:
                post.title=post.title[0:20]+'...'
            d1.text((10,(hgt + hgt//50)),post.title+' - u/'+post.author.name+' - '+post.subreddit.display_name,fill="white",font=font)
            newimg.save('video'+str(acc)+'.png')
            acc+=1
if DEBUG_AUDIOS_CREATE==True:
    # Stage 2: synthesize one narration clip per captioned image, reading the
    # titles that stage 1 logged to reddits.txt (one line per image).
    dirlist = os.listdir()
    f=open("reddits.txt","r")
    lines=f.readlines()
    vidcount=0
    # Count the stage-1 images so exactly one audio file is made per image.
    for i in dirlist:
        if i.startswith("video"):
            vidcount+=1
    for i in range(vidcount):
        #os.system('espeak -s 120 -w audio'+str(i)+'.waw \''+lines[i]+'\'')
        mytext=lines[i]
        gts=gTTS(text=mytext,lang='en',slow=False)
        # NOTE(review): gTTS emits MP3-encoded data, so the .wav extension is
        # misleading -- downstream ffmpeg sniffs the real format, but confirm.
        gts.save('audio'+str(i)+'.wav')
if DEBUG_VIDEO_CREATE_TRY2==True:
    # Stage 3: pair each captioned image with its narration audio and render a
    # 720p letterboxed clip; tpad clones the last frame for 7 extra seconds of
    # display time and -shortest trims to the audio length.
    dirlist = os.listdir()
    vidcount=0
    clips=[]  # unused leftover from an earlier attempt
    # Count the stage-1 images to know how many clips to render.
    for i in dirlist:
        if i.startswith("video"):
            vidcount+=1
    for i in range(vidcount):
        os.system('ffmpeg -stream_loop -1 -i video'+str(i)+'.png -i audio'+str(i)+'.wav -vf "tpad=stop_mode=clone:stop_duration=7,scale=1280:720:force_original_aspect_ratio=decrease,pad=1280:720:-1:-1:color=black" -shortest out'+str(i)+'.mkv')
        #ffmpeg -i input -vf "scale=1280:720:force_original_aspect_ratio=decrease,pad=1280:720:-1:-1:color=black" output
if DEBUG_VIDEO_COMPILATION_CREATE==True:
    # Stage 4a: write an ffmpeg concat-demuxer playlist (vids.txt) listing each
    # clip plus trailing noise/outro entries, then concatenate without
    # re-encoding (-c copy).
    sys.stdout = open("vids.txt", "w")
    dirlist = os.listdir()
    for i in dirlist:
        if i.startswith("out"):
            print('file '+i)
    print('file noise.mkv')
    print('file outro.mkv')
    # NOTE(review): stdout is redirected into vids.txt but never flushed or
    # closed before Popen launches ffmpeg asynchronously -- ffmpeg may read a
    # partial playlist; confirm.
    subprocess.Popen('ffmpeg -f concat -i vids.txt -c copy nomusic.mkv',shell=True)
if DEBUG_VIDEO_COMPILATION_CREATE_2==True:
    # Stage 4b: concatenate the per-post clips (out<N>.mkv) with a noise.mkv
    # spacer after each one, using ffmpeg's concat *filter* in batches of 5
    # clips (10 inputs) so each command line stays manageable.
    dirlist = os.listdir()
    #inputs=''
    acc=0
    #filterstring=''
    # Count how many out<N>.mkv clips exist.
    for i in dirlist:
        if i.startswith("out"):
            #inputs=inputs+' -i '+i+' -i noise.mkv'
            acc+=1
    #inputs=inputs+' -i outro.mkv'
    tmp=0
    remaining=0
    print(acc,'acc')
    sets=acc//5
    # A fractional quotient means a partial batch is left over.
    if acc//5<acc/5:
        remaining=acc%5
    print(sets,'sets')
    print(remaining,'remaining')
    # Full batches: 5 clips interleaved with 5 noise spacers = 10 inputs.
    for i in range(sets):
        inputs='-i out'+str(i*5)+'.mkv -i noise.mkv -i out'+str(i*5+1)+'.mkv -i noise.mkv -i out'+str(i*5+2)+'.mkv -i noise.mkv -i out'+str(i*5+3)+'.mkv -i noise.mkv -i out'+str(i*5+4)+'.mkv -i noise.mkv'
        print('ffmpeg '+inputs+' -filter_complex "[0:v] [0:a] [1:v] [1:a] [2:v] [2:a] [3:v] [3:a] [4:v] [4:a] [5:v] [5:a] [6:v] [6:a] [7:v] [7:a] [8:v] [8:a] [9:v] [9:a] concat=n=10:v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic-set'+str(i)+'.mkv')
        os.system('ffmpeg '+inputs+' -filter_complex "[0:v] [0:a] [1:v] [1:a] [2:v] [2:a] [3:v] [3:a] [4:v] [4:a] [5:v] [5:a] [6:v] [6:a] [7:v] [7:a] [8:v] [8:a] [9:v] [9:a] concat=n=10:v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic-set'+str(i)+'.mkv')
    # Leftover partial batch: build the input and filter strings incrementally.
    inputs=''
    filterstring=''
    for i in range(remaining-1):
        inputs += '-i out'+str(sets*5+i)+'.mkv -i noise.mkv '
        filterstring+='['+str(i)+':v] ['+str(i)+':a] '
        filterstring+='['+str(remaining-1+i)+':v] ['+str(remaining-1+i)+':a] '
    # NOTE(review): the remainder command is only printed, never executed
    # (os.system below is commented out), and the stream-index arithmetic
    # above looks inconsistent with the interleaved input order -- confirm
    # before enabling it.
    #os.system('ffmpeg '+inputs+' -filter_complex "'+filterstring+'concat=n='+str(2*(remaining-1))+':v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic-remaining.mkv')
    print('ffmpeg '+inputs+' -filter_complex "'+filterstring+'concat=n='+str(2*(remaining-1))+':v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic-remaining.mkv')
    #for i in range(acc//5):
    #    filterstring+='['+str(i)+':v] ['+str(i)+':a] '
    #print('ffmpeg'+inputs+' -filter_complex "'+filterstring+'concat=n='+str(i)+':v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic.mkv')
    #os.system('ffmpeg'+inputs+' -filter_complex "'+filterstring+'concat=n='+str(i)+':v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic'+str(tmp)+'.mkv')
if DEBUG_VIDEO_MUSIC_ADD==True:
    # Stage 5: loop a randomly chosen background track under the compilation at
    # 15% volume, padded to the video length, copying the video stream as-is.
    musicno=random.randint(0,2)
    # NOTE(review): this reads nomusic.mp4 while the concat stages write
    # nomusic*.mkv -- confirm the expected input filename.
    subprocess.Popen('ffmpeg -i nomusic.mp4 -stream_loop -1 -i music'+str(musicno)+'.mp3 -filter_complex "[1:a]volume=0.15,apad[A];[0:a][A]amerge[out]" -shortest -c:v copy -map 0:v -map [out] -y finale.mkv',shell=True)
| 42.887097 | 246 | 0.657202 | # Installed python3-praw, geany and ffmpeg from debian
# and others from pip
# Made a Reddit account, set up an application and added its client id and secret
# made a white noise video named noise.mkv and downloaded 3 pieces of music as music{0-1}.mp3
import praw
from PIL import Image, ImageDraw, ImageFont
import urllib.request
import sys
import os
from io import BytesIO
import random
import subprocess
from gtts import gTTS
DEBUG_IMAGES_CREATE=False
DEBUG_AUDIOS_CREATE=False
DEBUG_VIDEO_CREATE_TRY2=False
DEBUG_VIDEO_COMPILATION_CREATE=False
DEBUG_VIDEO_MUSIC_ADD=False
DEBUG_VIDEO_COMPILATION_CREATE_2=True
if DEBUG_IMAGES_CREATE==True:
file_path = 'reddits.txt'
sys.stdout = open(file_path, "w")
reddit = praw.Reddit(client_id='', client_secret='', user_agent='')
acc=0 #FOR SAVED IMAGE NUMBERING
best_posts_linuxmemes = reddit.subreddit('linuxmemes').hot(limit=60)
for post in best_posts_linuxmemes:
if post.selftext == '' and post.stickied==False and (post.url.endswith(".png") or post.url.endswith(".jpg") or post.url.endswith(".jpeg") or post.url.endswith(".gif")):
urllib.request.urlretrieve(post.url, "tmpimage")
try:
img = Image.open("tmpimage")
except:
continue
wid, hgt = img.size
newimg = Image.new(img.mode, (wid, hgt + (hgt//20)),(0,0,0))
newimg.paste(img, (0,0))
d1=ImageDraw.Draw(newimg)
font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSans.ttf',hgt//40)
post.title=post.title.replace('"','')
post.title=post.title.replace("'",'')
post.title=post.title.replace('--','')
print(post.title + ' by '+post.author.name)
if len(post.title)>20:
post.title=post.title[0:20]+'...'
d1.text((10,(hgt + hgt//50)),post.title+' - u/'+post.author.name+' - '+post.subreddit.display_name,fill="white",font=font)
newimg.save('video'+str(acc)+'.png')
acc+=1
if DEBUG_AUDIOS_CREATE==True:
dirlist = os.listdir()
f=open("reddits.txt","r")
lines=f.readlines()
vidcount=0
for i in dirlist:
if i.startswith("video"):
vidcount+=1
for i in range(vidcount):
#os.system('espeak -s 120 -w audio'+str(i)+'.waw \''+lines[i]+'\'')
mytext=lines[i]
gts=gTTS(text=mytext,lang='en',slow=False)
gts.save('audio'+str(i)+'.wav')
if DEBUG_VIDEO_CREATE_TRY2==True:
dirlist = os.listdir()
vidcount=0
clips=[]
for i in dirlist:
if i.startswith("video"):
vidcount+=1
for i in range(vidcount):
os.system('ffmpeg -stream_loop -1 -i video'+str(i)+'.png -i audio'+str(i)+'.wav -vf "tpad=stop_mode=clone:stop_duration=7,scale=1280:720:force_original_aspect_ratio=decrease,pad=1280:720:-1:-1:color=black" -shortest out'+str(i)+'.mkv')
#ffmpeg -i input -vf "scale=1280:720:force_original_aspect_ratio=decrease,pad=1280:720:-1:-1:color=black" output
if DEBUG_VIDEO_COMPILATION_CREATE==True:
sys.stdout = open("vids.txt", "w")
dirlist = os.listdir()
for i in dirlist:
if i.startswith("out"):
print('file '+i)
print('file noise.mkv')
print('file outro.mkv')
subprocess.Popen('ffmpeg -f concat -i vids.txt -c copy nomusic.mkv',shell=True)
if DEBUG_VIDEO_COMPILATION_CREATE_2==True:
dirlist = os.listdir()
#inputs=''
acc=0
#filterstring=''
for i in dirlist:
if i.startswith("out"):
# inputs=inputs+' -i '+i+' -i noise.mkv'
acc+=1
#inputs=inputs+' -i outro.mkv'
tmp=0
remaining=0
print(acc,'acc')
sets=acc//5
if acc//5<acc/5:
remaining=acc%5
print(sets,'sets')
print(remaining,'remaining')
for i in range(sets):
inputs='-i out'+str(i*5)+'.mkv -i noise.mkv -i out'+str(i*5+1)+'.mkv -i noise.mkv -i out'+str(i*5+2)+'.mkv -i noise.mkv -i out'+str(i*5+3)+'.mkv -i noise.mkv -i out'+str(i*5+4)+'.mkv -i noise.mkv'
print('ffmpeg '+inputs+' -filter_complex "[0:v] [0:a] [1:v] [1:a] [2:v] [2:a] [3:v] [3:a] [4:v] [4:a] [5:v] [5:a] [6:v] [6:a] [7:v] [7:a] [8:v] [8:a] [9:v] [9:a] concat=n=10:v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic-set'+str(i)+'.mkv')
os.system('ffmpeg '+inputs+' -filter_complex "[0:v] [0:a] [1:v] [1:a] [2:v] [2:a] [3:v] [3:a] [4:v] [4:a] [5:v] [5:a] [6:v] [6:a] [7:v] [7:a] [8:v] [8:a] [9:v] [9:a] concat=n=10:v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic-set'+str(i)+'.mkv')
inputs=''
filterstring=''
for i in range(remaining-1):
inputs += '-i out'+str(sets*5+i)+'.mkv -i noise.mkv '
filterstring+='['+str(i)+':v] ['+str(i)+':a] '
filterstring+='['+str(remaining-1+i)+':v] ['+str(remaining-1+i)+':a] '
#os.system('ffmpeg '+inputs+' -filter_complex "'+filterstring+'concat=n='+str(2*(remaining-1))+':v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic-remaining.mkv')
print('ffmpeg '+inputs+' -filter_complex "'+filterstring+'concat=n='+str(2*(remaining-1))+':v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic-remaining.mkv')
#for i in range(acc//5):
# filterstring+='['+str(i)+':v] ['+str(i)+':a] '
#print('ffmpeg'+inputs+' -filter_complex "'+filterstring+'concat=n='+str(i)+':v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic.mkv')
#os.system('ffmpeg'+inputs+' -filter_complex "'+filterstring+'concat=n='+str(i)+':v=1:a=1 [v] [a]" -map "[v]" -map "[a]" nomusic'+str(tmp)+'.mkv')
if DEBUG_VIDEO_MUSIC_ADD==True:
musicno=random.randint(0,2)
subprocess.Popen('ffmpeg -i nomusic.mp4 -stream_loop -1 -i music'+str(musicno)+'.mp3 -filter_complex "[1:a]volume=0.15,apad[A];[0:a][A]amerge[out]" -shortest -c:v copy -map 0:v -map [out] -y finale.mkv',shell=True)
| 0 | 0 | 0 |
c4c26982ca660d6c3c28d85eb698c83bf6259908 | 210 | py | Python | shot_detector/services/__init__.py | w495/shot_detector | 617ff45c9c3c96bbd9a975aef15f1b2697282b9c | [
"BSD-3-Clause"
] | 18 | 2015-09-27T21:34:18.000Z | 2022-03-24T12:14:51.000Z | shot_detector/services/__init__.py | w495/shot_detector | 617ff45c9c3c96bbd9a975aef15f1b2697282b9c | [
"BSD-3-Clause"
] | 6 | 2021-03-18T21:21:35.000Z | 2022-03-11T23:32:55.000Z | shot_detector/services/__init__.py | w495/shot_detector | 617ff45c9c3c96bbd9a975aef15f1b2697282b9c | [
"BSD-3-Clause"
] | 3 | 2017-09-14T20:53:26.000Z | 2021-12-18T19:18:18.000Z | # -*- coding: utf8 -*-
"""
Services
"""
from __future__ import absolute_import, division, print_function
from .base_service import BaseService
from .shot_detector_service import ShotDetectorPlotService
| 19.090909 | 64 | 0.771429 | # -*- coding: utf8 -*-
"""
Services
"""
from __future__ import absolute_import, division, print_function
from .base_service import BaseService
from .shot_detector_service import ShotDetectorPlotService
| 0 | 0 | 0 |
bca359f6502b4247bcc9867263ccbe22ad06b24c | 174 | py | Python | eplot/__init__.py | SophiaHanx/eplot | 1cc104bba6c41fe3cce98a24e2c722a890cef2c2 | [
"MIT"
] | 76 | 2019-02-04T05:05:56.000Z | 2021-12-26T05:56:07.000Z | eplot/__init__.py | SophiaHanx/eplot | 1cc104bba6c41fe3cce98a24e2c722a890cef2c2 | [
"MIT"
] | 5 | 2019-05-13T02:45:39.000Z | 2022-03-27T14:57:16.000Z | eplot/__init__.py | SophiaHanx/eplot | 1cc104bba6c41fe3cce98a24e2c722a890cef2c2 | [
"MIT"
] | 18 | 2019-05-13T02:45:46.000Z | 2021-08-10T00:32:57.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version: 0.1
@author: pjgao
@city: Nanjing
@file: __init__.py.py
@time: 2018/12/10 16:22
"""
__version__ = '0.1.1' | 15.818182 | 26 | 0.586207 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version: 0.1
@author: pjgao
@city: Nanjing
@file: __init__.py.py
@time: 2018/12/10 16:22
"""
__version__ = '0.1.1' | 0 | 0 | 0 |
ee1b696fd0dd9bef0ddf714611f2ae67f9df1f0d | 1,067 | py | Python | jussi/middlewares/__init__.py | bnchdrff/jussi | 79d8decde323b84cdd8331e5cbb490021ea78cee | [
"MIT"
] | null | null | null | jussi/middlewares/__init__.py | bnchdrff/jussi | 79d8decde323b84cdd8331e5cbb490021ea78cee | [
"MIT"
] | null | null | null | jussi/middlewares/__init__.py | bnchdrff/jussi | 79d8decde323b84cdd8331e5cbb490021ea78cee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .jsonrpc import validate_jsonrpc_request
from .jussi import finalize_jussi_response
from .jussi import convert_to_jussi_request
from .limits import check_limits
from .caching import get_response
from .caching import cache_response
from .update_block_num import update_last_irreversible_block_num
| 35.566667 | 77 | 0.811621 | # -*- coding: utf-8 -*-
from .jsonrpc import validate_jsonrpc_request
from .jussi import finalize_jussi_response
from .jussi import convert_to_jussi_request
from .limits import check_limits
from .caching import get_response
from .caching import cache_response
from .update_block_num import update_last_irreversible_block_num
def setup_middlewares(app):
    """Attach jussi's request and response middleware chains to *app* and return it.

    Order matters: requests are validated, converted, rate-limited, then
    answered from cache; responses are finalized, block-num-tracked, then cached.
    """
    logger = app.config.logger
    logger.info('before_server_start -> setup_middlewares')
    incoming = (validate_jsonrpc_request,
                convert_to_jussi_request,
                check_limits,
                get_response)
    outgoing = (finalize_jussi_response,
                update_last_irreversible_block_num,
                cache_response)
    for middleware in incoming:
        app.request_middleware.append(middleware)
    for middleware in outgoing:
        app.response_middleware.append(middleware)
    logger.info(f'configured request middlewares:{app.request_middleware}')
    logger.info(f'configured response middlewares:{app.response_middleware}')
    return app
| 717 | 0 | 23 |
5752b7e60dffe0e263eaf7ca7c23f9cbf7ed6ee4 | 5,072 | py | Python | Book/chap1/python/chap1_intro.py | lorenghoh/pyman | 9b4ddd52c5577fc85e2601ae3128f398f0eb673c | [
"CC0-1.0"
] | 3 | 2020-04-30T19:50:11.000Z | 2020-10-17T02:07:00.000Z | Book/chap1/python/chap1_intro.py | lorenghoh/pyman | 9b4ddd52c5577fc85e2601ae3128f398f0eb673c | [
"CC0-1.0"
] | 35 | 2020-04-21T04:25:31.000Z | 2021-11-06T22:49:44.000Z | Book/chap1/python/chap1_intro.py | lorenghoh/pyman | 9b4ddd52c5577fc85e2601ae3128f398f0eb673c | [
"CC0-1.0"
] | 11 | 2020-04-21T04:33:48.000Z | 2020-10-23T21:12:12.000Z | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: all
# notebook_metadata_filter: all,-language_info
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# toc:
# base_numbering: 1
# nav_menu: {}
# number_sections: true
# sideBar: true
# skip_h1_title: false
# title_cell: Table of Contents
# title_sidebar: Contents
# toc_cell: true
# toc_position: {}
# toc_section_display: true
# toc_window_display: true
# ---
# %% [markdown] {"toc": true}
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Introduction" data-toc-modified-id="Introduction-1"><span class="toc-item-num">1 </span>Introduction</a></span><ul class="toc-item"><li><span><a href="#Introduction-to-Python-and-its-use-in-science" data-toc-modified-id="Introduction-to-Python-and-its-use-in-science-1.1"><span class="toc-item-num">1.1 </span>Introduction to Python and its use in science</a></span></li></ul></li></ul></div>
# %% [markdown]
# Introduction
# ============
#
# latex
#
# Introduction to Python and its use in science
# ---------------------------------------------
#
# This manual is meant to serve as an introduction to the Python
# programming language and its use for scientific computing. It's ok if
# you have never programmed a computer before. This manual will teach you
# how to do it from the ground up.
#
# The Python programming language is useful for all kinds of scientific
# and engineering tasks. You can use it to analyze and plot data. You can
# also use it to numerically solve science and engineering problems that
# are difficult or even impossible to solve analytically.
#
# While we want to marshal Python's powers to address scientific
# problems, you should know that Python is a general purpose computer
# language that is widely used to address all kinds of computing tasks,
# from web applications to processing financial data on Wall Street and
# various scripting tasks for computer system management. Over the past
# decade it has been increasingly used by scientists and engineers for
# numerical computations, graphics, and as a "wrapper" for numerical
# software originally written in other languages, like Fortran and C.
#
# Python is similar to Matlab and IDL, two other computer languages that
# are frequently used in science and engineering applications. Like Matlab
# and IDL, Python is an *interpreted* language, meaning you can run your
# code without having to go through an extra step of compiling, as
# required for the C and Fortran programming languages. It is also a
# *dynamically typed* language, meaning you don't have to declare
# variables and set aside memory before using them. Don't worry if you
# don't know exactly what these terms mean. Their primary significance for
# you is that you can write Python code, test, and use it quickly with a
# minimum of fuss.
#
# One advantage of Python over similar languages like Matlab and IDL is
# that it is free. It can be downloaded from the web and is available on
# all the standard computer platforms, including Windows, MacOS, and
# Linux. This also means that you can use Python without being tethered to
# the internet, as required for commercial software that is tied to a
# remote license server.
#
# Another advantage is Python's clean and simple syntax, including its
# implementation of *object oriented* programming (which we do not
# emphasize in this introduction).
#
# An important disadvantage is that Python programs can be slower than
# compiled languages like C. For large scale simulations and other
# demanding applications, there can be a considerable speed penalty in
# using Python. In these cases, C, C++, or Fortran is recommended,
# although intelligent use of Python's array processing tools contained in
# the NumPy module can greatly speed up Python code. Another disadvantage
# is that compared to Matlab and IDL, Python is less well documented. This
# stems from the fact that it is public *open source* software and thus is
# dependent on volunteers from the community of developers and users for
# documentation. The documentation is freely available on the web but is
# scattered among a number of different sites and can be terse. This
# manual will acquaint you with the most commonly-used web sites. Search
# engines like Google can help you find others.
#
# You are not assumed to have had any previous programming experience.
# However, the purpose of this manual isn't to teach you the principles of
# computer programming; it's to provide a practical guide to getting
# started with Python for scientific computing. Perhaps once you see some
# of the powerful tasks that you can accomplish with Python, you will be
# inspired to study computational science and engineering, as well as
# computer programming, in greater depth.
| 48.769231 | 474 | 0.743494 | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: all
# notebook_metadata_filter: all,-language_info
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# toc:
# base_numbering: 1
# nav_menu: {}
# number_sections: true
# sideBar: true
# skip_h1_title: false
# title_cell: Table of Contents
# title_sidebar: Contents
# toc_cell: true
# toc_position: {}
# toc_section_display: true
# toc_window_display: true
# ---
# %% [markdown] {"toc": true}
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Introduction" data-toc-modified-id="Introduction-1"><span class="toc-item-num">1 </span>Introduction</a></span><ul class="toc-item"><li><span><a href="#Introduction-to-Python-and-its-use-in-science" data-toc-modified-id="Introduction-to-Python-and-its-use-in-science-1.1"><span class="toc-item-num">1.1 </span>Introduction to Python and its use in science</a></span></li></ul></li></ul></div>
# %% [markdown]
# Introduction
# ============
#
# latex
#
# Introduction to Python and its use in science
# ---------------------------------------------
#
# This manual is meant to serve as an introduction to the Python
# programming language and its use for scientific computing. It's ok if
# you have never programmed a computer before. This manual will teach you
# how to do it from the ground up.
#
# The Python programming language is useful for all kinds of scientific
# and engineering tasks. You can use it to analyze and plot data. You can
# also use it to numerically solve science and engineering problems that
# are difficult or even impossible to solve analytically.
#
# While we want to marshall Python's powers to address scientific
# problems, you should know that Python is a general purpose computer
# language that is widely used to address all kinds of computing tasks,
# from web applications to processing financial data on Wall Street and
# various scripting tasks for computer system management. Over the past
# decade it has been increasingly used by scientists and engineers for
# numerical computations, graphics, and as a "wrapper" for numerical
# software originally written in other languages, like Fortran and C.
#
# Python is similar to Matlab and IDL, two other computer languages that
# are frequently used in science and engineering applications. Like Matlab
# and IDL, Python is an *interpreted* language, meaning you can run your
# code without having to go through an extra step of compiling, as
# required for the C and Fortran programming languages. It is also a
# *dynamically typed* language, meaning you don't have to declare
# variables and set aside memory before using them. Don't worry if you
# don't know exactly what these terms mean. Their primary significance for
# you is that you can write Python code, test, and use it quickly with a
# minimum of fuss.
#
# One advantage of Python over similar languages like Matlab and IDL is
# that it is free. It can be downloaded from the web and is available on
# all the standard computer platforms, including Windows, MacOS, and
# Linux. This also means that you can use Python without being tethered to
# the internet, as required for commercial software that is tied to a
# remote license server.
#
# Another advantage is Python's clean and simple syntax, including its
# implementation of *object oriented* programming (which we do not
# emphasize in this introduction).
#
# An important disadvantage is that Python programs can be slower than
# compiled languages like C. For large scale simulations and other
# demanding applications, there can be a considerable speed penalty in
# using Python. In these cases, C, C++, or Fortran is recommended,
# although intelligent use of Python's array processing tools contained in
# the NumPy module can greatly speed up Python code. Another disadvantage
# is that compared to Matlab and IDL, Python is less well documented. This
# stems from the fact that it is public *open source* software and thus is
# dependent on volunteers from the community of developers and users for
# documentation. The documentation is freely available on the web but is
# scattered among a number of different sites and can be terse. This
# manual will acquaint you with the most commonly-used web sites. Search
# engines like Google can help you find others.
#
# You are not assumed to have had any previous programming experience.
# However, the purpose of this manual isn't to teach you the principles of
# computer programming; it's to provide a practical guide to getting
# started with Python for scientific computing. Perhaps once you see some
# of the powerful tasks that you can accomplish with Python, you will be
# inspired to study computational science and engineering, as well as
# computer programming, in greater depth.
| 0 | 0 | 0 |
9e12d44f5d5d039463c31ca6d9efbeeca62f298e | 5,812 | py | Python | src/python/janelia_emrp/zcorr/plot_z_coords.py | JaneliaSciComp/EM_recon_pipeline | 0a1076aaa0de24623014902a4c7ee9741a0730fb | [
"BSD-3-Clause"
] | null | null | null | src/python/janelia_emrp/zcorr/plot_z_coords.py | JaneliaSciComp/EM_recon_pipeline | 0a1076aaa0de24623014902a4c7ee9741a0730fb | [
"BSD-3-Clause"
] | null | null | null | src/python/janelia_emrp/zcorr/plot_z_coords.py | JaneliaSciComp/EM_recon_pipeline | 0a1076aaa0de24623014902a4c7ee9741a0730fb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import glob
import os
import sys
from bokeh.io import output_file
from bokeh.models import SingleIntervalTicker
from bokeh.plotting import figure, show
if __name__ == '__main__':
    # Require the script name plus owner, project, stack, and run (5 argv
    # entries). The previous "< 4" check let a missing <run> slip through and
    # crash with IndexError on sys.argv[4] before the usage message could help.
    if len(sys.argv) < 5:
        print(f'USAGE: {sys.argv[0]} <owner> <project> <stack> <run> [label ...]')
    else:
        plot_run(owner=sys.argv[1],
                 project=sys.argv[2],
                 stack=sys.argv[3],
                 run=sys.argv[4],
                 # argv[5:] is naturally [] when no labels were given.
                 solve_labels=sys.argv[5:])
| 34.188235 | 111 | 0.639195 | #!/usr/bin/env python
import glob
import os
import sys
from bokeh.io import output_file
from bokeh.models import SingleIntervalTicker
from bokeh.plotting import figure, show
def load_z_coords_file_data(z_coords_path):
    """Parse a Zcoords.txt file of "<original_z> <corrected_z>" lines.

    Returns:
        tuple: (first_z, z_values, delta_values) where first_z is the original
        z of the first layer, z_values lists every original z, and
        delta_values lists the corrected-z step from the previous layer
        (-1 sentinel for the first layer, which has no predecessor).
    """
    z_values = []
    delta_values = []
    first_z = None
    previous_corrected_z = 0
    with open(z_coords_path, 'r') as z_coords_file:
        for line in z_coords_file:
            words = line.split()
            original_z = float(words[0])
            corrected_z = float(words[1])
            # Bug fix: compare against None explicitly -- a first layer at
            # z == 0.0 is falsy, so the old "if not first_z" reset first_z on
            # every subsequent line and emitted -1 for every delta.
            if first_z is None:
                first_z = original_z
                delta = -1
            else:
                delta = corrected_z - previous_corrected_z
            z_values.append(original_z)
            delta_values.append(delta)
            previous_corrected_z = corrected_z
    return first_z, z_values, delta_values
def normalize_legacy_data(legacy_data_path, first_z, last_z, z_offset):
    """Shift legacy Zcoords data by *z_offset* and clip it to [first_z, last_z].

    Returns the same (first_z, z_values, delta_values) triple shape as
    load_z_coords_file_data, restricted to the requested z range.
    """
    legacy_first_z, legacy_z_values, legacy_delta_values = load_z_coords_file_data(legacy_data_path)
    normalized_first_z = None
    normalized_z_values = []
    normalized_delta_values = []
    for i in range(0, len(legacy_z_values)):
        z = legacy_z_values[i] + z_offset
        if first_z <= z <= last_z:
            normalized_z_values.append(z)
            normalized_delta_values.append(legacy_delta_values[i])
            # Bug fix: explicit None check -- a first in-range z of 0.0 is
            # falsy, so the old "if not normalized_first_z" let later layers
            # overwrite normalized_first_z.
            if normalized_first_z is None:
                normalized_first_z = z
        elif z > last_z:
            # Assumes the file is ordered by z, so nothing later can be in
            # range -- TODO confirm that assumption holds for legacy data.
            break
    return normalized_first_z, normalized_z_values, normalized_delta_values
def get_line_color(for_index):
    """Cycle through a fixed palette so successive batches get distinct colors."""
    palette = ('red', 'blue', 'cyan', 'brown', 'lightgreen', 'yellow')
    return palette[for_index % len(palette)]
def plot_delta_z(title, data_paths, ignore_margin=35, plot_width=2400, plot_height=1200,
                 legacy_data_path=None, legacy_z_offset=1, output_file_path=None, max_batches_to_plot=None,
                 ticker_interval=None):
    """Render a bokeh scatter plot of per-layer delta-z values.

    Args:
        title: plot title.
        data_paths: mix of Zcoords.txt file paths and directories; directories
            are searched recursively for Zcoords.txt files ("unmerged" data,
            which is then sorted by each batch's first z).
        ignore_margin: layers trimmed from each end of every batch (0 still
            skips the first layer, whose delta is the -1 sentinel).
        plot_width, plot_height: bokeh figure size in pixels.
        legacy_data_path: optional older Zcoords.txt whose z values (shifted by
            legacy_z_offset) are clipped to the plotted range and drawn as an
            extra first batch.
        output_file_path: when given, the plot HTML is written there.
        max_batches_to_plot: optional cap on the number of batches drawn.
        ticker_interval: optional fixed x-axis tick spacing.
    """
    data = []
    z_coords_paths = []
    unmerged_data = False
    # Expand directory arguments into the Zcoords.txt files they contain.
    for data_path in data_paths:
        if os.path.isdir(data_path):
            z_coords_paths.extend(glob.glob(f'{data_path}/**/Zcoords.txt', recursive=True))
            unmerged_data = True
        else:
            z_coords_paths.append(data_path)
    for z_coords_path in z_coords_paths:
        data.append(load_z_coords_file_data(z_coords_path))
    if output_file_path:
        output_file(output_file_path)
        print(f'writing plot to {output_file_path}')
    tooltips = [("z", "@x"), ("delta", "@y")]
    # , y_range=Range1d(0, 3.9)
    p = figure(title=title, x_axis_label='z', y_axis_label='delta z',
               tooltips=tooltips, plot_width=plot_width, plot_height=plot_height)
    if ticker_interval:
        p.xaxis.ticker = SingleIntervalTicker(interval=ticker_interval)
        p.xaxis.major_label_orientation = "vertical"
    # Unmerged batches arrive in glob order, so sort them by their first z.
    if unmerged_data:
        sorted_data = sorted(data, key=lambda tup: tup[0])
    else:
        sorted_data = data
    if max_batches_to_plot and max_batches_to_plot < len(sorted_data):
        sorted_data = sorted_data[0:max_batches_to_plot]
    # Legacy data is clipped to the plotted z range and drawn as batch 0.
    if legacy_data_path:
        first_z = sorted_data[0][0]
        last_z_values = sorted_data[-1][1]
        last_z = last_z_values[-1]
        sorted_data.insert(0, normalize_legacy_data(legacy_data_path, first_z, last_z, legacy_z_offset))
    plotted_batch_count = 0
    for (first_z, z_values, delta_values) in sorted_data:
        # Trim noisy batch edges; even with margin 0, skip the -1 sentinel.
        if ignore_margin == 0:
            stop = len(z_values)
            start = 1
        else:
            stop = len(z_values) - ignore_margin
            start = ignore_margin
        trimmed_z_values = z_values[start:stop]
        trimmed_delta_values = delta_values[start:stop]
        line_color = get_line_color(plotted_batch_count)
        p.circle(trimmed_z_values, trimmed_delta_values,
                 line_color=line_color, fill_color=line_color,
                 legend_label=f'batch {plotted_batch_count}')
        plotted_batch_count = plotted_batch_count + 1
    show(p)
def plot_run(owner, project, stack, run, solve_labels=None):
    """Build and save the delta-z plot for one alignment run under /nrs/flyem."""
    owner_run_sub_path = f'{owner}/{project}/{stack}/{run}'
    run_dir = f'/nrs/flyem/render/z_corr/{owner_run_sub_path}'
    plot_html_name = 'delta_z_plot.html'
    output_file_path = f'{run_dir}/{plot_html_name}'
    plot_url = f'http://renderer-data4.int.janelia.org:8080/z_corr_plots/{owner_run_sub_path}/{plot_html_name}'
    data_paths = []
    solve_title = " "
    # One entry per solve directory: remember its path and describe it in the
    # title as "<color>=<label>", using caller labels when available.
    for solve_index, data_path in enumerate(sorted(glob.glob(f'{run_dir}/solve_*/Zcoords.txt'))):
        data_paths.append(data_path)
        color = get_line_color(solve_index)
        if solve_labels and solve_index < len(solve_labels):
            label = solve_labels[solve_index]
        else:
            label = os.path.basename(os.path.dirname(data_path))
        solve_title = f'{solve_title} {color}={label},'
    solve_title = solve_title[:-1]  # trim trailing comma
    if data_paths:
        plot_delta_z(title=f'{owner} : {project} : {stack} :{solve_title}',
                     data_paths=data_paths,
                     plot_height=1000,
                     ignore_margin=10,
                     output_file_path=output_file_path)
        print(f'view plot at {plot_url}')
    else:
        print(f'ERROR: no solve data to plot in {run_dir}')
if __name__ == '__main__':
    # Require the script name plus owner, project, stack, and run (5 argv
    # entries). The previous "< 4" check let a missing <run> slip through and
    # crash with IndexError on sys.argv[4] before the usage message could help.
    if len(sys.argv) < 5:
        print(f'USAGE: {sys.argv[0]} <owner> <project> <stack> <run> [label ...]')
    else:
        plot_run(owner=sys.argv[1],
                 project=sys.argv[2],
                 stack=sys.argv[3],
                 run=sys.argv[4],
                 # argv[5:] is naturally [] when no labels were given.
                 solve_labels=sys.argv[5:])
| 5,141 | 0 | 115 |
18b61b5c3fb43146fb5267cd7120f9d26e8dde9f | 148 | py | Python | paralleldomain/constants.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | [
"Apache-2.0"
] | 10 | 2021-11-17T17:23:49.000Z | 2022-03-18T09:51:23.000Z | paralleldomain/constants.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | [
"Apache-2.0"
] | 3 | 2021-12-02T17:16:20.000Z | 2022-01-07T12:47:13.000Z | paralleldomain/constants.py | parallel-domain/pd-sdk | 20e3d052a5cb612a2dd84bda7b1b5487a6a60edc | [
"Apache-2.0"
] | 2 | 2022-03-09T07:03:54.000Z | 2022-03-23T15:53:48.000Z | CAMERA_MODEL_OPENCV_PINHOLE: str = "opencv_pinhole"
# String identifiers naming the supported camera distortion models.
CAMERA_MODEL_OPENCV_FISHEYE: str = "opencv_fisheye"  # OpenCV fisheye model
CAMERA_MODEL_PD_FISHEYE: str = "pd_fisheye"  # Parallel Domain's own fisheye model
| 37 | 51 | 0.837838 | CAMERA_MODEL_OPENCV_PINHOLE: str = "opencv_pinhole"
CAMERA_MODEL_OPENCV_FISHEYE: str = "opencv_fisheye"
CAMERA_MODEL_PD_FISHEYE: str = "pd_fisheye"
| 0 | 0 | 0 |
c1639b26a8e3699cd950a8a4897360c1ca8da877 | 2,395 | py | Python | panoptes_aggregation/tests/reducer_tests/test_question_reducer.py | CKrawczyk/python-reducers-for-caesar | 9c5d9e072906d3fde2497fa61a66e4c8c0113ec2 | [
"Apache-2.0"
] | 1 | 2018-11-27T16:43:37.000Z | 2018-11-27T16:43:37.000Z | panoptes_aggregation/tests/reducer_tests/test_question_reducer.py | CKrawczyk/python-reducers-for-caesar | 9c5d9e072906d3fde2497fa61a66e4c8c0113ec2 | [
"Apache-2.0"
] | 2 | 2017-07-27T09:05:03.000Z | 2019-04-03T20:28:31.000Z | panoptes_aggregation/tests/reducer_tests/test_question_reducer.py | CKrawczyk/python-reducers-for-caesar | 9c5d9e072906d3fde2497fa61a66e4c8c0113ec2 | [
"Apache-2.0"
] | 2 | 2017-07-26T17:30:17.000Z | 2017-07-27T09:14:51.000Z | import unittest
from collections import Counter
import flask
import json
from panoptes_aggregation.reducers.question_reducer import process_data, question_reducer
from panoptes_aggregation.reducers.test_utils import extract_in_data
extracted_data = [
{'a': 1, 'b': 1},
{'a': 1},
{'b': 1, 'c': 1},
{'b': 1, 'a': 1}
]
processed_data = [
Counter({'a': 1, 'b': 1}),
Counter({'a': 1}),
Counter({'b': 1, 'c': 1}),
Counter({'b': 1, 'a': 1})
]
processed_data_pairs = [
Counter({'a+b': 1}),
Counter({'a': 1}),
Counter({'b+c': 1}),
Counter({'a+b': 1})
]
reduced_data = {
'a': 3,
'b': 3,
'c': 1
}
reduced_data_pairs = {
'a+b': 2,
'a': 1,
'b+c': 1
}
if __name__ == '__main__':
unittest.main()
| 27.528736 | 89 | 0.66096 | import unittest
from collections import Counter
import flask
import json
from panoptes_aggregation.reducers.question_reducer import process_data, question_reducer
from panoptes_aggregation.reducers.test_utils import extract_in_data
extracted_data = [
{'a': 1, 'b': 1},
{'a': 1},
{'b': 1, 'c': 1},
{'b': 1, 'a': 1}
]
processed_data = [
Counter({'a': 1, 'b': 1}),
Counter({'a': 1}),
Counter({'b': 1, 'c': 1}),
Counter({'b': 1, 'a': 1})
]
processed_data_pairs = [
Counter({'a+b': 1}),
Counter({'a': 1}),
Counter({'b+c': 1}),
Counter({'a+b': 1})
]
reduced_data = {
'a': 3,
'b': 3,
'c': 1
}
reduced_data_pairs = {
'a+b': 2,
'a': 1,
'b+c': 1
}
class TestCountQuestions(unittest.TestCase):
    """Tests for the question reducer's process_data/question_reducer.

    Uses the module-level fixtures (extracted_data, processed_data,
    reduced_data and their *_pairs variants) and a bare Flask app so the
    reducer can be exercised both with plain Python lists and via a
    simulated HTTP request carrying the extracts as JSON.
    """
    def setUp(self):
        # Flask app plus canned request kwargs used by the *_request tests
        # to invoke the reducer the way Caesar would — over HTTP.
        self.app = flask.Flask(__name__)
        self.request_kwargs = {
            'data': json.dumps(extract_in_data(extracted_data)),
            'content_type': 'application/json'
        }
    def test_process_data(self):
        """process_data turns the extract dicts into Counters."""
        result = process_data(extracted_data)
        self.assertCountEqual(result, processed_data)
    def test_process_data_pairs(self):
        """With pairs=True co-occurring keys are joined into 'a+b' Counters."""
        result = process_data(extracted_data, pairs=True)
        self.assertCountEqual(result, processed_data_pairs)
    def test_count_vote(self):
        """The undecorated reducer sums the Counters into vote totals."""
        # NOTE(review): 'reuslt' is a typo for 'result' (local-only, harmless).
        reuslt = question_reducer._original(processed_data)
        self.assertDictEqual(reuslt, reduced_data)
    def test_count_vote_pairs(self):
        """The undecorated reducer also handles pair-keyed Counters."""
        reuslt = question_reducer._original(processed_data_pairs)
        self.assertDictEqual(reuslt, reduced_data_pairs)
    def test_question_reducer(self):
        """The decorated reducer accepts raw extracts directly."""
        reuslt = question_reducer(extracted_data)
        self.assertDictEqual(reuslt, reduced_data)
    def test_question_reducer_pairs(self):
        """The decorated reducer forwards the pairs keyword."""
        reuslt = question_reducer(extracted_data, pairs=True)
        self.assertDictEqual(reuslt, reduced_data_pairs)
    def test_question_reducer_request(self):
        """The reducer unpacks extracts from a Flask request body."""
        with self.app.test_request_context(**self.request_kwargs):
            result = question_reducer(flask.request)
            self.assertDictEqual(result, reduced_data)
    def test_question_reducer_request_pairs(self):
        """pairs can also be passed as a query-string parameter."""
        with self.app.test_request_context('/?pairs=True', **self.request_kwargs):
            result = question_reducer(flask.request)
            self.assertDictEqual(result, reduced_data_pairs)
if __name__ == '__main__':
unittest.main()
| 1,342 | 23 | 265 |
7cf76b633da3ca24efc2b044a305e83bcc121a93 | 905 | py | Python | chapter_08/03_date_printer.py | SergeHall/Tony-Gaddis-Python-4th | 24e7c70fbd196ff531a5e4e7f6f5021c4b4177ba | [
"MIT"
] | 2 | 2021-04-07T03:26:37.000Z | 2021-07-26T07:38:49.000Z | chapter_08/03_date_printer.py | SergeHall/Tony-Gaddis-Python-4th | 24e7c70fbd196ff531a5e4e7f6f5021c4b4177ba | [
"MIT"
] | null | null | null | chapter_08/03_date_printer.py | SergeHall/Tony-Gaddis-Python-4th | 24e7c70fbd196ff531a5e4e7f6f5021c4b4177ba | [
"MIT"
] | null | null | null | # 3. Принтер дат. Напишите программу, которая считывает от пользователя
# строковое значение, содержащее дату в формате дд/мм/гггг. Она должна
# напечатать дату в формате 12 марта 2018 г.
main()
| 25.138889 | 71 | 0.614365 | # 3. Принтер дат. Напишите программу, которая считывает от пользователя
# строковое значение, содержащее дату в формате дд/мм/гггг. Она должна
# напечатать дату в формате 12 марта 2018 г.
def main():
    """Read a date in dd/mm/yyyy form and print it like "March 12, 2018"."""
    # get the date from the user.
    date = get_date()
    # convert the date to the long form, e.g. "March 12, 2018".
    literal_date = convert(date)
    print("The date is below.")
    print(literal_date)
def get_date():
    """Prompt for and return a date string in dd/mm/yyyy order.

    The exercise statement above specifies дд/мм/гггг (day first); the
    original prompt asked for mm/dd/yyyy, contradicting the spec.
    """
    date = input("Enter the date as dd/mm/yyyy: ")
    return date
def convert(date):
    """Convert a 'dd/mm/yyyy' string into '<Month> <day>, <year>'."""
    # first separate the string into [day, month, year] per the spec.
    date_list = date.split("/")
    # create a list for months (index = month number - 1).
    months = ["January", "February", "March", "April", "May",
              "June", "July", "August", "September", "October",
              "November", "December"]
    # Bug fix: the original indexed date_list[0] as the month, which is
    # only correct for mm/dd input and produced e.g. "December 3" for the
    # spec's day-first "03/12/yyyy".
    new_date = str(months[int(date_list[1]) - 1]) + " " + str(
        date_list[0]) + "," + " " + str(date_list[2])
    return new_date
main()
| 637 | 0 | 69 |
b763443b905c931d81818a74873ed7fa19410bcf | 2,461 | py | Python | Mailbot/cogs/utils/strManip/owo.py | tekofu/Mailbot | a0475d20ae01c9f09c1174a8ddaf3a92cf1707a2 | [
"MIT"
] | null | null | null | Mailbot/cogs/utils/strManip/owo.py | tekofu/Mailbot | a0475d20ae01c9f09c1174a8ddaf3a92cf1707a2 | [
"MIT"
] | null | null | null | Mailbot/cogs/utils/strManip/owo.py | tekofu/Mailbot | a0475d20ae01c9f09c1174a8ddaf3a92cf1707a2 | [
"MIT"
] | null | null | null | # Port from https://github.com/zuzak/owo
"""MIT License
Original substitutions: Copyright (c) 2018 Eva (Nepeta)
JavaScript library: Copyright (c) 2019 Douglas Gardner <douglas@chippy.ch>
Python library: Copyright (c) 2019 tekofu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import random
owoPrefix = [
'<3 ',
'H-hewwo?? ',
'HIIII! ',
'Haiiii! ',
'Huohhhh. ',
'OWO ',
'OwO ',
'UwU '
]
owoSuffix = [
' :3',
' UwU',
' ʕʘ‿ʘʔ',
' >_>',
' ^_^',
'..',
' Huoh.',
' ^-^',
' ;_;',
' ;-;',
' xD',
' x3',
' :D',
' :P',
' ;3',
' XDDD',
', fwendo',
' ㅇㅅㅇ',
' (人◕ω◕)',
'(^v^)',
' Sigh.',
' x3',
' ._.',
' (• o •)',
' >_<'
]
owoDict = {
'r': 'w',
'l': 'w',
'R': 'W',
'L': 'W',
'no': 'nu',
'has': 'haz',
'have': 'haz',
'you': 'uu',
'the ': 'da ',
'The ': 'Da '
}
| 23.893204 | 78 | 0.60382 | # Port from https://github.com/zuzak/owo
"""MIT License
Original substitutions: Copyright (c) 2018 Eva (Nepeta)
JavaScript library: Copyright (c) 2019 Douglas Gardner <douglas@chippy.ch>
Python library: Copyright (c) 2019 tekofu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import random
# Random greeting prefixes attached to the front of the output.
owoPrefix = [
    '<3 ',
    'H-hewwo?? ',
    'HIIII! ',
    'Haiiii! ',
    'Huohhhh. ',
    'OWO ',
    'OwO ',
    'UwU '
]
# Random emoticon suffixes attached to the end of the output.
owoSuffix = [
    ' :3',
    ' UwU',
    ' ʕʘ‿ʘʔ',
    ' >_>',
    ' ^_^',
    '..',
    ' Huoh.',
    ' ^-^',
    ' ;_;',
    ' ;-;',
    ' xD',
    ' x3',
    ' :D',
    ' :P',
    ' ;3',
    ' XDDD',
    ', fwendo',
    ' ㅇㅅㅇ',
    ' (人◕ω◕)',
    '(^v^)',
    ' Sigh.',
    ' x3',
    ' ._.',
    ' (• o •)',
    ' >_<'
]
# Substitution table: single-letter and whole-word replacements, plus two
# multi-word entries ('the ' / 'The ') that include a trailing space.
owoDict = {
    'r': 'w',
    'l': 'w',
    'R': 'W',
    'L': 'W',
    'no': 'nu',
    'has': 'haz',
    'have': 'haz',
    'you': 'uu',
    'the ': 'da ',
    'The ': 'Da '
}
def owo(owoInput):
    """Translate *owoInput* into owo-speak.

    Whole-word entries of ``owoDict`` are applied token-wise, the
    multi-word entries ('the ' -> 'da ', 'The ' -> 'Da ') are applied to
    the joined string (``split()`` strips the spaces, so a token lookup
    can never match them), l/r become w preserving case, and a random
    prefix/suffix pair is attached.
    """
    words = owoInput.split()
    for i, word in enumerate(words):
        if word in owoDict:
            words[i] = owoDict[word]
    owoInput = ' '.join(words)
    # Bug fix: apply the multi-character substitutions the token loop can
    # never reach.
    owoInput = owoInput.replace('the ', 'da ')
    owoInput = owoInput.replace('The ', 'Da ')
    # Same random-draw order as the original (prefix first, then suffix).
    prefChoice = random.choice(owoPrefix)
    suffChoice = random.choice(owoSuffix)
    # Bug fix: the original replaced 'L' and 'R' with lowercase 'w',
    # contradicting owoDict which maps them to uppercase 'W'.
    owoInput = owoInput.replace('l', 'w')
    owoInput = owoInput.replace('L', 'W')
    owoInput = owoInput.replace('r', 'w')
    owoInput = owoInput.replace('R', 'W')
    owoOutput = prefChoice + owoInput + suffChoice
    return owoOutput
| 517 | 0 | 23 |
ee07d386a54685fbce86e02e57f64443fe1564d9 | 2,185 | py | Python | biliapi/search.py | elecmonkey/biliapi-py | 560bf8e332c79daa83da6ff6f011a08172c651ab | [
"MIT"
] | 1 | 2020-03-26T03:33:25.000Z | 2020-03-26T03:33:25.000Z | biliapi/search.py | elecmonkey/biliapi-py | 560bf8e332c79daa83da6ff6f011a08172c651ab | [
"MIT"
] | null | null | null | biliapi/search.py | elecmonkey/biliapi-py | 560bf8e332c79daa83da6ff6f011a08172c651ab | [
"MIT"
] | null | null | null | import json
import urllib.parse
from .lib.gethttp import getHttpPage | 43.7 | 269 | 0.523112 | import json
import urllib.parse
from .lib.gethttp import getHttpPage
class Search:
    """Bilibili site-wide video search with a naive per-instance cache."""
    # Class-level defaults; __init__/getSearch shadow them with instance
    # attributes on first use.
    __keyword = ''
    __caching = {}
    def __init__(self, keyword):
        # keyword: the search term sent to the API.
        self.__keyword = keyword
    def setUid(self, keyword):
        # Despite the name, this sets the search *keyword*, not a uid —
        # presumably copy-pasted from a user-oriented class.
        self.__keyword = keyword
    def getUid(self, keyword):
        # NOTE(review): the `keyword` parameter is accepted but ignored;
        # this simply returns the stored keyword.
        return self.__keyword
    def getSearch(self, page = 1, method = 0):
        # Site-wide search endpoint.
        # 20 results per page; `page` selects the page number.
        # method == 0 returns the cached result of a previous call, if any.
        # NOTE(review): the cache is not keyed by `page`, so a cached hit
        # may be for a different page than requested — confirm intent.
        if method == 0 and self.__caching != {}:
            return self.__caching
        try:
            JsonData = getHttpPage("https://api.bilibili.com/x/web-interface/search/type?context=&page=" + str(page) + "&order=&keyword=" + urllib.parse.quote(self.__keyword) + "&duration=&tids_1=&tids_2=&__refresh__=true&search_type=video&highlight=1&single_column=0")
            DicData = json.loads(JsonData)
            # Pagination summary returned alongside the per-video data.
            PageData = {
                "numResults" : DicData['data']['numResults'],
                "numPages": DicData['data']['numPages']
            }
            ReData = { }
            # Last page may hold fewer than the full 20 results.
            if (page == PageData['numPages']):
                iPage = PageData['numResults'] - 20 * (page - 1)
            else :
                iPage = 20
            # Re-shape each result into a flat dict of the fields we keep.
            for iGetVideo in range(0, iPage):
                ReData[iGetVideo] = {
                    "aid" : DicData['data']['result'][iGetVideo]['aid'],
                    "author" : DicData['data']['result'][iGetVideo]['author'],
                    "uid" : DicData['data']['result'][iGetVideo]['mid'],
                    "typeid" : DicData['data']['result'][iGetVideo]['typeid'],
                    "typename" : DicData['data']['result'][iGetVideo]['typename'],
                    "title" : DicData['data']['result'][iGetVideo]['title'],
                    "description" : DicData['data']['result'][iGetVideo]['description'],
                    "pic" : DicData['data']['result'][iGetVideo]['pic'],
                    "tag" : DicData['data']['result'][iGetVideo]['tag']
                }
            # error 0: success; 1: response missing expected keys; 2: anything else.
            self.__caching = {"error": 0, "Page" : PageData, "Data" : ReData}
        except KeyError:
            self.__caching = {"error": 1}
        # NOTE(review): bare except swallows every failure (network, JSON,
        # even KeyboardInterrupt) into error code 2 — worth narrowing.
        except:
            self.__caching = {"error": 2}
        return self.__caching
return self.__caching | 1,986 | 136 | 23 |
01b5c4ad0af3c39ac513f99c62e7604958db38e2 | 1,510 | py | Python | tests/test_dbgp_eval_property.py | Taluu/vdebug | 2ebf27b1ffb6752f0ba6cf4ec6be0a7498897d48 | [
"MIT"
] | null | null | null | tests/test_dbgp_eval_property.py | Taluu/vdebug | 2ebf27b1ffb6752f0ba6cf4ec6be0a7498897d48 | [
"MIT"
] | null | null | null | tests/test_dbgp_eval_property.py | Taluu/vdebug | 2ebf27b1ffb6752f0ba6cf4ec6be0a7498897d48 | [
"MIT"
] | null | null | null | import unittest
import vdebug.dbgp
import xml.etree.ElementTree as ET
| 37.75 | 123 | 0.659603 | import unittest
import vdebug.dbgp
import xml.etree.ElementTree as ET
class EvalPropertyTest(unittest.TestCase):
    """Tests vdebug.dbgp.EvalProperty parsing of a DBGP eval response."""
    def __get_eval_property(self,xml_string,code,lang):
        """Parse *xml_string* and wrap its first node as an EvalProperty."""
        xml = ET.fromstring(xml_string)
        firstnode = xml[0]
        return vdebug.dbgp.EvalProperty(firstnode,code,lang)
    def test_numeric_keys(self):
        """Numeric child keys render as $arr[0]; string keys as $arr['key']."""
        prop = self.__get_eval_property(\
            """<?xml version="1.0" encoding="iso-8859-1"?>
            <response xmlns="urn:debugger_protocol_v1" xmlns:xdebug="http://xdebug.org/dbgp/xdebug" command="eval" transaction_id="13">
                <property
                    address="140722906708544" type="array"
                    children="1" numchildren="2" page="0" pagesize="32">
                    <property
                        name="0" address="140022315302704"
                        type="array" children="1" numchildren="1"></property>
                    <property
                        name="key" address="140022315307008"
                        type="array" children="1" numchildren="1"></property>
                </property>
            </response>
            """, '$testarr', 'php')
        self.assertEqual(prop.display_name,'$testarr')
        self.assertEqual(prop.value,'')
        self.assertEqual(prop.type,'array')
        self.assertEqual(prop.depth,0)
        self.assertTrue(prop.has_children)
        self.assertEqual(prop.child_count(),2)
        self.assertEqual(prop.children[0].type,'array')
        self.assertEqual(prop.children[0].display_name,'$testarr[0]')
        self.assertEqual(prop.children[1].type,'array')
        self.assertEqual(prop.children[1].display_name,"$testarr['key']")
| 1,343 | 21 | 76 |
16cc64d424b0814ed9440ed104da575c4aaeb25f | 7,366 | py | Python | darling_ansible/python_venv/lib/python3.7/site-packages/oci/cims/models/limit_item.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/cims/models/limit_item.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/cims/models/limit_item.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | 1 | 2020-06-25T03:12:58.000Z | 2020-06-25T03:12:58.000Z | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .item import Item
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LimitItem(Item):
"""
Details of Limit Item
"""
#: A constant which can be used with the limit_status property of a LimitItem.
#: This constant has a value of "APPROVED"
LIMIT_STATUS_APPROVED = "APPROVED"
#: A constant which can be used with the limit_status property of a LimitItem.
#: This constant has a value of "PARTIALLY_APPROVED"
LIMIT_STATUS_PARTIALLY_APPROVED = "PARTIALLY_APPROVED"
#: A constant which can be used with the limit_status property of a LimitItem.
#: This constant has a value of "NOT_APPROVED"
LIMIT_STATUS_NOT_APPROVED = "NOT_APPROVED"
def __init__(self, **kwargs):
"""
Initializes a new LimitItem object with values from keyword arguments. The default value of the :py:attr:`~oci.cims.models.LimitItem.type` attribute
of this class is ``limit`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param item_key:
The value to assign to the item_key property of this LimitItem.
:type item_key: str
:param name:
The value to assign to the name property of this LimitItem.
:type name: str
:param type:
The value to assign to the type property of this LimitItem.
:type type: str
:param category:
The value to assign to the category property of this LimitItem.
:type category: Category
:param sub_category:
The value to assign to the sub_category property of this LimitItem.
:type sub_category: SubCategory
:param issue_type:
The value to assign to the issue_type property of this LimitItem.
:type issue_type: IssueType
:param current_limit:
The value to assign to the current_limit property of this LimitItem.
:type current_limit: int
:param current_usage:
The value to assign to the current_usage property of this LimitItem.
:type current_usage: int
:param requested_limit:
The value to assign to the requested_limit property of this LimitItem.
:type requested_limit: int
:param limit_status:
The value to assign to the limit_status property of this LimitItem.
Allowed values for this property are: "APPROVED", "PARTIALLY_APPROVED", "NOT_APPROVED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type limit_status: str
"""
self.swagger_types = {
'item_key': 'str',
'name': 'str',
'type': 'str',
'category': 'Category',
'sub_category': 'SubCategory',
'issue_type': 'IssueType',
'current_limit': 'int',
'current_usage': 'int',
'requested_limit': 'int',
'limit_status': 'str'
}
self.attribute_map = {
'item_key': 'itemKey',
'name': 'name',
'type': 'type',
'category': 'category',
'sub_category': 'subCategory',
'issue_type': 'issueType',
'current_limit': 'currentLimit',
'current_usage': 'currentUsage',
'requested_limit': 'requestedLimit',
'limit_status': 'limitStatus'
}
self._item_key = None
self._name = None
self._type = None
self._category = None
self._sub_category = None
self._issue_type = None
self._current_limit = None
self._current_usage = None
self._requested_limit = None
self._limit_status = None
self._type = 'limit'
@property
def current_limit(self):
"""
Gets the current_limit of this LimitItem.
Current available limit of the resource
:return: The current_limit of this LimitItem.
:rtype: int
"""
return self._current_limit
@current_limit.setter
def current_limit(self, current_limit):
"""
Sets the current_limit of this LimitItem.
Current available limit of the resource
:param current_limit: The current_limit of this LimitItem.
:type: int
"""
self._current_limit = current_limit
@property
def current_usage(self):
"""
Gets the current_usage of this LimitItem.
Current used limit of the resource
:return: The current_usage of this LimitItem.
:rtype: int
"""
return self._current_usage
@current_usage.setter
def current_usage(self, current_usage):
"""
Sets the current_usage of this LimitItem.
Current used limit of the resource
:param current_usage: The current_usage of this LimitItem.
:type: int
"""
self._current_usage = current_usage
@property
def requested_limit(self):
"""
Gets the requested_limit of this LimitItem.
Requested limit for the resource
:return: The requested_limit of this LimitItem.
:rtype: int
"""
return self._requested_limit
@requested_limit.setter
def requested_limit(self, requested_limit):
"""
Sets the requested_limit of this LimitItem.
Requested limit for the resource
:param requested_limit: The requested_limit of this LimitItem.
:type: int
"""
self._requested_limit = requested_limit
@property
def limit_status(self):
"""
Gets the limit_status of this LimitItem.
Status of the Limit
Allowed values for this property are: "APPROVED", "PARTIALLY_APPROVED", "NOT_APPROVED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The limit_status of this LimitItem.
:rtype: str
"""
return self._limit_status
@limit_status.setter
def limit_status(self, limit_status):
"""
Sets the limit_status of this LimitItem.
Status of the Limit
:param limit_status: The limit_status of this LimitItem.
:type: str
"""
allowed_values = ["APPROVED", "PARTIALLY_APPROVED", "NOT_APPROVED"]
if not value_allowed_none_or_none_sentinel(limit_status, allowed_values):
limit_status = 'UNKNOWN_ENUM_VALUE'
self._limit_status = limit_status
| 32.307018 | 245 | 0.63508 | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .item import Item
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LimitItem(Item):
"""
Details of Limit Item
"""
#: A constant which can be used with the limit_status property of a LimitItem.
#: This constant has a value of "APPROVED"
LIMIT_STATUS_APPROVED = "APPROVED"
#: A constant which can be used with the limit_status property of a LimitItem.
#: This constant has a value of "PARTIALLY_APPROVED"
LIMIT_STATUS_PARTIALLY_APPROVED = "PARTIALLY_APPROVED"
#: A constant which can be used with the limit_status property of a LimitItem.
#: This constant has a value of "NOT_APPROVED"
LIMIT_STATUS_NOT_APPROVED = "NOT_APPROVED"
def __init__(self, **kwargs):
"""
Initializes a new LimitItem object with values from keyword arguments. The default value of the :py:attr:`~oci.cims.models.LimitItem.type` attribute
of this class is ``limit`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param item_key:
The value to assign to the item_key property of this LimitItem.
:type item_key: str
:param name:
The value to assign to the name property of this LimitItem.
:type name: str
:param type:
The value to assign to the type property of this LimitItem.
:type type: str
:param category:
The value to assign to the category property of this LimitItem.
:type category: Category
:param sub_category:
The value to assign to the sub_category property of this LimitItem.
:type sub_category: SubCategory
:param issue_type:
The value to assign to the issue_type property of this LimitItem.
:type issue_type: IssueType
:param current_limit:
The value to assign to the current_limit property of this LimitItem.
:type current_limit: int
:param current_usage:
The value to assign to the current_usage property of this LimitItem.
:type current_usage: int
:param requested_limit:
The value to assign to the requested_limit property of this LimitItem.
:type requested_limit: int
:param limit_status:
The value to assign to the limit_status property of this LimitItem.
Allowed values for this property are: "APPROVED", "PARTIALLY_APPROVED", "NOT_APPROVED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type limit_status: str
"""
self.swagger_types = {
'item_key': 'str',
'name': 'str',
'type': 'str',
'category': 'Category',
'sub_category': 'SubCategory',
'issue_type': 'IssueType',
'current_limit': 'int',
'current_usage': 'int',
'requested_limit': 'int',
'limit_status': 'str'
}
self.attribute_map = {
'item_key': 'itemKey',
'name': 'name',
'type': 'type',
'category': 'category',
'sub_category': 'subCategory',
'issue_type': 'issueType',
'current_limit': 'currentLimit',
'current_usage': 'currentUsage',
'requested_limit': 'requestedLimit',
'limit_status': 'limitStatus'
}
self._item_key = None
self._name = None
self._type = None
self._category = None
self._sub_category = None
self._issue_type = None
self._current_limit = None
self._current_usage = None
self._requested_limit = None
self._limit_status = None
self._type = 'limit'
@property
def current_limit(self):
"""
Gets the current_limit of this LimitItem.
Current available limit of the resource
:return: The current_limit of this LimitItem.
:rtype: int
"""
return self._current_limit
@current_limit.setter
def current_limit(self, current_limit):
"""
Sets the current_limit of this LimitItem.
Current available limit of the resource
:param current_limit: The current_limit of this LimitItem.
:type: int
"""
self._current_limit = current_limit
@property
def current_usage(self):
"""
Gets the current_usage of this LimitItem.
Current used limit of the resource
:return: The current_usage of this LimitItem.
:rtype: int
"""
return self._current_usage
@current_usage.setter
def current_usage(self, current_usage):
"""
Sets the current_usage of this LimitItem.
Current used limit of the resource
:param current_usage: The current_usage of this LimitItem.
:type: int
"""
self._current_usage = current_usage
@property
def requested_limit(self):
"""
Gets the requested_limit of this LimitItem.
Requested limit for the resource
:return: The requested_limit of this LimitItem.
:rtype: int
"""
return self._requested_limit
@requested_limit.setter
def requested_limit(self, requested_limit):
"""
Sets the requested_limit of this LimitItem.
Requested limit for the resource
:param requested_limit: The requested_limit of this LimitItem.
:type: int
"""
self._requested_limit = requested_limit
@property
def limit_status(self):
"""
Gets the limit_status of this LimitItem.
Status of the Limit
Allowed values for this property are: "APPROVED", "PARTIALLY_APPROVED", "NOT_APPROVED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The limit_status of this LimitItem.
:rtype: str
"""
return self._limit_status
@limit_status.setter
def limit_status(self, limit_status):
"""
Sets the limit_status of this LimitItem.
Status of the Limit
:param limit_status: The limit_status of this LimitItem.
:type: str
"""
allowed_values = ["APPROVED", "PARTIALLY_APPROVED", "NOT_APPROVED"]
if not value_allowed_none_or_none_sentinel(limit_status, allowed_values):
limit_status = 'UNKNOWN_ENUM_VALUE'
self._limit_status = limit_status
    def __repr__(self):
        """Render all model attributes via the SDK's flat-dict formatter."""
        return formatted_flat_dict(self)
    def __eq__(self, other):
        """Equality by full attribute-dict comparison (None is never equal)."""
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__, delegating to it."""
        return not self == other
| 177 | 0 | 81 |
522211a4f5a97c6fa2d49305fa8392bcbc68d35d | 12,541 | py | Python | pytests/sg/sginstalltests.py | sumedhpb/testrunner | 9ff887231c75571624abc31a3fb5248110e01203 | [
"Apache-2.0"
] | 14 | 2015-02-06T02:47:57.000Z | 2020-03-14T15:06:05.000Z | pytests/sg/sginstalltests.py | sumedhpb/testrunner | 9ff887231c75571624abc31a3fb5248110e01203 | [
"Apache-2.0"
] | 3 | 2019-02-27T19:29:11.000Z | 2021-06-02T02:14:27.000Z | pytests/sg/sginstalltests.py | sumedhpb/testrunner | 9ff887231c75571624abc31a3fb5248110e01203 | [
"Apache-2.0"
] | 108 | 2015-03-26T08:58:49.000Z | 2022-03-21T05:21:39.000Z | from sg.sg_base import GatewayBaseTest
from remote.remote_util import RemoteMachineShellConnection
import time
help_string = ['This script creates an init service to run a sync_gateway instance.',
'If you want to install more than one service instance',
'create additional services with different names.',
'', 'sync_gateway_service_install.sh', ' -h --help',
' --runas=<The user account to run sync_gateway as; default (sync_gateway)>',
' --runbase=<The directory to run sync_gateway from; defaut (/home/sync_gateway)>',
' --sgpath=<The path to the sync_gateway executable; default (/opt/couchbase-sync-gateway/bin/sync_gateway)>',
' --cfgpath=<The path to the sync_gateway JSON config file; default (/home/sync_gateway/sync_gateway.json)>',
' --logsdir=<The path to the log file direcotry; default (/home/sync_gateway/logs)>', '']
| 55.49115 | 128 | 0.667889 | from sg.sg_base import GatewayBaseTest
from remote.remote_util import RemoteMachineShellConnection
import time
help_string = ['This script creates an init service to run a sync_gateway instance.',
'If you want to install more than one service instance',
'create additional services with different names.',
'', 'sync_gateway_service_install.sh', ' -h --help',
' --runas=<The user account to run sync_gateway as; default (sync_gateway)>',
' --runbase=<The directory to run sync_gateway from; defaut (/home/sync_gateway)>',
' --sgpath=<The path to the sync_gateway executable; default (/opt/couchbase-sync-gateway/bin/sync_gateway)>',
' --cfgpath=<The path to the sync_gateway JSON config file; default (/home/sync_gateway/sync_gateway.json)>',
' --logsdir=<The path to the log file direcotry; default (/home/sync_gateway/logs)>', '']
class SGInstallerTest(GatewayBaseTest):
    def setUp(self):
        """Run the base gateway setup and disable default bucket creation."""
        super(SGInstallerTest, self).setUp()
        self.input.test_params["default_bucket"] = False
    def tearDown(self):
        """Delegate cleanup to the base gateway teardown."""
        super(SGInstallerTest, self).tearDown()
    def installBasic(self):
        """Install sync_gateway on every server and verify it is running.

        Checks the process PID is non-zero and that gateway.log was
        created under <folder_prefix>/tmp/.
        """
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.install(shell)
            pid = self.is_sync_gateway_process_running(shell)
            self.assertNotEqual(pid, 0)
            exist = shell.file_exists('{0}/tmp/'.format(self.folder_prefix), 'gateway.log')
            self.assertTrue(exist)
            shell.disconnect()
    def serviceInstallNoSyncGatewayUser(self):
        """Service install must fail cleanly when the sync_gateway user is absent.

        Removes the sync_gateway OS user, runs the service installer, and
        asserts the expected error with no service/process/log/config
        side effects; the user is restored afterwards.
        """
        try:
            for server in self.servers:
                shell = RemoteMachineShellConnection(server)
                self.assertTrue(self.service_clean(shell))
                self.assertTrue(self.install_gateway(shell))
                self.assertTrue(self.remove_user(shell, "sync_gateway"))
                output, error = self.run_sync_gateway_service_install(shell, self.extra_param)
                self.assertEqual(error, [self.expected_error])
                self.assertEqual(output, [])
                self.assertFalse(self.is_sync_gateway_service_running(shell))
                self.assertFalse(self.is_sync_gateway_process_running(shell))
                self.assertFalse(shell.file_exists(self.logsdir, 'sync_gateway_error.log'))
                self.assertFalse(shell.file_exists(self.datadir, 'data'))
                self.assertFalse(shell.file_exists(self.configdir, self.configfile))
        finally:
            # NOTE(review): `shell` is bound inside the loop — this raises
            # NameError if self.servers is empty, and only restores the user
            # on the *last* server when there are several.
            self.add_user(shell, "sync_gateway")
    def serviceInstallBasic(self):
        """Happy-path service install with optional extra parameters.

        Cleans prior state, runs the installer with self.extra_param, then
        verifies the service/process are up and the expected log, data and
        config files exist.  When a path override is not passed, the
        corresponding expected directory falls back to the installer's
        hard-coded defaults under /home/sync_gateway.
        """
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.assertTrue(self.service_clean(shell))
            self.assertTrue(self.install_gateway(shell))
            # Wipe any leftover logs/data/config and pre-create scratch dirs.
            shell.execute_command_raw(
                'rm -rf {0}/* {1}/* {2}/sync_gateway.json {3}/tmp/test*; mkdir {3}/tmp/test {3}/tmp/test2'.
                format(self.logsdir, self.datadir, self.configdir, self.folder_prefix))
            output, error = self.run_sync_gateway_service_install(shell, self.extra_param)
            self.check_normal_error_output(shell, output, error)
            self.assertTrue(self.is_sync_gateway_service_running(shell))
            self.assertTrue(self.is_sync_gateway_process_running(shell))
            if not "--runbase" in self.extra_param:
                # hardcoded for services LOGS_TEMPLATE_VAR=${RUNBASE_TEMPLATE_VAR}/logs
                self.datadir = '/home/sync_gateway'
            if not "--logsdir" in self.extra_param:
                self.logsdir = '/home/sync_gateway/logs'
            if not "--cfgpath" in self.extra_param:
                self.configdir = '/home/sync_gateway'
            self.assertTrue(shell.file_exists(self.logsdir, 'sync_gateway_error.log'))
            self.assertTrue(shell.file_exists(self.datadir, 'data'))
            self.assertTrue(shell.file_exists(self.configdir, self.configfile))
    def serviceInstallSGPath(self):
        """Install the service with a relocated binary via --sgpath.

        Moves the installed bin directory to bin2 and verifies the service
        still installs, runs, and creates the default logs/data/config.
        """
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.assertTrue(self.service_clean(shell))
            self.assertTrue(self.install_gateway(shell))
            shell.execute_command_raw('mv /opt/couchbase-sync-gateway/bin /opt/couchbase-sync-gateway/bin2 ')
            output, error = self.run_sync_gateway_service_install(shell,
                                                                  '--sgpath=/opt/couchbase-sync-gateway/bin2/sync_gateway')
            self.check_normal_error_output(shell, output, error)
            self.assertTrue(self.is_sync_gateway_service_running(shell))
            self.assertTrue(self.is_sync_gateway_process_running(shell))
            self.assertTrue(shell.file_exists('/home/sync_gateway/logs', 'sync_gateway_error.log'))
            self.assertTrue(shell.file_exists('/home/sync_gateway', 'data'))
            self.assertTrue(shell.file_exists('/home/sync_gateway', self.configfile))
    def serviceInstallMultipleTimes(self):
        """Re-running the installer over a live service must be a no-op.

        After one successful install, three further install attempts must
        report 'job already running' while the service stays up.
        """
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.assertTrue(self.service_clean(shell))
            self.assertTrue(self.install_gateway(shell))
            output, error = self.run_sync_gateway_service_install(shell, self.extra_param)
            self.check_normal_error_output(shell, output, error)
            self.assertTrue(self.is_sync_gateway_service_running(shell))
            for i in range(3):
                output, error = self.run_sync_gateway_service_install(shell, self.extra_param)
                self.assertTrue(self.check_job_already_running(shell, output, error))
                self.assertTrue(self.is_sync_gateway_service_running(shell))
                self.assertTrue(self.is_sync_gateway_process_running(shell))
    def serviceInstallThenStartService(self):
        """Starting an already-running service must report it as running.

        After a successful install, repeated service starts must each
        yield the 'job already running' message with the service still up.
        """
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.assertTrue(self.service_clean(shell))
            self.assertTrue(self.install_gateway(shell))
            output, error = self.run_sync_gateway_service_install(shell, self.extra_param)
            self.check_normal_error_output(shell, output, error)
            self.assertTrue(self.is_sync_gateway_service_running(shell))
            self.assertTrue(self.is_sync_gateway_process_running(shell))
            for i in range(3):
                output, error = self.start_gateway_service(shell)
                self.assertTrue(self.check_job_already_running(shell, output, error))
                self.assertTrue(self.is_sync_gateway_service_running(shell))
                self.assertTrue(self.is_sync_gateway_process_running(shell))
def serviceInstallStopStartServiceMultipleTimes(self):
for server in self.servers:
shell = RemoteMachineShellConnection(server)
self.assertTrue(self.service_clean(shell))
self.assertTrue(self.install_gateway(shell))
output, error = self.run_sync_gateway_service_install(shell, self.extra_param)
self.check_normal_error_output(shell, output, error)
self.assertTrue(self.is_sync_gateway_service_running(shell))
self.assertTrue(self.is_sync_gateway_process_running(shell))
for i in range(2):
output, error = self.stop_gateway_service(shell)
self.assertFalse(self.is_sync_gateway_process_running(shell))
self.assertFalse(self.is_sync_gateway_service_running(shell))
output, error = self.start_gateway_service(shell)
self.assertTrue(self.is_sync_gateway_service_running(shell))
self.assertTrue(self.is_sync_gateway_process_running(shell))
def serviceInstallHelp(self):
shell = RemoteMachineShellConnection(self.master)
self.kill_processes_gateway(shell)
self.uninstall_gateway(shell)
self.assertTrue(self.install_gateway(shell))
output, error = self.run_sync_gateway_service_install(shell, "-h")
self.assertEqual(error, [])
self.assertEqual(output, help_string)
output, error = self.run_sync_gateway_service_install(shell, "--help")
self.assertEqual(error, [])
self.assertEqual(output, help_string)
shell.disconnect()
def serviceInstallNegative(self):
for server in self.servers:
shell = RemoteMachineShellConnection(server)
self.assertTrue(self.service_clean(shell))
self.assertTrue(self.install_gateway(shell))
output, error = self.run_sync_gateway_service_install(shell, self.extra_param)
self.assertEqual(error, [self.expected_error])
self.assertEqual(output, [])
self.assertFalse(self.is_sync_gateway_service_running(shell))
self.assertFalse(self.is_sync_gateway_process_running(shell))
self.assertFalse(shell.file_exists(self.logsdir, 'sync_gateway_error.log'))
self.assertFalse(shell.file_exists(self.datadir, 'data'))
self.assertFalse(shell.file_exists(self.configdir, self.configfile))
    def serviceInstallNegativeCfgPath(self):
        # Negative install — presumably a bad config path given the test
        # name; the exact flag comes from self.extra_param (TODO confirm
        # against the test setup). The installer must fail with the
        # expected error prefix and must not create the config file,
        # service or process.
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            self.assertTrue(self.service_clean(shell))
            self.assertTrue(self.install_gateway(shell))
            output, error = self.run_sync_gateway_service_install(shell, self.extra_param)
            self.assertTrue(error[0].startswith(self.expected_error))
            time.sleep(3)
            # self.assertEqual(output, [])
            # self.assertFalse(shell.file_exists("/tmp/sync_gateway", 'logs'))
            # self.assertFalse(shell.file_exists("/tmp/sync_gateway", 'data'))
            self.assertFalse(shell.file_exists("/tmp/sync_gateway", 'sync_gateway.json'))
            self.assertFalse(self.is_sync_gateway_service_running(shell))
            self.assertFalse(self.is_sync_gateway_process_running(shell))
def serviceInstallLogsDirNotExist(self):
for server in self.servers:
shell = RemoteMachineShellConnection(server)
self.assertTrue(self.service_clean(shell))
self.assertTrue(self.install_gateway(shell))
output, error = self.run_sync_gateway_service_install(shell, self.extra_param)
self.check_normal_error_output(shell, output, error)
self.assertTrue(self.is_sync_gateway_service_running(shell))
self.assertTrue(self.is_sync_gateway_process_running(shell))
self.assertTrue(shell.file_exists(self.logsdir, 'sync_gateway_error.log'))
self.assertTrue(shell.file_exists(self.datadir, 'data'))
self.assertTrue(shell.file_exists(self.configdir, self.configfile))
def serviceInstallBadParameters(self):
shell = RemoteMachineShellConnection(self.master)
self.kill_processes_gateway(shell)
self.uninstall_gateway(shell)
self.assertTrue(self.install_gateway(shell))
output, error = self.run_sync_gateway_service_install(shell, "-runbase /tmp/test")
temp_help = ["ERROR: unknown parameter \"-runbase\""]
temp_help.extend(help_string)
self.assertEqual(error, [])
self.assertEqual(output, temp_help)
output, error = self.run_sync_gateway_service_install(shell, "-r/tmp/test")
temp_help = ["ERROR: unknown parameter \"-r/tmp/test\""]
temp_help.extend(help_string)
self.assertEqual(error, [])
self.assertEqual(output, temp_help)
output, error = self.run_sync_gateway_service_install(shell, "-r /tmp/test")
temp_help = ["ERROR: unknown parameter \"-r\""]
temp_help.extend(help_string)
self.assertEqual(error, [])
self.assertEqual(output, temp_help)
output, error = self.run_sync_gateway_service_install(shell, "-runbase==/tmp/test")
temp_help = ["ERROR: unknown parameter \"-runbase\""]
temp_help.extend(help_string)
self.assertEqual(error, [])
self.assertEqual(output, temp_help)
output, error = self.run_sync_gateway_service_install(shell, "runbase=/tmp/test")
temp_help = ["ERROR: unknown parameter \"runbase\""]
temp_help.extend(help_string)
self.assertEqual(error, [])
self.assertEqual(output, temp_help)
shell.disconnect()
| 11,151 | 18 | 400 |
bc824221bd86159252cc6ddafd65edc617bbc567 | 3,713 | py | Python | build-tools/garbage-files-generator.py | neuro-inc/neuro-cli | 72bd2a825cc319bbc79c6df16f33380796fad4f5 | [
"Apache-2.0"
] | 5 | 2019-09-24T15:37:47.000Z | 2020-08-04T09:25:29.000Z | build-tools/garbage-files-generator.py | neuromation/platform-client-python | 72bd2a825cc319bbc79c6df16f33380796fad4f5 | [
"Apache-2.0"
] | 748 | 2019-08-05T14:57:11.000Z | 2020-09-28T09:54:41.000Z | build-tools/garbage-files-generator.py | neuro-inc/neuro-cli | 72bd2a825cc319bbc79c6df16f33380796fad4f5 | [
"Apache-2.0"
] | 3 | 2019-10-07T19:25:22.000Z | 2020-06-29T01:41:26.000Z | #!/usr/bin/env python
import argparse
import math
import os
import pathlib
import re
from rich.console import Console
from rich.progress import Progress
if __name__ == "__main__":
main()
| 31.466102 | 88 | 0.587934 | #!/usr/bin/env python
import argparse
import math
import os
import pathlib
import re
from rich.console import Console
from rich.progress import Progress
def main():
    """CLI entry point: parse the arguments and generate the file tree."""
    args = _parse_args()
    generate_data(args.total_size, args.files_count, args.branching_factor, args.output_dir)
def generate_data(
    total_size: int,
    files_count: int,
    branching_factor: int,
    output_dir: pathlib.Path,
):
    """Fill *output_dir* with *files_count* random files totalling roughly
    *total_size* bytes, spread over a directory tree whose folders hold at
    most *branching_factor* entries each.
    """
    # Per-file size; ceil() means the actual total may slightly exceed total_size.
    file_size_bytes = math.ceil(total_size / files_count)
    tree_depth = math.floor(math.log(files_count, branching_factor))
    # Digits needed to zero-pad one name component within a folder level.
    name_length = len(str(branching_factor))
    console = Console()
    console.log(
        f"Generating {files_count} files {file_size_bytes} bytes each, "
        f"which in total: {total_size} bytes."
    )
    # One random buffer is generated once and re-written for every file.
    buffer_size = min(file_size_bytes, 16 * 2**20)  # 16MB at max
    garbage = os.urandom(buffer_size)
    write_iterations = file_size_bytes // buffer_size
    tail_size = file_size_bytes % buffer_size
    output_dir.mkdir(parents=True)
    created_files = 0
    folders_counter = 0
    with Progress() as progress:
        data_gen_task = progress.add_task(
            "[green]Generating data...", total=files_count
        )
        file_gen_task = progress.add_task(
            f"[cyan]Generating file...", total=write_iterations
        )
        while created_files < files_count:
            # Encode the folder counter as fixed-width digit groups,
            # one group per tree level (e.g. '001/023' for depth 2).
            files_count_z = str(folders_counter).zfill(name_length * tree_depth)
            split_path = []
            for level in range(tree_depth):
                split_path.append(
                    files_count_z[name_length * level : name_length * (level + 1)]
                )
            folder_path = output_dir.joinpath(*split_path)
            folder_path.mkdir(parents=True, exist_ok=True)
            folders_counter += 1
            # Fill this folder with up to branching_factor files.
            for i in range(branching_factor):
                if created_files < files_count:
                    file_name = str(i).zfill(name_length)
                    full_file_name = folder_path / file_name
                    with full_file_name.open("wb") as file:
                        for iteration in range(write_iterations):
                            file.write(garbage)
                            progress.update(
                                file_gen_task,
                                completed=iteration,
                                description=f"[cyan]Writing file {full_file_name}...",
                            )
                        if tail_size != 0:
                            # Remainder when file size is not a buffer multiple.
                            file.write(garbage[:tail_size])
                    created_files += 1
                    progress.advance(data_gen_task)
                else:
                    break
    console.log("Data generation completed.")
def _parse_args() -> argparse.Namespace:
    """Build the command-line parser and parse sys.argv."""
    parser = argparse.ArgumentParser()
    # Required positionals.
    parser.add_argument("files_count", type=int)
    parser.add_argument("total_size", type=_parse_size)
    # Optional tuning knobs.
    parser.add_argument("--branching-factor", type=int, default=100)
    parser.add_argument("--output-dir", type=_parse_dir_path, default=pathlib.Path("./data"))
    return parser.parse_args()
def _parse_size(size: str) -> int:
units = {"B": 1, "KB": 2**10, "MB": 2**20, "GB": 2**30, "TB": 2**40}
size = size.upper()
# print("parsing size ", size)
number, unit = re.fullmatch(r"(\d+(?:\.\d*)?)([KMGT]?B)", size.strip()).groups()
return int(float(number) * units[unit])
def _parse_dir_path(path: str) -> pathlib.Path:
if not os.path.exists(path):
return pathlib.Path(path)
else:
raise argparse.ArgumentTypeError(f"{path} already exists, could not overwrite.")
# Run the generator when executed as a script.
if __name__ == "__main__":
    main()
| 3,398 | 0 | 115 |
aa2432989d267f1697e3b7870f5529c883957cbd | 416 | py | Python | app.py | youqad/oxford-hack-2020 | 7c4bf02f0dc52ce99cee721a3b7b3344060018f2 | [
"MIT"
] | null | null | null | app.py | youqad/oxford-hack-2020 | 7c4bf02f0dc52ce99cee721a3b7b3344060018f2 | [
"MIT"
] | null | null | null | app.py | youqad/oxford-hack-2020 | 7c4bf02f0dc52ce99cee721a3b7b3344060018f2 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, url_for, flash, redirect
from werkzeug.exceptions import abort
app = Flask(__name__)
app.config['SECRET_KEY'] = 'mA7OumwKVZQr9ousrge1OVQxQr51WEs7'
@app.route('/')
@app.route('/info')
@app.route('/algo') | 23.111111 | 75 | 0.730769 | from flask import Flask, render_template, request, url_for, flash, redirect
from werkzeug.exceptions import abort
app = Flask(__name__)
app.config['SECRET_KEY'] = 'mA7OumwKVZQr9ousrge1OVQxQr51WEs7'
@app.route('/')
def index():
    """Render the 'index.html' template for the site root."""
    return render_template('index.html')
@app.route('/info')
def info():
    """Render the 'info.html' template for /info."""
    return render_template('info.html')
@app.route('/algo')
def algo():
    """Render the 'algo.html' template for /algo."""
    return render_template('algo.html')
ce7b3baf52f5102717543610e6978b690f6d6393 | 9,183 | py | Python | src/anchorpy/clientgen/accounts.py | kevinheavey/anchorpy | d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad | [
"MIT"
] | 87 | 2021-09-26T18:14:07.000Z | 2022-03-28T08:22:24.000Z | src/anchorpy/clientgen/accounts.py | kevinheavey/anchorpy | d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad | [
"MIT"
] | 15 | 2021-10-07T16:12:23.000Z | 2022-03-20T21:04:40.000Z | src/anchorpy/clientgen/accounts.py | kevinheavey/anchorpy | d4cc28365c6adaeaec7f5001fa6b8a3e719b41ad | [
"MIT"
] | 16 | 2021-10-16T04:40:28.000Z | 2022-03-18T16:49:40.000Z | from pathlib import Path
from black import format_str, FileMode
from autoflake import fix_code
from pyheck import snake
from genpy import (
FromImport,
Import,
Assign,
Suite,
Collection,
ImportAs,
Return,
For,
If,
Raise,
Statement,
)
from anchorpy.coder.accounts import _account_discriminator
from anchorpy.idl import (
Idl,
_IdlAccountDef,
)
from anchorpy.clientgen.genpy_extension import (
Dataclass,
Method,
ClassMethod,
TypedParam,
TypedDict,
StrDict,
StrDictEntry,
NamedArg,
Call,
Continue,
)
from anchorpy.clientgen.common import (
_json_interface_name,
_py_type_from_idl,
_idl_type_to_json_type,
_layout_for_type,
_field_from_decoded,
_field_to_json,
_field_from_json,
)
| 31.885417 | 86 | 0.541326 | from pathlib import Path
from black import format_str, FileMode
from autoflake import fix_code
from pyheck import snake
from genpy import (
FromImport,
Import,
Assign,
Suite,
Collection,
ImportAs,
Return,
For,
If,
Raise,
Statement,
)
from anchorpy.coder.accounts import _account_discriminator
from anchorpy.idl import (
Idl,
_IdlAccountDef,
)
from anchorpy.clientgen.genpy_extension import (
Dataclass,
Method,
ClassMethod,
TypedParam,
TypedDict,
StrDict,
StrDictEntry,
NamedArg,
Call,
Continue,
)
from anchorpy.clientgen.common import (
_json_interface_name,
_py_type_from_idl,
_idl_type_to_json_type,
_layout_for_type,
_field_from_decoded,
_field_to_json,
_field_from_json,
)
def gen_accounts(idl: Idl, root: Path) -> None:
    """Write a formatted accounts client package under ``root/accounts``."""
    if not idl.accounts:
        # Nothing to generate for an IDL without accounts.
        return
    out_dir = root / "accounts"
    out_dir.mkdir(exist_ok=True)
    gen_index_file(idl, out_dir)
    for target, raw_code in gen_accounts_code(idl, out_dir).items():
        # Format with black, then strip unused imports with autoflake.
        pretty = format_str(raw_code, mode=FileMode())
        target.write_text(fix_code(pretty, remove_all_unused_imports=True))
def gen_index_file(idl: Idl, accounts_dir: Path) -> None:
    """Write the black-formatted ``__init__.py`` for the accounts package."""
    formatted = format_str(gen_index_code(idl), mode=FileMode())
    (accounts_dir / "__init__.py").write_text(formatted)
def gen_index_code(idl: Idl) -> str:
    """Render the re-export statements for every account module."""
    statements = [
        FromImport(f".{snake(acc.name)}", [acc.name, _json_interface_name(acc.name)])
        for acc in idl.accounts
    ]
    return str(Collection(statements))
def gen_accounts_code(idl: Idl, accounts_dir: Path) -> dict[Path, str]:
    """Map each account's output path to its generated (unformatted) source."""
    return {
        accounts_dir / f"{snake(acc.name)}.py": gen_account_code(acc, idl)
        for acc in idl.accounts
    }
def gen_account_code(acc: _IdlAccountDef, idl: Idl) -> str:
    """Generate the source of one account-client module as a string.

    The emitted module contains the import block, a TypedDict JSON
    interface for the account's fields, and a dataclass with the account
    discriminator, its borsh layout, and fetch / fetch_multiple / decode /
    to_json / from_json methods.
    """
    # Imports every generated module needs; the relative `types` import is
    # only added when the IDL actually declares custom types.
    base_imports = [
        Import("typing"),
        FromImport("dataclasses", ["dataclass"]),
        FromImport("base64", ["b64decode"]),
        FromImport("construct", ["Construct"]),
        FromImport("solana.publickey", ["PublicKey"]),
        FromImport("solana.rpc.async_api", ["AsyncClient"]),
        FromImport("solana.rpc.commitment", ["Commitment"]),
        ImportAs("borsh_construct", "borsh"),
        FromImport("anchorpy.coder.accounts", ["ACCOUNT_DISCRIMINATOR_SIZE"]),
        FromImport("anchorpy.error", ["AccountInvalidDiscriminator"]),
        FromImport("anchorpy.utils.rpc", ["get_multiple_accounts"]),
        FromImport("anchorpy.borsh_extension", ["BorshPubkey", "EnumForCodegen"]),
        FromImport("..program_id", ["PROGRAM_ID"]),
    ]
    imports = (
        [*base_imports, FromImport("..", ["types"])] if idl.types else base_imports
    )
    fields_interface_params: list[TypedParam] = []
    json_interface_params: list[TypedParam] = []
    fields = acc.type.fields
    name = acc.name
    json_interface_name = _json_interface_name(name)
    layout_items: list[str] = []
    init_body_assignments: list[Assign] = []
    decode_body_entries: list[NamedArg] = []
    to_json_entries: list[StrDictEntry] = []
    from_json_entries: list[NamedArg] = []
    # One pass over the account's fields fills every per-field fragment:
    # dataclass params, JSON params, borsh layout items, and the bodies of
    # decode/to_json/from_json.
    for field in fields:
        fields_interface_params.append(
            TypedParam(
                field.name,
                _py_type_from_idl(
                    idl=idl,
                    ty=field.type,
                    types_relative_imports=False,
                    use_fields_interface_for_struct=False,
                ),
            )
        )
        json_interface_params.append(
            TypedParam(
                field.name,
                _idl_type_to_json_type(ty=field.type, types_relative_imports=False),
            )
        )
        layout_items.append(
            _layout_for_type(
                idl=idl, ty=field.type, name=field.name, types_relative_imports=False
            )
        )
        init_body_assignments.append(
            Assign(f"self.{field.name}", f'fields["{field.name}"]')
        )
        decode_body_entries.append(
            NamedArg(
                field.name,
                _field_from_decoded(
                    idl=idl, ty=field, types_relative_imports=False, val_prefix="dec."
                ),
            )
        )
        to_json_entries.append(
            StrDictEntry(field.name, _field_to_json(idl, field, "self."))
        )
        from_json_entries.append(
            NamedArg(
                field.name,
                _field_from_json(idl=idl, ty=field, types_relative_imports=False),
            )
        )
    json_interface = TypedDict(json_interface_name, json_interface_params)
    # 8-byte anchor account discriminator plus the borsh struct layout.
    discriminator_assignment = Assign(
        "discriminator: typing.ClassVar", _account_discriminator(name)
    )
    layout_assignment = Assign(
        "layout: typing.ClassVar", f"borsh.CStruct({','.join(layout_items)})"
    )
    # async fetch(): load one account, verify ownership, base64-decode, decode.
    fetch_method = ClassMethod(
        "fetch",
        [
            TypedParam("conn", "AsyncClient"),
            TypedParam("address", "PublicKey"),
            TypedParam("commitment", "typing.Optional[Commitment] = None"),
        ],
        Suite(
            [
                Assign(
                    "resp",
                    "await conn.get_account_info(address, commitment=commitment)",
                ),
                Assign("info", 'resp["result"]["value"]'),
                If("info is None", Return("None")),
                If(
                    'info["owner"] != str(PROGRAM_ID)',
                    Raise('ValueError("Account does not belong to this program")'),
                ),
                Assign("bytes_data", 'b64decode(info["data"][0])'),
                Return("cls.decode(bytes_data)"),
            ]
        ),
        f'typing.Optional["{name}"]',
        is_async=True,
    )
    account_does_not_belong_raise = Raise(
        'ValueError("Account does not belong to this program")'
    )
    fetch_multiple_return_type = f'typing.List[typing.Optional["{name}"]]'
    # async fetch_multiple(): batch variant; None entries preserved per address.
    fetch_multiple_method = ClassMethod(
        "fetch_multiple",
        [
            TypedParam("conn", "AsyncClient"),
            TypedParam("addresses", "list[PublicKey]"),
            TypedParam("commitment", "typing.Optional[Commitment] = None"),
        ],
        Suite(
            [
                Assign(
                    "infos",
                    (
                        "await get_multiple_accounts"
                        "(conn, addresses,commitment=commitment)"
                    ),
                ),
                Assign(f"res: {fetch_multiple_return_type}", "[]"),
                For(
                    "info",
                    "infos",
                    Suite(
                        [
                            If(
                                "info is None",
                                Suite([Statement("res.append(None)"), Continue()]),
                            ),
                            If(
                                "info.account.owner != PROGRAM_ID",
                                account_does_not_belong_raise,
                            ),
                            Statement("res.append(cls.decode(info.account.data))"),
                        ]
                    ),
                ),
                Return("res"),
            ]
        ),
        f'typing.List[typing.Optional["{name}"]]',
        is_async=True,
    )
    decode_body_end = Call("cls", decode_body_entries)
    account_invalid_raise = Raise(
        'AccountInvalidDiscriminator("The discriminator for this account is invalid")'
    )
    # decode(): check the discriminator, then parse the remaining bytes.
    decode_method = ClassMethod(
        "decode",
        [TypedParam("data", "bytes")],
        Suite(
            [
                If(
                    "data[:ACCOUNT_DISCRIMINATOR_SIZE] != cls.discriminator",
                    account_invalid_raise,
                ),
                Assign(
                    "dec", f"{name}.layout.parse(data[ACCOUNT_DISCRIMINATOR_SIZE:])"
                ),
                Return(decode_body_end),
            ]
        ),
        f'"{name}"',
    )
    # to_json()/from_json(): round-trip between the dataclass and its TypedDict.
    to_json_body = StrDict(to_json_entries)
    to_json_method = Method("to_json", [], Return(to_json_body), json_interface_name)
    from_json_body = Call("cls", from_json_entries)
    from_json_method = ClassMethod(
        "from_json",
        [TypedParam("obj", json_interface_name)],
        Return(from_json_body),
        f'"{name}"',
    )
    # Assemble the dataclass and render the whole module to a string.
    klass = Dataclass(
        name,
        [
            discriminator_assignment,
            layout_assignment,
            *fields_interface_params,
            fetch_method,
            fetch_multiple_method,
            decode_method,
            to_json_method,
            from_json_method,
        ],
    )
    return str(
        Collection(
            [
                *imports,
                json_interface,
                klass,
            ]
        )
    )
| 8,265 | 0 | 115 |
b1b31f991a09674904d9957324ed536384780435 | 2,369 | py | Python | analysis/active/saccades/deconvolve-saccades.py | goldman-lab/oculomotor-response-timescales | fa9f73e6f6a256a5983248b30c6348a50f1fdfcf | [
"BSD-3-Clause"
] | null | null | null | analysis/active/saccades/deconvolve-saccades.py | goldman-lab/oculomotor-response-timescales | fa9f73e6f6a256a5983248b30c6348a50f1fdfcf | [
"BSD-3-Clause"
] | null | null | null | analysis/active/saccades/deconvolve-saccades.py | goldman-lab/oculomotor-response-timescales | fa9f73e6f6a256a5983248b30c6348a50f1fdfcf | [
"BSD-3-Clause"
] | 1 | 2022-01-05T18:26:33.000Z | 2022-01-05T18:26:33.000Z | import numpy as np
import sys
sys.path.append('../../../tools/')
import fitting_functions
import os
import scipy.io as sio
import scipy.optimize
import tqdm
if __name__ == "__main__":
    # Trace files per fish; the fish name is the file name minus the
    # trailing '_NNNN' trace suffix.
    file_names = [('090711e_0006',), ('090811c_0002',), ('090811d_0002', '090811d_0004',),
                  ('091111a_0001', '091111a_0003'), ('091111c_0003',), ('091211a_0002', '091211a_0005')]
    best_num_components = {'090711e': 3, '090811c': 3, '090811d': 3, '091111a': 4, '091111c': 3, '091211a': 3}
    T_start = 17  # ~130 ms
    if not os.path.isdir('deconv'):
        os.makedirs('deconv')
    if not os.path.isdir('deconv/distributed'):
        os.makedirs('deconv/distributed')
    if not os.path.isdir('deconv/fast'):
        os.makedirs('deconv/fast')
    for fish_num in tqdm.trange(len(file_names), desc='Fish'):
        fish_name = file_names[fish_num][0][:-5]
        n = best_num_components[fish_name]
        # Load both plant fits (distributed and fast) for this fish.
        plant_file = sio.loadmat('../plants/best/distributed/'+fish_name+'.mat')
        plant = plant_file['plant'][0]
        plant_file = sio.loadmat('../plants/best/fast/'+fish_name+'.mat')
        plant_fast = plant_file['plant'][0]
        for trace_num in range(len(file_names[fish_num])):
            saccade_data_file = sio.loadmat('fit/'+file_names[fish_num][trace_num]+'.mat')
            trange_sacc = saccade_data_file['trange'][0]
            eye_pos_sacc = saccade_data_file['model'][0]
            drive = deconvolveEyePos(trange_sacc, eye_pos_sacc, plant)
            # BUG FIX: the loaded variable is `plant_fast`; the previous
            # `fast_plant` was an undefined name (NameError at runtime).
            drive_fast = deconvolveEyePos(trange_sacc, eye_pos_sacc, plant_fast)
            sio.savemat('deconv/distributed/'+file_names[fish_num][trace_num]+'.mat', {'drive': drive})
            sio.savemat('deconv/fast/'+file_names[fish_num][trace_num]+'.mat', {'drive': drive_fast})
| 37.603175 | 104 | 0.633179 | import numpy as np
import sys
sys.path.append('../../../tools/')
import fitting_functions
import os
import scipy.io as sio
import scipy.optimize
import tqdm
def deconvolveEyePos(trange, eye_pos, plant, ind=-1):
    """Recover the non-negative drive whose convolution with the plant's
    exponential impulse response best reproduces eye_pos (bounded least
    squares). `ind` limits the number of convolution columns; -1 means
    use the full trace length."""
    kernel = fitting_functions.exponentialModel(trange, plant)
    dt = trange[2] - trange[1]

    def _conv_matrix(signal, m):
        # Lower-triangular convolution matrix: column j is `signal`
        # shifted down by j samples.
        cols = np.zeros((len(signal), m))
        cols[:, 0] = signal
        for j in range(1, m):
            cols[j:, j] = signal[:len(signal) - j]
        return cols

    n_cols = len(trange) if ind == -1 else ind
    system = dt * _conv_matrix(kernel, n_cols)
    solution = scipy.optimize.lsq_linear(system, eye_pos, bounds=(0, np.inf))
    drive = np.zeros(len(trange))
    drive[:len(solution.x)] = solution.x
    return drive
if __name__ == "__main__":
    # Trace files per fish; the fish name is the file name minus the
    # trailing '_NNNN' trace suffix.
    file_names = [('090711e_0006',), ('090811c_0002',), ('090811d_0002', '090811d_0004',),
                  ('091111a_0001', '091111a_0003'), ('091111c_0003',), ('091211a_0002', '091211a_0005')]
    best_num_components = {'090711e': 3, '090811c': 3, '090811d': 3, '091111a': 4, '091111c': 3, '091211a': 3}
    T_start = 17  # ~130 ms
    if not os.path.isdir('deconv'):
        os.makedirs('deconv')
    if not os.path.isdir('deconv/distributed'):
        os.makedirs('deconv/distributed')
    if not os.path.isdir('deconv/fast'):
        os.makedirs('deconv/fast')
    for fish_num in tqdm.trange(len(file_names), desc='Fish'):
        fish_name = file_names[fish_num][0][:-5]
        n = best_num_components[fish_name]
        # Load both plant fits (distributed and fast) for this fish.
        plant_file = sio.loadmat('../plants/best/distributed/'+fish_name+'.mat')
        plant = plant_file['plant'][0]
        plant_file = sio.loadmat('../plants/best/fast/'+fish_name+'.mat')
        plant_fast = plant_file['plant'][0]
        for trace_num in range(len(file_names[fish_num])):
            saccade_data_file = sio.loadmat('fit/'+file_names[fish_num][trace_num]+'.mat')
            trange_sacc = saccade_data_file['trange'][0]
            eye_pos_sacc = saccade_data_file['model'][0]
            drive = deconvolveEyePos(trange_sacc, eye_pos_sacc, plant)
            # BUG FIX: the loaded variable is `plant_fast`; the previous
            # `fast_plant` was an undefined name (NameError at runtime).
            drive_fast = deconvolveEyePos(trange_sacc, eye_pos_sacc, plant_fast)
            sio.savemat('deconv/distributed/'+file_names[fish_num][trace_num]+'.mat', {'drive': drive})
            sio.savemat('deconv/fast/'+file_names[fish_num][trace_num]+'.mat', {'drive': drive_fast})
| 566 | 0 | 23 |
7f099f5157ae9f8859ce21a33bfc1f8961efeb77 | 7,032 | py | Python | models/backbone/shufflenet.py | Minerva-J/Pytorch-Segmentation-multi-models | 0845b54d4fbc8d38c70f158054b7ab1be2b3ceb9 | [
"Apache-2.0"
] | 84 | 2020-06-10T11:50:10.000Z | 2022-03-28T15:24:27.000Z | models/backbone/shufflenet.py | eeaesa/Pytorch-Segmentation-multi-models | 0845b54d4fbc8d38c70f158054b7ab1be2b3ceb9 | [
"Apache-2.0"
] | 4 | 2020-08-10T07:22:08.000Z | 2022-01-28T01:58:25.000Z | models/backbone/shufflenet.py | eeaesa/Pytorch-Segmentation-multi-models | 0845b54d4fbc8d38c70f158054b7ab1be2b3ceb9 | [
"Apache-2.0"
] | 28 | 2020-07-02T11:04:56.000Z | 2022-02-16T13:57:20.000Z | # -*- coding: utf-8 -*-
# Thanks to https://github.com/ericsun99/Shufflenet-v2-Pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
import math
__all__ = ['shufflenet_v2', 'ShuffleNetV2']
if __name__ == "__main__":
"""Testing
"""
from torchsummary import summary
model = shufflenet_v2(pretrained=True, output_stride=16)
summary(model, [3, 224, 224], device='cpu')
| 34.135922 | 115 | 0.567975 | # -*- coding: utf-8 -*-
# Thanks to https://github.com/ericsun99/Shufflenet-v2-Pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
import math
__all__ = ['shufflenet_v2', 'ShuffleNetV2']
def conv_bn(inp, oup, stride):
    """3x3 conv (padding 1, no bias) -> BatchNorm -> ReLU."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_1x1_bn(inp, oup):
    """1x1 pointwise conv (no bias) -> BatchNorm -> ReLU."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def channel_shuffle(x, groups):
    """Interleave channels across `groups` (ShuffleNet channel shuffle)."""
    batchsize, num_channels, height, width = x.data.size()
    per_group = num_channels // groups
    # (N, C, H, W) -> (N, groups, C/groups, H, W), swap the two channel
    # axes, then flatten back so channels are interleaved across groups.
    grouped = x.view(batchsize, groups, per_group, height, width)
    shuffled = torch.transpose(grouped, 1, 2).contiguous()
    return shuffled.view(batchsize, -1, height, width)
class InvertedResidual(nn.Module):
    """ShuffleNetV2 building block.

    benchmodel == 1: basic unit — the input is split channel-wise in half;
    one half passes through untouched, the other through branch 2
    (pw -> dw -> pw-linear).
    benchmodel != 1 (used with value 2): downsampling unit — both branches
    process the full input and their outputs are concatenated, doubling
    the channel count. The result is always channel-shuffled over 2 groups.
    """
    def __init__(self, inp, oup, stride, benchmodel, dilation=1):
        super(InvertedResidual, self).__init__()
        self.benchmodel = benchmodel
        self.stride = stride
        assert stride in [1, 2]
        # Each branch produces half of the output channels.
        oup_inc = oup // 2
        if self.benchmodel == 1:
            # assert inp == oup_inc
            self.banch2 = nn.Sequential(
                # pw
                nn.Conv2d(oup_inc, oup_inc, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup_inc),
                nn.ReLU(inplace=True),
                # dw
                nn.Conv2d(oup_inc, oup_inc, 3, stride, dilation, dilation=dilation, groups=oup_inc, bias=False),
                nn.BatchNorm2d(oup_inc),
                # pw-linear
                nn.Conv2d(oup_inc, oup_inc, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup_inc),
                nn.ReLU(inplace=True),
            )
        else:
            self.banch1 = nn.Sequential(
                # dw
                nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                # pw-linear
                nn.Conv2d(inp, oup_inc, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup_inc),
                nn.ReLU(inplace=True),
            )
            self.banch2 = nn.Sequential(
                # pw
                nn.Conv2d(inp, oup_inc, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup_inc),
                nn.ReLU(inplace=True),
                # dw
                nn.Conv2d(oup_inc, oup_inc, 3, stride, dilation, dilation=dilation, groups=oup_inc, bias=False),
                nn.BatchNorm2d(oup_inc),
                # pw-linear
                nn.Conv2d(oup_inc, oup_inc, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup_inc),
                nn.ReLU(inplace=True),
            )
    @staticmethod
    def _concat(x, out):
        # concatenate along channel axis
        return torch.cat((x, out), 1)
    def forward(self, x):
        # NOTE(review): `out` is only assigned for benchmodel 1 or 2; any
        # other value would raise UnboundLocalError here — confirm callers
        # only ever pass 1 or 2.
        if 1 == self.benchmodel:
            # Split channels in half: identity path + transformed path.
            x1 = x[:, :(x.shape[1] // 2), :, :]
            x2 = x[:, (x.shape[1] // 2):, :, :]
            out = self._concat(x1, self.banch2(x2))
        elif 2 == self.benchmodel:
            out = self._concat(self.banch1(x), self.banch2(x))
        return channel_shuffle(out, 2)
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 backbone (classifier head removed) for segmentation.

    `output_stride` (8 or 16) trades stage strides for dilation so the
    feature map keeps a higher resolution; any other value gives the
    standard stride-32 backbone. `width_mult` selects one of the four
    published channel configurations.
    """
    def __init__(self, width_mult=1., output_stride=None, pretrained=False):
        super(ShuffleNetV2, self).__init__()
        self.stage_repeats = [4, 8, 4]
        # Swap stride for dilation in the later stages to hit the
        # requested output stride.
        if output_stride == 16:
            self.strides = [2, 2, 1]
            self.dilations = [1, 1, 2]
        elif output_stride == 8:
            self.strides = [2, 1, 1]
            self.dilations = [1, 2, 2]
        else:
            self.strides = [2, 2, 2]
            self.dilations = [1, 1, 1]
        # index 0 is invalid and should never be called.
        # only used for indexing convenience.
        if width_mult == 0.5:
            self.stage_out_channels = [-1, 24, 48, 96, 192, 1024]
        elif width_mult == 1.0:
            self.stage_out_channels = [-1, 24, 116, 232, 464, 1024]
        elif width_mult == 1.5:
            self.stage_out_channels = [-1, 24, 176, 352, 704, 1024]
        elif width_mult == 2.0:
            self.stage_out_channels = [-1, 24, 224, 488, 976, 2048]
        else:
            raise ValueError
        # building first layer
        input_channel = self.stage_out_channels[1]
        self.conv1 = conv_bn(3, input_channel, 2)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.features = []
        # building inverted residual blocks
        for idxstage in range(len(self.stage_repeats)):
            numrepeat = self.stage_repeats[idxstage]
            output_channel = self.stage_out_channels[idxstage + 2]
            for i in range(numrepeat):
                if i == 0:
                    # First unit of each stage downsamples (benchmodel 2).
                    # inp, oup, stride, benchmodel):
                    self.features.append(InvertedResidual(input_channel, output_channel, self.strides[idxstage], 2,
                                                          self.dilations[idxstage]))
                else:
                    self.features.append(InvertedResidual(input_channel, output_channel, 1, 1))
                input_channel = output_channel
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)
        # building last several layers
        # self.conv_last = conv_1x1_bn(input_channel, self.stage_out_channels[-1])
        # self.globalpool = nn.Sequential(nn.AvgPool2d(int(input_size / 32)))
        # Channel counts exposed for downstream segmentation heads.
        self.interconv_channel = 24
        self.lastconv_channel = input_channel
        # building classifier
        # self.classifier = nn.Sequential(nn.Linear(self.stage_out_channels[-1], n_class))
        if pretrained:
            # NOTE(review): hard-coded absolute checkpoint path — this only
            # works on the original author's machine.
            self._load_pretrained_model(
                torch.load('/home/yhuangcc/ImageSegmentation/checkpoints/shufflenetv2_x1_69.402_88.374.pth.tar'))
    def forward(self, x):
        # Stem (conv + maxpool) followed by the three shuffle stages; the
        # classification layers below are intentionally disabled.
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.features(x)
        # x = self.conv_last(x)
        # x = self.globalpool(x)
        # x = x.view(-1, self.stage_out_channels[-1])
        # x = self.classifier(x)
        return x
    def _load_pretrained_model(self, pretrain_dict):
        # Copy only the checkpoint entries whose keys exist in this model,
        # so the stripped classifier head does not cause load errors.
        model_dict = {}
        state_dict = self.state_dict()
        for k, v in pretrain_dict.items():
            if k in state_dict:
                model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
def shufflenet_v2(pretrained=None, output_stride=None, width_mult=1.):
    """Factory for ShuffleNetV2; pretrained weights are only honoured for
    width multiplier 1.0."""
    if width_mult != 1.:
        pretrained = False
    return ShuffleNetV2(width_mult=width_mult, output_stride=output_stride, pretrained=pretrained)
# Smoke test: build the backbone (note: pretrained=True loads weights from
# a hard-coded absolute path) and print a layer summary for a 224x224 input.
if __name__ == "__main__":
    """Testing
    """
    from torchsummary import summary
    model = shufflenet_v2(pretrained=True, output_stride=16)
    summary(model, [3, 224, 224], device='cpu')
| 6,180 | 120 | 218 |
3305587d836a98367649a9f623ff4f20d1af90de | 1,826 | py | Python | backend/importer/import_tmks.py | warent/ACLU | 94c617658284b93ac1edb6c01193fb344f8f60cc | [
"MIT"
] | 23 | 2017-07-13T04:47:43.000Z | 2021-12-12T09:33:08.000Z | backend/importer/import_tmks.py | warent/ACLU | 94c617658284b93ac1edb6c01193fb344f8f60cc | [
"MIT"
] | 188 | 2017-07-13T04:40:27.000Z | 2021-07-20T05:42:30.000Z | backend/importer/import_tmks.py | warent/ACLU | 94c617658284b93ac1edb6c01193fb344f8f60cc | [
"MIT"
] | 16 | 2017-08-17T05:05:58.000Z | 2019-03-18T21:54:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017
#
# Distributed under terms of the MIT license.
import click
import datetime
import logging
import logging.config
import multiprocessing
import os
import sys
import uuid
from utilities import get_features_from_geojson
from utilities import get_organization
from utilities import get_pyeve_formatted_datetime
from utilities import post_feature
logging.config.fileConfig(
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logging.conf'))
logger = logging.getLogger("aclu_importer.tmks")
@click.command()
@click.option('--tmk_features_path', help='Path to tmk features file being imported.', required=True, type=click.Path(exists=True))
@click.option('--api_base_url', default='http://localhost:50050', help='API base url. Defaults to http://localhost:50050')
if __name__ == '__main__':
import_tmk()
# vim: fenc=utf-8
# vim: filetype=python
| 26.463768 | 131 | 0.7092 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017
#
# Distributed under terms of the MIT license.
import click
import datetime
import logging
import logging.config
import multiprocessing
import os
import sys
import uuid
from utilities import get_features_from_geojson
from utilities import get_organization
from utilities import get_pyeve_formatted_datetime
from utilities import post_feature
logging.config.fileConfig(
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logging.conf'))
logger = logging.getLogger("aclu_importer.tmks")
@click.command()
@click.option('--tmk_features_path', help='Path to tmk features file being imported.', required=True, type=click.Path(exists=True))
@click.option('--api_base_url', default='http://localhost:50050', help='API base url. Defaults to http://localhost:50050')
def import_tmk(tmk_features_path, api_base_url):
    """Read TMK features from a geojson file and post each one to the API.

    Posts are dispatched asynchronously on a process pool sized to the CPU
    count; the pool is drained (close/join) before exiting.
    """
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    # Features are attached to the "Park" organization; if the lookup
    # fails, nothing is imported.
    organization = get_organization(api_base_url, "Park")
    numFeatures = 0
    if organization:
        for feature in get_features_from_geojson(tmk_features_path):
            numFeatures += 1
            f = _construct_tmk_feature_json(feature, organization)
            # Fire-and-forget: async results are never collected, so
            # individual post failures go unnoticed (NOTE: review).
            pool.apply_async(post_feature, [api_base_url, f])
    print(numFeatures)
    pool.close()
    pool.join()
    return sys.exit(0)  # sys.exit raises SystemExit; the `return` never executes
def _construct_tmk_feature_json(feature, organization):
    """Build the API payload for one TMK geojson feature."""
    tmk_number = feature['properties']['TMK']
    payload = {
        "_id": str(uuid.uuid4()),
        "geojson": feature,
        "organization": organization["_id"],
        "name": "TMK " + str(tmk_number),
        "type": "tmk",
        "last_imported_at":
            get_pyeve_formatted_datetime(datetime.datetime.utcnow()),
    }
    return payload
# CLI entry point.
if __name__ == '__main__':
    import_tmk()
# vim: fenc=utf-8
# vim: filetype=python
| 848 | 0 | 45 |
b958639b31027285cbe8affba52c97466d35072c | 702 | py | Python | src/models/version_model_test.py | DEV3L/python-learning-journal | cb7f7b4ddaeffa8e207f9f4b42b775d442d975a4 | [
"Beerware"
] | null | null | null | src/models/version_model_test.py | DEV3L/python-learning-journal | cb7f7b4ddaeffa8e207f9f4b42b775d442d975a4 | [
"Beerware"
] | null | null | null | src/models/version_model_test.py | DEV3L/python-learning-journal | cb7f7b4ddaeffa8e207f9f4b42b775d442d975a4 | [
"Beerware"
] | null | null | null | from src.models.version_model import VersionModel
| 21.9375 | 53 | 0.746439 | from src.models.version_model import VersionModel
def test_version_model_sets_attributes():
    """The constructor must store the given version string on .version."""
    version = 'v001_data_init'
    model = VersionModel(version)
    assert model.version == version
def test_version_model_returns_json():
    """to_json must expose the version under the 'version' key."""
    model = VersionModel('v001_data_init')
    assert model.to_json == {'version': 'v001_data_init'}
def test_version_model_loads_from_json():
    """from_json must rebuild a model from a {'version': ...} mapping."""
    version = 'v001_data_init'
    model = VersionModel.from_json({'version': version})
    assert model.version == version
| 580 | 0 | 69 |
dc2fa87cd670ce4ae43ad47ddaa88ef5cbdd69cf | 112 | py | Python | Marauders_GUI/kirk.py | kmackenzieii/marauders-map | 0027d2880cec8e5ef77b7e5c6454f43ef17b9867 | [
"MIT"
] | null | null | null | Marauders_GUI/kirk.py | kmackenzieii/marauders-map | 0027d2880cec8e5ef77b7e5c6454f43ef17b9867 | [
"MIT"
] | null | null | null | Marauders_GUI/kirk.py | kmackenzieii/marauders-map | 0027d2880cec8e5ef77b7e5c6454f43ef17b9867 | [
"MIT"
] | null | null | null | width = 300
height = 450
box_size = 35
x = width / box_size
y = height / box_size
File = "/interface/kirk.gif"
| 14 | 28 | 0.678571 | width = 300
height = 450
box_size = 35
x = width / box_size
y = height / box_size
File = "/interface/kirk.gif"
| 0 | 0 | 0 |
66441229a6c1d50bf2de571106f41bcdf21efda4 | 989 | py | Python | discovery-provider/alembic/versions/f775fb87f5ff_add_skipped_transaction_level_column.py | lucylow/audius-protocol | 5ef93462f9dc7df01a15877c02ca79b9a7d99236 | [
"Apache-2.0"
] | 1 | 2022-03-27T21:40:36.000Z | 2022-03-27T21:40:36.000Z | discovery-provider/alembic/versions/f775fb87f5ff_add_skipped_transaction_level_column.py | abelxmendoza/audius-protocol | 33757e1b722a4be97960086b98b26ae3a75ee56b | [
"Apache-2.0"
] | null | null | null | discovery-provider/alembic/versions/f775fb87f5ff_add_skipped_transaction_level_column.py | abelxmendoza/audius-protocol | 33757e1b722a4be97960086b98b26ae3a75ee56b | [
"Apache-2.0"
] | null | null | null | """add skipped_transaction level column
Revision ID: f775fb87f5ff
Revises: be27a2794f75
Create Date: 2022-01-12 22:32:24.949547
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "f775fb87f5ff"
down_revision = "be27a2794f75"
branch_labels = None
depends_on = None
| 26.026316 | 88 | 0.697674 | """add skipped_transaction level column
Revision ID: f775fb87f5ff
Revises: be27a2794f75
Create Date: 2022-01-12 22:32:24.949547
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "f775fb87f5ff"
down_revision = "be27a2794f75"
branch_labels = None
depends_on = None
def upgrade():
    """Add a NOT NULL 'level' enum column to skipped_transactions.

    The column is added nullable first, existing rows are backfilled with
    'network', and only then is the NOT NULL constraint applied.
    """
    # Create the enum type up front (explicit create is required on dialects
    # with native enum types, e.g. PostgreSQL).
    skippedtransactionlevel = sa.Enum("node", "network", name="skippedtransactionlevel")
    skippedtransactionlevel.create(op.get_bind())
    # Add the column as nullable so existing rows don't violate NOT NULL.
    op.add_column(
        "skipped_transactions",
        sa.Column(
            "level",
            sa.Enum("node", "network", name="skippedtransactionlevel"),
            nullable=True,
        ),
    )
    # Backfill existing rows, then tighten the constraint.
    op.execute("UPDATE skipped_transactions SET level = 'network'")
    op.alter_column("skipped_transactions", "level", nullable=False)
def downgrade():
    """Drop the 'level' column and the skippedtransactionlevel enum type."""
    op.drop_column("skipped_transactions", "level")
    bind = op.get_bind()
    # checkfirst=False: fail loudly if the enum type is already gone.
    sa.Enum(name="skippedtransactionlevel").drop(bind, checkfirst=False)
| 621 | 0 | 46 |
cb86331692b61d5b15915b797e85955b1ce6bdaa | 344 | py | Python | Doc/includes/sqlite3/adapter_point_2.py | shawwn/cpython | 0ff8a3b374286d2218fc18f47556a5ace202dad3 | [
"0BSD"
] | 52,316 | 2015-01-01T15:56:25.000Z | 2022-03-31T23:19:01.000Z | Doc/includes/sqlite3/adapter_point_2.py | shawwn/cpython | 0ff8a3b374286d2218fc18f47556a5ace202dad3 | [
"0BSD"
] | 25,286 | 2015-03-03T23:18:02.000Z | 2022-03-31T23:17:27.000Z | Doc/includes/sqlite3/adapter_point_2.py | shawwn/cpython | 0ff8a3b374286d2218fc18f47556a5ace202dad3 | [
"0BSD"
] | 31,623 | 2015-01-01T13:29:37.000Z | 2022-03-31T19:55:06.000Z | import sqlite3
sqlite3.register_adapter(Point, adapt_point)
con = sqlite3.connect(":memory:")
cur = con.cursor()
p = Point(4.0, -3.2)
cur.execute("select ?", (p,))
print(cur.fetchone()[0])
con.close()
| 17.2 | 44 | 0.630814 | import sqlite3
class Point:
    """Minimal 2-D point used to demonstrate sqlite3 type adaptation."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
def adapt_point(point):
    """Adapt a point-like object to its "x;y" string form for sqlite3.

    Uses fixed-point formatting with the default six decimal places,
    matching the classic "%f;%f" output byte for byte.
    """
    return f"{point.x:f};{point.y:f}"
# Register the adapter so Point instances can be bound directly as SQL parameters.
sqlite3.register_adapter(Point, adapt_point)
con = sqlite3.connect(":memory:")
cur = con.cursor()
p = Point(4.0, -3.2)
# The bound Point is transparently adapted to the string "4.000000;-3.200000".
cur.execute("select ?", (p,))
print(cur.fetchone()[0])
con.close()
| 76 | -9 | 72 |
64b3ce3a7dd6f9e321707f00bb7be83f35d3e438 | 177 | py | Python | src/test/shedule test/schedule_0.5.0.py | dev-gmmahs/block-vote-service | cc1e296f4bfff657961a50e05150ad5b0f7479e5 | [
"MIT"
] | 4 | 2018-08-28T10:14:58.000Z | 2020-02-14T07:49:20.000Z | src/test/shedule test/schedule_0.5.0.py | dev-gmmahs/block-vote-service | cc1e296f4bfff657961a50e05150ad5b0f7479e5 | [
"MIT"
] | 17 | 2018-08-29T01:57:04.000Z | 2022-02-12T02:49:52.000Z | src/test/shedule test/schedule_0.5.0.py | dev-gmmahs/block-vote-service | cc1e296f4bfff657961a50e05150ad5b0f7479e5 | [
"MIT"
] | 5 | 2018-08-27T12:02:10.000Z | 2018-12-07T20:54:52.000Z | # schedule 0.5.0
import schedule
import time
# Called every minute
schedule.every().minutes.do(job)
while True:
schedule.run_pending()
time.sleep(1) | 12.642857 | 32 | 0.666667 | # schedule 0.5.0
import schedule
import time
def job():
    """Scheduler callback; prints the Korean message "호출 됨" ("called")."""
    print("호출 됨")
# Called every minute
schedule.every().minutes.do(job)
while True:
schedule.run_pending()
time.sleep(1) | 13 | 0 | 23 |
47f5a4c8ff80d736fd6553d71572644d4ce13d3b | 739 | py | Python | back-end/credentials.py | tamasandacian/Flask-Angular-EmployeeSearch | dfe4fd825de36eabdf9b293f8e566e44a5a49ebf | [
"MIT"
] | 3 | 2019-07-20T20:19:47.000Z | 2022-01-06T16:44:05.000Z | back-end/credentials.py | tamasandacian/Flask-Angular-EmployeeSearch | dfe4fd825de36eabdf9b293f8e566e44a5a49ebf | [
"MIT"
] | 9 | 2020-09-06T14:09:14.000Z | 2022-03-02T04:55:41.000Z | back-end/credentials.py | tamasandacian/Flask-Angular-EmployeeSearch | dfe4fd825de36eabdf9b293f8e566e44a5a49ebf | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
from pathlib import Path
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
############################# CONNECTION TO ELASTICSEARCH LOCALHOST #############################
# username = os.environ.get('ELASTICSEARCH_USERNAME_LOCALHOST')
# password = os.environ.get('ELASTICSEARCH_PASSWORD_LOCALHOST')
# ES_HOST = {"host": "localhost", "port": 9200}
############################# CONNECTION TO CLOUD ELASTICSEARCH #################################
# Elastic Cloud credentials, read from the environment (.env loaded above via python-dotenv).
username = os.getenv('ELASTIC_CLOUD_USERNAME')
password = os.getenv('ELASTIC_CLOUD_PASSWORD')
ES_HOST = "REPLACE_THIS_WITH_YOUR_ES_HOST"  # placeholder: set to your cluster endpoint before use
################################################################################################# | 43.470588 | 97 | 0.554804 | import os
from dotenv import load_dotenv
from pathlib import Path
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
############################# CONNECTION TO ELASTICSEARCH LOCALHOST #############################
# username = os.environ.get('ELASTICSEARCH_USERNAME_LOCALHOST')
# password = os.environ.get('ELASTICSEARCH_PASSWORD_LOCALHOST')
# ES_HOST = {"host": "localhost", "port": 9200}
############################# CONNECTION TO CLOUD ELASTICSEARCH #################################
username = os.getenv('ELASTIC_CLOUD_USERNAME')
password = os.getenv('ELASTIC_CLOUD_PASSWORD')
ES_HOST = "REPLACE_THIS_WITH_YOUR_ES_HOST"
################################################################################################# | 0 | 0 | 0 |
d3d36bed3f73ffc3b77760185d28d8fe4c701b22 | 3,132 | py | Python | benchmark/bench_objectify.py | skeptycal/lxml | 32ac7a3bdc8faf2104a77787ed18f2096d0a7346 | [
"BSD-3-Clause"
] | null | null | null | benchmark/bench_objectify.py | skeptycal/lxml | 32ac7a3bdc8faf2104a77787ed18f2096d0a7346 | [
"BSD-3-Clause"
] | 1 | 2020-07-07T04:35:41.000Z | 2020-07-07T04:35:41.000Z | benchmark/bench_objectify.py | skeptycal/lxml | 32ac7a3bdc8faf2104a77787ed18f2096d0a7346 | [
"BSD-3-Clause"
] | null | null | null | from itertools import *
import benchbase
from benchbase import with_text, children, nochange
############################################################
# Benchmarks
############################################################
if __name__ == "__main__":
benchbase.main(BenchMark)
| 26.1 | 70 | 0.58014 | from itertools import *
import benchbase
from benchbase import with_text, children, nochange
############################################################
# Benchmarks
############################################################
class BenchMark(benchbase.TreeBenchMark):
    # Micro-benchmarks for lxml.objectify: attribute access, ObjectPath lookups,
    # annotation and element construction.
    # NOTE(review): the bare "1 2 4" docstrings on bench_* methods appear to be
    # read by benchbase as configuration (tree-size variants) -- do not remove
    # or reword them; commentary goes in '#' comments only.
    # Shared iteration counts for the timing loops.
    repeat100 = range(100)
    repeat1000 = range(1000)
    repeat3000 = range(3000)
    def __init__(self, lib):
        # Build an objectify-enabled parser so parsed trees use objectified
        # element classes, then hand etree + parser to the base benchmark.
        from lxml import etree, objectify
        self.objectify = objectify
        parser = etree.XMLParser(remove_blank_text=True)
        lookup = objectify.ObjectifyElementClassLookup()
        parser.setElementClassLookup(lookup)
        super(BenchMark, self).__init__(etree, parser)
    @nochange
    def bench_attribute(self, root):
        "1 2 4"
        # Repeated child lookup via plain attribute access.
        for i in self.repeat3000:
            root.zzzzz
    def bench_attribute_assign_int(self, root):
        "1 2 4"
        for i in self.repeat3000:
            root.XYZ = 5
    def bench_attribute_assign_string(self, root):
        "1 2 4"
        for i in self.repeat3000:
            root.XYZ = "5"
    @nochange
    def bench_attribute_cached(self, root):
        "1 2 4"
        # Warm the lookup once, then time the repeated (still uncached) access.
        cache = root.zzzzz
        for i in self.repeat3000:
            root.zzzzz
    @nochange
    def bench_attributes_deep(self, root):
        "1 2 4"
        # Two-level lookup: attribute access plus namespaced child indexing.
        for i in self.repeat3000:
            root.zzzzz["{cdefg}a00001"]
    @nochange
    def bench_attributes_deep_cached(self, root):
        "1 2 4"
        cache1 = root.zzzzz
        cache2 = cache1["{cdefg}a00001"]
        for i in self.repeat3000:
            root.zzzzz["{cdefg}a00001"]
    @nochange
    def bench_objectpath(self, root):
        "1 2 4"
        # Pre-compiled ObjectPath lookup, shallow.
        path = self.objectify.ObjectPath(".zzzzz")
        for i in self.repeat3000:
            path(root)
    @nochange
    def bench_objectpath_deep(self, root):
        "1 2 4"
        path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
        for i in self.repeat3000:
            path(root)
    @nochange
    def bench_objectpath_deep_cached(self, root):
        "1 2 4"
        cache1 = root.zzzzz
        cache2 = cache1["{cdefg}a00001"]
        path = self.objectify.ObjectPath(".zzzzz.{cdefg}a00001")
        for i in self.repeat3000:
            path(root)
    @with_text(text=True, utext=True, no_text=True)
    def bench_annotate(self, root):
        self.objectify.annotate(root)
    @nochange
    def bench_descendantpaths(self, root):
        root.descendantpaths()
    @nochange
    @with_text(text=True)
    def bench_type_inference(self, root):
        "1 2 4"
        # getchildren() triggers value type inference on text content.
        el = root.aaaaa
        for i in self.repeat1000:
            el.getchildren()
    @nochange
    @with_text(text=True)
    def bench_type_inference_annotated(self, root):
        "1 2 4"
        # Same as above, but with type annotations precomputed.
        el = root.aaaaa
        self.objectify.annotate(el)
        for i in self.repeat1000:
            el.getchildren()
    @nochange
    @children
    def bench_elementmaker(self, children):
        # Element construction throughput via the objectify ElementMaker.
        E = self.objectify.E
        for child in children:
            root = E.this("test", E.will(E.do("nothing"), E.special,))
if __name__ == "__main__":
benchbase.main(BenchMark)
| 540 | 2,279 | 23 |
d20d5b327d487d24d93236afb3de25b6fe501f5e | 12,392 | py | Python | sdk/python/pulumi_proxmox/ct/outputs.py | meyskens/pulumi-proxmox | bf48570690350be68fa554e1cec376212eb449ab | [
"ECL-2.0",
"Apache-2.0"
] | 16 | 2021-01-11T11:26:19.000Z | 2022-01-23T02:32:34.000Z | sdk/python/pulumi_proxmox/ct/outputs.py | meyskens/pulumi-proxmox | bf48570690350be68fa554e1cec376212eb449ab | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-01-29T08:15:46.000Z | 2021-10-17T16:33:19.000Z | sdk/python/pulumi_proxmox/ct/outputs.py | meyskens/pulumi-proxmox | bf48570690350be68fa554e1cec376212eb449ab | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2021-04-06T00:36:05.000Z | 2021-12-16T14:25:07.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'ContainerClone',
'ContainerConsole',
'ContainerCpu',
'ContainerDisk',
'ContainerInitialization',
'ContainerInitializationDns',
'ContainerInitializationIpConfig',
'ContainerInitializationIpConfigIpv4',
'ContainerInitializationIpConfigIpv6',
'ContainerInitializationUserAccount',
'ContainerMemory',
'ContainerNetworkInterface',
'ContainerOperatingSystem',
]
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
| 30.902743 | 98 | 0.633393 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
__all__ = [
'ContainerClone',
'ContainerConsole',
'ContainerCpu',
'ContainerDisk',
'ContainerInitialization',
'ContainerInitializationDns',
'ContainerInitializationIpConfig',
'ContainerInitializationIpConfigIpv4',
'ContainerInitializationIpConfigIpv6',
'ContainerInitializationUserAccount',
'ContainerMemory',
'ContainerNetworkInterface',
'ContainerOperatingSystem',
]
@pulumi.output_type
class ContainerClone(dict):
def __init__(__self__, *,
vm_id: int,
datastore_id: Optional[str] = None,
node_name: Optional[str] = None):
pulumi.set(__self__, "vm_id", vm_id)
if datastore_id is not None:
pulumi.set(__self__, "datastore_id", datastore_id)
if node_name is not None:
pulumi.set(__self__, "node_name", node_name)
@property
@pulumi.getter(name="vmId")
def vm_id(self) -> int:
return pulumi.get(self, "vm_id")
@property
@pulumi.getter(name="datastoreId")
def datastore_id(self) -> Optional[str]:
return pulumi.get(self, "datastore_id")
@property
@pulumi.getter(name="nodeName")
def node_name(self) -> Optional[str]:
return pulumi.get(self, "node_name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerConsole(dict):
def __init__(__self__, *,
enabled: Optional[bool] = None,
tty_count: Optional[int] = None,
type: Optional[str] = None):
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if tty_count is not None:
pulumi.set(__self__, "tty_count", tty_count)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="ttyCount")
def tty_count(self) -> Optional[int]:
return pulumi.get(self, "tty_count")
@property
@pulumi.getter
def type(self) -> Optional[str]:
return pulumi.get(self, "type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerCpu(dict):
def __init__(__self__, *,
architecture: Optional[str] = None,
cores: Optional[int] = None,
units: Optional[int] = None):
if architecture is not None:
pulumi.set(__self__, "architecture", architecture)
if cores is not None:
pulumi.set(__self__, "cores", cores)
if units is not None:
pulumi.set(__self__, "units", units)
@property
@pulumi.getter
def architecture(self) -> Optional[str]:
return pulumi.get(self, "architecture")
@property
@pulumi.getter
def cores(self) -> Optional[int]:
return pulumi.get(self, "cores")
@property
@pulumi.getter
def units(self) -> Optional[int]:
return pulumi.get(self, "units")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerDisk(dict):
def __init__(__self__, *,
datastore_id: Optional[str] = None):
if datastore_id is not None:
pulumi.set(__self__, "datastore_id", datastore_id)
@property
@pulumi.getter(name="datastoreId")
def datastore_id(self) -> Optional[str]:
return pulumi.get(self, "datastore_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerInitialization(dict):
def __init__(__self__, *,
dns: Optional['outputs.ContainerInitializationDns'] = None,
hostname: Optional[str] = None,
ip_configs: Optional[Sequence['outputs.ContainerInitializationIpConfig']] = None,
user_account: Optional['outputs.ContainerInitializationUserAccount'] = None):
if dns is not None:
pulumi.set(__self__, "dns", dns)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if ip_configs is not None:
pulumi.set(__self__, "ip_configs", ip_configs)
if user_account is not None:
pulumi.set(__self__, "user_account", user_account)
@property
@pulumi.getter
def dns(self) -> Optional['outputs.ContainerInitializationDns']:
return pulumi.get(self, "dns")
@property
@pulumi.getter
def hostname(self) -> Optional[str]:
return pulumi.get(self, "hostname")
@property
@pulumi.getter(name="ipConfigs")
def ip_configs(self) -> Optional[Sequence['outputs.ContainerInitializationIpConfig']]:
return pulumi.get(self, "ip_configs")
@property
@pulumi.getter(name="userAccount")
def user_account(self) -> Optional['outputs.ContainerInitializationUserAccount']:
return pulumi.get(self, "user_account")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerInitializationDns(dict):
def __init__(__self__, *,
domain: Optional[str] = None,
server: Optional[str] = None):
if domain is not None:
pulumi.set(__self__, "domain", domain)
if server is not None:
pulumi.set(__self__, "server", server)
@property
@pulumi.getter
def domain(self) -> Optional[str]:
return pulumi.get(self, "domain")
@property
@pulumi.getter
def server(self) -> Optional[str]:
return pulumi.get(self, "server")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerInitializationIpConfig(dict):
def __init__(__self__, *,
ipv4: Optional['outputs.ContainerInitializationIpConfigIpv4'] = None,
ipv6: Optional['outputs.ContainerInitializationIpConfigIpv6'] = None):
if ipv4 is not None:
pulumi.set(__self__, "ipv4", ipv4)
if ipv6 is not None:
pulumi.set(__self__, "ipv6", ipv6)
@property
@pulumi.getter
def ipv4(self) -> Optional['outputs.ContainerInitializationIpConfigIpv4']:
return pulumi.get(self, "ipv4")
@property
@pulumi.getter
def ipv6(self) -> Optional['outputs.ContainerInitializationIpConfigIpv6']:
return pulumi.get(self, "ipv6")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerInitializationIpConfigIpv4(dict):
def __init__(__self__, *,
address: Optional[str] = None,
gateway: Optional[str] = None):
if address is not None:
pulumi.set(__self__, "address", address)
if gateway is not None:
pulumi.set(__self__, "gateway", gateway)
@property
@pulumi.getter
def address(self) -> Optional[str]:
return pulumi.get(self, "address")
@property
@pulumi.getter
def gateway(self) -> Optional[str]:
return pulumi.get(self, "gateway")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerInitializationIpConfigIpv6(dict):
def __init__(__self__, *,
address: Optional[str] = None,
gateway: Optional[str] = None):
if address is not None:
pulumi.set(__self__, "address", address)
if gateway is not None:
pulumi.set(__self__, "gateway", gateway)
@property
@pulumi.getter
def address(self) -> Optional[str]:
return pulumi.get(self, "address")
@property
@pulumi.getter
def gateway(self) -> Optional[str]:
return pulumi.get(self, "gateway")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerInitializationUserAccount(dict):
def __init__(__self__, *,
keys: Optional[Sequence[str]] = None,
password: Optional[str] = None):
if keys is not None:
pulumi.set(__self__, "keys", keys)
if password is not None:
pulumi.set(__self__, "password", password)
@property
@pulumi.getter
def keys(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "keys")
@property
@pulumi.getter
def password(self) -> Optional[str]:
return pulumi.get(self, "password")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerMemory(dict):
def __init__(__self__, *,
dedicated: Optional[int] = None,
swap: Optional[int] = None):
if dedicated is not None:
pulumi.set(__self__, "dedicated", dedicated)
if swap is not None:
pulumi.set(__self__, "swap", swap)
@property
@pulumi.getter
def dedicated(self) -> Optional[int]:
return pulumi.get(self, "dedicated")
@property
@pulumi.getter
def swap(self) -> Optional[int]:
return pulumi.get(self, "swap")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerNetworkInterface(dict):
def __init__(__self__, *,
name: str,
bridge: Optional[str] = None,
enabled: Optional[bool] = None,
mac_address: Optional[str] = None,
rate_limit: Optional[float] = None,
vlan_id: Optional[int] = None):
pulumi.set(__self__, "name", name)
if bridge is not None:
pulumi.set(__self__, "bridge", bridge)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if mac_address is not None:
pulumi.set(__self__, "mac_address", mac_address)
if rate_limit is not None:
pulumi.set(__self__, "rate_limit", rate_limit)
if vlan_id is not None:
pulumi.set(__self__, "vlan_id", vlan_id)
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def bridge(self) -> Optional[str]:
return pulumi.get(self, "bridge")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="macAddress")
def mac_address(self) -> Optional[str]:
return pulumi.get(self, "mac_address")
@property
@pulumi.getter(name="rateLimit")
def rate_limit(self) -> Optional[float]:
return pulumi.get(self, "rate_limit")
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> Optional[int]:
return pulumi.get(self, "vlan_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerOperatingSystem(dict):
def __init__(__self__, *,
template_file_id: str,
type: Optional[str] = None):
pulumi.set(__self__, "template_file_id", template_file_id)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="templateFileId")
def template_file_id(self) -> str:
return pulumi.get(self, "template_file_id")
@property
@pulumi.getter
def type(self) -> Optional[str]:
return pulumi.get(self, "type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 7,916 | 3,128 | 286 |
9ee5b034b8a1988afc54329a5bec46ecaa4b519c | 4,585 | py | Python | vizier/api/webservice/__init__.py | VizierDB/web-api-async | e99f43df3df80ad5647f57d805c339257336ac73 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-10-21T03:01:39.000Z | 2020-06-05T01:43:00.000Z | vizier/api/webservice/__init__.py | VizierDB/web-api-async | e99f43df3df80ad5647f57d805c339257336ac73 | [
"ECL-2.0",
"Apache-2.0"
] | 56 | 2019-07-12T21:16:03.000Z | 2020-11-06T23:29:22.000Z | vizier/api/webservice/__init__.py | VizierDB/web-api-async | e99f43df3df80ad5647f57d805c339257336ac73 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-02-07T19:56:55.000Z | 2020-08-07T11:17:51.000Z | # Copyright (C) 2017-2019 New York University,
# University at Buffalo,
# Illinois Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vizier web service application object. We currently use Flask microframework
to build the web service. The web service is the main access point for the
Vizier front end and for any other (remote) clients.
"""
import logging
import os
from flask import Flask, jsonify, make_response, redirect
from flask_cors import CORS # type: ignore[import]
from logging.handlers import RotatingFileHandler
from vizier.config.app import AppConfig
import vizier.api.base as srv
import vizier.config.base as const
def create_app() -> Flask:
    """Factory pattern for Flask. Initialize the Flask application object.
    Returns
    -------
    Flask
    """
    # Get application configuration parameters from environment variables.
    config = AppConfig()
    # Create the app and enable cross-origin resource sharing
    app = Flask(__name__)
    #app.config['APPLICATION_ROOT'] = config.webservice.app_path
    #app.config['DEBUG'] = True
    # Set size limit for uploaded files
    app.config['MAX_CONTENT_LENGTH'] = config.webservice.defaults.max_file_size
    # Enable CORS
    CORS(app)
    # Switch logging on
    log_dir = os.path.abspath(config.logs.server)
    # Create the directory if it does not exist
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    # File handle for server logs (rotate at 100 MiB, keep 20 backups)
    file_handler = RotatingFileHandler(
        os.path.join(log_dir, 'vizier-webapi.log'),
        maxBytes=1024 * 1024 * 100,
        backupCount=20
    )
    file_handler.setLevel(logging.ERROR)
    file_handler.setFormatter(
        logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    )
    app.logger.addHandler(file_handler)
    # --------------------------------------------------------------------------
    #
    # Error Handler
    #
    # --------------------------------------------------------------------------
    @app.errorhandler(srv.ServerRequestException)
    def invalid_request_or_resource_not_found(error):
        """JSON response handler for invalid requests or requests that access
        unknown resources.
        Parameters
        ----------
        error : Exception
            Exception thrown by request Handler
        Returns
        -------
        Http response
        """
        app.logger.error(error.message)
        response = jsonify(error.to_dict())
        response.status_code = error.status_code
        return response
    @app.errorhandler(413)
    def upload_error(exception):
        """Exception handler for file uploads that exceed the file size limit."""
        app.logger.error(exception)
        return make_response(jsonify({'title':'Error', 'message': str(exception), 'error': str(exception)}), 413)
    @app.errorhandler(500)
    def internal_error(exception):
        """Exception handler that logs exceptions."""
        app.logger.error(exception)
        return make_response(jsonify({'title':'Error', 'message': str(exception), 'error': str(exception)}), 500)
    # Register the API blueprint
    from . import server
    app.register_blueprint(server.bp)
    # Return the application object
    # --------------------------------------------------------------------------
    #
    # Initialize
    #
    # --------------------------------------------------------------------------
    # NOTE(review): before_first_request was removed in Flask 2.3 -- confirm the
    # pinned Flask version still supports it.
    @app.before_first_request
    def initialize():
        """Initialize Mimir gateway (if necessary) before the first request.
        """
        # Initialize the Mimir gateway if using Mimir engine
        if config.engine.identifier == const.MIMIR_ENGINE:
            import vizier.mimir as mimir
            print("Using Mimir at {}".format(mimir._mimir_url))
    root_redirect_path = "{}/web-ui/vizier-db".format(server.bp.url_prefix)
    @app.route("/")
    def handle_root():
        """Redirect users to the web UI
        """
        return redirect(root_redirect_path)
    return app
| 33.224638 | 113 | 0.627481 | # Copyright (C) 2017-2019 New York University,
# University at Buffalo,
# Illinois Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vizier web service application object. We currently use Flask microframework
to build the web service. The web service is the main access point for the
Vizier front end and for any other (remote) clients.
"""
import logging
import os
from flask import Flask, jsonify, make_response, redirect
from flask_cors import CORS # type: ignore[import]
from logging.handlers import RotatingFileHandler
from vizier.config.app import AppConfig
import vizier.api.base as srv
import vizier.config.base as const
def create_app() -> Flask:
"""Factory pattern for Flask. Initialize the Flask application object.
Returns
-------
Flask
"""
#Get application configuration parameters from environment variables.
config = AppConfig()
# Create the app and enable cross-origin resource sharing
app = Flask(__name__)
#app.config['APPLICATION_ROOT'] = config.webservice.app_path
#app.config['DEBUG'] = True
# Set size limit for uploaded files
app.config['MAX_CONTENT_LENGTH'] = config.webservice.defaults.max_file_size
# Enable CORS
CORS(app)
# Switch logging on
log_dir = os.path.abspath(config.logs.server)
# Create the directory if it does not exist
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
# File handle for server logs
file_handler = RotatingFileHandler(
os.path.join(log_dir, 'vizier-webapi.log'),
maxBytes=1024 * 1024 * 100,
backupCount=20
)
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
app.logger.addHandler(file_handler)
# --------------------------------------------------------------------------
#
# Error Handler
#
# --------------------------------------------------------------------------
@app.errorhandler(srv.ServerRequestException)
def invalid_request_or_resource_not_found(error):
"""JSON response handler for invalid requests or requests that access
unknown resources.
Parameters
----------
error : Exception
Exception thrown by request Handler
Returns
-------
Http response
"""
app.logger.error(error.message)
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.errorhandler(413)
def upload_error(exception):
"""Exception handler for file uploads that exceed the file size limit."""
app.logger.error(exception)
return make_response(jsonify({'title':'Error', 'message': str(exception), 'error': str(exception)}), 413)
@app.errorhandler(500)
def internal_error(exception):
"""Exception handler that logs exceptions."""
app.logger.error(exception)
return make_response(jsonify({'title':'Error', 'message': str(exception), 'error': str(exception)}), 500)
# Register the API blueprint
from . import server
app.register_blueprint(server.bp)
    # Return the application object
# --------------------------------------------------------------------------
#
# Initialize
#
# --------------------------------------------------------------------------
@app.before_first_request
def initialize():
"""Initialize Mimir gateway (if necessary) before the first request.
"""
# Initialize the Mimir gateway if using Mimir engine
if config.engine.identifier == const.MIMIR_ENGINE:
import vizier.mimir as mimir
print("Using Mimir at {}".format(mimir._mimir_url))
root_redirect_path = "{}/web-ui/vizier-db".format(server.bp.url_prefix)
@app.route("/")
def handle_root():
"""Redirect users to the web UI
"""
return redirect(root_redirect_path)
return app
| 0 | 0 | 0 |
316e27aa43683da3ab30191f818a1621683e1504 | 3,228 | py | Python | http_helper.py | uk-gov-mirror/alphagov.github-actions-runner-orchestration | 9803e0c96d3da3c800d8c809f06065a611a65a10 | [
"MIT"
] | 8 | 2021-03-19T17:40:51.000Z | 2021-12-24T04:18:28.000Z | http_helper.py | uk-gov-mirror/alphagov.github-actions-runner-orchestration | 9803e0c96d3da3c800d8c809f06065a611a65a10 | [
"MIT"
] | 3 | 2021-03-15T15:55:30.000Z | 2021-06-07T10:42:40.000Z | http_helper.py | uk-gov-mirror/alphagov.github-actions-runner-orchestration | 9803e0c96d3da3c800d8c809f06065a611a65a10 | [
"MIT"
] | 2 | 2021-04-10T17:16:39.000Z | 2021-04-10T20:14:41.000Z | import hmac
import hashlib
import base64
import json
import re
import time
def extractAndValidateBody(
    body: str,
    key: str = "",
    signature: str = "",
    isBase64: bool = False,
    with_validate: bool = True,
) -> dict:
    """
    Basic parsing of the body, including optional validation of a HMAC, to a dict
    >>> t = int(time.time())
    >>> valid_body = f'{{ "subnet": "123", "sg": "456", "repo": "789", "time": {t} }}'
    >>> valid_b64b = base64.b64encode(valid_body.encode("utf-8")).decode("utf-8")
    >>> test1 = extractAndValidateBody(valid_b64b, isBase64=True, with_validate=False)
    >>> test1.pop("time") != "0"
    True
    >>> test1
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> test2 = extractAndValidateBody(valid_body, with_validate=False)
    >>> test2.pop("time") != "0"
    True
    >>> test2
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> kinda_valid = f'{{ "subnet": "123", "sg": "456", "repo": "789", "time": {t} }}'
    >>> test3 = extractAndValidateBody(kinda_valid, with_validate=False)
    >>> test3.pop("time") != "0"
    True
    >>> test3
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> with open('tests/fixtures/example.json') as json_file:
    ...     example = json.load(json_file)
    >>> example["body"] = example["body"].replace("111", str(t))
    >>> test4 = extractAndValidateBody(example["body"], with_validate=False)
    >>> test4.pop("time") != "0"
    True
    >>> test4
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> key = "abcdefg"
    >>> h = hmac.new(key.encode("utf-8"), valid_body.encode("utf-8"), hashlib.sha512)
    >>> test5 = extractAndValidateBody(valid_body, key=key, signature=h.hexdigest())
    >>> test5.pop("time") != "0"
    True
    >>> test5
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> try:
    ...     extractAndValidateBody(key="12345", body="{}")
    ... except Exception as e:
    ...     print(e)
    key or signature missing
    >>> try:
    ...     extractAndValidateBody('{"subnet": "123", "sg": "456", "repo": "789", "time": 1015213801}', with_validate=False)
    ... except Exception as e:
    ...     print(e)
    request expired
    """
    if with_validate and (not key or not signature):
        raise Exception("key or signature missing")
    if isBase64:
        # Undo the transport encoding before any parsing or HMAC check.
        dec_body = base64.b64decode(body.encode("utf-8"))
        body = dec_body.decode("utf-8")
    body_qs = json.loads(body)
    if not all(x in body_qs for x in ["time"]):
        raise Exception("missing required body item")
    requestTime = int(body_qs["time"])
    # Reject requests more than 30 seconds old (replay protection).
    if (int(time.time()) - requestTime) >= 30:
        raise Exception("request expired")
    if with_validate:
        # ``key`` is guaranteed non-empty here by the check at the top, so the
        # previous ``if not key`` branch was dead code and has been removed.
        h = hmac.new(key.encode("utf-8"), body.encode("utf-8"), hashlib.sha512)
        # Security fix: use a constant-time comparison so an attacker cannot
        # recover the signature byte-by-byte via timing differences.
        if not hmac.compare_digest(h.hexdigest(), signature):
            raise Exception("Bad signature")
    return body_qs
| 29.888889 | 125 | 0.550186 | import hmac
import hashlib
import base64
import json
import re
import time
def extractAndValidateBody(
    body: str,
    key: str = "",
    signature: str = "",
    isBase64: bool = False,
    with_validate: bool = True,
) -> dict:
    """
    Basic parsing of the body, including optional validation of a HMAC, to a dict
    >>> t = int(time.time())
    >>> valid_body = f'{{ "subnet": "123", "sg": "456", "repo": "789", "time": {t} }}'
    >>> valid_b64b = base64.b64encode(valid_body.encode("utf-8")).decode("utf-8")
    >>> test1 = extractAndValidateBody(valid_b64b, isBase64=True, with_validate=False)
    >>> test1.pop("time") != "0"
    True
    >>> test1
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> test2 = extractAndValidateBody(valid_body, with_validate=False)
    >>> test2.pop("time") != "0"
    True
    >>> test2
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> kinda_valid = f'{{ "subnet": "123", "sg": "456", "repo": "789", "time": {t} }}'
    >>> test3 = extractAndValidateBody(kinda_valid, with_validate=False)
    >>> test3.pop("time") != "0"
    True
    >>> test3
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> with open('tests/fixtures/example.json') as json_file:
    ...     example = json.load(json_file)
    >>> example["body"] = example["body"].replace("111", str(t))
    >>> test4 = extractAndValidateBody(example["body"], with_validate=False)
    >>> test4.pop("time") != "0"
    True
    >>> test4
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> key = "abcdefg"
    >>> h = hmac.new(key.encode("utf-8"), valid_body.encode("utf-8"), hashlib.sha512)
    >>> test5 = extractAndValidateBody(valid_body, key=key, signature=h.hexdigest())
    >>> test5.pop("time") != "0"
    True
    >>> test5
    {'subnet': '123', 'sg': '456', 'repo': '789'}
    >>> try:
    ...     extractAndValidateBody(key="12345", body="{}")
    ... except Exception as e:
    ...     print(e)
    key or signature missing
    >>> try:
    ...     extractAndValidateBody('{"subnet": "123", "sg": "456", "repo": "789", "time": 1015213801}', with_validate=False)
    ... except Exception as e:
    ...     print(e)
    request expired
    """
    if with_validate and (not key or not signature):
        raise Exception("key or signature missing")
    if isBase64:
        # Undo the transport encoding before any parsing or HMAC check.
        dec_body = base64.b64decode(body.encode("utf-8"))
        body = dec_body.decode("utf-8")
    body_qs = json.loads(body)
    if not all(x in body_qs for x in ["time"]):
        raise Exception("missing required body item")
    requestTime = int(body_qs["time"])
    # less than 30 seconds old
    if (int(time.time()) - requestTime) >= 30:
        raise Exception(f"request expired")
    if with_validate:
        key_bytes = None
        # NOTE(review): this branch is unreachable — ``key`` is already
        # guaranteed non-empty by the check at the top of the function.
        if not key:
            raise Exception("Key not valid")
        else:
            key_bytes = key.encode("utf-8")
        h = hmac.new(key_bytes, body.encode("utf-8"), hashlib.sha512)
        res = h.hexdigest()
        # NOTE(review): ``==`` is not constant-time; hmac.compare_digest
        # would resist timing attacks on the signature — confirm and fix.
        if res == signature:
            return body_qs
        else:
            raise Exception("Bad signature")
    return body_qs
| 0 | 0 | 0 |
6726cbedef2c6f18fa62a6fb72c6c2851af790bb | 50 | py | Python | tests/tests/test.py | cbsi-dto/KubernetesJobOperator | 32f23c20adaa7db63e35a103f466207908063b43 | [
"MIT"
] | 35 | 2020-02-10T16:55:41.000Z | 2022-03-18T01:25:00.000Z | tests/tests/test.py | LamaAni/KubernetesJobOperator | 32f23c20adaa7db63e35a103f466207908063b43 | [
"MIT"
] | 26 | 2020-02-10T05:36:44.000Z | 2022-03-02T18:44:47.000Z | tests/tests/test.py | cbsi-dto/KubernetesJobOperator | 32f23c20adaa7db63e35a103f466207908063b43 | [
"MIT"
] | 8 | 2020-02-28T23:24:07.000Z | 2021-11-29T21:35:46.000Z | from airflow import version
print(version.version) | 25 | 27 | 0.86 | from airflow import version
print(version.version) | 0 | 0 | 0 |
5a82275a3506fff40f44b058e01545b74a067eb9 | 106 | py | Python | ntc_rosetta_conf/usr_state_data_handlers.py | networktocode/ntc-rosetta-conf | 06c8028e0bbafdd97d15e14ca13faa2601345d8b | [
"Apache-2.0"
] | 5 | 2019-07-31T03:06:48.000Z | 2020-09-01T21:51:04.000Z | ntc_rosetta_conf/usr_state_data_handlers.py | networktocode/ntc-rosetta-conf | 06c8028e0bbafdd97d15e14ca13faa2601345d8b | [
"Apache-2.0"
] | 1 | 2020-12-14T15:02:05.000Z | 2020-12-14T15:02:05.000Z | ntc_rosetta_conf/usr_state_data_handlers.py | networktocode/ntc-rosetta-conf | 06c8028e0bbafdd97d15e14ca13faa2601345d8b | [
"Apache-2.0"
] | 1 | 2021-04-05T09:53:53.000Z | 2021-04-05T09:53:53.000Z | from jetconf.data import BaseDatastore
| 17.666667 | 55 | 0.783019 | from jetconf.data import BaseDatastore
def register_state_handlers(ds: BaseDatastore) -> None:
    """Register state-data handlers on the datastore *ds*.

    Currently a no-op placeholder; concrete handlers can be added here later.
    """
    pass
| 43 | 0 | 23 |
9e4a27f78a5c2ba5e353dcd5fc86de8de8171e03 | 1,723 | py | Python | geonode/geonode/maps/__init__.py | ttungbmt/BecaGIS_GeoPortal | 6c05f9fc020ec4ccf600ba2503a06c2231443920 | [
"MIT"
] | null | null | null | geonode/geonode/maps/__init__.py | ttungbmt/BecaGIS_GeoPortal | 6c05f9fc020ec4ccf600ba2503a06c2231443920 | [
"MIT"
] | null | null | null | geonode/geonode/maps/__init__.py | ttungbmt/BecaGIS_GeoPortal | 6c05f9fc020ec4ccf600ba2503a06c2231443920 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.utils.translation import ugettext_noop as _
from geonode.notifications_helper import NotificationsAppConfigBase
default_app_config = 'geonode.maps.MapsAppConfig'
| 44.179487 | 96 | 0.598375 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.utils.translation import ugettext_noop as _
from geonode.notifications_helper import NotificationsAppConfigBase
class MapsAppConfig(NotificationsAppConfigBase):
    """Django app configuration for ``geonode.maps``.

    ``NOTIFICATIONS`` is a tuple of (id, label, description) notice types for
    map lifecycle events; presumably consumed by the
    ``NotificationsAppConfigBase`` machinery — confirm against that base class.
    """
    name = 'geonode.maps'
    NOTIFICATIONS = (("map_created", _("Map Created"), _("A Map was created"),),
                     ("map_updated", _("Map Updated"), _("A Map was updated"),),
                     ("map_approved", _("Map Approved"), _("A Map was approved by a Manager"),),
                     ("map_published", _("Map Published"), _("A Map was published"),),
                     ("map_deleted", _("Map Deleted"), _("A Map was deleted"),),
                     ("map_comment", _("Comment on Map"), _("A map was commented on"),),
                     ("map_rated", _("Rating for Map"), _("A rating was given to a map"),),
                     )
default_app_config = 'geonode.maps.MapsAppConfig'
| 0 | 684 | 23 |
d343e3206562b2f8211d80e497a5ea3ebe66d36a | 3,970 | py | Python | draalcore/middleware/tests/test_login.py | jojanper/draalcore | 3d3f5a53efe32c721c34d7e48267328a4e9e8402 | [
"MIT"
] | 1 | 2017-04-25T10:54:55.000Z | 2017-04-25T10:54:55.000Z | draalcore/middleware/tests/test_login.py | jojanper/draalcore | 3d3f5a53efe32c721c34d7e48267328a4e9e8402 | [
"MIT"
] | 1 | 2022-02-10T06:48:36.000Z | 2022-02-10T06:48:36.000Z | draalcore/middleware/tests/test_login.py | jojanper/draalcore | 3d3f5a53efe32c721c34d7e48267328a4e9e8402 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Login middleware tests"""
# System imports
import logging
from mock import MagicMock, patch
from datetime import datetime, timedelta
from django.http import HttpRequest
from django.conf import settings
from django.contrib import auth
from django.urls import reverse
# Project imports
from draalcore.test_utils.basetest import BaseTestMiddleware
from ..login import LoginRequiredMiddleware, UserEmailRequiredMiddleware, AutoLogout, DateTimeSerializer
logger = logging.getLogger(__name__)
class LoginRequiredTestCase(BaseTestMiddleware):
    """Login required middleware."""
    def test_request(self):
        """User authentication is validated."""
        # self.get_response is the downstream callable provided by the base
        # test class; calls to it are tracked via self.responseFuncCalled.
        obj = LoginRequiredMiddleware(self.get_response)
        # GIVEN request has no user authentication
        request = HttpRequest()
        request.path_info = '/view'
        request.user = MagicMock()
        request.user.is_authenticated = False
        # WHEN request is processed by the login middleware
        response = obj(request)
        # THEN a 401 response should be returned without calling downstream
        self.assertIsNotNone(response)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(self.responseFuncCalled, 0)
        self.clear_response()
class UserEmailRequiredTestCase(BaseTestMiddleware):
    """User email required middleware."""
    def test_request_redirect(self):
        """Presence of user email is validated."""
        obj = UserEmailRequiredMiddleware(self.get_response)
        # GIVEN user data has no email included and user requests main page
        request = HttpRequest()
        request.path_info = '/'
        request.user = MagicMock(email='')
        request.user.is_authenticated = True
        # WHEN request is processed by the middleware
        response = obj(request)
        # THEN a redirect to the email page should be returned, with the
        # original path passed in the ?next= query parameter
        self.assertIsNotNone(response)
        self.assertTrue('{}?next='.format(reverse(settings.USER_EMAIL_REDIRECT)) in response['Location'])
        self.assertEqual(self.responseFuncCalled, 0)
        self.clear_response()
        # -----
        # GIVEN user data has email included and user requests main page
        request = HttpRequest()
        request.path_info = '/'
        request.user = MagicMock(email='test@test.com')
        request.user.is_authenticated = True
        # WHEN request is processed by the middleware
        response = obj(request)
        # THEN provided callable should be called
        self.assertTrue(response)
        self.assertEqual(self.responseFuncCalled, 1)
        self.clear_response()
class AutoLogoutTestCase(BaseTestMiddleware):
    """Auto logout middleware."""
    def test_request1(self):
        """User is not logged in."""
        obj = AutoLogout(self.get_response)
        # GIVEN unauthenticated user
        request = HttpRequest()
        request.user = MagicMock()
        request.user.is_authenticated = False
        # WHEN request is processed by the auto logout middleware
        response = obj(request)
        # THEN it succeeds and the downstream callable is invoked once
        self.assertTrue(response)
        self.assertEqual(self.responseFuncCalled, 1)
        self.clear_response()
    @patch.object(auth, 'logout')
    def test_request2(self, logout):
        """User session has expired."""
        # auth.logout is patched so no real session teardown happens.
        obj = AutoLogout(self.get_response)
        logout.return_value = True
        # GIVEN expired user session (last touch is twice the allowed delay ago)
        request = HttpRequest()
        request.user = MagicMock()
        request.user.is_authenticated = True
        timestamp = datetime.now() - timedelta(0, settings.AUTO_LOGOUT_DELAY * 2, 0)
        request.session = {'last_touch': DateTimeSerializer(timestamp).encode}
        # WHEN request is processed by the auto logout middleware
        response = obj(request)
        # THEN unauthorized response is returned
        self.assertIsNotNone(response)
        self.assertEqual(response.status_code, 401)
| 31.015625 | 105 | 0.678086 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Login middleware tests"""
# System imports
import logging
from mock import MagicMock, patch
from datetime import datetime, timedelta
from django.http import HttpRequest
from django.conf import settings
from django.contrib import auth
from django.urls import reverse
# Project imports
from draalcore.test_utils.basetest import BaseTestMiddleware
from ..login import LoginRequiredMiddleware, UserEmailRequiredMiddleware, AutoLogout, DateTimeSerializer
logger = logging.getLogger(__name__)
class LoginRequiredTestCase(BaseTestMiddleware):
    """Login required middleware."""
    def test_request(self):
        """User authentication is validated."""
        # self.get_response is the downstream callable provided by the base
        # test class; calls to it are tracked via self.responseFuncCalled.
        obj = LoginRequiredMiddleware(self.get_response)
        # GIVEN request has no user authentication
        request = HttpRequest()
        request.path_info = '/view'
        request.user = MagicMock()
        request.user.is_authenticated = False
        # WHEN request is processed by the login middleware
        response = obj(request)
        # THEN a 401 response should be returned without calling downstream
        self.assertIsNotNone(response)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(self.responseFuncCalled, 0)
        self.clear_response()
class UserEmailRequiredTestCase(BaseTestMiddleware):
    """User email required middleware."""
    def test_request_redirect(self):
        """Presence of user email is validated."""
        obj = UserEmailRequiredMiddleware(self.get_response)
        # GIVEN user data has no email included and user requests main page
        request = HttpRequest()
        request.path_info = '/'
        request.user = MagicMock(email='')
        request.user.is_authenticated = True
        # WHEN request is processed by the middleware
        response = obj(request)
        # THEN a redirect to the email page should be returned, with the
        # original path passed in the ?next= query parameter
        self.assertIsNotNone(response)
        self.assertTrue('{}?next='.format(reverse(settings.USER_EMAIL_REDIRECT)) in response['Location'])
        self.assertEqual(self.responseFuncCalled, 0)
        self.clear_response()
        # -----
        # GIVEN user data has email included and user requests main page
        request = HttpRequest()
        request.path_info = '/'
        request.user = MagicMock(email='test@test.com')
        request.user.is_authenticated = True
        # WHEN request is processed by the middleware
        response = obj(request)
        # THEN provided callable should be called
        self.assertTrue(response)
        self.assertEqual(self.responseFuncCalled, 1)
        self.clear_response()
class AutoLogoutTestCase(BaseTestMiddleware):
    """Auto logout middleware."""
    def test_request1(self):
        """User is not logged in."""
        obj = AutoLogout(self.get_response)
        # GIVEN unauthenticated user
        request = HttpRequest()
        request.user = MagicMock()
        request.user.is_authenticated = False
        # WHEN request is processed by the auto logout middleware
        response = obj(request)
        # THEN it succeeds and the downstream callable is invoked once
        self.assertTrue(response)
        self.assertEqual(self.responseFuncCalled, 1)
        self.clear_response()
    @patch.object(auth, 'logout')
    def test_request2(self, logout):
        """User session has expired."""
        # auth.logout is patched so no real session teardown happens.
        obj = AutoLogout(self.get_response)
        logout.return_value = True
        # GIVEN expired user session (last touch is twice the allowed delay ago)
        request = HttpRequest()
        request.user = MagicMock()
        request.user.is_authenticated = True
        timestamp = datetime.now() - timedelta(0, settings.AUTO_LOGOUT_DELAY * 2, 0)
        request.session = {'last_touch': DateTimeSerializer(timestamp).encode}
        # WHEN request is processed by the auto logout middleware
        response = obj(request)
        # THEN unauthorized response is returned
        self.assertIsNotNone(response)
        self.assertEqual(response.status_code, 401)
| 0 | 0 | 0 |
2c8adce33aac5b9e88d71156a20ce690711966f7 | 346 | py | Python | data_science_layer/machine_learning/support_vector_regressor.py | nathangeology/cyclist_dataset | 44ad4a3765e86cba934bfdbfb151a788eddfbead | [
"MIT"
] | null | null | null | data_science_layer/machine_learning/support_vector_regressor.py | nathangeology/cyclist_dataset | 44ad4a3765e86cba934bfdbfb151a788eddfbead | [
"MIT"
] | null | null | null | data_science_layer/machine_learning/support_vector_regressor.py | nathangeology/cyclist_dataset | 44ad4a3765e86cba934bfdbfb151a788eddfbead | [
"MIT"
] | null | null | null | from sklearn.svm import SVR
from data_science_layer.machine_learning.base_regressor import BaseRegressor
| 24.714286 | 76 | 0.699422 | from sklearn.svm import SVR
from data_science_layer.machine_learning.base_regressor import BaseRegressor
class SupportVectorRegressorModel(BaseRegressor):
short_name = 'SVR'
sklearn_model = SVR()
hyper_param_dict = {'C': [0.001, 0.01, 0.1, 1, 10, 100]}
def __init__(self):
super().__init__()
self.set_params()
| 51 | 165 | 23 |
f1c8a2ea1e6774516b221761cec538d39be7d6c1 | 254 | py | Python | learn-python/sort_with_key.py | barissimsek/gopython | 7e2c1bdb20b2a908c601794ea9dbf71ea035a869 | [
"Apache-2.0"
] | null | null | null | learn-python/sort_with_key.py | barissimsek/gopython | 7e2c1bdb20b2a908c601794ea9dbf71ea035a869 | [
"Apache-2.0"
] | null | null | null | learn-python/sort_with_key.py | barissimsek/gopython | 7e2c1bdb20b2a908c601794ea9dbf71ea035a869 | [
"Apache-2.0"
] | null | null | null |
ips = [
'10.0.0.5',
'10.5.3.1',
'192.168.11.10',
'2.2.2.2',
'100.0.0.1',
'20.3.2.4'
]
print(sort_ips(ips))
| 12.095238 | 52 | 0.566929 |
ips = [
'10.0.0.5',
'10.5.3.1',
'192.168.11.10',
'2.2.2.2',
'100.0.0.1',
'20.3.2.4'
]
def getKey(item):
    """Return a numeric sort key for an IPv4 string, e.g. '10.0.0.5' -> (10, 0, 0, 5)."""
    return tuple(int(part) for part in item.split('.'))
def sort_ips(iplist):
    """Return *iplist* sorted in numeric (not lexicographic) IPv4 order.

    Bug fix: the original sorted the module-level ``ips`` list and ignored
    its ``iplist`` argument entirely.
    """
    return sorted(iplist, key=getKey)
print(sort_ips(ips))
| 81 | 0 | 46 |
472bac29ae211663cfbeeaa2790296ad3609df06 | 4,913 | py | Python | src/glusterfsweb/glusternodestate.py | aravindavk/glusterfs-web | 5ffacd4ac614a524b4e118b7f4aa68f12453b82a | [
"BSD-3-Clause"
] | 10 | 2015-08-28T10:18:05.000Z | 2019-10-04T03:45:25.000Z | src/glusterfsweb/glusternodestate.py | iesool/glusterfs-web | 5ffacd4ac614a524b4e118b7f4aa68f12453b82a | [
"BSD-3-Clause"
] | null | null | null | src/glusterfsweb/glusternodestate.py | iesool/glusterfs-web | 5ffacd4ac614a524b4e118b7f4aa68f12453b82a | [
"BSD-3-Clause"
] | 7 | 2015-11-21T00:59:49.000Z | 2022-02-21T20:37:00.000Z | # -*- coding: utf-8 -*-
"""
glusternodestate.py
:copyright: (c) 2013 by Aravinda VK
:license: BSD, GPL v2, see LICENSE for more details.
"""
import argparse
import errno
import os
from functools import wraps
import sys
import requests
from glusterfstools import volumes
import nodestatedb as _db
from config import DB_PATH, DB_FILE, HOOKS_ROOT
_glusterfs_events_funcs = {}
@glusterfsevent("setup")
@glusterfsevent("cleanup")
@glusterfsevent("glusterd-start")
@glusterfsevent("create")
@glusterfsevent("delete")
@glusterfsevent("add-brick")
@glusterfsevent("remove-brick")
@glusterfsevent("set")
@glusterfsevent("start")
@glusterfsevent("stop")
if __name__ == "__main__":
main()
| 24.565 | 77 | 0.607165 | # -*- coding: utf-8 -*-
"""
glusternodestate.py
:copyright: (c) 2013 by Aravinda VK
:license: BSD, GPL v2, see LICENSE for more details.
"""
import argparse
import errno
import os
from functools import wraps
import sys
import requests
from glusterfstools import volumes
import nodestatedb as _db
from config import DB_PATH, DB_FILE, HOOKS_ROOT
_glusterfs_events_funcs = {}
class GlusterNodeStateError(Exception):
    """Raised when updating the node-state DB or hook files fails."""
    pass
def _get_args():
    # Parse the positional hook event name plus an optional --volname;
    # parse_known_args() tolerates the extra arguments glusterd passes to hooks.
    parser = argparse.ArgumentParser(description='Handle GlusterFS state')
    parser.add_argument('event')
    parser.add_argument('--volname', type=str, help='volume name')
    return parser.parse_known_args()
def glusterfsevent(name):
    # Decorator factory: registers the wrapped function under *name* in the
    # module-level _glusterfs_events_funcs dispatch table used by main().
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwds):
            return f(*args, **kwds)
        global _glusterfs_events_funcs
        _glusterfs_events_funcs[name] = wrapper
        return wrapper
    return decorator
@glusterfsevent("setup")
def setup(param=None):
    # Create the state DB (root-owned, mode 0600) and install a post-hook
    # script for each volume event so glusterd re-invokes this tool.
    # NOTE: 0600 / 0755 are Python 2 octal literals — this module is Python 2.
    os.mkdir(DB_PATH)
    _db.connect(DB_FILE)
    _db.setup()
    os.chown(DB_FILE, 0, 0)
    os.chmod(DB_FILE, 0600)
    sys.stdout.write("Created DB: %s\n" % DB_FILE)
    events = ["add-brick", "create", "delete",
              "remove-brick", "set", "start", "stop"]
    for event in events:
        hook_file = HOOKS_ROOT + "%s/post/Sglusternodestate.bash" % event
        with open(hook_file, "w") as f:
            f.write("#!/bin/bash\nglusternodestate %s \"$@\"" % event)
        sys.stdout.write("Added Hook: %s\n" % hook_file)
        os.chmod(hook_file, 0755)
@glusterfsevent("cleanup")
def cleanup(param=None):
    # Reverse of setup(): remove the DB, its directory, and every hook file.
    # Missing files (ENOENT) are tolerated; other OS errors are wrapped.
    try:
        os.remove(DB_FILE)
        sys.stdout.write("removed DB file\n")
        os.rmdir(DB_PATH)
        events = ["add-brick", "create", "delete",
                  "remove-brick", "set", "start", "stop"]
        for event in events:
            hook_file = HOOKS_ROOT + "%s/post/Sglusternodestate.bash" % event
            os.remove(hook_file)
            sys.stdout.write("removed Hook file: %s\n" % hook_file)
    except (IOError, OSError):
        if sys.exc_info()[1].errno == errno.ENOENT:
            pass
        else:
            raise GlusterNodeStateError('Error while cleanup')
@glusterfsevent("glusterd-start")
def flush_and_regenerate(param=None):
    """Rebuild the node-state DB from the live cluster state on glusterd start.

    Clears all tables, then re-inserts every volume row and every brick row
    reported by ``volumes.get()``.

    Bug fix: ``bricks`` was reset inside the per-volume loop while
    ``_db.bricks_add`` was only called once after the loop, so only the LAST
    volume's bricks were recorded (and an empty cluster raised NameError).
    The accumulator is now initialized once before the loop.
    """
    _db.table_cleanup_all()
    vols = []
    bricks = []
    vols_data = volumes.get()
    for vol in vols_data:
        vols.append((vol["uuid"],
                     vol["name"],
                     vol["type"],
                     vol["status"],
                     vol["num_bricks"],
                     vol["transport"]))
        for brick in vol['bricks']:
            bricks.append((vol['name'], brick))
    _db.volumes_add(vols)
    _db.bricks_add(bricks)
@glusterfsevent("create")
def volume_create(volume):
    # Insert the new volume's row and all of its brick rows into the DB.
    vol_data = volumes.get(volume)
    vol = vol_data[0]
    vols = (vol["uuid"],
            vol["name"],
            vol["type"],
            vol["status"],
            vol["num_bricks"],
            vol["transport"])
    _db.volumes_add([vols])
    bricks = []
    for brick in vol['bricks']:
        bricks.append((vol['name'], brick))
    _db.bricks_add(bricks)
@glusterfsevent("delete")
def volume_delete(volume):
    # Drop every row (volume, bricks, options) belonging to *volume*.
    _db.table_cleanup_all(volume=volume)
@glusterfsevent("add-brick")
def add_brick(volume):
    # Refresh the brick rows for *volume* after a brick was added.
    add_remove_brick(volume, "add")
@glusterfsevent("remove-brick")
def remove_brick(volume):
    """Hook for ``remove-brick``: refresh the brick rows for *volume*.

    Bug fix: previously passed ``"add"`` as the action (copy-paste from
    ``add_brick``). ``add_remove_brick`` currently ignores the action
    argument, so behaviour is unchanged, but the call now states the
    correct intent.
    """
    add_remove_brick(volume, "remove")
def add_remove_brick(volume, action):
    # Re-sync all brick rows and the brick count for *volume*.
    # NOTE: ``action`` is currently unused — both add and remove do a full
    # refresh of the volume's brick rows.
    _db.table_cleanup_bricks(volume=volume)
    vol_data = volumes.get(volume)
    vol = vol_data[0]
    bricks = []
    for brick in vol['bricks']:
        bricks.append((vol['name'], brick))
    _db.bricks_add(bricks)
    _db.update_volume(vol['name'], "num_bricks", len(bricks))
@glusterfsevent("set")
def options_set(volume):
    # Replace the volume's option rows with the current option name/value pairs.
    _db.table_cleanup_options(volume)
    vol_data = volumes.get(volume)
    vol = vol_data[0]
    options = []
    for opt in vol['options']:
        options.append((vol['name'], opt['name'], opt['value']))
    _db.options_add(options)
@glusterfsevent("start")
def volume_start(volume):
    # Mark the volume as running.
    _db.update_volume(volume, "status", "UP")
@glusterfsevent("stop")
def volume_stop(volume):
    # Mark the volume as stopped.
    _db.update_volume(volume, "status", "DOWN")
def main():
    # Dispatch the hook event named on the command line to its registered
    # handler, then best-effort notify the web UI via the URL stored in
    # data/url. NOTE: the bare ``print requests.get(url)`` statement below is
    # Python 2 syntax — this module only runs under Python 2.
    args = _get_args()[0]
    if not args.event in ["setup", "cleanup"]:
        _db.connect(DB_FILE)
    if args.event in _glusterfs_events_funcs:
        _glusterfs_events_funcs[args.event](args.volname)
        url_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                "data",
                                "url")
        try:
            with open(url_file) as f:
                url = "%s/%s" % (f.read(), args.event)
                print requests.get(url)
        except (IOError, OSError, requests.exceptions.RequestException):
            # Notification is best-effort: a missing url file or an
            # unreachable web UI must not fail the hook.
            pass
main()
| 3,826 | 27 | 335 |
c91d9e8b4b3fd15318d5312b088f30df2d69e23d | 940 | py | Python | array/defuse_the_bomb.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
] | null | null | null | array/defuse_the_bomb.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
] | null | null | null | array/defuse_the_bomb.py | elenaborisova/LeetCode-Solutions | 98376aab7fd150a724e316357ae5ea46988d9eac | [
"MIT"
] | null | null | null |
print(decrypt([5, 7, 1, 4], 3))
print(decrypt([1, 2, 3, 4], 0))
print(decrypt([2, 4, 9, 3], -2))
def decrypt(code, k):
    """Decode a circular bomb code (LeetCode 1652 semantics).

    Each output element i is:
      * the sum of the next ``k`` elements (wrapping around) when k > 0,
      * the sum of the previous ``|k|`` elements (wrapping) when k < 0,
      * 0 when k == 0.

    The input list is not modified.
    """
    n = len(code)
    if k == 0:
        return [0] * n
    # Offsets relative to position i: 1..k for the "next" window,
    # k..-1 for the "previous" window; modular indexing handles the wrap.
    offsets = range(1, k + 1) if k > 0 else range(k, 0)
    return [sum(code[(i + j) % n] for j in offsets) for i in range(n)]
print(decrypt([5, 7, 1, 4], 3))
print(decrypt([1, 2, 3, 4], 0))
print(decrypt([2, 4, 9, 3], -2))
| 819 | 0 | 22 |
865161f80d1b8ceda72b675814ede88a53fd25f6 | 1,321 | py | Python | extrairdados/lpl springs 21/dragons.py | pedrograngeiro/Webcrasping-E-sports-Wiki | d5f6f1a0577b7ff2e2796bfd18b412f296256fe9 | [
"MIT"
] | null | null | null | extrairdados/lpl springs 21/dragons.py | pedrograngeiro/Webcrasping-E-sports-Wiki | d5f6f1a0577b7ff2e2796bfd18b412f296256fe9 | [
"MIT"
] | null | null | null | extrairdados/lpl springs 21/dragons.py | pedrograngeiro/Webcrasping-E-sports-Wiki | d5f6f1a0577b7ff2e2796bfd18b412f296256fe9 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import csv
cont = 0
i = 0
j = 0
source = requests.get('https://lol.gamepedia.com/LPL/2021_Season/Spring_Season/Scoreboards/Week_10').text
soup = BeautifulSoup(source, 'html.parser')
times = soup.find_all('span', "teamname")
kills = soup.find_all('div', "sb-footer-item sb-footer-item-dragons")
numpartidas = 9
listatime = []
partidas = [[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]]
numkills = []
numdragonspartidas = [0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]
print(partidas)
for time in times:
listatime.append(time.text)
for l in range(0,numpartidas):
for c in range(0,2):
partidas[l][c] = listatime[i]
i = i + 1
for kill in kills:
numkills.append(kill.text)
for linha in range(0,numpartidas):
for coluna in range(0,2):
numdragonspartidas[linha][coluna] = numkills[j]
j = j + 1
print(numdragonspartidas)
"""with open('newFile.csv', 'a') as csvfile:
wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
for word in partidas:
wr.writerow([None,[word]])
"""
with open('dragons_lpl.csv', 'a', newline="") as csvfile:
wr = csv.writer(csvfile,quoting=csv.QUOTE_ALL)
for word in partidas:
wr.writerow(word)
for word in numdragonspartidas:
wr.writerow(word) | 23.589286 | 105 | 0.643452 | import requests
from bs4 import BeautifulSoup
import csv
# Scrapes team names and per-team dragon counts from the LPL Spring 2021
# week-10 scoreboards page and appends them to dragons_lpl.csv.
cont = 0
i = 0
j = 0
source = requests.get('https://lol.gamepedia.com/LPL/2021_Season/Spring_Season/Scoreboards/Week_10').text
soup = BeautifulSoup(source, 'html.parser')
# Two <span class="teamname"> and two dragon-footer divs per match.
times = soup.find_all('span', "teamname")
kills = soup.find_all('div', "sb-footer-item sb-footer-item-dragons")
numpartidas = 9
listatime = []
partidas = [[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]]
numkills = []
# NOTE(review): missing outer brackets — this is a TUPLE of 9 lists, not a
# list of lists. Item assignment below still works, but list methods won't.
numdragonspartidas = [0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]
print(partidas)
for time in times:
    listatime.append(time.text)
# Fill the 9x2 matrix of team names (one pair per match).
for l in range(0,numpartidas):
    for c in range(0,2):
        partidas[l][c] = listatime[i]
        i = i + 1
for kill in kills:
    numkills.append(kill.text)
# Fill the 9x2 matrix of dragon counts in the same order as the teams.
for linha in range(0,numpartidas):
    for coluna in range(0,2):
        numdragonspartidas[linha][coluna] = numkills[j]
        j = j + 1
print(numdragonspartidas)
"""with open('newFile.csv', 'a') as csvfile:
    wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
    for word in partidas:
        wr.writerow([None,[word]])
"""
# Append the team rows followed by the dragon-count rows.
with open('dragons_lpl.csv', 'a', newline="") as csvfile:
    wr = csv.writer(csvfile,quoting=csv.QUOTE_ALL)
    for word in partidas:
        wr.writerow(word)
    for word in numdragonspartidas:
        wr.writerow(word)
9090ec2dfef9312f57c4b22062703c36c0d45f2c | 6,375 | py | Python | tests/python_tests/ryw_benchmark.py | sfc-gh-bvr/foundationdb | 7594f5c0f92d2582dae717ce0244c11642b27dd4 | [
"Apache-2.0"
] | 1 | 2022-02-23T07:17:32.000Z | 2022-02-23T07:17:32.000Z | tests/python_tests/ryw_benchmark.py | sfc-gh-bvr/foundationdb | 7594f5c0f92d2582dae717ce0244c11642b27dd4 | [
"Apache-2.0"
] | null | null | null | tests/python_tests/ryw_benchmark.py | sfc-gh-bvr/foundationdb | 7594f5c0f92d2582dae717ce0244c11642b27dd4 | [
"Apache-2.0"
] | 1 | 2022-03-01T12:28:03.000Z | 2022-03-01T12:28:03.000Z | #!/usr/bin/env python
#
# ryw_benchmark.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
import time
import traceback
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from python_tests import PythonTest
import fdb
fdb.api_version(400)
if __name__ == "__main__":
print(
"Running RYW Benchmark test on Python version %d.%d.%d%s%d"
% (
sys.version_info[0],
sys.version_info[1],
sys.version_info[2],
sys.version_info[3][0],
sys.version_info[4],
)
)
parser = argparse.ArgumentParser()
tests = sorted(RYWBenchmark.tests.keys())
assert len(tests) > 0, "RYW benchmark test has no test_functions"
test_string = ", ".join(tests[:-1])
if len(tests) > 1:
test_string += ", and "
test_string += tests[-1]
parser.add_argument(
"--tests-to-run",
nargs="*",
help="Names of tests to run. Can be any of %s. By default, all tests are run."
% test_string,
)
RYWBenchmark().run(parser=parser)
| 32.19697 | 93 | 0.586667 | #!/usr/bin/env python
#
# ryw_benchmark.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
import time
import traceback
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from python_tests import PythonTest
import fdb
fdb.api_version(400)
class RYWBenchmark(PythonTest):
tests = {
"get_single": "RYW: get single cached value throughput",
"get_many_sequential": "RYW: get sequential cached values throughput",
"get_range_basic": "RYW: get range cached values throughput",
"single_clear_get_range": "RYW: get range cached values with clears throughput",
"clear_range_get_range": "RYW: get range cached values with clear ranges throughput",
"interleaved_sets_gets": "RYW: interleaved sets and gets on a single key throughput",
}
def __init__(self, key_count=10000, key_size=16):
super(RYWBenchmark, self).__init__()
self.key_count = key_count
self.key_size = key_size
def run_test(self):
try:
db = fdb.open(None, "DB")
except KeyboardInterrupt:
raise
except Exception:
self.result.add_error(self.get_error("fdb.open failed"))
return
try:
self.test_performance(db)
except KeyboardInterrupt:
raise
except Exception:
self.result.add_error(self.get_error("Failed to complete all tests"))
def key(self, num):
return "%0*d" % (self.key_size, num)
# Adds the stack trace to an error message
def get_error(self, message):
error_message = message + "\n" + traceback.format_exc()
print(error_message)
return error_message
def test_performance(self, db):
tr = db.create_transaction()
self.insert_data(tr)
if not self.args.tests_to_run:
self.args.tests_to_run = RYWBenchmark.tests.keys()
else:
for t in self.args.tests_to_run:
if t not in RYWBenchmark.tests:
raise Exception("Unknown RYW benchmark test '%s'" % t)
num_runs = 25
for test in self.args.tests_to_run:
time.sleep(5)
print("Running test %s" % test)
results = []
fxn_name = "run_%s" % test
assert hasattr(self, fxn_name), (
"Test function %s is not implemented" % fxn_name
)
for x in range(0, num_runs):
try:
results.append(getattr(self, fxn_name)(tr))
except KeyboardInterrupt:
raise
except Exception:
self.result.add_error(
self.get_error(
"Performance test failed: " + RYWBenchmark.tests[test]
)
)
break
if len(results) == num_runs:
median = sorted(results)[num_runs / 2]
self.result.add_kpi(RYWBenchmark.tests[test], int(median), "keys/s")
def insert_data(self, tr):
del tr[:]
for i in range(0, 10000):
tr[self.key(i)] = "foo"
def run_get_single(self, tr, count=10000):
start = time.time()
for i in range(count):
tr.get(self.key(5001)).wait()
return count / (time.time() - start)
def run_get_many_sequential(self, tr, count=10000):
start = time.time()
for j in range(count):
tr.get(self.key(j)).wait()
return count / (time.time() - start)
def run_get_range_basic(self, tr, count=100):
start = time.time()
for i in range(count):
list(tr.get_range(self.key(0), self.key(self.key_count)))
return self.key_count * count / (time.time() - start)
def run_single_clear_get_range(self, tr, count=100):
for i in range(0, self.key_count, 2):
tr.clear(self.key(i))
start = time.time()
for i in range(0, count):
list(tr.get_range(self.key(0), self.key(self.key_count)))
kpi = self.key_count * count / 2 / (time.time() - start)
self.insert_data(tr)
return kpi
def run_clear_range_get_range(self, tr, count=100):
for i in range(0, self.key_count, 4):
tr.clear_range(self.key(i), self.key(i + 1))
start = time.time()
for i in range(0, count):
list(tr.get_range(self.key(0), self.key(self.key_count)))
kpi = self.key_count * count * 3 / 4 / (time.time() - start)
self.insert_data(tr)
return kpi
def run_interleaved_sets_gets(self, tr, count=10000):
start = time.time()
tr["foo"] = str(1)
for i in range(count):
old = int(tr.get("foo").wait())
tr.set("foo", str(old + 1))
return count / (time.time() - start)
if __name__ == "__main__":
print(
"Running RYW Benchmark test on Python version %d.%d.%d%s%d"
% (
sys.version_info[0],
sys.version_info[1],
sys.version_info[2],
sys.version_info[3][0],
sys.version_info[4],
)
)
parser = argparse.ArgumentParser()
tests = sorted(RYWBenchmark.tests.keys())
assert len(tests) > 0, "RYW benchmark test has no test_functions"
test_string = ", ".join(tests[:-1])
if len(tests) > 1:
test_string += ", and "
test_string += tests[-1]
parser.add_argument(
"--tests-to-run",
nargs="*",
help="Names of tests to run. Can be any of %s. By default, all tests are run."
% test_string,
)
RYWBenchmark().run(parser=parser)
| 3,714 | 892 | 23 |
9cc587f7aa5a1c467dbabad1cd923174c7d37d14 | 2,836 | py | Python | example/generateExampleIFP.py | whitesides1/spify | 5e7ec10f96d486ec15167ebe09bc5d9c05e7f0a8 | [
"BSD-3-Clause"
] | 1 | 2021-10-20T21:32:32.000Z | 2021-10-20T21:32:32.000Z | example/generateExampleIFP.py | LLNL/spify | 6b8386837de7398a43d0440c3b229cb4cefebdf9 | [
"BSD-3-Clause"
] | null | null | null | example/generateExampleIFP.py | LLNL/spify | 6b8386837de7398a43d0440c3b229cb4cefebdf9 | [
"BSD-3-Clause"
] | null | null | null | #!/bin/env python
import os,sys
#Root of spify src directory
SPIFY_SRC_DIR = os.path.join(os.getcwd(),'..')
#Name your parser
spify_parser_name = "ExampleIFP"
spify_parser_params = []
#Specify parameters
spify_parser_params.append(
{
'name':'boolOption',
'type':'bool',
'shortDesc':'My Boolean Option',
'defaultValue': 0
}
)
spify_parser_params.append(
{
'name':'boolVectOption',
'type':'v_bool',
'shortDesc':'A vector of booleans.',
'defaultValue': [1,0,1,0]
}
)
spify_parser_params.append(
{
'name':'intOption',
'type':'int',
'longDesc':
"""Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis
nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu
fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.""",
'discreteValues': [0,1,3,5,7]
}
)
spify_parser_params.append(
{
'name':'intVectOption',
'type':'v_int',
'shortDesc':'My Integer Vector Option.',
'defaultValue': [-3,-2,-1,0],
'boundMin': -10,
'boundMax': 10
}
)
spify_parser_params.append(
{
'name':'floatOption',
'type':'double',
'defaultValue': 300.0,
'boundMax': 300.0
}
)
spify_parser_params.append(
{
'name':'floatVectOption',
'type':'v_double',
'boundMin': 0.0,
'boundMax': 100.0
}
)
spify_parser_params.append(
{
'name':'stringOption',
'type':'string',
'defaultValue': "foo",
'discreteValues': ["foo","bar","baz"]
}
)
spify_parser_params.append(
{
'name':'stringVectOption',
'type':'v_string',
'shortDesc': "My String Vector Option"
}
)
spify_parser_params.append(
{
'name':'intIntMapOption',
'type':'m_int_int',
'shortDesc': "My Integer-Integer Map Option",
'discreteValuesFirst': [1,2,4,6]
}
)
spify_parser_params.append(
{
'name':'stringIntMapOption',
'type':'m_string_int',
'defaultValue': {"foo":1,"bar":2}
}
)
spify_parser_params.append(
{
'name':'stringStringMapOption',
'type':'m_string_string',
'discreteValuesFirst': ["foo","bar","baz"],
'discreteValuesSecond': ["parrot","buzzard","eagle"]
}
)
spify_parser_params.append(
{
'name':'floatFloatMapOption',
'type':'m_double_double',
'defaultValue': {1.0:10.0, 2.0:20.0, 3.0:30.0}
}
)
#Make sure we can import SpifyParserGenerator
sys.path.append(os.path.join(SPIFY_SRC_DIR,'src'))
#Import
from SpifyParserGenerator import SpifyParserGenerator as spg
#Generate parser code
spg().generate(spify_parser_name,spify_parser_params)
#Generate master file
spg().make_master_file(spify_parser_name,spify_parser_params)
#Done.
| 19.971831 | 82 | 0.680536 | #!/bin/env python
import os,sys
#Root of spify src directory
SPIFY_SRC_DIR = os.path.join(os.getcwd(),'..')
#Name your parser
spify_parser_name = "ExampleIFP"
spify_parser_params = []
#Specify parameters
spify_parser_params.append(
{
'name':'boolOption',
'type':'bool',
'shortDesc':'My Boolean Option',
'defaultValue': 0
}
)
spify_parser_params.append(
{
'name':'boolVectOption',
'type':'v_bool',
'shortDesc':'A vector of booleans.',
'defaultValue': [1,0,1,0]
}
)
spify_parser_params.append(
{
'name':'intOption',
'type':'int',
'longDesc':
"""Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis
nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu
fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.""",
'discreteValues': [0,1,3,5,7]
}
)
spify_parser_params.append(
{
'name':'intVectOption',
'type':'v_int',
'shortDesc':'My Integer Vector Option.',
'defaultValue': [-3,-2,-1,0],
'boundMin': -10,
'boundMax': 10
}
)
spify_parser_params.append(
{
'name':'floatOption',
'type':'double',
'defaultValue': 300.0,
'boundMax': 300.0
}
)
spify_parser_params.append(
{
'name':'floatVectOption',
'type':'v_double',
'boundMin': 0.0,
'boundMax': 100.0
}
)
spify_parser_params.append(
{
'name':'stringOption',
'type':'string',
'defaultValue': "foo",
'discreteValues': ["foo","bar","baz"]
}
)
spify_parser_params.append(
{
'name':'stringVectOption',
'type':'v_string',
'shortDesc': "My String Vector Option"
}
)
spify_parser_params.append(
{
'name':'intIntMapOption',
'type':'m_int_int',
'shortDesc': "My Integer-Integer Map Option",
'discreteValuesFirst': [1,2,4,6]
}
)
spify_parser_params.append(
{
'name':'stringIntMapOption',
'type':'m_string_int',
'defaultValue': {"foo":1,"bar":2}
}
)
spify_parser_params.append(
{
'name':'stringStringMapOption',
'type':'m_string_string',
'discreteValuesFirst': ["foo","bar","baz"],
'discreteValuesSecond': ["parrot","buzzard","eagle"]
}
)
spify_parser_params.append(
{
'name':'floatFloatMapOption',
'type':'m_double_double',
'defaultValue': {1.0:10.0, 2.0:20.0, 3.0:30.0}
}
)
#Make sure we can import SpifyParserGenerator
sys.path.append(os.path.join(SPIFY_SRC_DIR,'src'))
#Import
from SpifyParserGenerator import SpifyParserGenerator as spg
#Generate parser code
spg().generate(spify_parser_name,spify_parser_params)
#Generate master file
spg().make_master_file(spify_parser_name,spify_parser_params)
#Done.
| 0 | 0 | 0 |
30b60c0864be34ec2d64b39f90a4d523c0554545 | 632 | py | Python | apps/about.py | BlessedMut/Time-Series-Crypto | 83817d4fe939802a7d2dc361a094d9982698a66b | [
"MIT"
] | null | null | null | apps/about.py | BlessedMut/Time-Series-Crypto | 83817d4fe939802a7d2dc361a094d9982698a66b | [
"MIT"
] | null | null | null | apps/about.py | BlessedMut/Time-Series-Crypto | 83817d4fe939802a7d2dc361a094d9982698a66b | [
"MIT"
] | 1 | 2021-07-06T09:48:40.000Z | 2021-07-06T09:48:40.000Z | import streamlit as st
| 26.333333 | 62 | 0.547468 | import streamlit as st
def app():
st.title('Time Series Predictions for Crypto-Currencies')
c1, c2, c3 = st.beta_columns([4,2,4])
with c2:
st.subheader('Done by:')
b,m1,r, m2, l = st.beta_columns([2,1,2,1,2])
with b:
st.write('Blessed Mutengwa - R182565F')
st.write('blessedmutengwa@gmail.com')
with m1:
st.write("|")
with r:
st.write('Rufaro Nyandoro - R182565F')
st.write('rufarohazelnyandoro@gmail.com')
with m2:
st.write("|")
with l:
st.write('Lyka - R182565F')
st.write('lyka@gmail.com') | 585 | 0 | 24 |
8eb50fbdaf93c046a32c9ce8f6cfefb2ba329d1a | 1,749 | py | Python | init.py | substancegrise/POKEMON_TP | 641c5be5a641c39a52c6e382528218a36807badb | [
"Xnet",
"X11"
] | null | null | null | init.py | substancegrise/POKEMON_TP | 641c5be5a641c39a52c6e382528218a36807badb | [
"Xnet",
"X11"
] | null | null | null | init.py | substancegrise/POKEMON_TP | 641c5be5a641c39a52c6e382528218a36807badb | [
"Xnet",
"X11"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import mysql.connector
conn = mysql.connector.connect(host="localhost", user="root", password="root", database="pokedex", port=8889)
cursor = conn.cursor(buffered=True)
# connexion et récupération des données / parse du site
#response = requests.get("https://pokemondb.net/pokedex/all")
#html = str(response.content)
fichier = open("data_pokemon.html","r")
html = fichier.read()
fichier.close()
soup = BeautifulSoup(html, "html.parser")
tab = soup.find(id="pokedex")
for link in tab.find_all("tr"):
tt = []
x = 0
type_ids = []
for l in link.find_all("td"):
if x == 1:
if l.find_all("a"):
nom = l.find_all("a")
tt.append(nom[0].text)
else:
tt.append("")
if x == 2:
for type_poke in l.find_all("a"):
nom_type = type_poke.text
cursor.execute("SELECT id FROM type WHERE nom LIKE '"+nom_type+"%' ;")
test_type = cursor.fetchone()
if test_type == None:
cursor.execute("INSERT INTO type VALUES (0, '"+nom_type+"');")
type_ids.append(cursor.lastrowid)
if x == 0 or x > 2:
tt.append(l.text)
x = x+1
if len(tt) > 0 and tt[1] != "":
cursor.execute("""INSERT INTO pokemon (ref, nom, total, hp, attack, defense, sp_atk, sp_def, speed) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)""", tt)
pokemon_id = cursor.lastrowid
for type_id in type_ids:
print(type_id)
cursor.execute("INSERT INTO pokemon_types VALUES (0, "+str(pokemon_id)+", "+str(type_id)+");")
cursor.close()
conn.commit()
conn.close() | 26.104478 | 159 | 0.563751 | import requests
from bs4 import BeautifulSoup
import mysql.connector
conn = mysql.connector.connect(host="localhost", user="root", password="root", database="pokedex", port=8889)
cursor = conn.cursor(buffered=True)
# connexion et récupération des données / parse du site
#response = requests.get("https://pokemondb.net/pokedex/all")
#html = str(response.content)
fichier = open("data_pokemon.html","r")
html = fichier.read()
fichier.close()
soup = BeautifulSoup(html, "html.parser")
tab = soup.find(id="pokedex")
for link in tab.find_all("tr"):
tt = []
x = 0
type_ids = []
for l in link.find_all("td"):
if x == 1:
if l.find_all("a"):
nom = l.find_all("a")
tt.append(nom[0].text)
else:
tt.append("")
if x == 2:
for type_poke in l.find_all("a"):
nom_type = type_poke.text
cursor.execute("SELECT id FROM type WHERE nom LIKE '"+nom_type+"%' ;")
test_type = cursor.fetchone()
if test_type == None:
cursor.execute("INSERT INTO type VALUES (0, '"+nom_type+"');")
type_ids.append(cursor.lastrowid)
if x == 0 or x > 2:
tt.append(l.text)
x = x+1
if len(tt) > 0 and tt[1] != "":
cursor.execute("""INSERT INTO pokemon (ref, nom, total, hp, attack, defense, sp_atk, sp_def, speed) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)""", tt)
pokemon_id = cursor.lastrowid
for type_id in type_ids:
print(type_id)
cursor.execute("INSERT INTO pokemon_types VALUES (0, "+str(pokemon_id)+", "+str(type_id)+");")
cursor.close()
conn.commit()
conn.close() | 0 | 0 | 0 |
6a97bb611be63764ae92d04e937cf473eb3bec37 | 191,812 | pyt | Python | Create arcgis project tool.pyt | traderboy/collector-tools | 2afa61a0f93095461c6b324ef9641786a969c0a7 | [
"MIT"
] | null | null | null | Create arcgis project tool.pyt | traderboy/collector-tools | 2afa61a0f93095461c6b324ef9641786a969c0a7 | [
"MIT"
] | null | null | null | Create arcgis project tool.pyt | traderboy/collector-tools | 2afa61a0f93095461c6b324ef9641786a969c0a7 | [
"MIT"
] | null | null | null | import subprocess
import arcpy
from arcpy import env
import sqlite3
import xml.etree.ElementTree
import os
import json
import zipfile
from arcpy import mapping
import os
from xml.dom.minidom import parse
from datetime import datetime
import time
import copy
import shutil
import types
import ConfigParser
import copy
import logging
import sys
Config = ConfigParser.ConfigParser()
#logging.basicConfig(filename="logfile.txt")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create a file handler
handler = logging.FileHandler("logfile.txt")
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
arcpy.env.overwriteOutput = True
#notes: urlKey in portals.self.json must be blank or it will try to authenticate at arcgis.com
#other gotchas
#For polygon styles, makes sure to use "style": "esriSFSSolid" and NOT "style": "esriSLSSolid" for the outline style
#OBS! OBJECTID in layers/tables MUST be int32, not integer. Otherwise lookups will not work, even after creating new records
#import time
#env.workspace = "CURRENT"
#env.addOutputsToMap = False
#env.overwriteOutput = True
arcpy.env.overwriteOutput = True
toolkitPath = os.path.abspath(os.path.dirname(__file__)).replace("\\","/")
gdal_path = ""
ogr2ogr_path = ""
ogrinfo_path = ""
gdal_data_path = ""
spatialite_path = ""
#toolkitPath+"/gdal/ogr2ogr.exe
#create a replica sqlite database for a single layer/table
#DatasetID
#DatasetName
#DatasetType>esriDTFeatureClass</DatasetType>
#LayerID
#LayerName
# Open original file
#et = xml.etree.ElementTree.parse(xmlFile)
# Append new tag: <a x='1' y='abc'>body text</a>
#new_tag = xml.etree.ElementTree.SubElement(et.getroot(), 'a')
#new_tag.text = 'body text'
#new_tag.attrib['x'] = '1' # must be str; cannot be an int
#new_tag.attrib['y'] = 'abc'
# Write back to file
#et.write('file.xml')
#et.write('file_new.xml')
#def getLayerDefinition(lyr,symbol):
# return getSymbol(lyr,symbols[featureName],lyr.name)
# layerDef={
# "drawingInfo":{
# "renderer":getRendere(lyr)
# }
# }
#get the fields for the popup
#{
# "id" : <relationshipId1>,
# "name" : "<relationshipName1>",
# "relatedTableId" : <relatedTableId1>,
# "cardinality" : "<esriRelCardinalityOneToOne>|<esriRelCardinalityOneToMany>|<esriRelCardinalityManyToMany>";,//Added at 10.1
# "role" : "<esriRelRoleOrigin>|<esriRelRoleDestination>";,//Added at 10.1
# "keyField" : "<keyFieldName2>",//Added at 10.1
# "composite" : <true>|<false>,//Added at 10.1
# "relationshipTableId": <attributedRelationshipClassTableId>, //Added in 10.1. Returned only for attributed relationships
# "keyFieldInRelationshipTable": "<key field in AttributedRelationshipClass table that matches keyField>" //Added in 10.1. Returned only for attributed relationships
#},
#def getRelationships(lyr,lyrid,cnt,tables,relationshipObj):
# getFeatureClassParentWorkspace: This script gets the geodatabase for a
# feature class. The trick here is that feature classes can be within a
# feature dataset so you need to account for two possible levels in the
# directory structure.
#see http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r30000019t000000
# and http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r3000000n5000000
#"symbol":{ "type": "esriSMS", "style": "esriSMSSquare", "color": [76,115,0,255], "size": 8, "angle": 0, "xoffset": 0, "yoffset": 0, "outline": { "color": [152,230,0,255], "width": 1 } }
if __name__ == '__main__':
if sys.executable.find("python.exe") != -1:
main()
| 49.270999 | 1,296 | 0.61883 | import subprocess
import arcpy
from arcpy import env
import sqlite3
import xml.etree.ElementTree
import os
import json
import zipfile
from arcpy import mapping
import os
from xml.dom.minidom import parse
from datetime import datetime
import time
import copy
import shutil
import types
import ConfigParser
import copy
import logging
import sys
Config = ConfigParser.ConfigParser()
#logging.basicConfig(filename="logfile.txt")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create a file handler
handler = logging.FileHandler("logfile.txt")
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
arcpy.env.overwriteOutput = True
#notes: urlKey in portals.self.json must be blank or it will try to authenticate at arcgis.com
#other gotchas
#For polygon styles, makes sure to use "style": "esriSFSSolid" and NOT "style": "esriSLSSolid" for the outline style
#OBS! OBJECTID in layers/tables MUST be int32, not integer. Otherwise lookups will not work, even after creating new records
#import time
#env.workspace = "CURRENT"
#env.addOutputsToMap = False
#env.overwriteOutput = True
arcpy.env.overwriteOutput = True
toolkitPath = os.path.abspath(os.path.dirname(__file__)).replace("\\","/")
gdal_path = ""
ogr2ogr_path = ""
ogrinfo_path = ""
gdal_data_path = ""
spatialite_path = ""
#toolkitPath+"/gdal/ogr2ogr.exe
class Toolbox(object):
def __init__(self):
self.label = "Create ArcServices toolbox"
self.alias = "arcservices"
self.canRunInBackground = False
# List of tool classes associated with this toolbox
self.tools = [CreateNewProject]
class CreateNewProject(object):
def __init__(self):
self.label = "Convert map document to JSON"
self.alias="arcservices"
self.description = "Creates the JSON files for a standalone ArcGIS Online/Server node application. Note: you need to fill out the project information in the File->Map Document Properties before running."
def getParameterInfo(self):
Config.read(toolkitPath+"/settings.ini")
servername = arcpy.Parameter(
displayName="Enter server FQDN (example: www.esri.com)",
name="servername",
datatype="GPString",
parameterType="Required",
direction="Input",
multiValue=False)
try:
servername.value = Config.get("settings","server")
except Exception as e:
pass
if not servername.value:
servername.value = "my.host.com"
username = arcpy.Parameter(
displayName="Enter your username",
name="username",
datatype="GPString",
parameterType="Optional",
direction="Input",
multiValue=False)
try:
username.value= Config.get("settings","username")
except Exception as e:
pass
if not username.value:
username.value="user"
#projecttitle = arcpy.Parameter(
# displayName="Enter your project title",
# name="projectname",
# datatype="GPString",
# parameterType="Required",
# direction="Input",
# multiValue=False)
#projectname = arcpy.Parameter(
# displayName="Enter your project name (no spaces)",
# name="projectname",
# datatype="GPString",
# parameterType="Required",
# direction="Input",
# multiValue=False)
#tags = arcpy.Parameter(
# displayName="Enter tags",
# name="tags",
# datatype="GPString",
# parameterType="Optional",
# direction="Input",
# multiValue=False)
#
#summary = arcpy.Parameter(
# displayName="Enter project summary",
# name="summary",
# datatype="GPString",
# parameterType="Optional",
# direction="Input",
# multiValue=False)
#
#description = arcpy.Parameter(
# displayName="Enter project description",
# name="description",
# datatype="GPString",
# parameterType="Optional",
# direction="Input",
# multiValue=False)
datasrc = arcpy.Parameter(
displayName="Enter default database",
name="datasrc",
datatype="GPString",
parameterType="Required",
direction="Input",
multiValue=False)
datasrc.filter.type = "ValueList"
datasrc.filter.list = ["File","Sqlite","Postgresql"]
datasrc.value = "Sqlite"
outputfolder = arcpy.Parameter(
displayName="Enter output folder",
name="outputfolder",
datatype="DEFolder",
parameterType="Required",
direction="Input")
try:
outputfolder.value= Config.get("settings","destination")
except Exception as e:
pass
if not outputfolder.value:
outputfolder.value=os.getcwd().replace("\\","/")
#sqlitedb = arcpy.Parameter()
#sqlitedb.name = u'Output_Report_File'
#sqlitedb.displayName = u'Output Sqlite database'
#sqlitedb.parameterType = 'Optional'
#sqlitedb.direction = 'Output'
#sqlitedb.datatype = u'File'
#try:
# sqlitedb.value= Config.get("settings","sqlitedb")
#except Exception as e:
# pass
pg = arcpy.Parameter()
pg.name = u'Output_DB_String'
pg.displayName = u'Postgresql database connection string Ex: PG:"host=localhost user=postgres dbname=gis"'
pg.parameterType = 'Optional'
pg.direction = 'Output'
pg.datatype = u'GPString'
try:
pg.value= Config.get("settings","pg")
except Exception as e:
pass
spatialite_path = arcpy.Parameter()
spatialite_path.name = u'spatialite_executable'
spatialite_path.displayName = u'Path to spatialite executable'
spatialite_path.parameterType = 'Required'
spatialite_path.direction = 'Input'
#spatialite_path.datatype="DEFolder"
spatialite_path.datatype = u'File'
try:
spatialite_path.value= Config.get("settings","spatialite_path")
except Exception as e:
pass
gdal_path = arcpy.Parameter()
gdal_path.name = u'gdal_executable'
gdal_path.displayName = u'Path to ogr2ogr and ogrinfo executables'
gdal_path.parameterType = 'Required'
gdal_path.direction = 'Input'
gdal_path.datatype = "DEFolder"
#gdal_path.datatype = u'File'
try:
gdal_path.value= Config.get("settings","gdal_path")
except Exception as e:
pass
cert = arcpy.Parameter()
cert.name = u'cert_path'
cert.displayName = u'cert file for Https server'
cert.parameterType = 'Optional'
cert.direction = 'Input'
cert.datatype = u'File'
try:
cert.value= Config.get("settings","cert")
except Exception as e:
pass
pem = arcpy.Parameter()
pem.name = u'pem_path'
pem.displayName = u'pem file for Https server'
pem.parameterType = 'Optional'
pem.direction = 'Input'
pem.datatype = u'File'
try:
pem.value= Config.get("settings","pem")
except Exception as e:
pass
#param0.filter.type = "ValueList"
#param0.filter.list = ["Street","Aerial","Terrain","Topographic"]
parameters = [servername,username,datasrc,outputfolder,pg,spatialite_path,gdal_path,cert,pem]
#username,projecttitle,projectname,tags,summary,description,
return parameters
def isLicensed(self): #optional
return True
def updateParameters(self, parameters): #optional
#if parameters[2].altered:
# try:
# os.makedirs(parameters[2].valueAsText)
# except Exception as e:
# return
return
def updateMessages(self, parameters): #optional
return
def execute(self, parameters, messages):
global toolkitPath
global ogr2ogr_path
global ogrinfo_path
global spatialite_path
global gdal_data_path
global gdal_path
serverName = parameters[0].valueAsText
username = parameters[1].valueAsText
datasrc = parameters[2].valueAsText
baseDestinationPath = parameters[3].valueAsText
#sqliteDb = parameters[3].valueAsText
pg = parameters[4].valueAsText
spatialite_path=parameters[5].valueAsText
gdal_path=parameters[6].valueAsText
cert = parameters[7].valueAsText
pem = parameters[8].valueAsText
#toolkitPath+"/spatialite/spatialite.exe
created_ts=int(time.time()*1000)
sep = "/"
printMessage("****************************************************************")
printMessage("Parameters")
# suppose you want to add it to the current MXD (open MXD)
#mxd,host,user,datasrc,output,pg,spatialite_path,gdal_path,cert,pem,
try:
if type(messages)==types.ListType:
vals = messages
#vals = messages.split("|")
if len(vals)>1:
serverName = vals[1]
if len(vals)>2:
username= vals[2]
if len(vals)>3:
datasrc=vals[3]
if len(vals)>4:
baseDestinationPath=vals[4].replace("\\","/")
#if len(vals)>4:
# sqliteDb=vals[4]
if len(vals)>5:
pg=vals[5]
if len(vals)>6:
spatialite_path=vals[6]
if len(vals)>7:
gdal_path=vals[7]
if len(vals)>8:
cert=vals[8]
if len(vals)>9:
pem=vals[9]
mxdName=vals[0].replace("\\","/")
mxd = arcpy.mapping.MapDocument(mxdName)
else:
mxd = arcpy.mapping.MapDocument("CURRENT")
except Exception as e:
printMessage("Still Unable to open map document. Make sure background processing is unchecked in the geoprocessing options")
return
if datasrc == "File":
datasrc = "file"
elif datasrc=="Sqlite":
datasrc = "sqlite"
elif datasrc=="Postgresql":
datasrc = "pgsql"
else:
datasrc="sqlite"
if not pem:
pem=""
if not cert:
cert=""
#if sqliteDb.find(".sqlite") == -1:
# sqliteDb = sqliteDb + ".sqlite"
#put file in the catalogs folder
sqliteDb = os.path.join(baseDestinationPath,"catalogs","collectorDb.sqlite") #.replace("\\","/")
#if os.path.exists(sqliteDb):
# try:
# os.remove(sqliteDb)
# except Exception as e:
# printMessage("Unable to delete collectoDb.sqlite file. Is it currently open in another application or is the collector-server running?")
# printMessage(e)
# return
# #os._exit(1)
serviceName = mxd.activeDataFrame.name.replace(" ","").lower()
if serviceName=='Layers':
printMessage("Rename the dataframe from Layers to service name. Must be valid service name (no spaces)")
return
#mxd.makeThumbnail ()
#toolkitPath = os.path.abspath(os.path.dirname(__file__)).replace("\\","/")
templatePath = toolkitPath + "/templates"
if not os.path.exists(templatePath):
printMessage("Template path not found: " + templatePath)
return
cfgfile = open(toolkitPath+"/settings.ini",'w')
try:
Config.add_section("settings")
except Exception as e:
pass
Config.set("settings","server",serverName)
Config.set("settings","username",username)
Config.set("settings","mxd",mxd.filePath)
Config.set("settings","destination",baseDestinationPath)
#Config.set("settings","remotedestination",remoteDestinationPath)
Config.set("settings","sqlitedb",sqliteDb)
Config.set("settings","spatialite_path",spatialite_path)
Config.set("settings","gdal_path",gdal_path)
Config.set("settings","cert",cert)
Config.set("settings","pem",pem)
Config.set("settings","datasrc",datasrc)
if pg:
Config.set("settings","pg",pg)
else:
Config.set("settings","pg","")
Config.write(cfgfile)
cfgfile.close()
del cfgfile
#locate spatialite, ogr2ogr, and ogrinfo executables
#if sys.platform== 'win32':
gdal_path=gdal_path.replace("/","\\")
if not os.path.exists(spatialite_path ):
# spatialite_path = os.path.join(spatialite_path , "spatialite.exe") #.replace("/","\\")
#elif os.path.exists(spatialite_path + os.sep + "spatialite"):
# spatialite_path = (spatialite_path + os.sep + "spatialite")
#else:
printMessage("Unable to locate spatialite executable")
return
if os.path.exists(gdal_path + "/bin/gdal/apps/ogr2ogr.exe"):
ogr2ogr_path = os.path.join(gdal_path ,"bin","gdal","apps","ogr2ogr.exe") #.replace("/","\\")
elif os.path.exists(gdal_path + os.sep + "ogr2ogr"):
ogr2ogr_path = (gdal_path + os.sep + "ogr2ogr")
else:
printMessage("Unable to locate ogr2ogr executable")
return
if os.path.exists(gdal_path + os.sep + "/bin/gdal/apps/ogrinfo.exe"):
ogrinfo_path = os.path.join(gdal_path ,"bin","gdal","apps","ogrinfo.exe") #.replace("/","\\")
elif os.path.exists(gdal_path + os.sep + "ogrinfo"):
ogrinfo_path = (gdal_path + os.sep + "ogrinfo")
else:
printMessage("Unable to locate ogrinfo executable")
return
if os.path.exists(gdal_path + os.sep + "/bin/gdal-data"):
gdal_data_path = os.path.join(gdal_path ,"bin","gdal-data")
#(gdal_path + "/bin/gdal-data").replace("/","\\")
elif os.path.exists(gdal_path + os.sep + "gdal-data"):
gdal_data_path = (gdal_path + os.sep + "gdal-data")
else:
printMessage("Unable to locate gdal-data path")
return
#try:
# arcpy.gp.CreateSQLiteDatabase(sqliteDb, "SPATIALITE")
#except Exception as e:
# arcpy.AddMessage("Database already exists")
printMessage("Server name: " +serverName)
printMessage("User name: " + username)
printMessage("MXD Path: " + mxd.filePath)
printMessage("Default data source: " + datasrc)
printMessage("Destination path: " + baseDestinationPath)
#printMessage("Remote destination path: " + remoteDestinationPath)
printMessage("Sqlite path: " + os.path.join("catalogs","collectorDb.sqlite"))
printMessage("Spatialite path: " + spatialite_path)
printMessage("ogr2ogr path: " + ogr2ogr_path)
printMessage("ogrinfo path: " + ogrinfo_path)
printMessage("gdal-data path: " + gdal_data_path)
printMessage("cert path: " + cert)
printMessage("pem path: " + pem)
if pg:
printMessage("Postgresql connection: " + pg)
printMessage("****************************************************************")
printMessage("Settings")
printMessage("Exporting dataframe: " + mxd.activeDataFrame.name)
if baseDestinationPath:
baseDestinationPath = unicode(baseDestinationPath).encode('unicode-escape')
baseDestinationPath=baseDestinationPath.replace("\\","/")+ sep +"catalogs"
else:
baseDestinationPath = toolkitPath+ sep +"catalogs"
#baseDestinationPath = baseDestinationPath + sep + serviceName
serviceDestinationPath = baseDestinationPath + sep + serviceName
#if the folder does not exist create it
if not os.path.exists(baseDestinationPath):
os.makedirs(serviceDestinationPath)
else:
#check to see if service already exists. If so, remove it so it can be overwritten
if os.path.exists(serviceDestinationPath):
try:
printMessage("Removing "+serviceDestinationPath+" directory")
shutil.rmtree(serviceDestinationPath)
except Exception as e:
printMessage("Unable to remove destination path")
return
try:
os.makedirs(serviceDestinationPath)
except Exception as e:
printMessage("Unable to create destination path")
servicesDestinationPath = serviceDestinationPath + "/services"
if not os.path.exists(servicesDestinationPath):
try:
os.makedirs(servicesDestinationPath)
except Exception as e:
pass
printMessage("Services path: " +servicesDestinationPath)
dataDestinationPath = serviceDestinationPath + "/shapefiles"
if not os.path.exists(dataDestinationPath):
try:
os.makedirs(dataDestinationPath)
except Exception as e:
pass
printMessage("Shapefile path: " +dataDestinationPath)
replicaDestinationPath = serviceDestinationPath + "/replicas"
if not os.path.exists(replicaDestinationPath):
try:
os.makedirs(replicaDestinationPath)
except Exception as e:
pass
printMessage("Replica path: " +replicaDestinationPath)
#remoteReplicaDestinationPath = remoteDestinationPath + "/catalogs/" + serviceName + "/replicas"
mapfileDestinationPath = serviceDestinationPath + "/mapfiles"
if not os.path.exists(mapfileDestinationPath):
os.makedirs(mapfileDestinationPath)
printMessage("Mapfile path: " +mapfileDestinationPath)
printMessage("****************************************************************")
printMessage("Log output")
symbols = getSymbology(mxd)
dataFrames = arcpy.mapping.ListDataFrames(mxd, "*")
#service = {}
project = None
config = None
if os.path.exists(baseDestinationPath + "/config.json"):
config=openJSON(baseDestinationPath + "/config.json")
# Look up an existing project entry for this service in the loaded config.
# BUG FIX: config["projects"] is a dict keyed by service name, so iterating
# it yields key *strings*; the old code indexed the key (`proj["name"]`,
# a TypeError silently swallowed by the bare except) and would have bound
# `project` to the key string instead of the project dict.
try:
    for proj_name in config["projects"]:
        if proj_name == serviceName:
            printMessage("Found existing project for " + proj_name + ": updating")
            project = config["projects"][proj_name]
            break
except Exception:
    # Malformed/missing "projects" section: fall through and create one below.
    pass
if not project:
    # No existing entry: register a fresh project dict under the service name.
    project = {}
    config["projects"][serviceName] = project
#project["name"]=serviceName
#config["projects"].append(project)
#service = {}
#project["services"]={}
#project[serviceName]=service
else:
try:
project = config["projects"][serviceName]
except Exception as e:
project = {}
#project["services"]={}
config["projects"][serviceName] =project
#try:
# config["project"]["services"][serviceName]=service
#except:
# printMessage("Service already exists: " + serviceName)
#config["services"][serviceName]["layers"]={}
#config["services"][serviceName]["mxd"]=mxd.filePath
project["layers"]={}
project["mxd"]=mxd.filePath
else:
config={}
config["projects"]={}
project = {}
#project["services"]={}
config["projects"][serviceName]=project
#service["name"]=serviceName
#config["services"]=[service]
project["layers"]={}
project["mxd"]=mxd.filePath
#config["services"][serviceName]={"layers":{}}
#config["services"][serviceName]["mxd"]=mxd.filePath
config["hostname"]=serverName
config["username"]=username
config["pemPath"]=pem
config["certPath"]=cert
config["httpPort"]="80"
config["httpsPort"]="443"
config["arcMapVersion"]= arcpy.GetInstallInfo()['Version']
config["defaultDatabase"]=datasrc
config["dataPath"]="catalogs"
config["dataSourceTypes"]=["file","sqlite","pgsql"]
#config["services"][serviceName]["mxd"]=mxd.filePath
#config["services"][serviceName]["sqliteDb"]=sqliteDb
#config["services"][serviceName]["pg"]=pg
#config["services"][serviceName]["dataSource"]="sqlite"
#config["services"][serviceName]["rootPath"]=baseDestinationPath
config["sqliteDb"]="catalogs/collectorDb.sqlite"
#sqliteDb
config["pg"]=pg
#project["dataSource"]="sqlite"
#config["services"][serviceName]["layers"]={}
# Pull author metadata from the MXD (File -> Map Document Properties) and
# derive first/last name, a synthetic email address, and a fallback username.
fullname = mxd.author
if fullname=="":
printMessage("Author missing in File->Map Document Properties")
return
fullname_split = fullname.split(' ')
first_name = fullname_split[0]
last_name=""
if len(fullname_split)>1:
last_name = fullname_split[1]
# NOTE(review): a single-word author yields "First.@server"; a three-part
# name drops everything after the second word — confirm this is acceptable.
email_address = first_name + '.' + last_name + '@' + serverName
if not username:
username=fullname.lower().replace(" ","")
title = mxd.title
if title=="":
printMessage("Title missing in File->Map Document Properties")
return
tags = mxd.tags
if not tags:
tags=""
summary = mxd.summary
if not summary:
summary=""
description = mxd.description
if not description:
description=""
initializeSqlite(sqliteDb)
if not os.path.exists(baseDestinationPath + "/portals.self.json"):
portals_self_json=openJSON(templatePath + "/portals.self.json")
else:
portals_self_json=openJSON(baseDestinationPath + "/portals.self.json")
portals_self_json['portalHostname']=serverName
portals_self_json['defaultExtent']['xmin']=mxd.activeDataFrame.extent.XMin
portals_self_json['defaultExtent']['ymin']=mxd.activeDataFrame.extent.YMin
portals_self_json['defaultExtent']['xmax']=mxd.activeDataFrame.extent.XMax
portals_self_json['defaultExtent']['ymax']=mxd.activeDataFrame.extent.YMax
portals_self_json['user']['fullName']=fullname
portals_self_json['user']['firstName']=first_name
portals_self_json['user']['lastName']=last_name
portals_self_json['user']['email']=email_address
portals_self_json['user']['username']=username
file = saveJSON(baseDestinationPath + "/portals.self.json",portals_self_json)
LoadCatalog(sqliteDb,"portals", "self",file)
if not os.path.exists(baseDestinationPath + "/community.users.json"):
community_users_json=openJSON(templatePath + "/community.users.json")
else:
community_users_json=openJSON(baseDestinationPath + "/community.users.json")
community_users_json['fullName']=fullname
community_users_json['firstName']=first_name
community_users_json['lastName']=last_name
community_users_json['email']=email_address
community_users_json['username']=username
community_users_json['created']=created_ts
community_users_json['modified']=created_ts
community_users_json['lastLogin']=created_ts
#community_users_json['groups'][0]['userMembership']['username']=username
file = saveJSON(baseDestinationPath + "/community.users.json",community_users_json)
LoadCatalog(sqliteDb,"community", "users",file)
#User info
content_users_json=openJSON(templatePath + "/content.users.json")
content_users_json['username']=username
#content_users_json['items'][0]['created']=int(time.time()*1000)
file = saveJSON(baseDestinationPath + "/content.users.json",content_users_json)
LoadCatalog(sqliteDb,"content", "users",file)
#Search results
if not os.path.exists(baseDestinationPath + "/search.json"):
search_json=openJSON(templatePath + "/search.json")
#search_json['results'][0]=username
baseResult = search_json['results'][0]
search_json['results']=[]
else:
search_json=openJSON(baseDestinationPath + "/search.json")
baseResult = search_json['results'][0]
# See if a search result already exists for this service and drop it so the
# refreshed entry appended below replaces it.
# BUG FIX: the original deleted from the list while enumerating it, which
# skips the element after each deletion; rebuild the list instead.
search_json['results'] = [r for r in search_json['results'] if r["id"] != serviceName]
#search_json['results']
#add stuff for each dataframe below
#community groups
#community_groups_json=openJSON(templatePath + "/community.groups.json")
#saveJSON(destinationPath + "/community.groups.json",community_groups_json)
#shutil.copy2(templatePath + "/community.groups.json", baseDestinationPath + "/community.groups.json")
community_groups_json=openJSON(templatePath + "/community.groups.json")
file = saveJSON(baseDestinationPath + "/community.groups.json",community_groups_json)
LoadCatalog(sqliteDb,"community", "groups",file)
#os.system("copy "+ templatePath + "/community.groups.json " + servicesDestinationPath + "/community.groups.json")
#result = 0
if not os.path.exists(baseDestinationPath+"/FeatureServer.json"):
feature_services={"currentVersion":arcpy.GetInstallInfo()['Version'],"folders":[],"services":[]}
else:
feature_services=openJSON(baseDestinationPath + "/FeatureServer.json")
#if not os.path.exists(servicesDestinationPath+"/FeatureServer.json"):
# saveJSON(servicesDestinationPath + "/FeatureServer.json",response)
#else:
# featureServer_json=openJSON(servicesDestinationPath + "/FeatureServer.json")
# if not serviceName in featureServer_json['folders']:
# featureServer_json['folders'].append(serviceName);
# saveJSON(servicesDestinationPath + "/FeatureServer.json",featureServer_json)
# #create base FeatureServer.json file with folders for each service
# #,"folders":["Canvas","Demographics","Elevation","Ocean","Polar","Reference","Specialty","Utilities"]
#for dataFrame in dataFrames:
if mxd.activeDataFrame:
dataFrame = mxd.activeDataFrame
serviceName = dataFrame.name.replace(" ","").lower()
#mxd.activeDataFrame.name
if serviceName=='Layers':
printMessage("Rename the dataframe from Layers to service name. Must be valid service name (no spaces)")
return
# Replace the service name if it already appears in the folder list.
# BUG FIX: deleting from the list while enumerating it skips the element
# that follows each deletion; filter into a new list instead (consistent
# with the search-results de-duplication earlier in this script).
feature_services["folders"] = [v for v in feature_services["folders"] if v != serviceName]
# Flush the services table for this service before re-registering it.
ClearService(sqliteDb, serviceName)
#must set dataframe projection to web mercator
#outCS = arcpy.SpatialReference(3785) #the code for WGS84 Web Mercator
outCS = arcpy.SpatialReference(3857) #the code for WGS84 Web Mercator
#dataFrame.spatialReference = outCS
#else:
# dataFrame = dataFrame #mxd.activeDataFrame
operationalLayers = []
operationalTables = []
operationalTablesObj = []
allData=[]
layerIds={}
id=0
#for df in arcpy.mapping.ListDataFrames(mxd):
for lyr in arcpy.mapping.ListLayers(mxd, "", dataFrame):
# Exit if the current layer is not a service layer.
if lyr.isServiceLayer or lyr.supports("SERVICEPROPERTIES"): # or not lyr.visible
continue
#make sure layer is web mercator
cur_projection = arcpy.Describe(lyr).spatialReference
if cur_projection.name != outCS.name:
printMessage(lyr.name + " is NOT in Web Mercator projection. Skipping...")
printMessage(cur_projection.name)
printMessage(outCS.name)
continue
#lyr.visible=True
#opLayer = {
# "id": lyr.name,
# "title": lyr.name,
# "url": lyr.serviceProperties["Resturl"]+ "/" + lyr.longName + "/" + lyr.serviceProperties["ServiceType"],
# "opacity": (100 - lyr.transparency) / 100,
# "visibility": lyr.visible
#}
printMessage("Exporting layer: " + lyr.name)
operationalLayers.append(lyr)
allData.append(lyr)
layerIds[lyr.name]=id
id = id+1
#arcpy.mapping.RemoveLayer(df, lyr)
if len(operationalLayers)==0:
printMessage("No Feature layers found in data frame!")
return
id=len(operationalLayers)
for tbl in arcpy.mapping.ListTableViews(mxd, "", dataFrame):
operationalTables.append(tbl)
allData.append(tbl)
operationalTablesObj.append({"name":tbl.name,"id":id})
layerIds[tbl.name]=id
id=id+1
#now add any attachment tables
for lyr in allData:
desc = arcpy.Describe(lyr)
if hasattr(desc, "layer"):
featureName=os.path.basename(desc.layer.catalogPath)
rootFGDB=desc.layer.catalogPath.replace("\\","/")
else:
featureName=os.path.basename(desc.catalogPath)
rootFGDB=os.path.dirname(desc.catalogPath).replace("\\","/")
#layerIds[tbl.name]=id
layerIds[featureName]=layerIds[lyr.name]
if arcpy.Exists(rootFGDB+"/"+featureName+"__ATTACH"):
layerIds[featureName+"__ATTACH"]=id
id=id+1
#lyrpath=os.getcwd().replace("\\","/")
#lyrpath = os.path.abspath(os.path.dirname(__file__)).replace("\\","/")
ext = operationalLayers[0].getExtent()
dataFrame.extent = ext
desc = arcpy.Describe(operationalLayers[0])
if hasattr(desc, "layer"):
ws=desc.layer.catalogPath.replace("\\","/")
else:
ws=os.path.dirname(desc.catalogPath).replace("\\","/")
#for j,rel in enumerate(allData):
# printMessage(str(j) + ": " + rel.name)
relationships = [c.name for c in arcpy.Describe(ws).children if c.datatype == "RelationshipClass"]
relArr=[]
desc = arcpy.Describe(lyr)
#if not desc.relationshipClassNames:
# continue
# return rel
if hasattr(desc, "layer"):
featureName=os.path.basename(desc.layer.catalogPath)
rootFGDB=desc.layer.catalogPath.replace("\\","/")
else:
featureName=os.path.basename(desc.catalogPath)
rootFGDB=os.path.dirname(desc.catalogPath).replace("\\","/")
project["fgdb"]=rootFGDB
#project["replica"]=remoteReplicaDestinationPath+"/"+serviceName+".geodatabase"
project["replica"]="catalogs/" + serviceName + "/replicas/"+serviceName+".geodatabase"
#config["services"][serviceName]["fgdb"]=rootFGDB
#config["services"][serviceName]["replica"]=replicaDestinationPath+"/"+serviceName+".geodatabase"
# Accumulators for relationship-class metadata discovered in the FGDB:
#   relationshipList: origin layer id -> backward-link info (name, key field)
#   relationshipObj:  feature-class name -> list of Esri-style relationship dicts
#   relations:        sequential relationship id -> origin/destination summary
#   destIds:          origin layer id (as str) -> relationship id, reused so the
#                     reverse (destination) link shares the forward link's id
relationshipList = {}
relationshipObj = {}
relations={}
id=0
destIds={}
printMessage("Finding relationships")
for rc in relationships:
relDesc = arcpy.Describe(rootFGDB+"/"+rc)
# Attachment relationships are handled separately (see __ATTACH logic above).
if relDesc.isAttachmentRelationship:
continue
# Keep only relationships whose origin AND destination classes are present
# in the map; layerIds was built from the exported layers/tables above.
try:
originId=layerIds[relDesc.originClassNames[0]]
except:
printMessage("Skipping unused relationship: " + relDesc.originClassNames[0])
continue
try:
destId=layerIds[relDesc.destinationClassNames[0]]
except:
printMessage("Skipping unused relationship: " + relDesc.destinationClassNames[0])
continue
#if not layerIds.has_key(originId):
# printMessage("Skipping relation: " + relDesc.destinationClassNames[0])
# continue
#printMessage("Relationship Name: " + rc)
#printMessage("Origin Class Names")
#printMessage(relDesc.originClassNames)
#printMessage("Origin Class Keys")
#printMessage(relDesc.originClassKeys)
#printMessage("Destination Class Names")
#printMessage(relDesc.destinationClassNames)
#printMessage("Destination Class Keys")
#printMessage(relDesc.destinationClassKeys)
#printMessage("Key type: "+relDesc.keyType)
#printMessage(relDesc.notification)
#printMessage("backwardPathLabel: "+relDesc.backwardPathLabel)
#printMessage("forwardPathLabel: "+relDesc.forwardPathLabel)
#originId=getDataIndex(allData,relDesc.originClassNames[0])
#destId=getDataIndex(allData,relDesc.destinationClassNames[0])
relatedTableId=0
role=""
key=""
relations[str(id)]={"oTable":relDesc.originClassNames[0],"dTable":relDesc.destinationClassNames[0],"oJoinKey":relDesc.originClassKeys[0][0],"dJoinKey":relDesc.originClassKeys[1][0],"oId":originId,"dId":destId}
relationshipList[originId]={"origin":originId,"dest":destId,"id":id,"name":relDesc.backwardPathLabel,"keyField":relDesc.originClassKeys[1][0]}
relObj = {"id":id,"name":relDesc.forwardPathLabel,"relatedTableId":destId,"cardinality":"esriRelCardinality"+relDesc.cardinality,"role":"esriRelRoleOrigin","keyField":relDesc.originClassKeys[0][0],"composite":relDesc.isComposite}
destIds[str(originId)]=id
id=id+1
try:
len(relationshipObj[relDesc.originClassNames[0]])
except:
relationshipObj[relDesc.originClassNames[0]]=[]
relationshipObj[relDesc.originClassNames[0]].append(relObj)
try:
len(relationshipObj[relDesc.destinationClassNames[0]])
except:
relationshipObj[relDesc.destinationClassNames[0]]=[]
#if relationship already exists, use its id instead
destId = id
#if destIds[originId]:
try:
destId = destIds[str(originId)]
except:
pass
relObj = {"id":destId,"name":relDesc.backwardPathLabel,"relatedTableId":originId,"cardinality":"esriRelCardinality"+relDesc.cardinality,"role":"esriRelRoleDestination","keyField":relDesc.originClassKeys[1][0],"composite":relDesc.isComposite}
relationshipObj[relDesc.destinationClassNames[0]].append(relObj)
#printMessage(json.dumps(relationshipObj, indent=4, sort_keys=True))
#print(destIds)
#config["services"][serviceName]["relationships"]=relations
project["relationships"]=relations
#return
#printMessage(relationships)
#for rc in relationships:
# rc_path = ws + "\\" + rc
# des_rc = arcpy.Describe(rc_path)
# printMessage(des_rc.originClassNames)
#rc_list = [c.name for c in arcpy.Describe(workspace).children if c.datatype == "RelationshipClass"]
#for rc in rc_list:
#rc_path = workspace + "\\" + rc
#des_rc = arcpy.Describe(rc_path)
#origin = des_rc.originClassNames
#destination = des_rc.destinationClassNames
#mxd.activeDataFrame=dataFrame
mxd.activeView = dataFrame.name
arcpy.RefreshActiveView()
#out_file_name = r"c:\thumbnails\{basename}.png".format(basename=os.path.basename(featureclass))
# Export "thumbnail" of data frame
#if the folder does not exist create it
if not os.path.exists(servicesDestinationPath+"/thumbnails/"):
os.makedirs(servicesDestinationPath+"/thumbnails/")
out_file_name = servicesDestinationPath + "/thumbnails/" + serviceName + ".png"
arcpy.mapping.ExportToPNG(mxd, out_file_name, dataFrame, 200, 133)
#dataFrame = arcpy.mapping.ListDataFrames(mxd, "*")[0]
#if dataFrame != mxd.activeDataFrame:
# printMessage("Active data frame is not the first data frame")
feature_services['folders'].append(serviceName)
#now set path to serviceName folder
#destinationPath = servicesDestinationPath + "/data" #+ serviceName
#print destinationPath
#printMessage("Spatial JSON destination path: " + servicesDestinationPath)
#if the folder does not exist create it
#if not os.path.exists(destinationPath):
# os.makedirs(destinationPath)
rootService_json={"folders": [], "services":[{"name":serviceName,"type":"FeatureServer","url":"http://"+serverName+"/rest/services/"+serviceName+"/FeatureServer"},{"name":serviceName,"type":"MapServer"}], "currentVersion": arcpy.GetInstallInfo()['Version']}
file = saveJSON(servicesDestinationPath + "/"+serviceName+".json",rootService_json)
LoadService(sqliteDb,serviceName,serviceName, "",-1,"",file)
#analysis = arcpy.mapping.AnalyzeForMSD(mxd)
#
#for key in ('messages', 'warnings', 'errors'):
# printMessage( "----" + key.upper() + "---")
# vars = analysis[key]
# for ((message, code), layerlist) in vars.iteritems():
# printMessage( " " + message + " (CODE %i)" % code)
# printMessage( " applies to:")
# for layer in layerlist:
# printMessage( layer.name)
# printMessage("")
# sddraft = templatePath + serviceName + '.sddraft'
# sd = templatePath + serviceName + '.sd'
# summary = 'Sample output'
# tags = 'county, counties, population, density, census'
#
# # create service definition draft
# analysis = arcpy.mapping.CreateMapSDDraft(mxd, sddraft, serviceName, 'ARCGIS_SERVER')
#
# for key in ('messages', 'warnings', 'errors'):
# printMessage("----" + key.upper() + "---")
# vars = analysis[key]
# for ((message, code), layerlist) in vars.iteritems():
# printMessage(" " + message + " (CODE %i)" % code)
# printMessage(" applies to:")
# for layer in layerlist:
# printMessage(layer.name)
# printMessage("")
#
# printMessage("")
# printMessage("")
# #arcpy.StageService_server(sddraft, sd)
#
# # stage and upload the service if the sddraft analysis did not contain errors
# if analysis['errors'] == {}:
# # Execute StageService
# arcpy.StageService_server(sddraft, sd)
# # Execute UploadServiceDefinition
# #arcpy.UploadServiceDefinition_server(sd, con)
# else:
# # if the sddraft analysis contained errors, display them
# #arcpy.StageService_server(sddraft, sd)
# printMessage(analysis['errors'])
# #print analysis['errors']
#arcpy.mapping.ConvertToMSD(mxd,toolkitPath+"/output.msd",dataFrame, "NORMAL", "NORMAL")
#mxde = MxdExtras(mxd)
#for lyr in mxde.itervalues():
# printMessage("Layer Name: " + lyr.name )
# printMessage("Layer Symbology Field Name: " + lyr.symbologyFieldName)
oldspatialref = dataFrame.spatialReference
coordinateSystem = 'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.017453292519943295]]'
#set to wgs84
dataFrame.spatialReference = coordinateSystem
#get coors of extent center in new coordinate system
x = (dataFrame.extent.XMin + dataFrame.extent.XMax)/2
y = (dataFrame.extent.YMin + dataFrame.extent.YMax)/2
#printMessage(str(dataFrame.extent.XMin) + "," + str(dataFrame.extent.YMin) + "," + str(dataFrame.extent.XMax) + "," + str(dataFrame.extent.YMax))
xmin_geo=dataFrame.extent.XMin
xmax_geo=dataFrame.extent.XMax
ymin_geo=dataFrame.extent.YMin
ymax_geo=dataFrame.extent.YMax
# set dataframe spatial ref back
dataFrame.spatialReference = oldspatialref
output = {
"extent": {
"xmin": dataFrame.extent.XMin,
"ymin": dataFrame.extent.YMin,
"xmax": dataFrame.extent.XMax,
"ymax": dataFrame.extent.YMax
},
"scale": dataFrame.scale,
"rotation": dataFrame.rotation,
"spatialReference": {"wkid": dataFrame.spatialReference.PCSCode}
}
result=copy.deepcopy(baseResult) # deep copy
result['snippet']=summary
result['title']=dataFrame.description
result['id']=serviceName
#result['extent']=[0,0]
result['extent'][0]=[0,0]
result['extent'][1]=[0,0]
result['extent'][0][0]=xmin_geo
result['extent'][0][1]=ymin_geo
result['extent'][1][0]=xmax_geo
result['extent'][1][1]=ymax_geo
result['owner']=username
result['created']=created_ts
result['modified']=created_ts
if tags!="":
result['tags']=tags.split(",")
search_json['results'].append(result)
#result = result + 1
#only need to update the operationalLayers
content_items_json=openJSON(templatePath + "/content.items.data.json")
opLayers = getOperationalLayers(operationalLayers,serverName,serviceName,symbols)
opTables = getTables(operationalTables,serverName,serviceName,len(opLayers))
#need to update id (unique id=serviceName + layer name) and itemId (should be the serviceName)
content_items_json['operationalLayers']=copy.deepcopy(opLayers) # deep copy
content_items_json['tables']=copy.deepcopy(opTables)
for lyr in content_items_json['operationalLayers']:
lyr["itemId"]=serviceName
lyr["id"]=serviceName+"_"+lyr["title"].replace(" ","_")
for lyr in content_items_json['tables']:
lyr["itemId"]=serviceName
lyr["id"]=serviceName+"_"+lyr["title"].replace(" ","_")
file = saveJSON(servicesDestinationPath + "/content.data.json",content_items_json)
LoadService(sqliteDb,serviceName,"content","", -1,"data",file)
content_items_json=openJSON(templatePath + "/content.items.json")
#content_items_json["id"]=title
content_items_json["id"]=serviceName
content_items_json["name"]=None
content_items_json["owner"]=username
content_items_json["created"]=created_ts
content_items_json["modified"]=created_ts
content_items_json["title"]=title
content_items_json["snippet"]=summary
content_items_json["description"]=description
content_items_json['extent'][0][0]=xmin_geo
content_items_json['extent'][0][1]=ymin_geo
content_items_json['extent'][1][0]=xmax_geo
content_items_json['extent'][1][1]=ymax_geo
content_items_json["url"]=None
#content_items_json["type"]="Feature Service"
#content_items_json["url"]="http://"+serverName+"/rest/services/"+serviceName+"/FeatureServer"
file=saveJSON(servicesDestinationPath + "/content.items.json",content_items_json)
LoadService(sqliteDb,serviceName,"content","", -1,"items",file)
#create JSON description of all services. Each dataframe is a service for this application.
featureserver_json={
"currentVersion":arcpy.GetInstallInfo()['Version'],
"services": [{
"name":serviceName,
"type":"FeatureServer",
"url": "http://"+serverName + "/arcgis/rest/services/"+serviceName+"/FeatureServer"
}]
}
#file=saveJSON(servicesDestinationPath + "/FeatureServer.json",featureserver_json)
#LoadService(sqliteDb,serviceName,"FeatureServer", -1,"",file)
#create JSON description of all layers in the service.
featureserver_json=openJSON(templatePath + "/name.FeatureServer.json")
featureserver_json['initialExtent']['xmin']=dataFrame.extent.XMin
featureserver_json['initialExtent']['ymin']=dataFrame.extent.YMin
featureserver_json['initialExtent']['xmax']=dataFrame.extent.XMax
featureserver_json['initialExtent']['ymax']=dataFrame.extent.YMax
featureserver_json['fullExtent']['xmin']=dataFrame.extent.XMin
featureserver_json['fullExtent']['ymin']=dataFrame.extent.YMin
featureserver_json['fullExtent']['xmax']=dataFrame.extent.XMax
featureserver_json['fullExtent']['ymax']=dataFrame.extent.YMax
featureserver_json['layers'] = getLayers(operationalLayers)
featureserver_json['tables']=operationalTablesObj
file=saveJSON(servicesDestinationPath + "/FeatureServer.json",featureserver_json)
LoadService(sqliteDb,serviceName,"FeatureServer", "",-1,"",file)
maps_json=openJSON(templatePath + "/name.MapServer.json")
maps_json['initialExtent']['xmin']=dataFrame.extent.XMin
maps_json['initialExtent']['ymin']=dataFrame.extent.YMin
maps_json['initialExtent']['xmax']=dataFrame.extent.XMax
maps_json['initialExtent']['ymax']=dataFrame.extent.YMax
maps_json['fullExtent']['xmin']=dataFrame.extent.XMin
maps_json['fullExtent']['ymin']=dataFrame.extent.YMin
maps_json['fullExtent']['xmax']=dataFrame.extent.XMax
maps_json['fullExtent']['ymax']=dataFrame.extent.YMax
maps_json['layers'] = featureserver_json['layers']
maps_json['server']=serverName
maps_json['name']=serviceName
maps_json['mapName']=serviceName
maps_json['tables']=operationalTablesObj
file=saveJSON(servicesDestinationPath + "/MapServer.json",maps_json)
LoadService(sqliteDb,serviceName,"MapServer","", -1,"",file)
minx=str(dataFrame.extent.XMin)
miny=str(dataFrame.extent.YMin)
maxx=str(dataFrame.extent.XMax)
maxy=str(dataFrame.extent.YMax)
serviceitems_json=openJSON(templatePath + "/GDB_ServiceItems.json")
serviceitems_json["name"]=title
serviceitems_json["serviceDescription"]=summary
serviceitems_json["description"]=description
serviceitems_json['initialExtent']['xmin']=dataFrame.extent.XMin
serviceitems_json['initialExtent']['ymin']=dataFrame.extent.YMin
serviceitems_json['initialExtent']['xmax']=dataFrame.extent.XMax
serviceitems_json['initialExtent']['ymax']=dataFrame.extent.YMax
serviceitems_json['fullExtent']['xmin']=dataFrame.extent.XMin
serviceitems_json['fullExtent']['ymin']=dataFrame.extent.YMin
serviceitems_json['fullExtent']['xmax']=dataFrame.extent.XMax
serviceitems_json['fullExtent']['ymax']=dataFrame.extent.YMax
createReplica(mxd,dataFrame,allData,replicaDestinationPath,toolkitPath,username,serviceName,serverName,minx,miny,maxx,maxy,relationshipList,layerIds,serviceitems_json)
#create a JSON service file for each feature layer -- broken ---
serviceRep=[]
id=0
for lyr in operationalLayers:
desc = arcpy.Describe(lyr)
if hasattr(desc, "layer"):
featureName=os.path.basename(desc.layer.catalogPath)
else:
featureName=os.path.basename(desc.catalogPath)
printMessage("Loading layer: "+lyr.name+": " + featureName)
feature_json=openJSON(templatePath + "/name.FeatureServer.id.json")
feature_json['defaultVisibility']=lyr.visible
feature_json['description'] = lyr.description
feature_json['fields']=getFields(lyr)
#type=esriFieldTypeOID
#for i in feature_json:
# printMessage(i + ": " + str(feature_json[i]))
#printMessage(feature_json['displayField'])
#if lyr.showLabels:
lbl=""
if lyr.supports("LABELCLASSES"):
for lblclass in lyr.labelClasses:
lblclass.showClassLabels = True
#feature_json.displayField
lbl=lblclass.expression.replace("[","").replace("]","")
#lblclass.expression = " [Label]"
if lbl!="":
feature_json['displayField']=lbl
else:
feature_json['displayField']=getDisplayField(feature_json['fields'])
if desc.shapeType:
if desc.shapeType=='Polygon':
feature_json['geometryType']='esriGeometryPolygon'
feature_json['templates'][0]['drawingTool']="esriFeatureEditToolPolygon"
elif desc.shapeType=='Polyline':
feature_json['geometryType']='esriGeometryPolyline'
feature_json['templates'][0]['drawingTool']="esriFeatureEditToolPolyline"
elif desc.shapeType=='Point':
feature_json['geometryType']='esriGeometryPoint'
elif desc.shapeType=='MultiPoint':
feature_json['geometryType']='esriGeometryMultiPoint'
feature_json['id']=layerIds[lyr.name] #id
feature_json['name']=lyr.name
if desc.hasOID:
feature_json['objectIdField']=desc.OIDFieldName
feature_json['objectIdFieldName']=desc.OIDFieldName
if desc.hasGlobalID:
feature_json['globalIdField'] = desc.globalIDFieldName
feature_json['globalIdFieldName']=desc.globalIDFieldName
else:
del feature_json['globalIdField']
#del feature_json['globalIdFieldName']
feature_json['indexes']=getIndexes(lyr)
feature_json['minScale']=lyr.minScale
feature_json['maxScale']=lyr.maxScale
#bad below, should be Feature Layer, not FeatureLayer
#feature_json['type']=desc.dataType #'Feature Layer'
feature_json['extent']['xmin']=desc.extent.XMin
feature_json['extent']['ymin']=desc.extent.YMin
feature_json['extent']['xmax']=desc.extent.XMax
feature_json['extent']['ymax']=desc.extent.YMax
#feature_json['indexes']=[]
feature_json['templates'][0]['name']=serviceName
attributes={}
for field in feature_json['fields']:
#printMessage(field['name'])
if field['editable']:
attributes[ field['name'] ]=None
feature_json['templates'][0]['prototype']['attributes']=attributes
#feature_json['drawingInfo']['renderer']['symbol']=getSymbol(lyr)
#feature_json['relationships']=getRelationships(lyr,id,len(operationalLayers),operationalTables,relationshipObj)
try:
feature_json['relationships']=relationshipObj[featureName] #getRelationships(lyr,relationshipObj)
except:
pass
feature_json['drawingInfo']=getSymbol(lyr,symbols[featureName]["symbols"],lyr.name)
#set editor tracking fields
editorTracking={}
if desc.editorTrackingEnabled:
editorTracking['creationDateField']=desc.createdAtFieldName
editorTracking['creatorField']=desc.creatorFieldName
editorTracking['editDateField']=desc.editedAtFieldName
editorTracking['editorField']=desc.editorFieldName
feature_json['editFieldsInfo']=editorTracking
else:
del feature_json['editFieldsInfo']
feature_json['editingInfo']={"lastEditDate":created_ts}
if arcpy.Exists(rootFGDB+"/"+featureName+"__ATTACH"):
feature_json['hasAttachments']=True
feature_json['advancedQueryCapabilities']['supportsQueryAttachments']=True
feature_json['attachmentProperties']=[{"name":"name","isEnabled":True},{"name":"size","isEnabled":True},{"name":"contentType","isEnabled":True},{"name":"keywords","isEnabled":True}]
else:
feature_json['hasAttachments']=False
#getSymbol(lyr,symbols[featureName],lyr.name)
#opLayers = content_items_json['operationalLayers']=getOperationalLayers(operationalLayers,serverName,serviceName)
file=saveJSON(servicesDestinationPath + "/FeatureServer."+str(layerIds[lyr.name])+".json",feature_json)
LoadService(sqliteDb,serviceName,"FeatureServer",lyr.name, layerIds[lyr.name],"",file)
# Build the per-layer MapServer JSON from the template, copying the
# layer-specific pieces out of the FeatureServer document assembled above.
mapserver_json = openJSON(templatePath + "/name.MapServer.id.json")
mapserver_json['indexes'] = feature_json['indexes']
mapserver_json['extent'] = feature_json['extent']
mapserver_json['fields'] = feature_json['fields']
mapserver_json['templates'] = feature_json['templates']
mapserver_json['drawingInfo'] = feature_json['drawingInfo']
mapserver_json['geometryType'] = feature_json['geometryType']
# BUG FIX: the original passed feature_json to saveJSON here, discarding the
# mapserver_json document it had just assembled; write the MapServer JSON.
file = saveJSON(servicesDestinationPath + "/MapServer." + str(layerIds[lyr.name]) + ".json", mapserver_json)
LoadService(sqliteDb, serviceName, "MapServer", lyr.name, layerIds[lyr.name], "", file)
#save replica file
feature_json=openJSON(templatePath + "/name.FeatureServer.id.json")
#steps: save layer to blank mxd, save it, run arcpy.CreateRuntimeContent on mxd
createSingleReplica(templatePath,dataFrame,lyr,replicaDestinationPath,toolkitPath,feature_json,serverName,serviceName,username,id)
#save mapserver .map file
saveMapfile(mapfileDestinationPath + "/"+lyr.name+".map",lyr,desc,dataDestinationPath,mapserver_json)
id = id+1
#create a JSON geometry file for each feature layer
id=0
globalFields=[]
valid_fields =[]# ["OBJECTID","GlobalID","GlobalGUID","has_permittee"]
for lyr in operationalLayers:
desc = arcpy.Describe(lyr)
if hasattr(desc, "layer"):
featureName=os.path.basename(desc.layer.catalogPath)
inFeaturesGDB=desc.layer.path
else:
featureName=os.path.basename(desc.catalogPath)
inFeaturesGDB=desc.path
if sqliteDb:
saveToSqlite(lyr,sqliteDb)
if arcpy.Exists(inFeaturesGDB+"/"+featureName+"__ATTACH"):
saveToSqlite(inFeaturesGDB+"/"+featureName+"__ATTACH",sqliteDb)
if pg:
saveToPg(lyr,pg)
if arcpy.Exists(inFeaturesGDB+"/"+featureName+"__ATTACH"):
saveAttachTableToPg(inFeaturesGDB,featureName,"__ATTACH",pg)
fSet = arcpy.FeatureSet()
fSet.load(desc.dataElement.catalogPath)
fdesc = arcpy.Describe(fSet)
#printMessage(fdesc.json)
dataName = os.path.basename(desc.dataElement.catalogPath)
layerObj={"name":lyr.name,"data":dataName}
layerObj["id"]=layerIds[lyr.name]
if desc.relationshipClassNames:
for j,rel in enumerate(desc.relationshipClassNames):
relDesc = arcpy.Describe(desc.path +"/"+rel)
for i in relDesc.originClassKeys:
#if i[1]=="OriginPrimary":
if i[1]=="OriginForeign":
layerObj["joinField"]=i[0]
#globalFields.append(layerObj["joinField"])
valid_fields.append(layerObj["joinField"])
#fields = copy.deepcopy(feature_json['fields'])
feature_json = json.loads(fdesc.json)
feature_json['fields']=getFields(lyr)
#feature_json=openJSON(templatePath + "/name.FeatureServer.id.query.json")
#feature_json['features']=getFeatures(lyr)
#feature_json['fields']=getFields(lyr)
#if desc.shapeType:
# if desc.shapeType=='Polygon':
# feature_json['geometryType']='esriGeometryPolygon'
# elif desc.shapeType=='Polyline':
# feature_json['geometryType']='esriGeometryPolyline'
# elif desc.shapeType=='Point':
# feature_json['geometryType']='esriGeometryPoint'
# elif desc.shapeType=='MultiPoint':
# feature_json['geometryType']='esriGeometryMultiPoint'
layerObj["shapeFieldName"]=desc.shapeFieldName
if desc.hasOID:
feature_json['objectIdField']=desc.OIDFieldName
layerObj["oidname"]=desc.OIDFieldName
feature_json['objectIdFieldName']=desc.OIDFieldName
valid_fields.append(desc.OIDFieldName)
if desc.hasGlobalID:
feature_json['globalIdField'] = desc.globalIDFieldName
feature_json['globalIdFieldName']=desc.globalIDFieldName
layerObj["globaloidname"]=desc.globalIDFieldName
globalFields.append(desc.globalIDFieldName)
valid_fields.append(desc.globalIDFieldName)
#else:
#del feature_json['globalIdField']
#del feature_json['globalIdFieldName']
for field in desc.fields:
if field.type == 'Guid':
globalFields.append(field.name)
try:
if symbols[featureName]["fields"]:
valid_fields.append(symbols[featureName]["fields"])
except Exception as e:
pass
layerObj["type"]="layer"
#remove the defaultValue is it is NEWID() WITH VALUES
#for i in feature_json['fields']:
# try:
# if i.defaultValue=="NEWID() WITH VALUES":
# i.defaultValue=None
# except Exception as e:
# pass
#globalFields = ["GlobalID","GlobalGUID"]
#OBS! must remove the curly brackets around the globalId and GlobalGUID attributes
for i in feature_json['features']:
for j in i['attributes']:
if j in globalFields:
#printMessage(j)
#printMessage(i['attributes'][j])
i['attributes'][j]=i['attributes'][j].replace("{","").replace("}","")
printMessage("Saving layer " + lyr.name + "(" + str(layerIds[lyr.name]) + ") to JSON")
file=saveJSON(servicesDestinationPath + "/FeatureServer."+str(layerIds[lyr.name])+".query.json",feature_json)
LoadService(sqliteDb,serviceName,"FeatureServer",lyr.name, layerIds[lyr.name],"query",file)
#set editor tracking fields
editorTracking={}
if desc.editorTrackingEnabled:
editorTracking['creationDateField']=desc.createdAtFieldName
editorTracking['creatorField']=desc.creatorFieldName
editorTracking['editDateField']=desc.editedAtFieldName
editorTracking['editorField']=desc.editorFieldName
feature_json['editFieldsInfo']=editorTracking
#save to config too for easy access
#tableObj["editFieldsInfo"]=editorTracking
else:
try:
del feature_json['editFieldsInfo']
except Exception as e:
pass
feature_json['editingInfo']={"lastEditDate":created_ts}
if arcpy.Exists(rootFGDB+"/"+featureName+"__ATTACH"):
feature_json['hasAttachments']=True
feature_json['advancedQueryCapabilities']={}
feature_json['advancedQueryCapabilities']['supportsQueryAttachments']=True
feature_json['attachmentProperties']=[{"name":"name","isEnabled":True},{"name":"size","isEnabled":True},{"name":"contentType","isEnabled":True},{"name":"keywords","isEnabled":True}]
else:
feature_json['hasAttachments']=False
#create file containing objectid,globalid and any field used for symbology
if False:
fields = []
for i in feature_json['fields']:
if i['name'] in valid_fields:
fields.append(i)
#try:
# feature_json['fields'].remove(i)
# del i
#except:
# pass
#del i
#del feature_json['fields'][i]
feature_json['fields'] = fields
for i in feature_json['features']:
for j in i['attributes'].keys():
if j not in valid_fields:
del i['attributes'][j]
#del feature_json['features']['attributes'][j]
file=saveJSON(servicesDestinationPath + "/FeatureServer."+str(id)+".outfields.json",feature_json)
LoadService(sqliteDb,serviceName,"FeatureServer", "",id,"outfields",file)
#create a JSON OBJECTID file used in ArcGIS for showing the attribute table
#remove all fields except OBJECTID
#feature_json['fields']=[{"alias":"OBJECTID","name":"OBJECTID","type":"esriFieldTypeInteger","alias":"OBJECTID","sqlType":"sqlTypeOther","defaultValue":None,"domain":None}]
#OBJECTID,GlobalID,has_permittee
if desc.hasOID:
#feature_json['objectIdField']=desc.OIDFieldName
feature_json['fields']=[
{"alias":desc.OIDFieldName,"name":desc.OIDFieldName,"type":"esriFieldTypeOID","sqlType":"sqlTypeOther","defaultValue":None,"domain":None,"nullable":False,"editable":False}
]
else:
feature_json['fields']=[
{"alias":"OBJECTID","name":"OBJECTID","type":"esriFieldTypeOID","sqlType":"sqlTypeOther","defaultValue":None,"domain":None,"nullable":False,"editable":False}
]
features=[]
#for i in feature_json['fields']:
# if i['name'] != 'OBJECTID':
# del i
# #del feature_json['fields'][i]
for i in feature_json['features']:
if desc.OIDFieldName:
features.append({"attributes":{desc.OIDFieldName:i['attributes'][desc.OIDFieldName]}})
feature_json['features']=features
#for j in i['attributes']:
# if j == 'OBJECTID':
# attribute={"OBJECTID":j}
# #del j
#for j in feature_json['features'][i]['attributes']:
# if feature_json['features'][i]['attributes'][j]['name'] != 'OBJECTID':
# del feature_json.features[i]['attributes'][j]
file=saveJSON(servicesDestinationPath + "/FeatureServer."+str(layerIds[lyr.name])+".objectid.json",feature_json)
LoadService(sqliteDb,serviceName,"FeatureServer",lyr.name, layerIds[lyr.name],"objectid",file)
layerObj["itemId"]= lyr.name.replace(" ","_")+str(layerIds[lyr.name])
if desc.editorTrackingEnabled:
#save to config too for easy access
try:
layerObj["editFieldsInfo"]=feature_json['editFieldsInfo']
except Exception as e:
pass
#config["services"][serviceName]["layers"][str(layerIds[lyr.name])]=layerObj
project["layers"][str(layerIds[lyr.name])]=layerObj
id = id+1
#now save any tables
globalFields=[]
valid_fields =[]
for tbl in operationalTables:
desc = arcpy.Describe(tbl)
#featureName=os.path.basename(desc.catalogPath)
if hasattr(desc, "layer"):
featureName=os.path.basename(desc.layer.catalogPath)
inFeaturesGDB=desc.layer.path
else:
featureName=os.path.basename(desc.catalogPath)
inFeaturesGDB=desc.path
if sqliteDb:
saveToSqlite(tbl,sqliteDb)
if arcpy.Exists(inFeaturesGDB+"/"+featureName+"__ATTACH"):
saveToSqlite(inFeaturesGDB+"/"+featureName+"__ATTACH",sqliteDb)
if pg:
saveToPg(tbl,pg)
if arcpy.Exists(inFeaturesGDB+"/"+featureName+"__ATTACH"):
saveAttachTableToPg(inFeaturesGDB,featureName,"__ATTACH",pg)
feature_json=openJSON(templatePath + "/name.RecordSet.id.json")
#feature_json['description'] = tbl.description
tableObj={"name":tbl.name,"data":featureName}
feature_json['fields']=getFields(tbl)
feature_json['displayField']=getDisplayField(feature_json['fields'])
#feature_json['relationships']=getRelationships(tbl,id,len(operationalLayers),operationalTables,relationshipObj)
#feature_json['relationships']=getRelationships(lyr,relationshipObj)
feature_json['id']=layerIds[tbl.name]
feature_json['name']=tbl.name
if desc.hasOID:
feature_json['objectIdField']=desc.OIDFieldName
feature_json['objectIdFieldName']=desc.OIDFieldName
tableObj["oidname"]=desc.OIDFieldName
valid_fields.append(desc.OIDFieldName)
if desc.hasGlobalID:
feature_json['globalIdField'] = desc.globalIDFieldName
feature_json['globalIdFieldName']=desc.globalIDFieldName
tableObj["globaloidname"]=desc.globalIDFieldName
globalFields.append(desc.globalIDFieldName)
valid_fields.append(desc.globalIDFieldName)
else:
del feature_json['globalIdField']
#del feature_json['globalIdFieldName']
for field in desc.fields:
if field.type == 'Guid':
globalFields.append(field.name)
tableObj["type"]="table"
tableObj["id"]=layerIds[tbl.name]
if desc.relationshipClassNames:
for j,rel in enumerate(desc.relationshipClassNames):
relDesc = arcpy.Describe(desc.path +"/"+rel)
for i in relDesc.originClassKeys:
#if i[1]=="OriginPrimary":
if i[1]=="OriginForeign":
tableObj["joinField"]=i[0]
#globalFields.append(tableObj["joinField"])
valid_fields.append(tableObj["joinField"])
feature_json['indexes']=getIndexes(tbl)
feature_json['templates'][0]['name']=serviceName
attributes={}
for field in feature_json['fields']:
#printMessage(field['name'])
if field['editable']:
attributes[ field['name'] ]=None
feature_json['templates'][0]['prototype']['attributes']=attributes
#printMessage(tbl.name+": " + featureName)
feature_json['relationships']=relationshipObj[featureName]
#set editor tracking fields
editorTracking={}
if desc.editorTrackingEnabled:
editorTracking['creationDateField']=desc.createdAtFieldName
editorTracking['creatorField']=desc.creatorFieldName
editorTracking['editDateField']=desc.editedAtFieldName
editorTracking['editorField']=desc.editorFieldName
feature_json['editFieldsInfo']=editorTracking
#save to config too for easy access
tableObj["editFieldsInfo"]=editorTracking
else:
try:
del feature_json['editFieldsInfo']
except Exception as e:
pass
feature_json['editingInfo']={"lastEditDate":created_ts}
if arcpy.Exists(rootFGDB+"/"+featureName+"__ATTACH"):
feature_json['hasAttachments']=True
feature_json['advancedQueryCapabilities']['supportsQueryAttachments']=True
feature_json['attachmentProperties']=[{"name":"name","isEnabled":True},{"name":"size","isEnabled":True},{"name":"contentType","isEnabled":True},{"name":"keywords","isEnabled":True}]
else:
feature_json['hasAttachments']=False
file=saveJSON(servicesDestinationPath + "/FeatureServer."+str(layerIds[tbl.name])+".json",feature_json)
LoadService(sqliteDb,serviceName,"FeatureServer", tbl.name,layerIds[tbl.name],"",file)
tableObj["itemId"]= tbl.name.replace(" ","_")+str(layerIds[tbl.name])
#config["services"][serviceName]["layers"][str(layerIds[tbl.name])]=tableObj
project["layers"][str(layerIds[tbl.name])]=tableObj
#fields = copy.deepcopy(feature_json['fields'])
fSet = arcpy.RecordSet()
fSet.load(desc.catalogPath)
fdesc = arcpy.Describe(fSet)
#printMessage(fdesc.json)
feature_json = json.loads(fdesc.json)
#replace fields with full fields
feature_json['fields']=getFields(tbl)
#remove the defaultValue is it is NEWID() WITH VALUES
#for i in feature_json['fields']:
# try:
# if i.defaultValue=="NEWID() WITH VALUES":
# i.defaultValue=None
# except Exception as e:
# pass
#OBS! must remove the curly brackets around the globalId and GlobalGUID attributes
for i in feature_json['features']:
for j in i['attributes']:
if j in globalFields:
i['attributes'][j]=i['attributes'][j].replace("{","").replace("}","")
#dataName = os.path.basename(desc.dataElement.catalogPath)
#layerObj={"name":lyr.name,"data":dataName}
printMessage("Saving table " + tbl.name + "("+str(layerIds[tbl.name]) + ") to JSON")
file=saveJSON(servicesDestinationPath + "/FeatureServer."+str(layerIds[tbl.name])+".query.json",feature_json)
LoadService(sqliteDb,serviceName,"FeatureServer",tbl.name, layerIds[tbl.name],"query",file)
#valid_fields = ["OBJECTID","GlobalID","GlobalGUID","has_permittee"]
if False:
fields = []
for i in feature_json['fields']:
if i['name'] in valid_fields:
fields.append(i)
#feature_json['fields'].remove(i)
#del i
feature_json['fields'] = fields
for i in feature_json['features']:
for j in i['attributes'].keys():
if j not in valid_fields:
del i['attributes'][j]
file=saveJSON(servicesDestinationPath + "/FeatureServer."+str(layerIds[tbl.name])+".outfields.json",feature_json)
LoadService(sqliteDb,serviceName,"FeatureServer",tbl.name, layerIds[tbl.name],"outfields",file)
id = id+1
#export all layers to shapefiles for rendering in mapserver
for lyr in operationalLayers:
desc = arcpy.Describe(lyr)
if desc.dataType == "FeatureLayer":
printMessage("Exporting layer to shapefile: "+ lyr.name)
arcpy.FeatureClassToFeatureClass_conversion(desc.dataElement.catalogPath,
dataDestinationPath,
lyr.name+".shp")
id = id+1
#now save the search results
search_json['total']=len(search_json['results'])
file=saveJSON(baseDestinationPath + "/search.json",search_json)
LoadCatalog(sqliteDb,"search", "",file)
#save root FeatureServer.json file
file=saveJSON(baseDestinationPath + "/FeatureServer.json",feature_services)
LoadCatalog(sqliteDb,"FeatureServer", "",file)
file=saveJSON(baseDestinationPath + "/MapServer.json",feature_services)
LoadCatalog(sqliteDb,"MapServer", "",file)
file=saveJSON(baseDestinationPath + "/config.json",config)
LoadCatalog(sqliteDb,"config", "",file)
if pg:
saveSqliteToPG(["catalog","services"],sqliteDb,pg)
saveSqliteServiceTablesToPG(replicaDestinationPath+"/"+serviceName+".geodatabase",pg)
#conn.close()
printMessage("Finished")
def openJSON(name):
    """Load and return a JSON template from the file at ``name``.

    The template is only read, so the file is opened in read-only mode
    (the original "r+" write access was unnecessary), and the explicit
    f.close() is dropped because the ``with`` block already closes it.
    """
    printMessage("Loading JSON template: " + name)
    with open(name, "r") as f:
        return json.load(f)
def saveJSON(name, json_data, indent=None):
    """Serialize ``json_data`` to JSON, write it to file ``name`` and return the string.

    Parameters:
        name: destination file path.
        json_data: any JSON-serializable object.
        indent: optional pretty-printing indent, passed straight through to
            json.dumps; the default (None) keeps the original compact output,
            so existing callers are unaffected.
    """
    data = json.dumps(json_data, indent=indent)
    with open(name, 'w') as f:
        f.write(data)
    return data
def clearSelections(mxd):
    """Clear the attribute selection on every local feature layer in ``mxd``.

    Service layers and non-feature layers are skipped; only layers backed by
    local data get a CLEAR_SELECTION applied.
    """
    for frame in arcpy.mapping.ListDataFrames(mxd):
        for layer in arcpy.mapping.ListLayers(mxd, "", frame):
            # Service layers have no local selection to clear.
            skip = layer.isServiceLayer or layer.supports("SERVICEPROPERTIES")
            if skip or not layer.isFeatureLayer:
                continue
            arcpy.SelectLayerByAttribute_management(layer, "CLEAR_SELECTION")
def getSymbology(mxd):
    """Convert ``mxd`` to a temporary MSD archive and harvest symbology metadata.

    Builds and returns a dict keyed by dataset name (falling back to
    "<xmlfile>_root" when a layer XML has no <Dataset> element). Each entry
    may carry: displayName, name, datasetType, displayField, symbols (the raw
    <Symbolizer> minidom node) and fields (the symbol field name, if found).
    NOTE(review): assumes every direct child of the document root is an
    element node — a text/whitespace child would make j.tagName raise;
    presumably the MSD XML is emitted without such nodes. Confirm if this
    is reused on hand-edited MSDs.
    """
    # Temporary MSD is written next to this script.
    msdPath = os.path.abspath(os.path.dirname(__file__)).replace("\\","/")+"/output.msd"
    #msdPath = self.mxdPath.replace(self.MXD_SUFFIX, self.MSD_SUFFIX)
    # Delete temporary msd if it exists
    if os.path.exists(msdPath):
        os.remove(msdPath)
    # Selections would otherwise be baked into the converted MSD.
    clearSelections(mxd)
    arcpy.mapping.ConvertToMSD(mxd,msdPath)
    msd_metadata={}
    msd_metadata["symbols"]={}
    # An MSD file is a zip archive of per-layer XML documents.
    zz = zipfile.ZipFile(msdPath)
    # Archive members that carry no layer symbology.
    EXCLUDED_FILE_NAMES = ["DocumentInfo.xml", "GISProject.xml", "layers/layers.xml"]
    for fileName in (fileName for fileName in zz.namelist() if not fileName in EXCLUDED_FILE_NAMES):
        printMessage("Opening: " + fileName)
        dom = parse(zz.open(fileName))
        #get Name, DisplayName, DatasetType, MinScale, MaxScale
        #get FeatureTable->DisplayField
        #FeatureTable->DataConnection->Dataset
        #child = dom.getElementsByTagName("Name")
        #displayName = dom.getElementsByTagName("DisplayName")
        #datasetType = dom.getElementsByTagName("DatasetType")
        #datasetType = dom.getElementsByTagName("DatasetType")
        # Key the metadata by dataset name; group/root XMLs without a
        # <Dataset> element get a synthetic "<file>_root" key instead.
        obj = dom.getElementsByTagName("Dataset")
        if len(obj) > 0:
            name=str(obj[0].childNodes[0].nodeValue)
        else:
            name=fileName.split(".")[0]+"_root"
        msd_metadata[name]={}
        #for j in dom.childNodes:
        #    if j.tagName == "Name":
        #        name = str(j.childNodes[0].nodeValue)
        #        msd_metadata["name"]={}
        # Walk the direct children of the document element and pick out the
        # handful of tags this exporter cares about.
        for j in dom.childNodes[0].childNodes:
            if j.tagName == "DisplayName":
                msd_metadata[name]["displayName"]=str(j.childNodes[0].nodeValue)
            elif j.tagName == "Name":
                msd_metadata[name]["name"]=str(j.childNodes[0].nodeValue)
            elif j.tagName == "DatasetType":
                msd_metadata[name]["datasetType"]=str(j.childNodes[0].nodeValue)
            elif j.tagName == "FeatureTable":
                obj = j.getElementsByTagName("DisplayField")
                msd_metadata[name]["displayField"]=str(obj[0].childNodes[0].nodeValue)
                #obj = j.getElementsByTagName("Dataset")
                #msd_metadata[name]["dataset"]=str(obj[0].childNodes[0].nodeValue)
                #get the next symbol
                #k = j.getElementsByTagName("Symbol")
                #for m in k:
                #type = geomtype[0].getAttribute("xsi:type")=="typens:CIMPolygonSymbol"
                #    if m.getAttribute("xsi:type")=="typens:CIMPointSymbol":
                #        obj['symbol'] = getPointSymbol(m)
                #    elif m.getAttribute("xsi:type")=="typens:CIMPolygonSymbol":
                #        obj['symbol']=getPolygonSymbol(m)
            #symb = dom.getElementsByTagName("Symbolizer")
            elif j.tagName=="Symbolizer":
                #if symb.length>0:
                #name=fileName.split(".")[0]
                #rootname = name.split("/")
                #if len(rootname)>1:
                #    name=rootname[1]
                #printMessage("Symbology found for : " + name + " length: " + str(symb.length))
                # Store the raw DOM node; callers render it later.
                msd_metadata[name]["symbols"]=j
                #now get field for symbols if exists
                # A <Fields><String> child names the attribute the renderer
                # classifies on (unique-value / class-break symbology).
                f = j.getElementsByTagName("Fields")
                if len(f)>0:
                    for m in f:
                        n = m.getElementsByTagName("String")
                        if len(n)>0:
                            msd_metadata[name]["fields"]=n[0].childNodes[0].nodeValue
                            printMessage("Found symbol field: " + msd_metadata[name]["fields"])
        #printMessage("Found: " + str(symb.length))
        #name,lyr = self.loadMsdLayerDom(dom)
        #if name != "":
        #    self[name] = lyr
    # NOTE(review): relies on refcounting to close the archive; zz.close()
    # would be the explicit equivalent.
    del zz
    return msd_metadata
def getLayers(opLayers):
    """Build the MapServer-style layer descriptor list for ``opLayers``.

    Each layer gets a sequential id starting at 0 (enumerate replaces the
    original hand-maintained counter). parentLayerId/subLayerIds are fixed
    values because this exporter does not model group layers.

    Returns a list of dicts with keys: name, id, parentLayerId,
    defaultVisibility, subLayerIds, minScale, maxScale.
    """
    return [
        {
            "name": lyr.name,
            "id": idx,
            "parentLayerId": -1,
            "defaultVisibility": lyr.visible,
            "subLayerIds": None,
            "minScale": lyr.minScale,
            "maxScale": lyr.maxScale,
        }
        for idx, lyr in enumerate(opLayers)
    ]
def createReplica(mxd,dataFrame,allData,replicaDestinationPath,toolkitPath,username,serviceName,serverName,minx,miny,maxx,maxy,relationshipList,layerIds,serviceItems):
global spatialite_path
arcpy.CreateRuntimeContent_management(mxd.filePath,
replicaDestinationPath + os.sep + serviceName,
serviceName,"#","#",
"FEATURE_AND_TABULAR_DATA","NON_OPTIMIZE_SIZE","ONLINE","PNG","1","#")
#OPTIMIZE_SIZE, NON_OPTIMIZE_SIZE
filenames = next(os.walk(replicaDestinationPath + "/"+serviceName+"/data/"))[2]
printMessage("Renamed " + replicaDestinationPath + "/"+serviceName+"/data/"+filenames[0]+" to "+ replicaDestinationPath+"/"+serviceName+".geodatabase")
#if offline geodatabase exists, must delete first
newFullReplicaDB=replicaDestinationPath+"/"+serviceName+".geodatabase"
try:
if os.path.exists(newFullReplicaDB):
os.rmdir(newFullReplicaDB)
except:
printMessage("Unable to remove old replica geodatabase")
os.rename(replicaDestinationPath + "/"+serviceName+"/data/"+filenames[0], newFullReplicaDB)
try:
os.rmdir(replicaDestinationPath + "/"+serviceName+"/data/")
os.rmdir(replicaDestinationPath + "/"+serviceName)
except:
printMessage("Unable to remove replica folders")
#get the creation sql string for each layer including __ATTACH tables
conn = sqlite3.connect(newFullReplicaDB)
c = conn.cursor()
#conn = sqlite3.connect("c:/massappraisal/colville/"+inFeaturesName+".sqlite")
#c = conn.cursor()
#c.execute("INSERT INTO catalog(name,type,json) VALUES(?,?,?)", (name,dtype,json))
#c.close()
#conn.commit()
#map(tuple, array.tolist())
creationDate = time.strftime("%Y-%m-%dT%H:%M:%S")
sql1=('INSERT INTO GDB_Items("ObjectID", "UUID", "Type", "Name", "PhysicalName", "Path", "Url", "Properties", "Defaults", "DatasetSubtype1", "DatasetSubtype2", "DatasetInfo1", "DatasetInfo2", "Definition", "Documentation", "ItemInfo", "Shape")'
" select MAX(ObjectID)+1, '{7B6EB064-7BF6-42C8-A116-2E89CD24A000}', '{5B966567-FB87-4DDE-938B-B4B37423539D}', 'MyReplica', 'MYREPLICA', 'MyReplica', '', 1, NULL, NULL, NULL, "
"'http://"+serverName+"/arcgis/rest/services/"+serviceName+"/FeatureServer', '"+username+"',"
"'<GPSyncReplica xsi:type=''typens:GPSyncReplica'' xmlns:xsi=''http://www.w3.org/2001/XMLSchema-instance'' xmlns:xs=''http://www.w3.org/2001/XMLSchema'' xmlns:typens=''http://www.esri.com/schemas/ArcGIS/10.3''>"
"<ReplicaName>MyReplica</ReplicaName><ID>1</ID><ReplicaID>{7b6eb064-7bf6-42c8-a116-2e89cd24a000}</ReplicaID>"
"<ServiceName>http://"+serverName+"/arcgis/rest/services/"+serviceName+"/FeatureServer</ServiceName>"
"<Owner>"+username+"</Owner>"
"<Role>esriReplicaRoleChild</Role><SyncModel>esriSyncModelPerLayer</SyncModel><Direction>esriSyncDirectionBidirectional</Direction><CreationDate>"+creationDate+"</CreationDate><LastSyncDate>1970-01-01T00:00:01</LastSyncDate>"
"<ReturnsAttachments>true</ReturnsAttachments><SpatialRelation>esriSpatialRelIntersects</SpatialRelation><QueryGeometry xsi:type=''typens:PolygonN''><HasID>false</HasID><HasZ>false</HasZ><HasM>false</HasM><Extent xsi:type=''typens:EnvelopeN''>"
"<XMin>"+minx+"</XMin><YMin>"+miny+"</YMin><XMax>"+maxx+"</XMax><YMax>"+maxy+"</YMax></Extent><RingArray xsi:type=''typens:ArrayOfRing''><Ring xsi:type=''typens:Ring''>"
"<PointArray xsi:type=''typens:ArrayOfPoint''>"
"<Point xsi:type=''typens:PointN''><X>"+minx+"</X><Y>"+miny+"</Y></Point><Point xsi:type=''typens:PointN''><X>"+maxx+"</X><Y>"+miny+"</Y></Point>"
"<Point xsi:type=''typens:PointN''><X>"+maxx+"</X><Y>"+maxy+"</Y></Point><Point xsi:type=''typens:PointN''><X>"+minx+"</X><Y>"+maxy+"</Y></Point>"
"<Point xsi:type=''typens:PointN''><X>"+minx+"</X><Y>"+miny+"</Y></Point></PointArray></Ring></RingArray>"
"<SpatialReference xsi:type=''typens:ProjectedCoordinateSystem''><WKT>PROJCS["WGS_1984_Web_Mercator_Auxiliary_Sphere",GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]],PROJECTION["Mercator_Auxiliary_Sphere"],PARAMETER["False_Easting",0.0],PARAMETER["False_Northing",0.0],PARAMETER["Central_Meridian",0.0],PARAMETER["Standard_Parallel_1",0.0],PARAMETER["Auxiliary_Sphere_Type",0.0],UNIT["Meter",1.0],AUTHORITY["EPSG",3857]]</WKT>"
"<XOrigin>-20037700</XOrigin><YOrigin>-30241100</YOrigin><XYScale>10000</XYScale><ZOrigin>-100000</ZOrigin><ZScale>10000</ZScale><MOrigin>-100000</MOrigin><MScale>10000</MScale><XYTolerance>0.001</XYTolerance><ZTolerance>0.001</ZTolerance><MTolerance>0.001</MTolerance><HighPrecision>true</HighPrecision><WKID>102100</WKID><LatestWKID>3857</LatestWKID></SpatialReference></QueryGeometry><GPSyncDatasets xsi:type=''typens:ArrayOfGPSyncDataset''>")
#lyrs=[]
#for lyr in arcpy.mapping.ListLayers(mxd, "", dataFrame):
# # Exit if the current layer is not a service layer.
# if lyr.isServiceLayer or lyr.supports("SERVICEPROPERTIES"): # or not lyr.visible
# continue
# lyrs.append(lyr)
#for lyr in arcpy.mapping.ListTableViews(mxd, "", dataFrame):
# lyrs.append(lyr)
#<RelationshipClassNames xsi:type="typens:Names"/>
#<ChangeTracked>false</ChangeTracked>
serviceItems["layers"]=[]
#OBS! must alter the OBJECTID field type from integer to int32
tables=""
id=0
idx=1
sql2=[]
sql3=[]
sql5=[]
for lyr in allData:
desc = arcpy.Describe(lyr)
if hasattr(desc, "layer"):
featureName=os.path.basename(desc.layer.catalogPath)
inFeaturesGDB=desc.layer.catalogPath.replace("\\","/")
else:
featureName=os.path.basename(desc.catalogPath)
inFeaturesGDB=os.path.dirname(desc.catalogPath).replace("\\","/")
useGeometry="false"
lyrtype = "esriDTTable"
svcType = "Table"
queryOption="esriRowsTypeNone"
oidName = desc.OIDFieldName
if tables=="":
tables=tables+'"'+featureName+'"'
else:
tables=tables+',"'+featureName+'"'
if hasattr(desc,"featureClass"):
lyrtype = "esriDTFeatureClass"
useGeometry="true"
svcType = "Feature Layer"
queryOption="esriRowsTypeFilter"
layer={
"name":lyr.name,
"id":id+8,
"layerId":layerIds[lyr.name],
"tableName":featureName,
"type":svcType,
"xssTrustedFields":""
}
sql5.append(('UPDATE "GDB_ServiceItems" set "DatasetName"="' + featureName + '" where "ItemId"='+str(layerIds[lyr.name])))
sqlCreation = "SELECT sql FROM sqlite_master WHERE type = 'table' AND name = ?"
c.execute(sqlCreation, (featureName,))
sql = c.fetchone()[0]
#printMessage(sql )
sql5.append(("alter table " + featureName + " rename to " + featureName + "_org"))
#remove trailing close paren
sql = sql[:-1]
#next line is important when doing lookups
sql = sql.replace(oidName +" integer",oidName + " int32")
sql = sql.replace("primary key ","")
sql = sql.replace(" not null","")
#gdb_transaction_time = 'gdb_transaction_time()'
#gdb_transaction_time = "strftime('%s', 'now')"
gdb_transaction_time = "julianday('now')"
#sql = sql.replace("OBJECTID integer","OBJECTID int32 check(typeof(OBJECTID) = 'integer' and OBJECTID >= -2147483648 and OBJECTID <= 2147483647)")
#sql = sql.replace("OBJECTID integer","OBJECTID int32 not null")
#sql = sql.replace("GlobalID uuidtext check(typeof(GlobalID) = 'text' and length(GlobalID) = 38) not null","GlobalID uuidtext check(typeof(GlobalID) = 'text' and length(GlobalID) = 38)")
sql = sql +", gdb_archive_oid integer primary key not null, gdb_from_date realdate check(typeof(gdb_from_date) = 'real' and gdb_from_date >= 0.0) default ("+gdb_transaction_time +"), gdb_to_date realdate check(typeof(gdb_to_date) = 'real' and gdb_to_date >= 0.0) default (julianday ('9999-12-31 23:59:59'))) "
#sql = sql +", gdb_archive_oid integer primary key not null, gdb_from_date realdate check(typeof(gdb_from_date) = 'real' and gdb_from_date >= 0.0), gdb_to_date realdate check(typeof(gdb_to_date) = 'real' and gdb_to_date >= 0.0))"
#sql = sql +", gdb_archive_oid integer primary key not null, gdb_from_date realdate check(typeof(gdb_from_date) = 'real' and gdb_from_date >= 0.0) not null default (gdb_transaction_time ()), gdb_to_date realdate check(typeof(gdb_to_date) = 'real' and gdb_to_date >= 0.0) not null default (julianday ('9999-12-31 23:59:59')))"
sql5.append(sql)
uuid = "(select upper('{' || substr(u,1,8)||'-'||substr(u,9,4)||'-4'||substr(u,13,3)||'-'||v||substr(u,17,3)||'-'||substr(u,21,12)||'}') from (select lower(hex(randomblob(16))) as u, substr('89ab',abs(random()) % 4 + 1, 1) as v))"
hasAttachments="false"
hasAttachmentsStr=""
if arcpy.Exists(inFeaturesGDB+"/"+featureName+"__ATTACH"):
hasAttachments="true"
hasAttachmentsStr = "<HasAttachments>"+hasAttachments+"</HasAttachments>"
layer["attachmentsTableName"]=inFeaturesGDB+"/"+featureName+"__ATTACH"
layer["attachmentsTableName"]=featureName+"__ATTACH"
dscfc = arcpy.Describe(inFeaturesGDB+"/"+featureName+"__ATTACH")
#if dscfc.hasOID == True:
# layer["attachmentsPrimaryKey"]=dscfc.OIDFieldName
#else:
layer["attachmentsPrimaryKey"]= "GlobalID"
serviceItems["layers"].append(layer)
dataSetId='\'||(SELECT ObjectId FROM "GDB_Items" Where Name=\'main.'+featureName+"\')||\'"
sql1=sql1+ ("<GPSyncDataset xsi:type=''typens:GPSyncDataset''><DatasetID>"+dataSetId+"</DatasetID><DatasetName>"+featureName+"</DatasetName><DatasetType>"+lyrtype+"</DatasetType>"
"<LayerID>"+str(layerIds[lyr.name])+"</LayerID><LayerName>"+lyr.name+"</LayerName><Direction>esriSyncDirectionBidirectional</Direction><ReplicaServerGen xsi:type=''xs:long''>53052</ReplicaServerGen><ReplicaClientDownloadGen xsi:type=''xs:long''>1000</ReplicaClientDownloadGen>"
"<ReplicaClientUploadGen xsi:type=''xs:long''>1000</ReplicaClientUploadGen><ReplicaClientAcknowledgeUploadGen xsi:type=''xs:long''>1000</ReplicaClientAcknowledgeUploadGen>"
"<UseGeometry>"+useGeometry+"</UseGeometry><IncludeRelated>true</IncludeRelated>"
"<QueryOption>"+queryOption+"</QueryOption>"+hasAttachmentsStr+"</GPSyncDataset>")
sql2.append(('INSERT INTO GDB_Items("ObjectID", "UUID", "Type", "Name", "PhysicalName", "Path", "Url", "Properties", "Defaults", "DatasetSubtype1", "DatasetSubtype2", "DatasetInfo1", "DatasetInfo2", "Definition", "Documentation", "ItemInfo", "Shape")'
" select MAX(ObjectID)+1, "+uuid+", '{D86502F9-9758-45C6-9D23-6DD1A0107B47}', '"+featureName+"', '"+featureName.upper()+"', 'MyReplica\\"+featureName+"', '', 1, NULL, NULL, NULL, NULL, NULL, "
"'<GPSyncDataset xsi:type=''typens:GPSyncDataset'' xmlns:xsi=''http://www.w3.org/2001/XMLSchema-instance'' xmlns:xs=''http://www.w3.org/2001/XMLSchema'' xmlns:typens=''http://www.esri.com/schemas/ArcGIS/10.3''>"
"<DatasetID>"+dataSetId+"</DatasetID>"
"<DatasetName>"+lyr.name+"</DatasetName>"
"<DatasetType>"+lyrtype+"</DatasetType><LayerID>"+str(layerIds[lyr.name])+"</LayerID><LayerName>"+lyr.name+"</LayerName><Direction>esriSyncDirectionBidirectional</Direction>"
"<ReplicaServerGen xsi:type=''xs:long''>53052</ReplicaServerGen><ReplicaClientDownloadGen xsi:type=''xs:long''>1000</ReplicaClientDownloadGen><ReplicaClientUploadGen xsi:type=''xs:long''>1000</ReplicaClientUploadGen>"
"<ReplicaClientAcknowledgeUploadGen xsi:type=''xs:long''>1000</ReplicaClientAcknowledgeUploadGen>"
"<UseGeometry>"+useGeometry+"</UseGeometry><IncludeRelated>true</IncludeRelated>"
"<QueryOption>"+queryOption+"</QueryOption>"+ hasAttachmentsStr+ "</GPSyncDataset>', NULL, NULL, NULL from GDB_Items"))
sql5.append(('INSERT INTO GDB_ColumnRegistry("table_name", "column_name", "sde_type", "column_size", "decimal_digits", "description", "object_flags", "object_id")'
" values('"+featureName + "','gdb_from_date',7,0,NULL,'Archiving from date.',536870912,NULL)"))
sql5.append(('INSERT INTO GDB_ColumnRegistry("table_name", "column_name", "sde_type", "column_size", "decimal_digits", "description", "object_flags", "object_id")'
" values('"+featureName + "','gdb_to_date',7,0,NULL,'Archiving to date.',536870912,NULL)"))
sql5.append(('INSERT INTO GDB_ColumnRegistry("table_name", "column_name", "sde_type", "column_size", "decimal_digits", "description", "object_flags", "object_id")'
" values('"+featureName + "','gdb_archive_oid',2,0,NULL,'Archiving record unique id.',536870912,NULL)"))
#sql5.append(('ALTER TABLE ' + featureName + ' add gdb_archive_oid integer')) # not null'))
#sql5.append(('ALTER TABLE ' + featureName + ' add gdb_from_date realdate check(typeof(gdb_from_date) = \'real\' and gdb_from_date >= 0.0)')) # not null default (julianday())'))
#sql5.append(('ALTER TABLE ' + featureName + ' add gdb_to_date realdate check(typeof(gdb_to_date) = \'real\' and gdb_to_date >= 0.0)')) # not null default (julianday (\'9999-12-31 23:59:59\'))'))
sql5.append(('INSERT INTO "GDB_ItemRelationships"("ObjectID", "UUID", "Type", "OriginID", "DestID", "Properties", "Attributes")'
'VALUES('
'(select max(OBJECTID) + 1 from "GDB_ItemRelationships"),'+ uuid+','
'(select UUID from "GDB_ItemRelationshipTypes" where "Name"= \'DatasetOfSyncDataset\' limit 1),'
'(select UUID from "GDB_Items" where Name="'+featureName+'" limit 1),'
'(select UUID from "GDB_Items" where Name="main.'+featureName+'" limit 1),'
'1,NULL)'))
sql5.append(('INSERT INTO "GDB_ItemRelationships"("ObjectID", "UUID", "Type", "OriginID", "DestID", "Properties", "Attributes") VALUES('
'(select max(OBJECTID) + 1 from "GDB_ItemRelationships"),'+ uuid+','
'(select UUID from "GDB_ItemRelationshipTypes" where "Name"=\'DatasetOfSyncDataset\' limit 1),'
'(select UUID from "GDB_Items" where Name="MyReplica" limit 1),'
'(select UUID from "GDB_Items" where Name="'+featureName+'" limit 1),'
'1,NULL)'))
desc = arcpy.Describe(lyr)
rels=""
if desc.relationshipClassNames:
for name in desc.relationshipClassNames:
rels = rels + "<Name>main."+name+"</Name>"
sql5.append(('UPDATE "GDB_Items" set "Definition"=replace("Definition","<RelationshipClassNames xsi:type=\'typens:Names\'></RelationshipClassNames>",\'<RelationshipClassNames xsi:type="typens:Names">'+rels+'</RelationshipClassNames>\') where "Name"="main.' +featureName+'"' ) )
#next_row_id='Next_RowID (NULL,\''+featureName+'\')'
#next_row_id='(select max(OBJECTID)+1 from \''+featureName+'\')'
if desc.hasOID:
next_row_id='(coalesce (NEW.'+desc.OIDFieldName+',(select max('+desc.OIDFieldName+')+1 from \''+featureName+'\'),1)'
else:
next_row_id='(coalesce (NEW.OBJECTID,(select max(OBJECTID)+1 from \''+featureName+'\'),1)'
fields=[]
pre=""
newFields=""
allfields=""
excludes=[oidName,"Shape_Length","Shape_Area"]
for field in desc.fields:
if field.name not in excludes:
newFields = newFields +pre+ "NEW."+field.name
allfields = allfields +pre+ field.name
pre=","
fields.append(field.name)
#if field.name==depVar + '_calculated':
sql5.append(('CREATE VIEW '+featureName+'_evw AS SELECT '+oidName+','+allfields+' FROM '+featureName + " WHERE gdb_to_date BETWEEN (julianday ('9999-12-31 23:59:59') - 0.000000001) AND (julianday ('9999-12-31 23:59:59') + 0.000000001)"))
#WHERE gdb_to_date BETWEEN (julianday ('9999-12-31 23:59:59') - 0.000000001) AND (julianday ('9999-12-31 23:59:59') + 0.000000001)
sql5.append(('CREATE TRIGGER '+featureName+'_evw_delete INSTEAD OF DELETE ON '+featureName+'_evw BEGIN '
'DELETE FROM '+featureName+' WHERE '+oidName+' = OLD.'+oidName+' AND gdb_from_date BETWEEN ('+gdb_transaction_time +' - 0.000000001) AND ('+gdb_transaction_time +' + 0.000000001); '
'UPDATE OR REPLACE '+featureName+' SET gdb_to_date = '+gdb_transaction_time +' '
'WHERE '+oidName+' = OLD.'+oidName+' AND gdb_to_date BETWEEN (julianday (\'9999-12-31 23:59:59\') - 0.000000001) AND (julianday (\'9999-12-31 23:59:59\') + 0.000000001); END;'))
sql5.append(('CREATE TRIGGER '+featureName+'_evw_insert INSTEAD OF INSERT ON '+featureName+'_evw BEGIN '
'INSERT INTO '+featureName+' ('+oidName+','+allfields+',gdb_from_date,gdb_to_date) '
'VALUES '+next_row_id+','+newFields+','+gdb_transaction_time +',julianday (\'9999-12-31 23:59:59\')); END;'))
sql5.append(('CREATE TRIGGER '+featureName+'_evw_update INSTEAD OF UPDATE ON '+featureName+'_evw BEGIN '
'UPDATE OR IGNORE '+featureName+' SET gdb_to_date = '+gdb_transaction_time +' '
'WHERE '+oidName+' = OLD.'+oidName+' AND gdb_to_date BETWEEN (julianday (\'9999-12-31 23:59:59\') - 0.000000001) AND (julianday (\'9999-12-31 23:59:59\') + 0.000000001);'
'REPLACE INTO '+featureName+' ('+oidName+','+allfields+',gdb_from_date,gdb_to_date) '
'VALUES (NEW.'+oidName+','+newFields+',(SELECT MAX (gdb_to_date) FROM '+featureName+' '
'WHERE '+oidName+' = OLD.'+oidName+' AND gdb_to_date < julianday (\'9999-12-31 23:59:59\')),julianday (\'9999-12-31 23:59:59\')); END;'))
sql5.append(("insert into " + featureName + "("+oidName+","+allfields+") select "+oidName+","+allfields+" from "+featureName + "_org"))
sql5.append(("drop table "+featureName + "_org"))
sql5.append(("CREATE INDEX gdb_ct4_"+str(idx)+" ON "+featureName+" ("+oidName+",gdb_from_date) "))
sql5.append(("CREATE INDEX gdb_ct1_"+str(idx)+" ON "+featureName+" (gdb_from_date,gdb_to_date) "))
sql5.append(("CREATE INDEX r"+str(idx)+"_gdb_xpk ON "+featureName+" ("+oidName+",gdb_to_date) "))
#if desc.hasOID:
# feature_json['objectIdField']=desc.OIDFieldName
# feature_json['objectIdFieldName']=desc.OIDFieldName
#feature_json['globalIdField'] = desc.globalIDFieldName
#feature_json['globalIdFieldName']=desc.globalIDFieldName
if desc.hasGlobalID:
sql5.append(("CREATE INDEX UUID"+str(idx)+" ON "+featureName+" ("+desc.globalIDFieldName+") "))
joinField = ""
if desc.relationshipClassNames:
for j,rel in enumerate(desc.relationshipClassNames):
relDesc = arcpy.Describe(desc.path +"/"+rel)
for i in relDesc.originClassKeys:
#if i[1]=="OriginPrimary":
if i[1]=="OriginForeign":
joinField=i[0]
if joinField in fields:
sql5.append(("CREATE INDEX GDB_"+str(idx)+"_" + joinField + " ON "+featureName+" ("+joinField+") "))
#need to add triggers for editing spatial layers
if svcType!="Table":
sql5.append(('PRAGMA writable_schema=ON;'))
#sql5.append(('DROP TRIGGER "st_insert_trigger_'+featureName+'_SHAPE";'))
#sql5.append(('DROP TRIGGER "st_delete_trigger_'+featureName+'_SHAPE";'))
#sql5.append(('DROP TRIGGER "st_update_trigger_'+featureName+'_SHAPE";'))
#sql5.append(('DROP TRIGGER "st_update1_trigger_'+featureName+'_SHAPE";'))
#sql5.append(('CREATE TRIGGER "st_insert_trigger_'+featureName+'_SHAPE" AFTER INSERT ON '+featureName+' FOR EACH ROW BEGIN '
#'INSERT INTO "st_spindex__'+featureName+'_SHAPE"(pkid) values(NEW.SHAPE,NEW._ROWID_,2); END'))
#SELECT InsertIndexEntry ("st_spindex__'+featureName+'_SHAPE",NEW.SHAPE,NEW._ROWID_,2); END'))
#sql5.append(('CREATE TRIGGER "st_delete_trigger_'+featureName+'_SHAPE" AFTER DELETE ON '+featureName+' FOR EACH ROW BEGIN DELETE FROM "st_spindex__'+featureName+'_SHAPE" WHERE pkid = OLD._ROWID_; END'))
#sql5.append(('CREATE TRIGGER "st_update_trigger_'+featureName+'_SHAPE" AFTER UPDATE OF SHAPE ON '+featureName+' WHEN OLD._ROWID_ = NEW._ROWID_ BEGIN '
#'SELECT UpdateIndexEntry ("st_spindex__'+featureName+'_SHAPE",NEW.SHAPE,NEW._ROWID_,2); END'))
#SELECT UpdateIndexEntry ("st_spindex__'+featureName+'_SHAPE",NEW.SHAPE,NEW._ROWID_,2); END'))
#sql5.append(('CREATE TRIGGER "st_update1_trigger_'+featureName+'_SHAPE" AFTER UPDATE OF SHAPE ON '+featureName+' WHEN OLD._ROWID_ != NEW._ROWID_ BEGIN DELETE FROM "st_spindex__'+featureName+'_SHAPE" WHERE pkid = OLD._ROWID_; SELECT UpdateIndexEntry ("st_spindex__'+featureName+'_SHAPE",NEW.SHAPE,NEW._ROWID_,2); END'))
#sql5.append(('CREATE TRIGGER "st_delete_trigger_'+featureName+'_SHAPE" AFTER DELETE ON '+featureName+' FOR EACH ROW BEGIN '
#'DELETE FROM "st_spindex__'+featureName+'_SHAPE" WHERE pkid = OLD._ROWID_; END'))
#sql5.append(('CREATE TRIGGER "st_insert_trigger_'+featureName+'_SHAPE" AFTER INSERT ON '+featureName+' FOR EACH ROW BEGIN '
#'SELECT InsertIndexEntry ("st_spindex__'+featureName+'_SHAPE",NEW.SHAPE,NEW._ROWID_,2); END'))
#sql5.append(('CREATE TRIGGER "st_update1_trigger_'+featureName+'_SHAPE" AFTER UPDATE OF SHAPE ON '+featureName+' WHEN OLD._ROWID_ != NEW._ROWID_ BEGIN '
#'DELETE FROM "st_spindex__'+featureName+'_SHAPE" WHERE pkid = OLD._ROWID_; SELECT UpdateIndexEntry ("st_spindex__'+featureName+'_SHAPE",NEW.SHAPE,NEW._ROWID_,2); END'))
#sql5.append(('CREATE TRIGGER "st_update_trigger_'+featureName+'_SHAPE" AFTER UPDATE OF SHAPE ON '+featureName+' WHEN OLD._ROWID_ = NEW._ROWID_ BEGIN '
#'SELECT UpdateIndexEntry ("st_spindex__'+featureName+'_SHAPE",NEW.SHAPE,NEW._ROWID_,2); END'))
sql5.append(('UPDATE "GDB_TableRegistry" set object_flags=278535 where table_name=\''+featureName+"'"))
sql5.append(('PRAGMA writable_schema=OFF;'))
else:
sql5.append(('UPDATE "GDB_TableRegistry" set object_flags=262147 where table_name=\''+featureName+"'"))
#printMessage("Loading " + lyr.name)
#now process any attachment tables
#OBS! The order of fields in these tables is important!!!
if arcpy.Exists(inFeaturesGDB+"/"+featureName+"__ATTACH"):
pre=""
newFields=""
allfields=""
newallfields=""
globalField = featureName+"_GlobalID"
correct_field_order=["ATTACHMENTID","GLOBALID","REL_GLOBALID","CONTENT_TYPE","ATT_NAME","DATA_SIZE","DATA","gdb_archive_oid","gdb_from_date","gdb_to_date"]
# elif field.type == 'Guid':
# fieldInfos['type']='esriFieldTypeGUID'
#elif field.type == 'GlobalID':
desc = arcpy.Describe(inFeaturesGDB+"/"+featureName+"__ATTACH")
oidName = desc.OIDFieldName
excludes=[oidName,"Shape_Length","Shape_Area"]
for field in desc.fields:
if field.type == 'Guid':
globalField = field.name
if field.name not in excludes:
newFields = newFields +pre+ "NEW."+field.name
allfields = allfields +pre+ field.name
newallfields = newallfields + pre + field.name
pre=","
idx=idx+1
lyrtype="esriDTTable"
queryOption="esriRowsTypeFilter"
dataSetId='\'||(SELECT ObjectId FROM "GDB_Items" Where Name=\'main.'+featureName+"__ATTACH"+"\')||\'"
printMessage("Found attachment table: " + featureName+"__ATTACH")
sql1=sql1+ ("<GPSyncDataset xsi:type=''typens:GPSyncDataset''><DatasetID>"+dataSetId+"</DatasetID><DatasetName>"+featureName+"__ATTACH"+"</DatasetName><DatasetType>"+lyrtype+"</DatasetType>"
"<LayerID>"+str(layerIds[featureName+"__ATTACH"])+"</LayerID><LayerName>"+lyr.name+"</LayerName><Direction>esriSyncDirectionBidirectional</Direction><ReplicaServerGen xsi:type=''xs:long''>53052</ReplicaServerGen><ReplicaClientDownloadGen xsi:type=''xs:long''>1000</ReplicaClientDownloadGen>"
"<ReplicaClientUploadGen xsi:type=''xs:long''>1000</ReplicaClientUploadGen><ReplicaClientAcknowledgeUploadGen xsi:type=''xs:long''>1000</ReplicaClientAcknowledgeUploadGen>"
"<UseGeometry>false</UseGeometry><IncludeRelated>false</IncludeRelated>"
"<QueryOption>"+queryOption+"</QueryOption><IsAttachment>true</IsAttachment></GPSyncDataset>")
sql3.append(('INSERT INTO GDB_Items("ObjectID", "UUID", "Type", "Name", "PhysicalName", "Path", "Url", "Properties", "Defaults", "DatasetSubtype1", "DatasetSubtype2", "DatasetInfo1", "DatasetInfo2", "Definition", "Documentation", "ItemInfo", "Shape")'
" select MAX(ObjectID)+1, "+uuid+", '{D86502F9-9758-45C6-9D23-6DD1A0107B47}', '"+featureName+"__ATTACH', '"+featureName.upper()+"__ATTACH', 'MyReplica\\"+featureName+"__ATTACH', '', 1, NULL, NULL, NULL, NULL, NULL, "
"'<GPSyncDataset xsi:type=''typens:GPSyncDataset'' xmlns:xsi=''http://www.w3.org/2001/XMLSchema-instance'' xmlns:xs=''http://www.w3.org/2001/XMLSchema'' xmlns:typens=''http://www.esri.com/schemas/ArcGIS/10.3''>"
"<DatasetID>"+dataSetId+"</DatasetID>"
"<DatasetName>"+featureName+"__ATTACH</DatasetName><DatasetType>"+lyrtype+"</DatasetType><LayerID>"+str(layerIds[featureName])+"</LayerID><LayerName>"+featureName+"</LayerName><Direction>esriSyncDirectionBidirectional</Direction>"
"<ReplicaServerGen xsi:type=''xs:long''>53052</ReplicaServerGen><ReplicaClientDownloadGen xsi:type=''xs:long''>1000</ReplicaClientDownloadGen><ReplicaClientUploadGen xsi:type=''xs:long''>1000</ReplicaClientUploadGen>"
"<ReplicaClientAcknowledgeUploadGen xsi:type=''xs:long''>1000</ReplicaClientAcknowledgeUploadGen>"
"<UseGeometry>false</UseGeometry><IncludeRelated>false</IncludeRelated><QueryOption>"+queryOption+"</QueryOption>"
"<IsAttachment>true</IsAttachment></GPSyncDataset>',"
" NULL, NULL, NULL from GDB_Items"))
sql5.append(('INSERT INTO GDB_ColumnRegistry("table_name", "column_name", "sde_type", "column_size", "decimal_digits", "description", "object_flags", "object_id")'
" values('"+featureName + "__ATTACH','gdb_from_date',7,0,NULL,'Archiving from date.',536870912,NULL)"))
sql5.append(('INSERT INTO GDB_ColumnRegistry("table_name", "column_name", "sde_type", "column_size", "decimal_digits", "description", "object_flags", "object_id")'
" values('"+featureName + "__ATTACH','gdb_to_date',7,0,NULL,'Archiving to date.',536870912,NULL)"))
sql5.append(('INSERT INTO GDB_ColumnRegistry("table_name", "column_name", "sde_type", "column_size", "decimal_digits", "description", "object_flags", "object_id")'
" values('"+featureName + "__ATTACH','gdb_archive_oid',2,0,NULL,'Archiving record unique id.',536870912,NULL)"))
#sql5.append(('ALTER TABLE ' + featureName + '__ATTACH add gdb_archive_oid integer')) # not null'))
#sql5.append(('ALTER TABLE ' + featureName + '__ATTACH add gdb_from_date realdate check(typeof(gdb_from_date) = \'real\' and gdb_from_date >= 0.0)')) #not null default (julianday())'))
#sql5.append(('ALTER TABLE ' + featureName + '__ATTACH add gdb_to_date realdate check(typeof(gdb_to_date) = \'real\' and gdb_to_date >= 0.0)')) # not null default (julianday (\'9999-12-31 23:59:59\'))'))
#sql5.append(('INSERT INTO GDB_ColumnRegistry("table_name", "column_name", "sde_type", "column_size", "decimal_digits", "description", "object_flags", "object_id")'
# " values('"+featureName + "__ATTACH','REL_GLOBALID',12,38,NULL,NULL,0,NULL)"))
sql5.append(("UPDATE GDB_ColumnRegistry set column_name='REL_GLOBALID' where column_name='"+globalField+"' and table_name='"+featureName+"__ATTACH'"))
sql5.append(("UPDATE GDB_ColumnRegistry set column_name='GLOBALID' where column_name='GlobalID' and table_name='"+featureName+"__ATTACH'"))
sql5.append(("DELETE FROM GDB_ColumnRegistry where column_name='REL_OBJECTID' and table_name='"+featureName+"__ATTACH'"))
#sql5.append(("UPDATE GDB_ColumnRegistry set column_name='GLOBALID' where column_name='GlobalID'"))
#("table_name", "column_name", "sde_type", "column_size", "decimal_digits", "description", "object_flags", "object_id")'
# " values('"+featureName + "__ATTACH','REL_GLOBALID',12,38,NULL,NULL,0,NULL)"))
sql5.append(('INSERT INTO "GDB_ItemRelationships"("ObjectID", "UUID", "Type", "OriginID", "DestID", "Properties", "Attributes")'
'VALUES('
'(select max(OBJECTID) + 1 from "GDB_ItemRelationships"),'+ uuid+','
'(select UUID from "GDB_ItemRelationshipTypes" where "Name"= \'DatasetOfSyncDataset\' limit 1),'
'(select UUID from "GDB_Items" where Name="'+featureName+'__ATTACH" limit 1),'
'(select UUID from "GDB_Items" where Name="main.'+featureName+'__ATTACH" limit 1),'
'1,NULL)'))
sql5.append(('INSERT INTO "GDB_ItemRelationships"("ObjectID", "UUID", "Type", "OriginID", "DestID", "Properties", "Attributes") VALUES('
'(select max(OBJECTID) + 1 from "GDB_ItemRelationships"),'+ uuid+','
'(select UUID from "GDB_ItemRelationshipTypes" where "Name"=\'DatasetOfSyncDataset\' limit 1),'
'(select UUID from "GDB_Items" where Name="MyReplica" limit 1),'
'(select UUID from "GDB_Items" where Name="'+featureName+'__ATTACH" limit 1),'
'1,NULL)'))
#set table flag
sql5.append(('UPDATE "GDB_TableRegistry" set object_flags=262147 where table_name="'+featureName+'__ATTACH"'))
#replace old GlobalId
#sql5.append(('UPDATE "GDB_Items" set "Definition"=replace("Definition",\'<ObjectKeyName>'+ featureName +'_GlobalID</ObjectKeyName>\',\'<ObjectKeyName>GlobalID</ObjectKeyName>\') where "Name"=\'main.'+featureName+'__ATTACHREL\''))
sql5.append(('UPDATE "GDB_Items" set "Definition"=replace("Definition",\'<ObjectKeyName>'+ globalField +'</ObjectKeyName>\',\'<ObjectKeyName>REL_GLOBALID</ObjectKeyName>\') where "Name"=\'main.'+featureName+'__ATTACHREL\''))
rels=""
if desc.relationshipClassNames:
for name in desc.relationshipClassNames:
rels = rels + "<Name>main."+name+"</Name>"
sql5.append(('UPDATE "GDB_Items" set "Definition"=replace("Definition","<RelationshipClassNames xsi:type=\'typens:Names\'></RelationshipClassNames>",\'<RelationshipClassNames xsi:type="typens:Names">'+rels+'</RelationshipClassNames>\') where "Name"="main.' +featureName+'__ATTACH"' ) )
sql5.append(('UPDATE "GDB_Items" set "Definition"=replace("Definition","<Name>REL_OBJECTID</Name><ModelName>REL_OBJECTID</ModelName><FieldType>esriFieldTypeInteger</FieldType><IsNullable>true</IsNullable>","<Name>REL_GLOBALID</Name><ModelName>REL_GLOBALID</ModelName><FieldType>esriFieldTypeGUID</FieldType><IsNullable>false</IsNullable>") where "Name"="main.' +featureName+'__ATTACH"' ))
#allfields="ATTACHMENTID,GLOBALID,REL_GLOBALID,CONTENT_TYPE,ATT_NAME,DATA_SIZE,DATA"
#newFields="NEW.ATTACHMENTID,NEW.GLOBALID,NEW.REL_GLOBALID,NEW.CONTENT_TYPE,NEW.ATT_NAME,NEW.DATA_SIZE,NEW.DATA"
sql5.append(("alter table " + featureName +"__ATTACH rename to " + featureName + "__ATTACH_org"))
newallfields = newallfields.replace(globalField, "REL_GLOBALID")
newallfields = newallfields.replace("GlobalID", "GLOBALID")
newallfields = newallfields.replace("REL_OBJECTID,", "")
newFields = newFields.replace(globalField, "REL_GLOBALID")
#newFields = newFields.replace("GlobalID","GLOBALID")
newFields = newFields.replace("NEW.GlobalID", uuid)
newFields = newFields.replace("NEW.REL_OBJECTID,", "")
newFields = newFields.replace("NEW.ATTACHMENTID,", "")
#just hardcode the fields
newallfields = "ATTACHMENTID,GLOBALID,REL_GLOBALID,CONTENT_TYPE,ATT_NAME,DATA_SIZE,DATA"
newFields = "NEW.GLOBALID,NEW.REL_GLOBALID,NEW.CONTENT_TYPE,NEW.ATT_NAME,NEW.DATA_SIZE,NEW.DATA"
allfields = "ATTACHMENTID,GLOBALID,REL_GLOBALID,CONTENT_TYPE,ATT_NAME,DATA_SIZE,DATA"
#,julianday('now'),julianday ('9999-12-31 23:59:59')
allfields = allfields.replace("REL_OBJECTID,", "")
if False:
sqlCreation = "SELECT sql FROM sqlite_master WHERE type = 'table' AND name = ?"
c.execute(sqlCreation, (featureName + "__ATTACH",))
sql = c.fetchone()[0]
#printMessage(sql)
#remove trailing close paren
sql = sql[:-1]
sql = sql.replace("primary key ", "")
sql = sql.replace(" not null", "")
#sql = sql.replace("REL_OBJECTID int32 check((typeof(REL_OBJECTID) = 'integer' or typeof(REL_OBJECTID) = 'null') and REL_OBJECTID >= -2147483648 and REL_OBJECTID <= 2147483647),","")
sql = sql.replace(globalField, "REL_GLOBALID")
sql = sql.replace("GlobalID", "GLOBALID")
sql = sql.replace("ATTACHMENTID integer",
"ATTACHMENTID int32 constraint attachementidcheck check(typeof(ATTACHMENTID) = 'integer' and ATTACHMENTID >= -2147483648 and ATTACHMENTID <= 2147483647) not null")
#newallfields = newallfields.replace("REL_OBJECTID,","")
gdb_transaction_time = 'gdb_transaction_time()'
gdb_transaction_time = "strftime('%s', 'now')"
gdb_transaction_time = "julianday('now')"
#oidName = desc.OIDFieldName
#sql = sql.replace("GlobalID uuidtext check(typeof(GlobalID) = 'text' and length(GlobalID) = 38) not null","GlobalID uuidtext check(typeof(GlobalID) = 'text' and length(GlobalID) = 38)")
sql = sql + ", gdb_archive_oid integer primary key not null, gdb_from_date realdate constraint gdbfromdatecheck check(typeof(gdb_from_date) = 'real' and gdb_from_date >= 0.0) default (" + \
gdb_transaction_time + \
"),gdb_to_date realdate constraint gdbtodatecheck check(typeof(gdb_to_date) = 'real' and gdb_to_date >= 0.0) default (julianday ('9999-12-31 23:59:59'))) "
#sql = sql +", gdb_archive_oid integer primary key not null, gdb_from_date realdate check(typeof(gdb_from_date) = 'real' and gdb_from_date >= 0.0) not null default (gdb_transaction_time ()), gdb_to_date realdate check(typeof(gdb_to_date) = 'real' and gdb_to_date >= 0.0) not null default (julianday ('9999-12-31 23:59:59')))"
#just create the dang table
sql5.append(("CREATE TABLE "+featureName+"__ATTACH ( ATTACHMENTID int32 constraint attachementidcheck check(typeof(ATTACHMENTID) = 'integer' and ATTACHMENTID >= -2147483648 and ATTACHMENTID <= 2147483647) not null, GLOBALID uuidtext constraint globalidcheck check(typeof(GLOBALID) = 'text' and length(GLOBALID) = 38), REL_GLOBALID uuidtext constraint relglobalidcheck check((typeof(REL_GLOBALID) = 'text' or typeof(REL_GLOBALID) = 'null') and length(REL_GLOBALID) = 38), CONTENT_TYPE text(150) constraint contexttypecheck check(typeof(CONTENT_TYPE) = 'text' and not length(CONTENT_TYPE) > 150), ATT_NAME text(250) constraint attnamecheck check(typeof(ATT_NAME) = 'text' and not length(ATT_NAME) > 250), DATA_SIZE int32 constraint datasizecheck check(typeof(DATA_SIZE) = 'integer' and DATA_SIZE >= -2147483648 and DATA_SIZE <= 2147483647), DATA blob constraint datablobcheck check(typeof(DATA) = 'blob' or typeof(DATA) = 'null'), gdb_archive_oid integer primary key not null, gdb_from_date realdate constraint gdbfromdatecheck check(typeof(gdb_from_date) = 'real' and gdb_from_date >= 0.0) default (julianday('now')), gdb_to_date realdate constraint gdbtodatecheck check(typeof(gdb_to_date) = 'real' and gdb_to_date >= 0.0) default (julianday ('9999-12-31 23:59:59')))"))
#sql5.append(("insert into " + featureName + "__ATTACH("+newallfields+") select "+allfields+" from "+featureName + "__ATTACH_org"))
sql5.append(("drop table "+featureName + "__ATTACH_org"))
#sql5.append(('ALTER TABLE '+featureName+'__ATTACH ADD REL_GLOBALID uuidtext'))
#next_row_id='Next_RowID (NULL,\''+featureName+'__ATTACH\')'
#next_row_id='(select max(rowid)+1 from \''+featureName+'__ATTACH\')'
next_row_id='(coalesce (NEW.ATTACHMENTID,(select max(ATTACHMENTID)+1 from \''+featureName+'__ATTACH\'),1))'
sql5.append(("CREATE INDEX gdb_ct4_"+str(idx)+" ON "+featureName+"__ATTACH (ATTACHMENTID,gdb_from_date) "))
sql5.append(("CREATE INDEX gdb_ct1_"+str(idx)+" ON "+featureName+"__ATTACH (gdb_from_date,gdb_to_date) "))
sql5.append(("CREATE INDEX r"+str(idx)+"_gdb_xpk ON "+featureName+"__ATTACH (ATTACHMENTID,gdb_to_date) "))
#sql5.append(("CREATE INDEX GDB_"+str(idx)+"_GlobalGUID ON "+featureName+"__ATTACH (GlobalGUID) "))
sql5.append(("CREATE INDEX UUID"+str(idx)+" ON "+featureName+"__ATTACH (REL_GLOBALID) "))
sql5.append(('CREATE VIEW '+featureName+'__ATTACH_evw AS SELECT '+newallfields+' FROM '+featureName+"__ATTACH WHERE gdb_to_date BETWEEN (julianday ('9999-12-31 23:59:59') - 0.000000001) AND (julianday ('9999-12-31 23:59:59') + 0.000000001)"))
sql5.append(('CREATE TRIGGER '+featureName+'__ATTACH_evw_delete INSTEAD OF DELETE ON '+featureName+'__ATTACH_evw BEGIN '
'DELETE FROM '+featureName+'__ATTACH '
'WHERE ATTACHMENTID = OLD.ATTACHMENTID AND gdb_from_date BETWEEN ('+gdb_transaction_time +' - 0.000000001) AND ('+gdb_transaction_time +' + 0.000000001); '
'UPDATE OR REPLACE '+featureName+'__ATTACH SET gdb_to_date = '+gdb_transaction_time +' '
'WHERE ATTACHMENTID = OLD.ATTACHMENTID AND gdb_to_date BETWEEN (julianday (\'9999-12-31 23:59:59\') - 0.000000001) AND (julianday (\'9999-12-31 23:59:59\') + 0.000000001); END;'))
sql5.append(('CREATE TRIGGER '+featureName+'__ATTACH_evw_insert INSTEAD OF INSERT ON '+featureName+'__ATTACH_evw BEGIN '
'INSERT INTO '+featureName+'__ATTACH ('+newallfields+',gdb_from_date,gdb_to_date) '
#'INSERT INTO '+featureName+'__ATTACH ('+newallfields+',gdb_archive_oid,gdb_from_date,gdb_to_date) '
'VALUES ('+next_row_id+','+newFields+','+gdb_transaction_time +',julianday (\'9999-12-31 23:59:59\')); END;'))
#do I need to include the gdb_archive_oid?
#'VALUES ('+next_row_id+','+newFields+','+next_row_id+','+gdb_transaction_time +',julianday (\'9999-12-31 23:59:59\')); END;'))
sql5.append(('CREATE TRIGGER '+featureName+'__ATTACH_evw_update INSTEAD OF UPDATE ON '+featureName+'__ATTACH_evw BEGIN '
'UPDATE OR IGNORE '+featureName+'__ATTACH SET gdb_to_date = '+gdb_transaction_time +' '
'WHERE ATTACHMENT = OLD.ATTACHMENTID AND gdb_to_date BETWEEN (julianday (\'9999-12-31 23:59:59\') - 0.000000001) AND (julianday (\'9999-12-31 23:59:59\') + 0.000000001);'
'REPLACE INTO '+featureName+'__ATTACH ('+newallfields+',gdb_from_date,gdb_to_date) '
'VALUES (NEW.ATTACHMENTID,'+newFields+',(SELECT MAX (gdb_to_date) FROM '+featureName+'__ATTACH '
'WHERE ATTACHMENTID = OLD.ATTACHMENTID AND gdb_to_date < julianday (\'9999-12-31 23:59:59\')),julianday (\'9999-12-31 23:59:59\')); END;'))
id = id + 1
idx=idx+1
conn.close()
#sql3=('INSERT INTO GDB_Items("ObjectID", "UUID", "Type", "Name", "PhysicalName", "Path", "Url", "Properties", "Defaults", "DatasetSubtype1", "DatasetSubtype2", "DatasetInfo1", "DatasetInfo2", "Definition", "Documentation", "ItemInfo", "Shape")'
#" select MAX(ObjectID)+1, '{55C5E7E4-834D-4D44-A12C-991E7F8B4645}', '{D86502F9-9758-45C6-9D23-6DD1A0107B47}', '"+layerName+"__ATTACH', '"+layerName.upper()+"__ATTACH', 'MyReplica_"+str(id)+"\\"+layerName+"__ATTACH', '', 1, NULL, NULL, NULL, NULL, NULL, "
#"'<GPSyncDataset xsi:type=''typens:GPSyncDataset'' xmlns:xsi=''http://www.w3.org/2001/XMLSchema-instance'' xmlns:xs=''http://www.w3.org/2001/XMLSchema'' xmlns:typens=''http://www.esri.com/schemas/ArcGIS/10.3''>"
#"<DatasetID>6</DatasetID>"
#"<DatasetName>"+layerName+"__ATTACH</DatasetName><DatasetType>esriDTTable</DatasetType><LayerID>0</LayerID><LayerName>"+layerName+"</LayerName><Direction>esriSyncDirectionBidirectional</Direction>"
#"<ReplicaServerGen xsi:type=''xs:long''>2590</ReplicaServerGen><ReplicaClientDownloadGen xsi:type=''xs:long''>1000</ReplicaClientDownloadGen><ReplicaClientUploadGen xsi:type=''xs:long''>1000</ReplicaClientUploadGen>"
#"<ReplicaClientAcknowledgeUploadGen xsi:type=''xs:long''>1000</ReplicaClientAcknowledgeUploadGen><UseGeometry>true</UseGeometry><IncludeRelated>false</IncludeRelated><QueryOption>esriRowsTypeFilter</QueryOption>"
#"<IsAttachment>true</IsAttachment></GPSyncDataset>',"
#" NULL, NULL, NULL from GDB_Items")
sql4='update "GDB_ServiceItems" set "ItemInfo"=replace("ItemInfo",|"capabilities":"Query"|,|"capabilities":"Create,Delete,Query,Update,Editing,Sync"|);'
sql4=sql4.replace("|","'")
serviceItemsStr = json.dumps(serviceItems)
sql5.append(('insert into "GDB_ServiceItems"("OBJECTID", "DatasetName", "ItemType", "ItemId", "ItemInfo", "AdvancedDrawingInfo")'
'values((select max(OBJECTID)+1 from "GDB_ServiceItems"),\''+serviceName+'\',0,-1,\''+serviceItemsStr+'\',NULL)'))
sql5.append(('update "GDB_Items" set Definition=replace(Definition,\'<ChangeTracked>false</ChangeTracked>\',\'<ChangeTracked>true</ChangeTracked>\') where "Name" !=\'main.'+featureName+'__ATTACHREL\''))
sql5.append(('update "GDB_ServiceItems" set "ItemInfo" = replace("ItemInfo",\'Create,Delete,Query,Update,Editing\',\'Create,Delete,Query,Update,Editing,Sync\') where "ItemInfo" like \'%Create,Delete,Query,Update,Editing"%\''))
sql5.append(('update "GDB_ServiceItems" set "ItemInfo"=replace("ItemInfo",\'"hasAttachments":true\',\'"hasAttachments":true,"attachmentProperties":[{"name":"name","isEnabled":true},{"name":"size","isEnabled":true},{"name":"contentType","isEnabled":true},{"name":"keywords","isEnabled":true}]\')'))
#sql5.append(('update "GDB_ServiceItems" set "ItemInfo"=replace("ItemInfo",\'"advancedQueryCapabilities":{\',\'"supportsCalculate":true,"supportsTruncate":false,"supportsAttachmentsByUploadId":true,"supportsValidateSql":true,"supportsCoordinatesQuantization":true,"supportsApplyEditsWithGlobalIds":true,"useStandardizedQueries":false,"allowGeometryUpdates":true,"advancedQueryCapabilities":{"supportsQueryRelatedPagination":true,"supportsQueryWithResultType":true,"supportsSqlExpression":true,"supportsAdvancedQueryRelated":true,"supportsQueryAttachments":true,"supportsReturningGeometryCentroid":false,\')'))
#sql5.append(('UPDATE "GDB_ServiceItems" set "DatasetName"="' + featureName + '" where "ItemId"='+datasetId))
#sql5='update "GDB_Items" set ObjectId=ROWID'
sql5.append(('update "GDB_ColumnRegistry" set object_flags=4 where table_name=\'GDB_ServiceItems\' and column_name in(\'DatasetName\',\'ItemType\',\'ItemId\',\'ItemInfo\')'))
sql1=sql1+("</GPSyncDatasets><AttachmentsSyncDirection>esriAttachmentsSyncDirectionBidirectional</AttachmentsSyncDirection></GPSyncReplica>'"
", NULL, NULL, NULL from GDB_Items;")
#sql5.append(('PRAGMA writable_schema=ON;'))
#sql5.append(('DROP TRIGGER "st_insert_trigger_GDB_Items_Shape";'))
#sql5.append(('DROP TRIGGER "st_delete_trigger_GDB_Items_Shape";'))
#sql5.append(('DROP TRIGGER "st_update_trigger_GDB_Items_Shape";'))
#sql5.append(('DROP TRIGGER "st_update1_trigger_GDB_Items_Shape"'))
#sql5.append(('CREATE TRIGGER "st_insert_trigger_GDB_Items_Shape" AFTER INSERT ON GDB_Items FOR EACH ROW BEGIN SELECT InsertIndexEntry ("st_spindex__GDB_Items_Shape",NEW.Shape,NEW._ROWID_,2); END'));
#sql5.append(('CREATE TRIGGER "st_delete_trigger_GDB_Items_Shape" AFTER DELETE ON GDB_Items FOR EACH ROW BEGIN DELETE FROM "st_spindex__GDB_Items_Shape" WHERE pkid = OLD._ROWID_; END'));
#sql5.append(('CREATE TRIGGER "st_update_trigger_GDB_Items_Shape" AFTER UPDATE OF Shape ON GDB_Items WHEN OLD._ROWID_ = NEW._ROWID_ BEGIN SELECT UpdateIndexEntry ("st_spindex__GDB_Items_Shape",NEW.Shape,NEW._ROWID_,2); END'));
#sql5.append(('CREATE TRIGGER "st_update1_trigger_GDB_Items_Shape" AFTER UPDATE OF Shape ON GDB_Items WHEN OLD._ROWID_ != NEW._ROWID_ BEGIN DELETE FROM "st_spindex__GDB_Items_Shape" WHERE pkid = OLD._ROWID_; SELECT UpdateIndexEntry ("st_spindex__GDB_Items_Shape",NEW.Shape,NEW._ROWID_,2); END'));
#sql5.append(('PRAGMA writable_schema=OFF;'))
#sql5.append(('PRAGMA writable_schema=ON;'))
#sql5.append(('delete FROM sqlite_master where type=\'trigger\' and name like \'%_Shape\''))
#sql5.append(('PRAGMA writable_schema=OFF;'))
#sql1=sql1+("#PRAGMA writable_schema=ON;update sqlite_master set sql=replace(sql,'OBJECTID integer','OBJECTID int32') where name in ("+tables+") and type='table';#PRAGMA writable_schema=OFF;")
#serviceRep=[sql1,sql2,sql4]
#NON_OPTIMIZE_SIZE"
name=replicaDestinationPath + "/"+serviceName+".sql"
with open(name,'w') as f:
f.write("SELECT load_extension( 'stgeometry_sqlite.dll', 'SDE_SQL_funcs_init');\n")
#not sure here - use wal or not?
#f.write("PRAGMA journal_mode=WAL;\n")
#f.write(";\n")
f.write(sql1)
f.write(";\n")
for i in sql2:
f.write(i)
f.write(";\n")
for i in sql3:
f.write(i)
f.write(";\n")
f.write(";\n")
for i in sql5:
f.write(i)
f.write(";\n")
f.write(sql4)
f.close()
#printMessage("Running \"" + toolkitPath+"/spatialite/spatialite.exe\" \"" + newFullReplicaDB + "\" < " + name)
#printMessage("Running \"" + spatialite_path+ "\" \"" + newFullReplicaDB + "\" < \"" + name + "\"")
cmd = "\"" + spatialite_path+"\" \"" + newFullReplicaDB + "\" < \"" + name + "\" >>\"" + replicaDestinationPath + os.sep + serviceName + ".log\" 2>&1"
try:
#os.system("\"" + spatialite_path+"\" \"" + newFullReplicaDB + "\" < \"" + name + "\" >>" + replicaDestinationPath + os.sep + serviceName + ".log 2>&1")
result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except Exception, e:
printMessage("Unable to run sql commands: "+cmd + "\n" + e.output)
#create a replica sqlite database for a single layer/table
def createSingleReplica(templatePath,df,lyr,replicaDestinationPath,toolkitPath,feature_json,serverName,serviceName,username,id):
    """Create a single-layer replica sqlite geodatabase for *lyr*.

    Builds a temporary one-layer map document from the blank template,
    exports it as runtime content, moves the resulting .geodatabase file
    up into replicaDestinationPath, then post-processes the database by
    running the SQL emitted by updateReplicaPaths through the spatialite
    command-line shell.

    Side effects: writes <lyr.name>.geodatabase, <lyr.name>.sql and
    <lyr.name>.log into replicaDestinationPath; removes the temporary
    export folders.
    """
    global spatialite_path
    # Build a throw-away mxd containing only this layer.
    blankmxd = arcpy.mapping.MapDocument(templatePath + "/blank.mxd")
    df = arcpy.mapping.ListDataFrames(blankmxd)[0]
    arcpy.mapping.AddLayer(df, lyr)
    tmpMxd = templatePath + "/temp.mxd"
    if os.path.exists(tmpMxd):
        os.remove(tmpMxd)
    blankmxd.saveACopy(tmpMxd)
    desc = arcpy.Describe(lyr)
    saveReplica(tmpMxd, replicaDestinationPath + "/" + lyr.name, lyr, desc)
    # CreateRuntimeContent writes the geodatabase into a "data" sub-folder;
    # move it up to the destination root and clean up the scaffolding.
    filenames = next(os.walk(replicaDestinationPath + "/" + lyr.name + "/data/"))[2]
    printMessage("Renamed " + replicaDestinationPath + "/" + lyr.name + "/data/" + filenames[0] + " to " + replicaDestinationPath + "/" + lyr.name + ".geodatabase")
    # If an offline geodatabase already exists, it must be deleted first.
    newReplicaDB = replicaDestinationPath + "/" + lyr.name + ".geodatabase"
    try:
        if os.path.exists(newReplicaDB):
            # BUGFIX: the target is a file, not a directory — os.rmdir always
            # failed here and the os.rename below then raised as well.
            os.remove(newReplicaDB)
    except:
        printMessage("Unable to remove old replica geodatabase")
    os.rename(replicaDestinationPath + "/" + lyr.name + "/data/" + filenames[0], newReplicaDB)
    try:
        os.rmdir(replicaDestinationPath + "/" + lyr.name + "/data/")
        os.rmdir(replicaDestinationPath + "/" + lyr.name)
    except:
        printMessage("Unable to remove replica folders")
    serviceRep = []
    if os.path.exists(newReplicaDB):
        # BUGFIX: updateReplicaPaths was previously called twice with
        # identical arguments; a single call supplies both the SQL file
        # contents and the serviceRep list.
        ret = updateReplicaPaths(newReplicaDB, lyr.name, feature_json,
                                 "http://" + serverName + "/arcgis/rest/services/" + serviceName + "/FeatureServer",
                                 serverName, serviceName, username, id)
        serviceRep.extend(ret)
        name = replicaDestinationPath + os.sep + lyr.name + ".sql"
        with open(name, 'w') as f:
            f.write("SELECT load_extension( 'stgeometry_sqlite.dll', 'SDE_SQL_funcs_init');\n")
            for stmt in ret:
                f.write(stmt)
                f.write(";\n")
        # Pipe the generated SQL through the spatialite shell, capturing
        # everything into the per-layer log file.
        cmd = "\"" + spatialite_path + "\" \"" + newReplicaDB + "\" < \"" + name + "\" >> \"" + replicaDestinationPath + os.sep + lyr.name + ".log\" 2>&1"
        try:
            result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        except Exception as e:
            # BUGFIX: "str" + exception raised a TypeError inside the error
            # handler; report the captured output like the other call sites.
            printMessage("Unable to run sql commands: " + cmd + "\n" + getattr(e, "output", str(e)))
def saveReplica(tmpMxd, replicaPath, lyr, desc):
    """Export the map document *tmpMxd* as runtime content at *replicaPath*.

    Thin wrapper over arcpy.CreateRuntimeContent_management using the fixed
    export options this toolkit relies on (feature and tabular data, online,
    PNG tiles, not size-optimized).
    """
    export_options = (
        lyr.name, "#", "#",
        "FEATURE_AND_TABULAR_DATA", "NON_OPTIMIZE_SIZE",
        "ONLINE", "PNG", "1", "#",
    )
    arcpy.CreateRuntimeContent_management(tmpMxd, replicaPath, *export_options)
    printMessage("Saved replica: " + replicaPath)
def updateReplicaPaths(replicaPath,layerName,feature_json,servicePath,serverName,serviceName,username,id):
    """Build the SQL needed to register a sync replica in a runtime
    .geodatabase (SQLite) file.

    Returns a list of three INSERT statements for the GDB_Items table:
    the GPSyncReplica item itself, the feature-class sync dataset, and
    its __ATTACH attachment table.

    Fix over the original: the embedded WKT on the SpatialReference line
    contained unescaped double quotes inside a double-quoted Python
    literal, which broke the string; they are now escaped.

    NOTE(review): replicaPath and servicePath are unused — the service
    URL is rebuilt from serverName/serviceName instead.
    """
    # Replica extent taken from the feature service JSON.
    minx=str(feature_json['extent']['xmin'])
    miny=str(feature_json['extent']['ymin'])
    maxx=str(feature_json['extent']['xmax'])
    maxy=str(feature_json['extent']['ymax'])
    # GPSyncReplica item.  Doubled single quotes ('') survive as escaped
    # quotes inside the SQL string literal.
    sql1=('INSERT INTO GDB_Items("ObjectID", "UUID", "Type", "Name", "PhysicalName", "Path", "Url", "Properties", "Defaults", "DatasetSubtype1", "DatasetSubtype2", "DatasetInfo1", "DatasetInfo2", "Definition", "Documentation", "ItemInfo", "Shape")'
          " select MAX(ObjectID)+1, '{7B6EB064-7BF6-42C8-A116-2E89CD24A000}', '{5B966567-FB87-4DDE-938B-B4B37423539D}', 'MyReplica', 'MYREPLICA', 'MyReplica', '', 1, NULL, NULL, NULL, "
          "'http://"+serverName+"/arcgis/rest/services/"+serviceName+"/FeatureServer', '"+username+"',"
          "'<GPSyncReplica xsi:type=''typens:GPSyncReplica'' xmlns:xsi=''http://www.w3.org/2001/XMLSchema-instance'' xmlns:xs=''http://www.w3.org/2001/XMLSchema'' xmlns:typens=''http://www.esri.com/schemas/ArcGIS/10.3''>"
          "<ReplicaName>MyReplica</ReplicaName><ID>"+str(id)+"</ID><ReplicaID>{7b6eb064-7bf6-42c8-a116-2e89cd24a000}</ReplicaID>"
          "<ServiceName>http://"+serverName+"/arcgis/rest/services/"+serviceName+"/FeatureServer</ServiceName>"
          "<Owner>"+username+"</Owner>"
          "<Role>esriReplicaRoleChild</Role><SyncModel>esriSyncModelPerLayer</SyncModel><Direction>esriSyncDirectionBidirectional</Direction><CreationDate>2015-09-02T13:48:33</CreationDate><LastSyncDate>1970-01-01T00:00:01</LastSyncDate>"
          "<ReturnsAttachments>true</ReturnsAttachments><SpatialRelation>esriSpatialRelIntersects</SpatialRelation><QueryGeometry xsi:type=''typens:PolygonN''><HasID>false</HasID><HasZ>false</HasZ><HasM>false</HasM><Extent xsi:type=''typens:EnvelopeN''>"
          "<XMin>"+minx+"</XMin><YMin>"+miny+"</YMin><XMax>"+maxx+"</XMax><YMax>"+maxy+"</YMax></Extent><RingArray xsi:type=''typens:ArrayOfRing''><Ring xsi:type=''typens:Ring''>"
          "<PointArray xsi:type=''typens:ArrayOfPoint''>"
          "<Point xsi:type=''typens:PointN''><X>"+minx+"</X><Y>"+miny+"</Y></Point><Point xsi:type=''typens:PointN''><X>"+maxx+"</X><Y>"+miny+"</Y></Point>"
          "<Point xsi:type=''typens:PointN''><X>"+maxx+"</X><Y>"+maxy+"</Y></Point><Point xsi:type=''typens:PointN''><X>"+minx+"</X><Y>"+maxy+"</Y></Point>"
          "<Point xsi:type=''typens:PointN''><X>"+minx+"</X><Y>"+miny+"</Y></Point></PointArray></Ring></RingArray>"
          "<SpatialReference xsi:type=''typens:ProjectedCoordinateSystem''><WKT>PROJCS[\"WGS_1984_Web_Mercator_Auxiliary_Sphere\",GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137.0,298.257223563]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]],PROJECTION[\"Mercator_Auxiliary_Sphere\"],PARAMETER[\"False_Easting\",0.0],PARAMETER[\"False_Northing\",0.0],PARAMETER[\"Central_Meridian\",0.0],PARAMETER[\"Standard_Parallel_1\",0.0],PARAMETER[\"Auxiliary_Sphere_Type\",0.0],UNIT[\"Meter\",1.0],AUTHORITY[\"EPSG\",3857]]</WKT><XOrigin>-20037700</XOrigin><YOrigin>-30241100</YOrigin><XYScale>10000</XYScale><ZOrigin>-100000</ZOrigin><ZScale>10000</ZScale><MOrigin>-100000</MOrigin><MScale>10000</MScale><XYTolerance>0.001</XYTolerance><ZTolerance>0.001</ZTolerance><MTolerance>0.001</MTolerance><HighPrecision>true</HighPrecision><WKID>102100</WKID><LatestWKID>3857</LatestWKID></SpatialReference></QueryGeometry>"
          "<GPSyncDatasets xsi:type=''typens:ArrayOfGPSyncDataset''>"
          "<GPSyncDataset xsi:type=''typens:GPSyncDataset''><DatasetID>5</DatasetID><DatasetName>"+layerName+"</DatasetName><DatasetType>esriDTFeatureClass</DatasetType>"
          "<LayerID>"+str(id)+"</LayerID><LayerName>"+layerName+"</LayerName><Direction>esriSyncDirectionBidirectional</Direction><ReplicaServerGen xsi:type=''xs:long''>2590</ReplicaServerGen><ReplicaClientDownloadGen xsi:type=''xs:long''>1000</ReplicaClientDownloadGen>"
          "<ReplicaClientUploadGen xsi:type=''xs:long''>1000</ReplicaClientUploadGen><ReplicaClientAcknowledgeUploadGen xsi:type=''xs:long''>1000</ReplicaClientAcknowledgeUploadGen><UseGeometry>true</UseGeometry><IncludeRelated>true</IncludeRelated>"
          "<QueryOption>esriRowsTypeFilter</QueryOption><HasAttachments>true</HasAttachments></GPSyncDataset>"
          "<GPSyncDataset xsi:type=''typens:GPSyncDataset''><DatasetID>6</DatasetID><DatasetName>"+layerName+"__ATTACH</DatasetName><DatasetType>esriDTTable</DatasetType>"
          "<LayerID>0</LayerID>"
          "<LayerName>"+layerName+"</LayerName><Direction>esriSyncDirectionBidirectional</Direction>"
          "<ReplicaServerGen xsi:type=''xs:long''>2590</ReplicaServerGen><ReplicaClientDownloadGen xsi:type=''xs:long''>1000</ReplicaClientDownloadGen><ReplicaClientUploadGen xsi:type=''xs:long''>1000</ReplicaClientUploadGen>"
          "<ReplicaClientAcknowledgeUploadGen xsi:type=''xs:long''>1000</ReplicaClientAcknowledgeUploadGen><UseGeometry>true</UseGeometry><IncludeRelated>false</IncludeRelated><QueryOption>esriRowsTypeFilter</QueryOption><IsAttachment>true</IsAttachment></GPSyncDataset>"
          "</GPSyncDatasets></GPSyncReplica>'"
          ", NULL, NULL, NULL from GDB_Items")
    # Sync dataset item for the feature class itself.
    sql2=('INSERT INTO GDB_Items("ObjectID", "UUID", "Type", "Name", "PhysicalName", "Path", "Url", "Properties", "Defaults", "DatasetSubtype1", "DatasetSubtype2", "DatasetInfo1", "DatasetInfo2", "Definition", "Documentation", "ItemInfo", "Shape")'
          " select MAX(ObjectID)+1, '{AE8D3C7E-9890-4BF4-B946-5BE50A1CC279}', '{D86502F9-9758-45C6-9D23-6DD1A0107B47}', '"+layerName+"', '"+layerName.upper()+"', 'MyReplica"+str(id)+"\\"+layerName+"', '', 1, NULL, NULL, NULL, NULL, NULL, "
          "'<GPSyncDataset xsi:type=''typens:GPSyncDataset'' xmlns:xsi=''http://www.w3.org/2001/XMLSchema-instance'' xmlns:xs=''http://www.w3.org/2001/XMLSchema'' xmlns:typens=''http://www.esri.com/schemas/ArcGIS/10.3''>"
          "<DatasetID>5</DatasetID>"
          "<DatasetName>"+layerName+"</DatasetName>"
          "<DatasetType>esriDTFeatureClass</DatasetType><LayerID>0</LayerID><LayerName>"+layerName+"</LayerName><Direction>esriSyncDirectionBidirectional</Direction>"
          "<ReplicaServerGen xsi:type=''xs:long''>2590</ReplicaServerGen><ReplicaClientDownloadGen xsi:type=''xs:long''>1000</ReplicaClientDownloadGen><ReplicaClientUploadGen xsi:type=''xs:long''>1000</ReplicaClientUploadGen>"
          "<ReplicaClientAcknowledgeUploadGen xsi:type=''xs:long''>1000</ReplicaClientAcknowledgeUploadGen><UseGeometry>true</UseGeometry><IncludeRelated>true</IncludeRelated><QueryOption>esriRowsTypeFilter</QueryOption>"
          "<HasAttachments>true</HasAttachments></GPSyncDataset>', NULL, NULL, NULL from GDB_Items")
    # Sync dataset item for the attachments table.
    sql3=('INSERT INTO GDB_Items("ObjectID", "UUID", "Type", "Name", "PhysicalName", "Path", "Url", "Properties", "Defaults", "DatasetSubtype1", "DatasetSubtype2", "DatasetInfo1", "DatasetInfo2", "Definition", "Documentation", "ItemInfo", "Shape")'
          " select MAX(ObjectID)+1, '{55C5E7E4-834D-4D44-A12C-991E7F8B4645}', '{D86502F9-9758-45C6-9D23-6DD1A0107B47}', '"+layerName+"__ATTACH', '"+layerName.upper()+"__ATTACH', 'MyReplica_"+str(id)+"\\"+layerName+"__ATTACH', '', 1, NULL, NULL, NULL, NULL, NULL, "
          "'<GPSyncDataset xsi:type=''typens:GPSyncDataset'' xmlns:xsi=''http://www.w3.org/2001/XMLSchema-instance'' xmlns:xs=''http://www.w3.org/2001/XMLSchema'' xmlns:typens=''http://www.esri.com/schemas/ArcGIS/10.3''>"
          "<DatasetID>6</DatasetID>"
          "<DatasetName>"+layerName+"__ATTACH</DatasetName><DatasetType>esriDTTable</DatasetType><LayerID>0</LayerID><LayerName>"+layerName+"</LayerName><Direction>esriSyncDirectionBidirectional</Direction>"
          "<ReplicaServerGen xsi:type=''xs:long''>2590</ReplicaServerGen><ReplicaClientDownloadGen xsi:type=''xs:long''>1000</ReplicaClientDownloadGen><ReplicaClientUploadGen xsi:type=''xs:long''>1000</ReplicaClientUploadGen>"
          "<ReplicaClientAcknowledgeUploadGen xsi:type=''xs:long''>1000</ReplicaClientAcknowledgeUploadGen><UseGeometry>true</UseGeometry><IncludeRelated>false</IncludeRelated><QueryOption>esriRowsTypeFilter</QueryOption>"
          "<IsAttachment>true</IsAttachment></GPSyncDataset>',"
          " NULL, NULL, NULL from GDB_Items")
    return [sql1,sql2,sql3]
#sqliteReplicaPaths(sql1,sql2,sql3)
def sqliteReplicaPaths(sql1,sql2,sql3,replicaPath=None):
    """Apply replica-registration SQL to a runtime .geodatabase file.

    Loads the Esri ST_Geometry SQLite extension, executes the three
    GDB_Items INSERTs, then rewrites the GDB_ServiceItems capabilities
    string to enable editing/sync.  Errors are logged per statement and
    do not abort the remaining statements.

    replicaPath -- path to the .geodatabase file.  The original code
    read this from an undefined global; it is now an explicit optional
    parameter (keeping the old 3-argument call signature valid).

    Fix over the original: each statement used try/except/ELSE, so the
    "Error in sql" message was printed on *success*; the else branches
    are now real except handlers.
    """
    conn = sqlite3.connect(replicaPath)
    conn.enable_load_extension(True)
    c = conn.cursor()
    # Build the capabilities UPDATE; '|' placeholders avoid quoting
    # headaches and are swapped for single quotes afterwards.
    sql4='update "GDB_ServiceItems" set "ItemInfo"=replace("ItemInfo",|"capabilities":"Query"|,|"capabilities":"Create,Delete,Query,Update,Editing,Sync"|)'
    sql4=sql4.replace("|","'")
    # ST_Geometry extension must be loaded before touching Esri tables.
    try:
        c.execute("SELECT load_extension( 'c:/Program Files (x86)/ArcGIS/Desktop10.3/DatabaseSupport/SQLite/Windows32/stgeometry_sqlite.dll', 'SDE_SQL_funcs_init')")
    except sqlite3.IntegrityError:
        printMessage("Error in sql integrity")
    except sqlite3.Error:
        printMessage("Error in sql")
    # Run each statement independently, logging it first.
    for sql in (sql1, sql2, sql3, sql4):
        try:
            printMessage(sql)
            c.execute(sql)
        except sqlite3.IntegrityError:
            printMessage("Error in sql integrity")
        except sqlite3.Error:
            printMessage("Error in sql")
    conn.commit()
    conn.close()
def __createXML(xmlFile,serverName,serviceName,layerName):
    """Prototype: patch a replica.xml template with service/replica ids.

    NOTE(review): dead/broken code — 'zz' (presumably a zip archive)
    and 'key' are undefined in this scope, the serverName/serviceName/
    layerName arguments are never used, and nothing is written back.
    Kept for reference only; do not call as-is.
    """
    dom = parse(zz.open(xmlFile))  # NOTE(review): 'zz' is undefined here
    symb = dom.getElementsByTagName("Symbolizer")
    # The lookups below discard their results — presumably these tags
    # were meant to be captured and rewritten; confirm before reviving.
    dom.getElementsByTagName("ServiceName")
    dom.getElementsByTagName("ReplicaName")
    dom.getElementsByTagName("ID")
    dom.getElementsByTagName("ReplicaID")
    dom.getElementsByTagName("Owner")
    key.firstChild.data = "new text"  # NOTE(review): 'key' is undefined here
    #DatasetID
    #DatasetName
    #DatasetType>esriDTFeatureClass</DatasetType>
    #LayerID
    #LayerName
    # Open original file
    #et = xml.etree.ElementTree.parse(xmlFile)
    # Append new tag: <a x='1' y='abc'>body text</a>
    #new_tag = xml.etree.ElementTree.SubElement(et.getroot(), 'a')
    #new_tag.text = 'body text'
    #new_tag.attrib['x'] = '1' # must be str; cannot be an int
    #new_tag.attrib['y'] = 'abc'
    # Write back to file
    #et.write('file.xml')
    #et.write('file_new.xml')
def __updateReplica(layer):
    """Sample sqlite3 insert/update snippet (demo code against a fixed
    local database; the 'layer' argument is not used)."""
    import sqlite3
    sqlite_file = 'my_first_db.sqlite'
    table_name = 'my_table_2'
    id_column = 'my_1st_column'
    column_name = 'my_2nd_column'
    # Connecting to the database file
    conn = sqlite3.connect(sqlite_file)
    cur = conn.cursor()
    insert_sql = "INSERT INTO {tn} ({idf}, {cn}) VALUES (123456, 'test')".format(
        tn=table_name, idf=id_column, cn=column_name)
    # A) insert a specific ID, reporting a duplicate primary key
    try:
        cur.execute(insert_sql)
    except sqlite3.IntegrityError:
        printMessage('ERROR: ID already exists in PRIMARY KEY column {}'.format(id_column))
    # B) insert only when the ID does not exist yet
    cur.execute("INSERT OR IGNORE INTO {tn} ({idf}, {cn}) VALUES (123456, 'test')".format(
        tn=table_name, idf=id_column, cn=column_name))
    # C) update the newly inserted or pre-existing row
    cur.execute("UPDATE {tn} SET {cn}=('Hi World') WHERE {idf}=(123456)".format(
        tn=table_name, cn=column_name, idf=id_column))
    conn.commit()
    conn.close()
def copyGeodatabase():
    """Copy a runtime .geodatabase into a file geodatabase.

    Fix over the original: the source path was a non-raw string, so the
    backslash-r pair in it was parsed as a carriage-return escape and
    produced a corrupt path; a raw string preserves the backslashes.
    """
    # Execute CopyRuntimeGdbToFileGdb
    arcpy.CopyRuntimeGdbToFileGdb_conversion(r"D:\runtimedata\replica.geodatabase", 'replica_Copy.gdb')
def createSqliteDb():
    """Collected arcpy snippets: create a GeoPackage, copy shapefiles
    into a file geodatabase, save an mxd copy, and export runtime
    content.

    NOTE(review): this reads like pasted sample code rather than one
    cohesive routine — it hard-codes local C:/D: paths and an mxd that
    may not exist on this machine; confirm before calling.
    """
    # Set local variables
    # NOTE(review): non-raw string with backslashes; they survive here
    # only because the following characters are not escape sequences —
    # prefer a raw string.
    sqlite_database_path = "C:\data\example.gpkg"
    # Execute CreateSQLiteDatabase
    arcpy.gp.CreateSQLiteDatabase(sqlite_database_path, "GEOPACKAGE")
    # Set environment settings
    arcpy.env.workspace = "C:/data"
    # Set local variables
    outWorkspace = "c:/output/output.gdb"
    # Use ListFeatureClasses to generate a list of shapefiles in the
    # workspace shown above.
    fcList = arcpy.ListFeatureClasses()
    # Execute CopyFeatures for each input shapefile
    for shapefile in fcList:
        # Determine the new output feature class path and name
        # NOTE(review): str.strip(".shp") strips any of '.', 's', 'h',
        # 'p' from both ends — not the extension; confirm intent.
        outFeatureClass = os.path.join(outWorkspace, shapefile.strip(".shp"))
        arcpy.CopyFeatures_management(shapefile, outFeatureClass)
    mxd = arcpy.mapping.MapDocument(r"D:\xTemp\sample.mxd")
    df = arcpy.mapping.ListDataFrames(mxd, "*")[0]
    mxd.saveACopy(r"d:\xTemp\name.mxd")
    mxd.save()
    # NOTE(review): elsewhere in this file the tool is invoked as
    # CreateRuntimeContent_management — confirm this bare name resolves.
    arcpy.CreateRuntimeContent("D:/Geoprocessing/OfflineMapping/sandiego_locators_basemap.mxd",
                             "D:/Geoprocessing/Results/RRuntimeContent_sandiego",
                             "MyBasemapLayer","#","#",
                             "FEATURE_AND_TABULAR_DATA","NON_OPTIMIZE_SIZE","ONLINE","PNG","1","#")
def saveMapfile(mapfile,lyr,desc,dataDestinationPath,mapserver_json):
    """Write a minimal MapServer .map file for one layer.

    Symbology (size/width/fill/outline colors) is pulled from the
    layer's feature-service JSON under 'drawingInfo'; each missing
    piece independently falls back to a default, since the renderer
    structure varies with geometry type.

    mapfile        -- output path for the .map file
    lyr            -- layer object (only .name is used)
    mapserver_json -- MapServer layer resource JSON (dict)
    desc, dataDestinationPath are currently unused.

    Improvements over the original: file written via a context manager
    (no leaked handle on write failure), bare excepts narrowed to
    Exception, and locals no longer shadow the builtins file/type.
    """
    mapsize="400 400"
    symbol=""
    geom_type=""   # MapServer LAYER TYPE keyword
    style=""
    try:
        size=str(mapserver_json['drawingInfo']['renderer']['symbol']['size'])
    except Exception:
        size="1"
    try:
        width=str(mapserver_json['drawingInfo']['renderer']['symbol']['width'])
    except Exception:
        width="1"
    try:
        rgb=mapserver_json['drawingInfo']['renderer']['symbol']['color']
        color=str(rgb[0])+" "+str(rgb[1])+" "+str(rgb[2])
    except Exception:
        color="0 0 0"
    try:
        orgb=mapserver_json['drawingInfo']['renderer']['symbol']['outline']['color']
        outlinecolor=str(orgb[0])+" "+str(orgb[1])+" "+str(orgb[2])
    except Exception:
        outlinecolor="0 0 0"
    if mapserver_json['geometryType']=='esriGeometryPolygon':
        geom_type="POLYGON"
        style=("COLOR "+color+" \n"
               "OUTLINECOLOR "+outlinecolor+" \n"
               "WIDTH "+width+" \n")
    elif mapserver_json['geometryType']=='esriGeometryPolyline':
        geom_type="LINE"
        style=("COLOR "+color+" \n"
               "OUTLINECOLOR "+outlinecolor+" \n"
               "WIDTH "+width+" \n")
    elif mapserver_json['geometryType']=='esriGeometryPoint':
        # Points need an inline ellipse SYMBOL definition.
        symbol=("SYMBOL\n"
                "NAME 'circle'\n"
                "TYPE ellipse\n"
                "FILLED true\n"
                "POINTS\n"
                "1 1\n"
                "END\n"
                "END\n")
        geom_type="POINT"
        style=("COLOR "+color+"\n"
               "SYMBOL 'circle'\n"
               "SIZE "+size+"\n")
    mapstr = ("MAP \n "
              "NAME '" +lyr.name + "' \n"
              "STATUS ON \n"
              "EXTENT " + str(mapserver_json['extent']['xmin']) + " " + str(mapserver_json['extent']['ymin']) + " " + str(mapserver_json['extent']['xmax']) + " " + str(mapserver_json['extent']['ymax']) + "\n"
              "SIZE " + mapsize + "\n"
              "IMAGECOLOR 255 255 255 \n"
              "TRANSPARENT on \n"
              +symbol+
              "LAYER\n"
              "NAME "+lyr.name+"\n"
              "STATUS ON \n"
              "DATA 'data/"+lyr.name+"'\n"
              "TYPE "+geom_type+"\n"
              "CLASS\n"
              "NAME '"+lyr.name+"'\n"
              "STYLE\n"
              +style+
              "END\n"
              "END\n"
              "END\n"
              "END")
    try:
        with open(mapfile,'w') as out:
            out.write(mapstr)
    except Exception:
        printMessage("Unable to create mapfile: " + mapstr)
def getOperationalLayers(opLayers,serverName,serviceName,symbols):
    """Build the web-map operational-layer entries for each layer.

    Every layer becomes an ArcGISFeatureLayer dict pointing at the
    feature service; symbology comes from the 'symbols' lookup keyed by
    the layer's source feature-class name.  When a layer carries a
    label expression, it is reused as the popup title pattern.
    """
    layers=[]
    for id, lyr in enumerate(opLayers):
        desc = arcpy.Describe(lyr)
        if hasattr(desc, "layer"):
            featureName = os.path.basename(desc.layer.catalogPath)
        else:
            featureName = os.path.basename(desc.catalogPath)
        opLayer = {
            "id": str(id),
            "itemId": lyr.name.replace(" ","_") + str(id),
            "layerType": "ArcGISFeatureLayer",
            "title": lyr.name,
            "url": "http://" + serverName + "/arcgis/rest/services/" + serviceName + "/FeatureServer/" + str(id),
            "popupInfo": getPopupInfo(lyr),
            "layerDefinition": {"drawingInfo": getSymbol(lyr, symbols[featureName]["symbols"], lyr.name)},
            "opacity": (100.0 - float(lyr.transparency)) / 100.0,
            "capabilities": "Create,Delete,Query,Update,Editing,Sync",
            "visibility": lyr.visible
        }
        # Use the (last) label expression, stripped of field brackets,
        # as the popup title when one exists.
        lbl = ""
        if lyr.supports("LABELCLASSES"):
            for lblclass in lyr.labelClasses:
                lblclass.showClassLabels = True
                lbl = lblclass.expression.replace("[","").replace("]","")
        if lbl != "":
            opLayer['popupInfo']['title'] = lyr.name + ": {" + lbl + "}"
        layers.append(opLayer)
    return layers
def getTables(opTables,serverName,serviceName,id=0):
    """Build web-map operational entries for stand-alone tables.

    Ids start at 'id' (default 0) and increment per table; each entry
    points at the corresponding FeatureServer sublayer URL.
    """
    tbls=[]
    base_url = "http://" + serverName + "/arcgis/rest/services/" + serviceName + "/FeatureServer/"
    for tbl in opTables:
        tbls.append({
            "id": str(id),
            "itemId": tbl.name.replace(" ","_") + str(id),
            "title": tbl.name,
            "url": base_url + str(id),
            "popupInfo": getPopupInfo(tbl)
        })
        id += 1
    return tbls
def getDisplayField(fields):
    """Return the name of the first esriFieldTypeString field, or ''
    when the field list contains none."""
    for fld in fields:
        if fld['type'] == "esriFieldTypeString":
            return fld['name']
    return ""
def getDisplayFieldName(lyr):
    """Return a popup title pattern 'FIELD {FIELD}' built from the
    first String field of the layer/table, or '' if none exists."""
    desc = arcpy.Describe(lyr)
    if desc.dataType == "FeatureLayer":
        source = desc.dataElement.catalogPath
    else:
        source = desc.catalogPath
    for fld in arcpy.ListFields(source):
        if fld.type == "String":
            return fld.name + " {" + fld.name + "}"
    return ""
def getPopupInfo(lyr):
    """Assemble the web-map popupInfo block for a layer or table:
    title pattern, field list, and whether attachments are shown
    (based on the presence of an __ATTACH table)."""
    show_attachments = hasAttachments(arcpy.Describe(lyr).catalogPath)
    return {'title': getDisplayFieldName(lyr),
            'description': None,
            'showAttachments': show_attachments,
            'mediaInfo': [],
            'fieldInfos': getFieldInfos(lyr)}
#def getLayerDefinition(lyr,symbol):
# return getSymbol(lyr,symbols[featureName],lyr.name)
# layerDef={
# "drawingInfo":{
# "renderer":getRendere(lyr)
# }
# }
#get the fields for the popup
def getFieldInfos(layer):
    """Build popup fieldInfos entries for every non-geometry field.

    Integers get a 0-decimal digit-separator format, doubles a
    2-decimal one, dates a long-month format; all other field types
    carry no format.  Fields listed in invisFields are emitted but
    hidden.
    """
    desc = arcpy.Describe(layer)
    if desc.dataType == "FeatureLayer":
        allfields = arcpy.ListFields(desc.dataElement.catalogPath)
    else:
        allfields = arcpy.ListFields(desc.catalogPath)
    # Housekeeping fields that should not show in popups.
    invisFields = ["Shape_Length","Shape_Area","has_permittee","permittee_globalid"]
    # Per-type popup "format" entry; types not listed get None.
    formats = {
        'Integer': {'places': 0, 'digitSeparator': True},
        'Double': {'places': 2, 'digitSeparator': True},
        'Date': {"dateFormat": "longMonthDayYear"},
    }
    popInfo = []
    for field in allfields:
        if field.type == 'Geometry':
            continue
        fmt = formats.get(field.type)
        popInfo.append({
            'fieldName': field.name,
            'label': field.aliasName,
            'isEditable': field.editable,
            'isEditableOnLayer': field.editable,
            'tooltip': '',
            'visible': field.name not in invisFields,
            'format': dict(fmt) if fmt is not None else None,
            'stringFieldOption': 'textbox'
        })
    return popInfo
def getFields(layer):
    """Build the REST 'fields' array for a layer or table view.

    Each entry mirrors the ArcGIS FeatureServer field JSON: name,
    alias, editable/nullable flags, an esriFieldType* type, optional
    length, and an expanded domain definition (coded values or range)
    looked up from the parent workspace.  Returns [] for dataset types
    other than FeatureLayer/TableView.
    """
    fields=[]
    desc = arcpy.Describe(layer)
    # NOTE(review): checks for a 'layer' attribute but reads
    # dataElement.catalogPath — confirm this pairing is intended.
    if hasattr(desc, "layer"):
        catPath = desc.dataElement.catalogPath
    else:
        catPath = desc.catalogPath
    #printMessage("Catalog path: "+catPath)
    #printMessage(desc.dataType)
    # Both supported dataTypes list fields from the same catalog path.
    if desc.dataType == "FeatureLayer":
        allfields= arcpy.ListFields(catPath)
    elif desc.dataType == "TableView":
        allfields= arcpy.ListFields(catPath)
    else:
        return fields
    #SmallInteger, Integer, Single, Double, String, Date, OID, Geometry, Blob,Guid
    # Iterate through the fields and set them to fieldinfo
    for field in allfields:
        fieldInfos = None
        #printMessage("Field: " + field.name + ": " + field.type)
        # Geometry is described elsewhere in the service JSON; skip it.
        if field.type=='Geometry':
            continue
        fieldInfos = {
            'alias':field.aliasName,
            'defaultValue':None,
            'domain':None,
            'editable':field.editable,
            'name':field.name,
            'nullable':field.isNullable,
            'sqlType':'sqlTypeOther'
        }
        if field.length:
            fieldInfos['length']=field.length
        #"domain":{"type":"codedValue","name":"cover_type","codedValues":[{"name":"No trees","code":0},{"name":"Trees","code":1}]}
        if field.domain: #domain contains the name of the domain. You must look it up to get the full definition using ListDomains
            fieldInfos['domain']={"name":field.domain}
            domains = arcpy.da.ListDomains(getFeatureClassParentWorkspace(catPath))
            for domain in domains:
                if domain.name == field.domain:
                    #printMessage('Domain name: {0}'.format(domain.name))
                    #printMessage('Domain name: {0}'.format(domain.name) )
                    if domain.domainType == 'CodedValue':
                        fieldInfos['domain']['type']='codedValue'
                        codedValuesArray=[]
                        # NOTE: .iteritems() is Python 2 only; this loop
                        # also rebinds 'desc' (harmless here, since desc
                        # is not used afterwards).
                        for val,desc in domain.codedValues.iteritems():
                            codedValuesArray.append({"code":val,"name":desc})
                        #[domain.codedValues]
                        fieldInfos['domain']['codedValues'] = codedValuesArray
                        #for val, desc in coded_values.iteritems():
                        #    printMessage('{0} : {1}'.format(val, desc))
                    elif domain.domainType == 'Range':
                        fieldInfos['domain']['type']='range'
                        fieldInfos['domain']['rangeValues']=domain.range
                        #printMessage('Min: {0}'.format(domain.range[0]))
                        #printMessage('Max: {0}'.format(domain.range[1]))
        #for iDomain in arcpy.da.ListDomains(getFeatureClassParentWorkspace(inFeatureClass)):
        #    if iDomain.name == searchDomainName:
        #        return iDomain
        # Map the arcpy field type onto the REST esriFieldType name.
        if field.type == 'OID':
            oidFldName = field.name
            fieldInfos['type']='esriFieldTypeOID'
            # OID fields carry no length in the REST schema.
            del fieldInfos['length']
        #elif field.name == 'OBJECTID':
        #    oidFldName = field.name
        #    fieldInfos['type']='esriFieldTypeOID'
        elif field.type == 'Integer':
            fieldInfos['type']='esriFieldTypeInteger'
        elif field.type == 'Single':
            fieldInfos['type']='esriFieldTypeSingle'
            #del fieldInfos['length']
        elif field.type == 'SmallInteger':
            fieldInfos['type']='esriFieldTypeSmallInteger'
            #del fieldInfos['length']
        elif field.type == 'Double':
            fieldInfos['type']='esriFieldTypeDouble'
        elif field.type == 'String':
            fieldInfos['type']='esriFieldTypeString'
        elif field.type == 'Date':
            fieldInfos['type']='esriFieldTypeDate'
        elif field.type == 'Guid':
            fieldInfos['type']='esriFieldTypeGUID'
        elif field.type == 'GlobalID':
            fieldInfos['type']='esriFieldTypeGlobalID'
            #fieldInfos['defaultValue']='NEWID() WITH VALUES'
        else:
            # Unknown types are logged and defaulted (to OID type —
            # NOTE(review): looks like a questionable fallback; confirm).
            printMessage("Unknown field type for " + field.name + ": " + field.type)
            fieldInfos['type']='esriFieldTypeOID'
        if fieldInfos is not None:
            fields.append(fieldInfos)
    #{
    # "domain": null,
    # "name": "OBJECTID",
    # "nullable": false,
    # "defaultValue": null,
    # "editable": false,
    # "alias": "OBJECTID",
    # "sqlType": "sqlTypeOther",
    # "type": "esriFieldTypeInteger"
    #},
    # "domain": null,
    # "name": "GlobalID",
    # "nullable": false,
    # "defaultValue":"NEWID() WITH VALUES",
    # "editable": false,
    # "alias": "GlobalID",
    # "length": 38,
    # "sqlType": "sqlTypeOther",
    # "type": "esriFieldTypeGlobalID"
    #if field.name=='OBJECTID':
    #    fieldInfos['type']='esriFieldTypeInteger'
    return fields
#{
# "id" : <relationshipId1>,
# "name" : "<relationshipName1>",
# "relatedTableId" : <relatedTableId1>,
# "cardinality" : "<esriRelCardinalityOneToOne>|<esriRelCardinalityOneToMany>|<esriRelCardinalityManyToMany>";,//Added at 10.1
# "role" : "<esriRelRoleOrigin>|<esriRelRoleDestination>";,//Added at 10.1
# "keyField" : "<keyFieldName2>",//Added at 10.1
# "composite" : <true>|<false>,//Added at 10.1
# "relationshipTableId": <attributedRelationshipClassTableId>, //Added in 10.1. Returned only for attributed relationships
# "keyFieldInRelationshipTable": "<key field in AttributedRelationshipClass table that matches keyField>" //Added in 10.1. Returned only for attributed relationships
#},
#def getRelationships(lyr,lyrid,cnt,tables,relationshipObj):
def getJoinField(lyr):
    """Return the OriginPrimary key field name from the layer's first
    matching relationship class; '' when the layer has no relationship
    classes (implicitly None when none of the keys match)."""
    desc = arcpy.Describe(lyr)
    if not desc.relationshipClassNames:
        return ""
    for relName in desc.relationshipClassNames:
        relDesc = arcpy.Describe(desc.path + "/" + relName)
        # originClassKeys entries are (field name, key role, ...).
        for classKey in relDesc.originClassKeys:
            if classKey[1] == "OriginPrimary":
                return classKey[0]
def getRelationshipsUnused(lyr,relationshipObj):
    """Look up each of the layer's relationship classes in
    relationshipObj and return the matching entries (unused helper)."""
    desc = arcpy.Describe(lyr)
    relArr = []
    if desc.relationshipClassNames:
        for relName in desc.relationshipClassNames:
            printMessage("Relationship class name: " + relName)
            relArr.append(relationshipObj[relName])
    return relArr
def getRelationshipsNoGood(lyr,lyrid,cnt,tables,relationships):
    """Diagnostic builder for a layer's REST 'relationships' array.

    Walks the layer's relationship classes (skipping attachment
    relationships), logs their origin/destination metadata and returns
    one JSON dict per relationship.  'lyrid' and 'relationships' are
    unused; retained for signature compatibility.

    Fixes over the original: the no-relationship path returned the
    undefined name 'rel' (NameError) — it now returns the empty list;
    the inner table scan no longer shadows the outer loop variable.
    """
    relArr=[]
    desc = arcpy.Describe(lyr)
    if not desc.relationshipClassNames:
        return relArr
    # Resolve the layer's source workspace to describe its
    # relationship classes by path.
    if hasattr(desc, "layer"):
        featureName=os.path.basename(desc.layer.catalogPath)
        rootFGDB=desc.layer.catalogPath.replace("\\","/")
    else:
        featureName=os.path.basename(desc.catalogPath)
        rootFGDB=os.path.dirname(desc.catalogPath).replace("\\","/")
    relid=0
    for rel in desc.relationshipClassNames:
        relDesc = arcpy.Describe(rootFGDB+"/"+rel)
        # Attachment relationships are handled separately.
        if relDesc.isAttachmentRelationship:
            continue
        label = relDesc.backwardPathLabel
        cardinality = relDesc.cardinality
        originClassKeys=relDesc.originClassKeys
        key=relDesc.destinationClassNames[0]
        printMessage("Origin Class Names")
        printMessage(relDesc.originClassNames)
        printMessage("Origin Class Keys")
        printMessage(relDesc.originClassKeys)
        printMessage("Destination Class Names")
        printMessage(relDesc.destinationClassNames)
        printMessage("Destination Class Keys")
        printMessage(relDesc.destinationClassKeys)
        printMessage("Key type: "+relDesc.keyType)
        printMessage(relDesc.notification)
        printMessage("backwardPathLabel: "+relDesc.backwardPathLabel)
        printMessage("forwardPathLabel: "+relDesc.forwardPathLabel)
        # Original assigned Origin then immediately overwrote it with
        # Destination; the effective role is Destination.
        role="esriRelRoleDestination"
        relatedTableId=0
        # Locate the destination table's position in 'tables'; service
        # ids for tables are offset by 'cnt' (layers come first).
        for destName in relDesc.destinationClassNames:
            for tblIndex, tbl in enumerate(tables):
                if destName == tbl.datasetName:
                    relatedTableId=tblIndex
                    printMessage("Relationship name: "+destName+": " + tbl.name + "(" + tbl.datasetName + ") with id: " +str(relatedTableId+cnt))
        relatedTableId=relatedTableId+cnt
        relObj = {"id":relid,"name":label,"relatedTableId":relatedTableId,"cardinality":"esriRelCardinality"+cardinality,"role":role,"keyField":key,"composite":relDesc.isComposite}
        relArr.append(relObj)
    return relArr
def getDataIndex(arr,name):
    """Return the index of the entry in arr whose dataset name equals
    'name', or -1 when absent.

    Entries either expose .datasetName directly (tables) or need
    arcpy.Describe to resolve their catalog path (layers).
    """
    for idx, item in enumerate(arr):
        #printMessage(str(idx) + ": " + item.name)
        if hasattr(item, "datasetName"):
            datasetName = item.datasetName
        else:
            desc = arcpy.Describe(item)
            if hasattr(desc, "layer"):
                datasetName = os.path.basename(desc.layer.catalogPath)
            else:
                datasetName = os.path.basename(desc.catalogPath)
        if name == datasetName:
            printMessage("Found relationship name: "+name+": " + item.name + "(" + datasetName + ") with id: " +str(idx))
            return idx
    return -1
# getFeatureClassParentWorkspace: This script gets the geodatabase for a
# feature class. The trick here is that feature classes can be within a
# feature dataset so you need to account for two possible levels in the
# directory structure.
def getFeatureClassParentWorkspace(inFeatureClass):
    """Return the workspace (geodatabase) containing a feature class or
    table.

    Feature classes may live inside a feature dataset, so this walks up
    one extra level in that case; returns None for other dataset types.
    """
    describeFC = arcpy.Describe(inFeatureClass)
    if describeFC.dataType not in ('FeatureClass', 'Table'):
        return None
    parent = describeFC.path
    parentDesc = arcpy.Describe(parent)
    if parentDesc.dataType == 'FeatureDataset':
        return parentDesc.path
    return parent
def getIndexes(lyr):
    """Return attribute-index descriptions for a layer or table view.

    Each entry: {"name", "fields" (comma-joined field names),
    "isAscending", "isUnique"}.

    Fix over the original: a dataType other than FeatureLayer/TableView
    left 'lyrindexes' unbound and raised NameError; such inputs now
    return [].
    """
    indexes=[]
    desc = arcpy.Describe(lyr)
    if desc.dataType == "FeatureLayer":
        lyrindexes = arcpy.ListIndexes(desc.dataElement.catalogPath)
    elif desc.dataType == "TableView":
        lyrindexes = arcpy.ListIndexes(desc.catalogPath)
    else:
        return indexes
    for index in lyrindexes:
        fieldNames = [fld.name for fld in index.fields]
        indexes.append({"name":index.name,"fields":",".join(fieldNames),"isAscending":index.isAscending,"isUnique":index.isUnique})
    return indexes
def hasAttachments(tbl):
    """True when the companion __ATTACH table exists for the dataset."""
    attach_table = tbl + "__ATTACH"
    return arcpy.Exists(attach_table)
def hasEditorTracking(tbl):
    """True when editor tracking is enabled on the dataset."""
    return arcpy.Describe(tbl).editorTrackingEnabled
def num(s):
    """Parse s as an int when possible, otherwise as a float.

    Raises ValueError when s is neither.
    """
    try:
        return int(s)
    except ValueError:
        pass
    return float(s)
def getSymbolColor(sym):
    """Extract a color from a CIM symbol-layer node.

    NOTE(review): stub — only logs and implicitly returns None.
    Callers (getPointSymbol / getPolygonSymbol) use the result as the
    'color' value, so symbols currently get color None.  Presumably
    this should parse the node's <Color> child (see getColorObj) —
    confirm before relying on symbol colors.
    """
    printMessage("here")
def getPointSymbol(sym):
    """Translate a CIM point-symbol XML node into an esriSMS JSON
    symbol.

    The style is always esriSMSCircle; the size comes from the first
    <Size> element when present, the fill color from the CIMFill layer
    and the outline from the CIMFilledStroke layer.
    """
    obj = {
        'type': "esriSMS",
        'style': "esriSMSCircle",
        'size': 4,
        'angle': 0,
        'xoffset': 0,
        'yoffset': 0,
    }
    sizeNodes = sym.getElementsByTagName("Size")
    if len(sizeNodes) > 0:
        obj['size'] = num(sizeNodes[0].childNodes[0].nodeValue)
    for layer in sym.getElementsByTagName("CIMSymbolLayer"):
        layerType = layer.getAttribute("xsi:type")
        if layerType == "typens:CIMFill":
            obj['color'] = getSymbolColor(layer)
        elif layerType == "typens:CIMFilledStroke":
            outline = {'type': "esriSLS",
                       'style': "esriSLSSolid",
                       'color': getSymbolColor(layer)}
            widthNodes = layer.getElementsByTagName("Width")
            if len(widthNodes) > 0:
                outline['width'] = num(widthNodes[0].childNodes[0].nodeValue)
            obj['outline'] = outline
    return obj
def getPolygonSymbol(sym):
    """Translate a CIM polygon-symbol XML node into an esriSFS JSON
    symbol.

    The style is always esriSFSSolid; fill color comes from the CIMFill
    layer and outline color/width from the CIMFilledStroke layer.
    """
    obj = {'type': "esriSFS", 'style': "esriSFSSolid"}
    for layer in sym.getElementsByTagName("CIMSymbolLayer"):
        layerType = layer.getAttribute("xsi:type")
        if layerType == "typens:CIMFill":
            obj['color'] = getSymbolColor(layer)
        elif layerType == "typens:CIMFilledStroke":
            outline = {'type': "esriSLS",
                       'style': "esriSLSSolid",
                       'color': getSymbolColor(layer)}
            widthNodes = layer.getElementsByTagName("Width")
            if len(widthNodes) > 0:
                outline['width'] = num(widthNodes[0].childNodes[0].nodeValue)
            obj['outline'] = outline
    return obj
def getPolylineSymbol(sym):
    """Build an esriSLS (simple line) renderer symbol dict for a CIM polyline symbol.

    Valid esriSLS styles are esriSLSDash | esriSLSDashDot | esriSLSDashDotDot |
    esriSLSDot | esriSLSNull | esriSLSSolid.

    Note: *sym* is currently not inspected; a solid-line placeholder symbol
    is always returned.
    """
    obj = {}
    obj['type'] = "esriSLS"
    # BUG FIX: was "esriSFSSolid" (a *fill* style); esriSLS symbols take
    # esriSLS* styles per the ArcGIS REST renderer spec quoted above.
    obj['style'] = "esriSLSSolid"
    obj['outline'] = {}
    return obj
def hsv_to_rgb(h, s, v, a):
    """Convert HSV (h, s, v expected in [0, 1]) plus alpha to an RGBA list.

    Returns [r, g, b, a] with the color channels scaled to 0-255; the alpha
    value is passed through unchanged.
    """
    if s == 0.0:
        # Achromatic (grey): all channels equal the scaled value.
        v *= 255
        return [v, v, v, a]
    sector = int(h * 6.)  # truncation picks the hue sector
    frac = (h * 6.) - sector
    p = int(255 * (v * (1. - s)))
    q = int(255 * (v * (1. - s * frac)))
    t = int(255 * (v * (1. - s * (1. - frac))))
    v *= 255
    sector %= 6
    # One row per 60-degree hue sector.
    table = (
        [v, t, p, a],
        [q, v, p, a],
        [p, v, t, a],
        [p, q, v, a],
        [t, p, v, a],
        [v, p, q, a],
    )
    return table[sector]
def getColorObj(color):
    """Extract an RGBA list from the first element of a <Color> node list.

    Supports RGB colors (<R>/<G>/<B>/<Alpha> children, returned directly)
    and HSV colors (<H>/<S>/<V>/<Alpha> children, converted via hsv_to_rgb).
    Returns [] when neither channel layout is present.

    NOTE(review): hsv_to_rgb expects h/s/v in [0, 1] while the raw integer
    channel values are forwarded unscaled here — confirm against the source
    documents' value ranges.
    """
    node = color[0]

    def _channel(tag):
        # First <tag> child's text content as an int channel value.
        return int(node.getElementsByTagName(tag)[0].childNodes[0].nodeValue)

    if len(node.getElementsByTagName("R")) > 0:
        return [_channel("R"), _channel("G"), _channel("B"), _channel("Alpha")]
    elif len(node.getElementsByTagName("H")) > 0:
        return hsv_to_rgb(_channel("H"), _channel("S"), _channel("V"),
                          _channel("Alpha"))
    return []
def getSymbolColor(sym):
    """Return the RGBA color list for the first <Pattern>/<Color> under *sym*.

    Returns an empty list when the node has no <Pattern> child or the
    pattern carries no <Color> child.
    """
    patterns = sym.getElementsByTagName("Pattern")
    if len(patterns) == 0:
        return []
    colors = patterns[0].getElementsByTagName("Color")
    if len(colors) == 0:
        return []
    return getColorObj(colors)
def getSymbolColora(sym,obj):
patt = sym.getElementsByTagName("Pattern")
if len(patt)>0:
color = patt[0].getElementsByTagName("Color")
colorStr = str(color[0].getElementsByTagName("R")[0].childNodes[0].nodeValue) + "," + str(color[0].getElementsByTagName("G")[0].childNodes[0].nodeValue) + "," + str(color[0].getElementsByTagName("B")[0].childNodes[0].nodeValue)
if patt[0].getAttribute("xsi:type")=="typens:CIMFilledStroke":
obj['outline']['color']=[ int(color[0].getElementsByTagName("R")[0].childNodes[0].nodeValue), int(color[0].getElementsByTagName("G")[0].childNodes[0].nodeValue), int(color[0].getElementsByTagName("B")[0].childNodes[0].nodeValue),255]
else:
obj['color']=[ int(color[0].getElementsByTagName("R")[0].childNodes[0].nodeValue), int(color[0].getElementsByTagName("G")[0].childNodes[0].nodeValue), int(color[0].getElementsByTagName("B")[0].childNodes[0].nodeValue),255]
#printMessage("Color (polygon): " + colorStr)
return obj
def getGroupSymbols(sym):
    """Convert a list of <CIMUniqueValueClass> nodes into uniqueValueInfos dicts.

    For each class node, collects the first field value (<FieldValues>/<String>),
    the label (<Label>, falling back to the value), and the rendered symbol
    (point or polygon) found under its nested <Symbol>/<Symbol> elements.

    NOTE(review): iterating ``i.childNodes`` and touching ``j.tagName`` assumes
    element-only children; a text node here would lack ``tagName``. Confirm the
    source XML never carries whitespace text nodes at this level.
    NOTE(review): when <FieldValues> is absent and <Label> is also absent,
    obj["value"] is read before being set — would raise KeyError.
    """
    #loop through sym and return Array
    #get geometry type
    group=[]
    for i in sym:
        obj = {}
        val = i.getElementsByTagName("FieldValues")
        if len(val)>0:
            #obj["value"]=val[0].childNodes[0].nodeValue
            s = val[0].getElementsByTagName("String")
            obj["value"]=s[0].childNodes[0].nodeValue
        label = i.getElementsByTagName("Label")
        if len(label)>0:
            obj["label"]=label[0].childNodes[0].nodeValue
        else:
            # Fall back to the field value as the display label.
            obj["label"]=obj["value"]
        for j in i.childNodes:
            if j.tagName == "Symbol":
                #get the next (nested) symbol element
                k = j.getElementsByTagName("Symbol")
                for m in k:
                    # Dispatch on CIM symbol geometry type.
                    if m.getAttribute("xsi:type")=="typens:CIMPointSymbol":
                        obj['symbol'] = getPointSymbol(m)
                    elif m.getAttribute("xsi:type")=="typens:CIMPolygonSymbol":
                        obj['symbol']=getPolygonSymbol(m)
        group.append(obj)
    return group
#see http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r30000019t000000
# and http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r3000000n5000000
#"symbol":{ "type": "esriSMS", "style": "esriSMSSquare", "color": [76,115,0,255], "size": 8, "angle": 0, "xoffset": 0, "yoffset": 0, "outline": { "color": [152,230,0,255], "width": 1 } }
def getSymbol(lyr,sym,name):
    """Translate a CIM symbolizer XML node into an ArcGIS REST drawingInfo dict.

    See http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r30000019t000000
    and http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r3000000n5000000
    for the target JSON structure.

    :param lyr: layer object providing ``transparency`` (presumably an mxd
        layer — TODO confirm)
    :param sym: symbolizer DOM element (CIMUniqueValueSymbolizer or simple)
    :param name: layer name, used only for progress messages (currently off)
    :returns: drawingInfo dict with renderer type "uniqueValue" or "simple"
    """
    drawingInfo= {
        "renderer": {
            "label": "",
            "description": ""
        },
        "transparency": lyr.transparency,
        "labelingInfo": None
    }
    # Renderer kind is decided by the symbolizer's CIM type.
    if sym.getAttribute("xsi:type") == "typens:CIMUniqueValueSymbolizer":
        drawingInfo['renderer']['type']="uniqueValue"
        #drawingInfo['renderer']['uniqueValueInfos']=[]
    else:
        drawingInfo['renderer']['type']="simple"
        #drawingInfo['renderer']['symbol']={}
        #drawingInfo['renderer']['symbol']['outline']={}
    #renderer->uniqueValueInfos
    #printMessage("******Creating symbology for " + name + "*******")
    for i in sym.childNodes:
        #printMessage(i.tagName + ": " + i.getAttribute("xsi:type"))
        if i.tagName=='Fields':
            # Unique-value fields become renderer keys field1, field2, ...
            c=1
            k = i.getElementsByTagName("String")
            for m in k:
                lbl = 'field'+str(c)
                drawingInfo['renderer'][lbl]=m.childNodes[0].nodeValue
                c=c+1
        elif i.tagName=='Groups':
            # Unique-value classes map to the uniqueValueInfos array.
            if i.getAttribute("xsi:type") == "typens:ArrayOfCIMUniqueValueGroup":
                k = i.getElementsByTagName("CIMUniqueValueClass")
                drawingInfo['renderer']['uniqueValueInfos'] = getGroupSymbols(k)
        elif i.tagName == "Symbol":
            for j in i.childNodes:
                #get first nested symbol; dispatch on geometry type
                if j.tagName=='Symbol':
                    if j.getAttribute("xsi:type")=="typens:CIMPointSymbol":
                        drawingInfo['renderer']['symbol'] = getPointSymbol(j)
                    elif j.getAttribute("xsi:type")=="typens:CIMPolygonSymbol":
                        drawingInfo['renderer']['symbol']=getPolygonSymbol(j)
                #for k in j.childNodes:
                #    printMessage(k.tagName + ": " + k.getAttribute("xsi:type"))
                    #get second symbol
                    #if k.getAttribute("xsi:type")=="typens:CIMSymbolReference":
                    #if k.tagName=='Symbol':
                    #elif k.tagName=='SymbolLayers':
                        #drawingInfo['renderer']['symbol'] = getSymbolLayers(k)
                    #    drawingInfo['renderer']['uniqueValueInfos']=getSymbolLayers(k)
    return drawingInfo
def saveSqliteToPG(tables, sqliteDb, pg):
    """Copy *tables* from a SQLite database into PostgreSQL via ogr2ogr.

    After the copies, the ``json`` text columns of the ``services`` and
    ``catalog`` tables are promoted to ``jsonb`` through ogrinfo-issued SQL
    (the manual steps noted in the original comments).

    :param tables: iterable of SQLite table names to transfer
    :param sqliteDb: path of the source SQLite database
    :param pg: libpq connection string ("user=... dbname=... host=...")
    """
    # ogr2ogr notes carried over from the original:
    #   -lco LAUNDER=NO keeps the case for column names
    #   --config OGR_SQLITE_CACHE 1024
    # gdal/bin must be on PATH so the GDAL tools resolve their DLLs.
    os.putenv("PATH", (gdal_path + os.sep + "bin").replace("/", "\\"))

    def _run(cmd):
        # Run one shell command; report (not raise) failures so the
        # remaining transfers still get a chance to run.
        try:
            subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        except Exception as e:
            printMessage("Unable to run sql commands: "+cmd + "\n" + e.output)

    for table in tables:
        _run("\""+ogr2ogr_path+"\" -lco FID=OBJECTID -preserve_fid --config OGR_SQLITE_SYNCHRONOUS OFF -gt 65536 --config GDAL_DATA \""+gdal_data_path + "\" -f \"Postgresql\" PG:\"" + pg + "\" \"" + sqliteDb + "\" " + table + " -nlt None -overwrite")
    # Promote the JSON payload columns to jsonb once the rows are across.
    for tbl in ("services", "catalog"):
        _run("\""+ogrinfo_path+"\" PG:\"" + pg + "\" -sql \"alter table "+tbl+" alter column json type jsonb using json::jsonb\"")
def saveSqliteServiceTablesToPG(serviceDb,pg):
    """Copy the fixed set of GDB_* service/metadata tables from SQLite to PostgreSQL.

    Runs one ogr2ogr invocation per table with PGCLIENTENCODING=LATIN1 set
    for the duration (reset afterwards).  Failures are reported via
    printMessage but do not abort the remaining tables.

    :param serviceDb: path of the source SQLite service database
    :param pg: libpq connection string
    """
    global ogr2ogr_path
    global gdal_data_path
    global gdal_path
    #need to add gdal/bin directory to path
    #os.environ['PATH'] += os.pathsep + (gdal_path + os.sep + "bin").replace("/","\\")
    os.putenv("PATH",(gdal_path + os.sep + "bin").replace("/","\\"))
    #printMessage(os.environ['PATH'])
    #SET PGCLIENTENCODING=LATIN1
    #-lco SCHEMA postgres
    tables=["GDB_ColumnRegistry","GDB_ItemRelationshipTypes","GDB_ItemRelationships","GDB_ItemTypes","GDB_Items","GDB_Layers","GDB_Locators","GDB_Metadata","GDB_ReplicaLog","GDB_RowidGenerators","GDB_ServiceItems","GDB_TableRegistry","GDB_TablesModified","GDB_Tables_Last_Modified","GDB_Version"]
    # LATIN1 avoids encoding errors in the geodatabase metadata tables.
    os.putenv("PGCLIENTENCODING","LATIN1")
    for table in tables:
        #-lco FID=OBJECTID -preserve_fid
        cmd = "\"" + ogr2ogr_path+"\" -lco LAUNDER=NO -lco GEOMETRY_NAME=the_geom --config OGR_SQLITE_SYNCHRONOUS OFF -gt 65536 --config GDAL_DATA \""+gdal_data_path + "\" -f \"Postgresql\" PG:\"" + pg + "\" \"" + serviceDb + "\" \""+table+"\" -overwrite"
        #printMessage("Running " + cmd)
        try:
            #os.system(cmd)
            result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        except Exception, e:
            printMessage("Unable to run sql commands: "+cmd + "\n" + e.output)
    os.unsetenv("PGCLIENTENCODING")
def saveToPg(lyr,pg):
    """Export one layer/table (described via arcpy) into PostgreSQL with ogr2ogr.

    Feature classes (have ``shapeType``) are exported as-is; plain tables get
    ``-nlt None``.  Failures are reported through printMessage, not raised.

    :param lyr: layer/table path understood by arcpy.Describe
    :param pg: libpq connection string
    """
    global ogr2ogr_path
    global gdal_data_path
    global gdal_path
    #need to add gdal/bin directory to path
    #os.environ['PATH'] += os.pathsep + (gdal_path + os.sep + "bin").replace("/","\\")
    os.putenv("PATH",(gdal_path + os.sep + "bin").replace("/","\\"))
    #printMessage(os.environ['PATH'])
    desc = arcpy.Describe(lyr)
    #-lco FID=OBJECTID -preserve_fid
    # shapeType present => spatial data; absent => attribute-only table.
    if hasattr(desc,"shapeType"):
        cmd = "\"" + ogr2ogr_path+"\" -lco LAUNDER=NO -forceNullable --config OGR_SQLITE_SYNCHRONOUS OFF -gt 65536 --config GDAL_DATA \""+ gdal_data_path + "\" -f \"Postgresql\" PG:\"" + pg + "\" \"" + desc.path + "\" " + desc.name.replace(".shp","") + " -overwrite"
    else:
        cmd = "\"" + ogr2ogr_path+"\" -lco LAUNDER=NO -forceNullable --config OGR_SQLITE_SYNCHRONOUS OFF -gt 65536 --config GDAL_DATA \""+gdal_data_path + "\" -f \"Postgresql\" PG:\"" + pg + "\" \"" + desc.path + "\" " + desc.name.replace(".shp","") + " -nlt None -overwrite"
    #printMessage("Running " + cmd)
    try:
        #os.system(cmd)
        result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except Exception, e:
        printMessage("Unable to run sql commands: "+cmd + "\n" + e.output)
def saveAttachTableToPg(fgdb,lyr,suffix,pg):
    """Export a file-geodatabase attachment table to PostgreSQL and normalize its GlobalID columns.

    After the ogr2ogr copy, two ogrinfo ALTER statements rename
    ``GlobalID`` -> ``GLOBALID`` and ``<lyr>_GlobalID`` -> ``REL_GLOBALID``
    so downstream consumers find consistent column names.

    :param fgdb: path of the source file geodatabase
    :param lyr: base layer name the attachments belong to
    :param suffix: attachment-table suffix appended to *lyr* (e.g. "__ATTACH")
    :param pg: libpq connection string
    """
    global ogr2ogr_path
    global ogrinfo_path
    global gdal_data_path
    global gdal_path
    #need to add gdal/bin directory to path
    #os.environ['PATH'] += os.pathsep + (gdal_path + os.sep + "bin").replace("/","\\")
    os.putenv("PATH",(gdal_path + os.sep + "bin").replace("/","\\"))
    #printMessage(os.environ['PATH'])
    desc = arcpy.Describe(fgdb+"/"+lyr+suffix)
    #-lco FID=OBJECTID -preserve_fid
    if hasattr(desc,"shapeType"):
        cmd = "\"" + ogr2ogr_path+"\" -lco LAUNDER=NO -forceNullable --config OGR_SQLITE_SYNCHRONOUS OFF -gt 65536 --config GDAL_DATA \""+gdal_data_path + "\" -f \"Postgresql\" PG:\"" + pg + "\" \"" + desc.path + "\" " + desc.name.replace(".shp","") + " -overwrite"
    else:
        cmd = "\""+ogr2ogr_path+"\" -lco LAUNDER=NO -forceNullable --config OGR_SQLITE_SYNCHRONOUS OFF -gt 65536 --config GDAL_DATA \""+gdal_data_path + "\" -f \"Postgresql\" PG:\"" + pg + "\" \"" + desc.path + "\" " + desc.name.replace(".shp","") + " -nlt None -overwrite"
    #printMessage("Running " + cmd)
    try:
        #os.system(cmd)
        result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except Exception, e:
        printMessage("Unable to run sql commands: "+cmd + "\n" + e.output)
    #find the globalid
    cmd = "\""+ogrinfo_path+"\" PG:\"" + pg + "\" -sql \"alter table \\\""+lyr+suffix+"\\\" rename \\\"GlobalID\\\" to \\\"GLOBALID\\\""
    #printMessage("Running " + cmd)
    try:
        #os.system(cmd)
        result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except Exception, e:
        printMessage("Unable to run sql commands: "+cmd + "\n" + e.output)
    #need to rename grazing_inspections_GlobalID fields to REL_GLOBALID
    cmd = "\""+ogrinfo_path+"\" PG:\"" + pg + "\" -sql \"alter table \\\""+lyr+suffix+"\\\" rename \\\""+lyr + "_GlobalID\\\" to \\\"REL_GLOBALID\\\""
    #printMessage("Running " + cmd)
    try:
        #os.system(cmd)
        result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except Exception, e:
        printMessage("Unable to run sql commands: "+cmd + "\n" + e.output)
def saveToSqlite(lyr,sqliteDb):
    """Export one layer/table (described via arcpy) into a SQLite database with ogr2ogr.

    Feature classes keep their geometry; plain tables get ``-nlt None``.
    ``-overwrite`` drops any pre-existing table of the same name.
    Failures are reported through printMessage, not raised.

    :param lyr: layer/table path understood by arcpy.Describe
    :param sqliteDb: path of the destination SQLite database
    """
    global ogr2ogr_path
    global gdal_data_path
    global gdal_path
    #need to add gdal/bin directory to path
    #os.environ['PATH'] += os.pathsep + (gdal_path + os.sep + "bin").replace("/","\\")
    os.putenv("PATH",(gdal_path + os.sep + "bin").replace("/","\\"))
    #printMessage(os.environ['PATH'])
    #drop table if it exists
    desc = arcpy.Describe(lyr)
    if hasattr(desc,"shapeType"):
        cmd = "\""+ogr2ogr_path+"\" -lco LAUNDER=NO -lco FID=OBJECTID -preserve_fid -forceNullable --config OGR_SQLITE_SYNCHRONOUS OFF -gt 65536 --config GDAL_DATA \""+gdal_data_path + "\" -f \"SQLITE\" \"" + sqliteDb + "\" \"" + desc.path + "\" " + desc.name.replace(".shp","") + " -overwrite"
    else:
        cmd = "\""+ogr2ogr_path+"\" -lco LAUNDER=NO -lco FID=OBJECTID -preserve_fid -forceNullable --config OGR_SQLITE_SYNCHRONOUS OFF -gt 65536 --config GDAL_DATA \""+ gdal_data_path + "\" -f \"SQLITE\" \"" + sqliteDb + "\" \"" + desc.path + "\" " + desc.name.replace(".shp","") + " -nlt None -overwrite"
    #printMessage("Running " + cmd)
    try:
        #os.system(cmd)
        result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except Exception, e:
        printMessage("Unable to run sql commands: "+cmd + "\n" + e.output)
def saveToSqliteUsingArcpy(lyr,sqliteDb):
    """Copy a layer/table into a SQLite database using arcpy instead of ogr2ogr.

    The destination table name is the lower-cased source name with ``.shp``
    stripped and dashes mapped to underscores.  Feature classes are copied
    with CopyFeatures (pre-creating the feature class, tolerating an
    already-exists error); plain tables use CopyRows.

    :param lyr: layer/table path understood by arcpy.Describe
    :param sqliteDb: path of the destination SQLite database
    """
    desc = arcpy.Describe(lyr)
    # Normalize the table name for SQLite.
    inFeaturesSqlName = desc.name.lower().replace(".shp","") .replace("-","_") #.replace("_","")
    if hasattr(desc,"shapeType"):
        try:
            arcpy.CreateFeatureclass_management(sqliteDb,inFeaturesSqlName, desc.shapeType.upper())
        except Exception as e:
            printMessage("Table already exists")
            printMessage(e)
        try:
            arcpy.CopyFeatures_management(desc.catalogPath, sqliteDb+"/"+inFeaturesSqlName)
        except Exception as e:
            printMessage("Unable to copy features")
            printMessage(e)
    else:
        arcpy.CopyRows_management(desc.catalogPath, sqliteDb+"/"+inFeaturesSqlName)
    printMessage("")
    # Release arcpy's handle on the SQLite workspace so other writers can open it.
    arcpy.ClearWorkspaceCache_management(sqliteDb)
    desc = arcpy.Describe(sqliteDb)
def initializeSqlite(sqliteDb):
    """(Re)create the bookkeeping tables in the project SQLite database.

    ``catalog`` is dropped and rebuilt on every run; ``services`` is created
    only if missing (it is cleared per-service elsewhere via ClearService).

    :param sqliteDb: path of the SQLite database file (created if missing)
    :returns: the sqlite3 connection object, retained for backward
        compatibility with existing callers — note it is already CLOSED and
        must not be used for further queries.
    """
    conn = sqlite3.connect(sqliteDb)
    try:
        c = conn.cursor()
        #c.execute("PRAGMA journal_mode=WAL")
        c.execute("DROP TABLE IF EXISTS catalog")
        c.execute("CREATE TABLE IF NOT EXISTS catalog (id INTEGER PRIMARY KEY AUTOINCREMENT, name text, type text, json text)")
        c.execute("CREATE TABLE IF NOT EXISTS services (id INTEGER PRIMARY KEY AUTOINCREMENT, service text,name text, layername text,layerid int, type text,json text)")
        conn.commit()
    finally:
        # BUG FIX: close inside finally so the handle is not leaked when a
        # DROP/CREATE statement raises.
        conn.close()
    return conn
def LoadCatalog(sqliteDb, name, dtype, file):
    """Insert one catalog row (name, type, JSON payload) into *sqliteDb*.

    The payload text is pre-scrubbed before insertion: single quotes are
    doubled, and non-breaking spaces and newlines are stripped.
    """
    payload = file.replace("'", "''").replace("\xa0", "").replace("\n", "")
    conn = sqlite3.connect(sqliteDb)
    cur = conn.cursor()
    cur.execute("INSERT INTO catalog(name,type,json) VALUES(?,?,?)",
                (name, dtype, payload))
    cur.close()
    conn.commit()
    conn.close()
def ClearService(sqliteDb, service):
    """Delete every previously stored row for *service* from the services table."""
    conn = sqlite3.connect(sqliteDb)
    cur = conn.cursor()
    cur.execute("DELETE FROM services where service=?", (service,))
    cur.close()
    conn.commit()
    conn.close()
def LoadService(sqliteDb, service, name, layername, layerid, dtype, file):
    """Insert one service/layer row with its scrubbed JSON payload into *sqliteDb*.

    The payload text gets single quotes doubled and non-breaking spaces and
    newlines stripped before insertion.
    """
    payload = file.replace("'", "''").replace("\xa0", "").replace("\n", "")
    conn = sqlite3.connect(sqliteDb)
    cur = conn.cursor()
    cur.execute(
        "INSERT INTO services(service,name,layername,layerid,type,json) VALUES(?,?,?,?,?,?)",
        (service, name, layername, layerid, dtype, payload))
    cur.close()
    conn.commit()
    conn.close()
def printUsage():
    """Print the command-line usage banner for the toolbox script (Python 2 print)."""
    print "Usage:\n"
    print "python \"Create arcgis project tool.pyt\" -user myusername -host myhostname -mxd <fullpath_to_my_project.mxd> -output <full_path_to_output_directory> -spatialite_path <full_path_to_spatialite_executable> -gdal_path <full_path_to_gdal_directory> -pem <full_path_to_pem> -cert <full_path_to_cert>"
    print "Note: the output folder can contain only one type of database (file, Sqlite, or Postgresql)"
def printMessage(msg):
    """Log *msg* and echo it to whichever console is active.

    Writes to the module logger, then prints directly when running under a
    plain Python interpreter (python.exe); otherwise forwards to
    arcpy.AddMessage, falling back to print if arcpy rejects the call.

    Note: the parameter was previously named ``str``, shadowing the builtin;
    renamed since every visible call site passes it positionally.
    """
    logger.info(msg)
    if sys.executable.find("python.exe") != -1:
        print(msg)
    else:
        try:
            arcpy.AddMessage(msg)
        except Exception:
            print(msg)
def main():
    """Command-line entry point: load settings.ini, apply CLI overrides, run the tool.

    Resolution order for every option is: hard-coded default -> settings.ini
    ([settings] section) -> command-line flag.  Recognized flags: -user,
    -host, -mxd, -output, -pg, -spatialite_path, -gdal_path, -cert, -pem,
    -src, -h.  With no arguments and no settings at all, usage is printed.

    NOTE(review): the locals ``gdal_path``/``spatialite_path`` shadow the
    module-level globals the save* helpers read — confirm the tool.execute
    call is what propagates them.
    """
    tbx=Toolbox()
    tool=CreateNewProject()
    pg=None
    #"user=postgres dbname=gis host=192.168.99.100"
    #set dummy values
    user="user"
    host="my.host.com"
    #db="collectorDb.sqlite"
    mxd=None
    output="../collector"
    spatialite_path="../arcrestgo/spatialite/spatialite.exe"
    gdal_path="../arcrestgo/gdal/"
    cert=""
    pem=""
    datasrc=""
    #load settings from settings.ini next to the working directory;
    #each key is optional, hence the individual try/except blocks
    Config.read(os.getcwd()+"/settings.ini")
    try:
        host = Config.get("settings","server")
    except Exception as e:
        pass
    try:
        user= Config.get("settings","username")
    except Exception as e:
        pass
    try:
        output= Config.get("settings","destination")
    except Exception as e:
        pass
    if not output:
        output=os.getcwd().replace("\\","/")
    try:
        mxd= Config.get("settings","mxd")
    except Exception as e:
        pass
    #try:
    #    db= Config.get("settings","sqlitedb")
    #except Exception as e:
    #    pass
    try:
        pg= Config.get("settings","pg")
    except Exception as e:
        pass
    try:
        pem= Config.get("settings","pem")
    except Exception as e:
        pass
    try:
        cert= Config.get("settings","cert")
    except Exception as e:
        pass
    try:
        spatialite_path= Config.get("settings","spatialite_path")
    except Exception as e:
        pass
    try:
        gdal_path= Config.get("settings","gdal_path")
    except Exception as e:
        pass
    try:
        datasrc= Config.get("settings","datasrc")
    except Exception as e:
        pass
    # Nothing configured anywhere: show usage and bail out.
    if len(sys.argv)==1 and not mxd and not host and not user and not output and not spatialite_path and not gdal_path:
        printUsage()
        return
    #print "This is the name of the script: ", sys.argv[0]
    #print "Number of arguments: ", len(sys.argv)
    #print "The arguments are: " , str(sys.argv)
    # Flag parsing: each option consumes the following argv entry.
    for i in xrange(0,len(sys.argv)):
        if sys.argv[i] == "-user":
            user=sys.argv[i+1]
        elif sys.argv[i]=="-host":
            host = sys.argv[i+1]
        #elif sys.argv[i]=="-db":
        #    db = sys.argv[i+1]
        elif sys.argv[i]=="-mxd":
            mxd = sys.argv[i+1]
        elif sys.argv[i]=="-output":
            output = sys.argv[i+1]
        elif sys.argv[i]=="-pg":
            pg = sys.argv[i+1]
        elif sys.argv[i]=="-spatialite_path":
            spatialite_path = sys.argv[i+1]
        elif sys.argv[i]=="-gdal_path":
            gdal_path = sys.argv[i+1]
        elif sys.argv[i]=="-cert":
            cert = sys.argv[i+1]
        elif sys.argv[i]=="-pem":
            pem = sys.argv[i+1]
        elif sys.argv[i]=="-src":
            datasrc = sys.argv[i+1]
        elif sys.argv[i]=="-h":
            printUsage()
            return
    tool.execute(tool.getParameterInfo(),[mxd,host,user,datasrc,output,pg,spatialite_path,gdal_path,cert,pem])
if __name__ == '__main__':
    # Only auto-run under a plain python.exe interpreter; when loaded as an
    # ArcGIS toolbox the framework drives the tool classes directly.
    if sys.executable.find("python.exe") != -1:
        main()
| 186,501 | 11 | 1,485 |
55facd052781622c3e1a9c0e26c779edb3f62050 | 1,313 | py | Python | test/cases/response/text.py | zefirka/bobot | 76d4f9e8d1cf57341ae1c709b46d7f9ceea5064d | [
"MIT"
] | 5 | 2016-08-07T06:05:05.000Z | 2017-01-12T20:19:28.000Z | test/cases/response/text.py | zefirka/bobot | 76d4f9e8d1cf57341ae1c709b46d7f9ceea5064d | [
"MIT"
] | 1 | 2016-12-15T16:28:01.000Z | 2016-12-16T09:26:27.000Z | test/cases/response/text.py | zefirka/bobot | 76d4f9e8d1cf57341ae1c709b46d7f9ceea5064d | [
"MIT"
] | 2 | 2016-08-07T09:38:59.000Z | 2016-10-05T16:10:35.000Z | "Testcases for text messages"
from .. import Case
from bobot.Rule import Rule
from bobot.Response import Text
# Response declared as a plain dict with a literal 'text' value.
responseAsTextDict = Case.Case([
    Rule({
        'match': 'text',
        'response': {
            'text': 'Waiting for text'
        }
    })
], [
    {
        'expected': [Case.Expectation('Waiting for text').value()],
        'message': Case.Message('text').value()
    }
])
# Dict response with options: '{text}' is interpolated from the message.
responseAsTextDictOptions = Case.Case([
    Rule({
        'match': 'text',
        'response': {
            'text': {
                'text': 'Waiting for {text}',
                'interpolate': True
            }
        }
    })
], [
    {
        'expected': [Case.Expectation('Waiting for text').value()],
        'message': Case.Message('text').value()
    }
])
# Response declared via the Text response object (literal form).
responseAsTextObject = Case.Case([
    Rule({
        'match': 'text',
        'response': Text('Waiting for text')
    })
], [
    {
        'expected': [Case.Expectation('Waiting for text').value()],
        'message': Case.Message('text').value()
    }
])
# Text response object with interpolation enabled.
responseAsTextObjectOptions = Case.Case([
    Rule({
        'match': 'text',
        'response': Text('Waiting for {text}', interpolate=True)
    })
], [
    {
        'expected': [Case.Expectation('Waiting for text').value()],
        'message': Case.Message('text').value()
    }
])
| 21.177419 | 67 | 0.514851 | "Testcases for text messages"
from .. import Case
from bobot.Rule import Rule
from bobot.Response import Text
responseAsTextDict = Case.Case([
Rule({
'match': 'text',
'response': {
'text': 'Waiting for text'
}
})
], [
{
'expected': [Case.Expectation('Waiting for text').value()],
'message': Case.Message('text').value()
}
])
responseAsTextDictOptions = Case.Case([
Rule({
'match': 'text',
'response': {
'text': {
'text': 'Waiting for {text}',
'interpolate': True
}
}
})
], [
{
'expected': [Case.Expectation('Waiting for text').value()],
'message': Case.Message('text').value()
}
])
responseAsTextObject = Case.Case([
Rule({
'match': 'text',
'response': Text('Waiting for text')
})
], [
{
'expected': [Case.Expectation('Waiting for text').value()],
'message': Case.Message('text').value()
}
])
responseAsTextObjectOptions = Case.Case([
Rule({
'match': 'text',
'response': Text('Waiting for {text}', interpolate=True)
})
], [
{
'expected': [Case.Expectation('Waiting for text').value()],
'message': Case.Message('text').value()
}
])
| 0 | 0 | 0 |
7fdd40e4ebe3b02e3d9613f13bc2c9e4556a6616 | 5,977 | py | Python | axon/db/sql/repository.py | gitvipin/validation-app-engine | 5581c506bd9d7f1810edc9ae25faec0761f2c840 | [
"BSD-2-Clause"
] | null | null | null | axon/db/sql/repository.py | gitvipin/validation-app-engine | 5581c506bd9d7f1810edc9ae25faec0761f2c840 | [
"BSD-2-Clause"
] | null | null | null | axon/db/sql/repository.py | gitvipin/validation-app-engine | 5581c506bd9d7f1810edc9ae25faec0761f2c840 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2019 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2 License
# The full license information can be found in LICENSE.txt
# in the root directory of this project.
import uuid
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import func
from axon.db.sql.config import models as cmodels
from axon.db.sql.analytics import models as amodels
| 35.158824 | 77 | 0.655011 | #!/usr/bin/env python
# Copyright (c) 2019 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2 License
# The full license information can be found in LICENSE.txt
# in the root directory of this project.
import uuid
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import func
from axon.db.sql.config import models as cmodels
from axon.db.sql.analytics import models as amodels
class BaseRepository(object):
    """Generic CRUD helper bound to a single SQLAlchemy model class.

    Subclasses set ``model_class``; every method takes an active session and
    returns plain dicts (via the model's ``to_dict``) rather than ORM
    instances.
    """
    model_class = None

    def count(self, session, **filters):
        """Return the number of rows matching *filters*."""
        return session.query(self.model_class).filter_by(**filters).count()

    def create(self, session, **model_kwargs):
        """Insert a new row built from *model_kwargs* and return it as a dict."""
        with session.begin(subtransactions=True):
            model = self.model_class(**model_kwargs)
            session.add(model)
        return model.to_dict()

    def delete(self, session, **filters):
        """Delete the single row matching *filters*.

        Propagates the ``one()`` errors when the filter does not identify
        exactly one row.
        """
        model = session.query(self.model_class).filter_by(**filters).one()
        with session.begin(subtransactions=True):
            session.delete(model)
            session.flush()

    def delete_batch(self, session, ids=None):
        """Delete every row whose ``id`` is in *ids* (no-op for None/empty)."""
        # Plain loop: the previous list comprehension built and discarded a
        # list purely for its side effects.
        for row_id in ids or []:
            self.delete(session, id=row_id)

    def delete_all(self, session):
        """Bulk-delete every row of the model's table."""
        session.query(self.model_class).delete()
        session.flush()

    def get(self, session, **filters):
        """Return the first row matching *filters* as a dict, or {} when none."""
        model = session.query(self.model_class).filter_by(**filters).first()
        if not model:
            return {}
        return model.to_dict()

    def get_all(self, session, **filters):
        """Return all rows matching *filters* as a list of dicts."""
        query = session.query(self.model_class).filter_by(**filters)
        # Eager-load relationships so serialization makes one trip to the DB.
        query = query.options(joinedload('*'))
        return [model.to_dict() for model in query.all()]

    def exists(self, session, id):
        """Return True when a row with primary key *id* exists."""
        return bool(session.query(self.model_class).filter_by(id=id).first())
class Repositories(object):
    """Facade bundling every repository plus insert helpers for analytics rows.

    The create_* helpers add unsaved model instances to *session*; committing
    is the caller's responsibility.
    """
    def __init__(self):
        self.record = TrafficRecordsRepositery()
        self.connected_state = ConnectedStateRepository()
        self.request_count = RequestCountRepository()
        self.latency = LatencyStatsRepository()
        self.fault = FaultRepository()
    def create_latency_stats(self, session, latency_sum, samples, created):
        """Add a LatencyStats row (sum of latencies plus sample count) with a fresh UUID."""
        id = str(uuid.uuid4())
        record = amodels.LatencyStats(id=id, latency_sum=latency_sum,
                                      samples=samples, created=created)
        session.add(record)
    def create_record_count(self, session, proto, success, failure, created):
        """Add a RequestCount row for protocol *proto* with a fresh UUID."""
        id = str(uuid.uuid4())
        record = amodels.RequestCount(id=id, type=proto, success=success,
                                      failure=failure, created=created)
        session.add(record)
    def create_record(self, session, **traffic_dict):
        """Add a TrafficRecord (on success) or a Fault row (on failure).

        The incoming dict carries both outcome fields; the one not used by
        the chosen model is deleted before construction — keep that deletion
        order intact, the model constructors reject unknown keys.
        """
        if not traffic_dict.get('id'):
            traffic_dict['id'] = str(uuid.uuid4())
        if traffic_dict.get('success'):
            del traffic_dict['error']
            record = amodels.TrafficRecord(**traffic_dict)
        else:
            del traffic_dict['latency']
            del traffic_dict['success']
            record = amodels.Fault(**traffic_dict)
        session.add(record)
    def create_connected_state(self, session, **cs_dict):
        """Add a ConnectedState row, generating an id when one is not supplied."""
        if not cs_dict.get('id'):
            cs_dict['id'] = str(uuid.uuid4())
        record = cmodels.ConnectedState(**cs_dict)
        session.add(record)
class ConnectedStateRepository(BaseRepository):
    """Repository for per-endpoint connected-state rows (client/server lists)."""

    model_class = cmodels.ConnectedState

    def get_servers(self, session, endpoint_ip):
        """Return the server list recorded for *endpoint_ip* ([] when absent)."""
        return self.get(session, endpoint=endpoint_ip).get('servers', [])

    def get_clients(self, session, endpoint_ip):
        """Return the client list recorded for *endpoint_ip* ([] when absent)."""
        return self.get(session, endpoint=endpoint_ip).get('clients', [])

    def update(self, session, endpoint, **model_kwargs):
        """Apply *model_kwargs* to the row keyed by *endpoint* in a subtransaction."""
        with session.begin(subtransactions=True):
            rows = session.query(self.model_class).filter_by(endpoint=endpoint)
            rows.update(model_kwargs)
class TrafficRecordsRepositery(BaseRepository):
    """Repository over successful traffic records, queried by creation window.

    (Class-name spelling kept as-is for backward compatibility.)
    """

    model_class = amodels.TrafficRecord

    def _window_query(self, session, start_time, end_time, filters):
        # Shared base query: attribute filters plus created-timestamp window.
        model = self.model_class
        return (session.query(model)
                .filter_by(**filters)
                .filter(model.created.between(start_time, end_time)))

    def get_record_count(self, session, start_time, end_time, **filters):
        """Count records matching *filters* created within [start_time, end_time]."""
        return self._window_query(session, start_time, end_time, filters).count()

    def get_records(self, session, start_time, end_time, **filters):
        """Return matching records created within the window, as dicts."""
        rows = self._window_query(session, start_time, end_time, filters).all()
        return [row.to_dict() for row in rows]
class FaultRepository(TrafficRecordsRepositery):
    """Same windowed queries as TrafficRecordsRepositery, over the Fault table."""
    model_class = amodels.Fault
class LatencyStatsRepository(BaseRepository):
    """Repository aggregating latency sums/sample counts over a time window."""
    model_class = amodels.LatencyStats
    def get_latency_stats(self, session, start_time, end_time):
        """Return the average latency for rows created within [start_time, end_time].

        Returns 0 when no samples exist in the window (NULL sums).
        NOTE(review): the result uses the interpreter's division semantics —
        under Python 2 an integer latency_sum/samples pair truncates.
        """
        model = self.model_class
        query = session.query(
            func.sum(model.latency_sum).label("latency_sum"),
            func.sum(model.samples).label("samples")).filter(
            model.created.between(start_time, end_time))
        result = query.all()[0]
        avg_latency = 0 if not result[1] else result[0] / result[1]
        return avg_latency
class RequestCountRepository(BaseRepository):
    """Repository aggregating success/failure request counters over a window."""

    model_class = amodels.RequestCount

    def get_request_count(self, session, start_time, end_time):
        """Sum success and failure counts for rows created within the window.

        Returns {'success': n, 'failure': n}; NULL sums (no rows) map to 0.
        """
        model = self.model_class
        totals = session.query(
            func.sum(model.failure).label("failure"),
            func.sum(model.success).label("success"),
        ).filter(model.created.between(start_time, end_time)).all()[0]
        failure, success = totals[0], totals[1]
        return {'success': success if success else 0,
                'failure': failure if failure else 0}
class ResourceMetricsRepository(BaseRepository):
    """Generic CRUD (inherited from BaseRepository) over the ResourceMetrics model."""
    model_class = amodels.ResourceMetrics
| 4,417 | 829 | 319 |
b759390c7b719a8979700c907dcbf9dda1a7735f | 856 | py | Python | test3d.py | ivandumas/Algoritmos | 6ba6e548f4d0150a052346a53e11cc40fa234fd5 | [
"MIT"
] | null | null | null | test3d.py | ivandumas/Algoritmos | 6ba6e548f4d0150a052346a53e11cc40fa234fd5 | [
"MIT"
] | null | null | null | test3d.py | ivandumas/Algoritmos | 6ba6e548f4d0150a052346a53e11cc40fa234fd5 | [
"MIT"
] | null | null | null | from py3dbp import Packer, Bin, Item
# Demo: pack six items into two candidate bins with py3dbp, largest bin first.
packer = Packer()
# Bin(name, width, height, depth, max_weight)
packer.add_bin(Bin('small', 300,300,200,5))
packer.add_bin(Bin('big', 500,600,400,8))
# Item(name, width, height, depth, weight)
packer.add_item(Item('Producto 1',45,60,70,0.5))
packer.add_item(Item('Producto 2',30,50,30,0.7))
packer.add_item(Item('Producto 3',20,70,70,1))
packer.add_item(Item('Producto 4',45,60,70,1.2))
packer.add_item(Item('Producto 5',170,80,70,1.2))
packer.add_item(Item('Producto 6',300,200,70,1.2))
packer.pack(bigger_first=True)
# Report, per bin, which items fit and which did not.
for b in packer.bins:
    print(":::::::::::", b.string())
    print("FITTED ITEMS:")
    for item in b.items:
        print("====> ", item.string())
    print("UNFITTED ITEMS:")
    for item in b.unfitted_items:
        print("====> ", item.string())
    print("***************************************************")
print("***************************************************") | 29.517241 | 64 | 0.560748 | from py3dbp import Packer, Bin, Item
packer = Packer()
packer.add_bin(Bin('small', 300,300,200,5))
packer.add_bin(Bin('big', 500,600,400,8))
packer.add_item(Item('Producto 1',45,60,70,0.5))
packer.add_item(Item('Producto 2',30,50,30,0.7))
packer.add_item(Item('Producto 3',20,70,70,1))
packer.add_item(Item('Producto 4',45,60,70,1.2))
packer.add_item(Item('Producto 5',170,80,70,1.2))
packer.add_item(Item('Producto 6',300,200,70,1.2))
packer.pack(bigger_first=True)
for b in packer.bins:
print(":::::::::::", b.string())
print("FITTED ITEMS:")
for item in b.items:
print("====> ", item.string())
print("UNFITTED ITEMS:")
for item in b.unfitted_items:
print("====> ", item.string())
print("***************************************************")
print("***************************************************") | 0 | 0 | 0 |
31e4b8813206b272d5411d76736c75a9b6ad48fb | 840 | py | Python | Beginner/day-3-5-exercise.py | pathilink/TheAppBrewery | d8dc6c7c6908aca8ce039450dfb8f3c16891c8eb | [
"MIT"
] | 1 | 2021-06-02T12:55:24.000Z | 2021-06-02T12:55:24.000Z | Beginner/day-3-5-exercise.py | pathilink/TheAppBrewery | d8dc6c7c6908aca8ce039450dfb8f3c16891c8eb | [
"MIT"
] | null | null | null | Beginner/day-3-5-exercise.py | pathilink/TheAppBrewery | d8dc6c7c6908aca8ce039450dfb8f3c16891c8eb | [
"MIT"
] | null | null | null | # 🚨 Don't change the code below 👇
print("Welcome to the Love Calculator!")
name1 = input("What is your name? \n")
name2 = input("What is their name? \n")
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
names_concat = name1.lower() + name2.lower()
names_true_total = names_concat.count('t') + names_concat.count('r') + names_concat.count('u') + names_concat.count('e')
names_love_total = names_concat.count('l') + names_concat.count('o') + names_concat.count('v') + names_concat.count('e')
score_str = str(names_true_total) + str(names_love_total)
score = int(score_str)
if score < 10 or score > 90:
print(f"Your score is {score}, you go together like coke and mentos.")
elif score >= 40 and score <= 50:
print(f"Your score is {score}, you are alright together.")
else:
print(f"Your score is {score}.")
| 36.521739 | 120 | 0.695238 | # 🚨 Don't change the code below 👇
print("Welcome to the Love Calculator!")
name1 = input("What is your name? \n")
name2 = input("What is their name? \n")
# 🚨 Don't change the code above 👆
#Write your code below this line 👇
# Case-fold and join both names so that letter counting is case-insensitive.
names_concat = name1.lower() + name2.lower()
# Count how often the letters of "true" and of "love" occur in the names.
names_true_total = names_concat.count('t') + names_concat.count('r') + names_concat.count('u') + names_concat.count('e')
names_love_total = names_concat.count('l') + names_concat.count('o') + names_concat.count('v') + names_concat.count('e')
# The score is the two counts concatenated as digits (e.g. 4 and 7 -> 47);
# a count above 9 therefore yields a three-or-more digit score.
score_str = str(names_true_total) + str(names_love_total)
score = int(score_str)
if score < 10 or score > 90:
    print(f"Your score is {score}, you go together like coke and mentos.")
elif score >= 40 and score <= 50:
    print(f"Your score is {score}, you are alright together.")
else:
    print(f"Your score is {score}.")
| 0 | 0 | 0 |
eb3b96adabc5946dd2424b870a549c05eed80663 | 6,557 | py | Python | input_adj_pipeline.py | Lednik7/SMART_Product | 069f63ec944747352b8609d103e7ffb5023c798f | [
"Apache-2.0"
] | 2 | 2021-03-21T18:07:04.000Z | 2022-01-23T10:18:20.000Z | input_adj_pipeline.py | Lednik7/SMART_Product | 069f63ec944747352b8609d103e7ffb5023c798f | [
"Apache-2.0"
] | null | null | null | input_adj_pipeline.py | Lednik7/SMART_Product | 069f63ec944747352b8609d103e7ffb5023c798f | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import os
import pymorphy2
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import numpy as np
import pickle
# Directory holding the serialized model artifacts.
PATH = 'models/'
# Load the pre-fitted TF-IDF vectorizer used later to embed goal texts.
# NOTE(review): pickle.load is fine for a local artifact, but must never be
# pointed at untrusted input.
with open(os.path.join(PATH, 'tfidf.pkl'), 'rb') as f:
    tfidf = pickle.load(f)
# Russian substrings that signal a concrete time span ("years", "months", ...).
time_related = ['лет', 'год ', 'меся', 'недел', 'дне', 'года']
# Punctuation stripped outright from free-text answers.
specials_to_remove = [
    '.', '"', "'", '?', '(', ')', '`',
]
# Separator characters replaced with a space instead of being removed.
specials_to_replace = [
    '-', '\\', '/', ','
]
# pymorphy2 part-of-speech tags that are kept during normalization.
key_pos = ['NOUN', 'VERB', 'NUMR', 'ADJF', 'ADJS', 'INFN']
# Shared morphological analyzer (expensive to construct; built once).
morph = pymorphy2.MorphAnalyzer()
| 37.901734 | 116 | 0.617813 | import pandas as pd
import os
import pymorphy2
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import numpy as np
import pickle
# Directory holding the serialized model artifacts.
PATH = 'models/'
# Load the pre-fitted TF-IDF vectorizer used later to embed goal texts.
# NOTE(review): pickle.load is fine for a local artifact, but must never be
# pointed at untrusted input.
with open(os.path.join(PATH, 'tfidf.pkl'), 'rb') as f:
    tfidf = pickle.load(f)
# Russian substrings that signal a concrete time span ("years", "months", ...).
time_related = ['лет', 'год ', 'меся', 'недел', 'дне', 'года']
# Punctuation stripped outright from free-text answers.
specials_to_remove = [
    '.', '"', "'", '?', '(', ')', '`',
]
# Separator characters replaced with a space instead of being removed.
specials_to_replace = [
    '-', '\\', '/', ','
]
# pymorphy2 part-of-speech tags that are kept during normalization.
key_pos = ['NOUN', 'VERB', 'NUMR', 'ADJF', 'ADJS', 'INFN']
# Shared morphological analyzer (expensive to construct; built once).
morph = pymorphy2.MorphAnalyzer()
def you_know_first_steps(x):
    """Return 1 if the answer names a concrete first step, else 0.

    Empty/None answers and answers containing the Russian phrase
    'не знаю' ("I don't know") count as not knowing the first steps.
    """
    if not x:
        return 0
    return 0 if 'не знаю' in x.lower() else 1
def is_time_certain(x):
    """Return 1 if the answer mentions a time span, else 0.

    The answer is matched case-insensitively against the module-level
    ``time_related`` markers ('лет', 'год ', ...). Falsy input (None,
    empty string) yields 0; non-string input is stringified first.
    """
    if not x:
        return 0
    text = str(x).lower()
    # any() replaces the original for/else-continue scan over the markers.
    return 1 if any(marker in text for marker in time_related) else 0
def certainly_imagined(x):
    """Return 1 if the answer says the goal is imagined 'четко' (clearly).

    Matches the substring ' четко' — note the leading space — in a
    case-insensitive way; falsy input yields 0.
    """
    if not x:
        return 0
    return 1 if ' четко' in x.lower() else 0
def are_obstackles_expected(x):
    """Return 0 if the answer denies obstacles, else 1.

    An answer containing 'не вижу преград' or 'нет' (case-insensitive)
    means no obstacles are expected. A missing/empty answer defaults to
    1 (obstacles assumed).
    """
    if not x:
        return 1
    text = str(x).lower()
    denies_obstacles = 'не вижу преград' in text or 'нет' in text
    return 0 if denies_obstacles else 1
def remove_special(x):
    """Strip the characters listed in ``specials_to_remove`` out of *x*.

    Surrounding whitespace is trimmed after each removal — note that the
    trim only happens when the special character was actually present.
    """
    cleaned = x
    for token in specials_to_remove:
        if token not in cleaned:
            continue
        cleaned = cleaned.replace(token, '').strip()
    return cleaned
def replace_special(x):
    """Replace the separators listed in ``specials_to_replace`` with spaces.

    As in ``remove_special``, surrounding whitespace is trimmed only when
    the separator was actually present in the string.
    """
    normalized = x
    for token in specials_to_replace:
        if token not in normalized:
            continue
        normalized = normalized.replace(token, ' ').strip()
    return normalized
def create_list_of_words(x):
    """Split *x* on single spaces into a list of tokens.

    Uses a literal ' ' separator, so consecutive spaces produce empty
    tokens (unlike ``str.split()`` with no argument).
    """
    words = x.split(' ')
    return words
def clean_LoW_nv(x):
    """Normalize a list of words into one lemmatized, space-joined string.

    Purely numeric tokens are kept as-is; every other word is lemmatized
    with pymorphy2 and kept only when its tag contains one of the
    module-level ``key_pos`` parts of speech.

    Fixes over the original: the local list no longer shadows the
    function name, the dead ``stop = 0`` assignment (never read) is
    removed, and ``== True`` is dropped from the isdigit check. Output
    is unchanged.

    NOTE(review): a lemma is appended once per matching ``key_pos``
    entry; pymorphy2 tags normally carry a single POS, so duplicates
    should not occur in practice — confirm if ``key_pos`` ever gains
    overlapping tags.
    """
    kept = []
    for word in x:
        if word.isdigit():
            kept.append(word)
        else:
            parsed = morph.parse(word)[0]
            normal_form = parsed.normal_form
            pos = parsed.tag
            for s_pos in key_pos:
                if s_pos in pos:
                    kept.append(normal_form)
    return ' '.join(kept)
def word_counter(x):
    """Return the number of tokens in the word list *x*."""
    return len(x)
def letters_counter(x):
    """Return the total number of characters across all tokens in *x*."""
    # sum() over a generator replaces the manual `counter = int()` loop.
    return sum(len(word) for word in x)
def pos_counter(x, pos_to_comp):
    """Count words in *x* whose pymorphy2 tag matches *pos_to_comp*.

    Each word is parsed once; a word contributes one count per matching
    entry of *pos_to_comp* (so a tag matching several entries counts
    several times, as in the original).
    """
    total = 0
    for word in x:
        tag = morph.parse(word)[0].tag
        total += sum(1 for wanted in pos_to_comp if wanted in tag)
    return total
def digit_counter(x):
    """Return how many tokens in *x* are purely numeric (str.isdigit).

    Drops the redundant ``== True`` comparison and the local counter
    that shadowed the function name; result is unchanged.
    """
    return sum(1 for word in x if word.isdigit())
def create_featured_datasets(input:dict):
    """Turn one questionnaire answer dict into (features, vectors, frame).

    Builds a single-row DataFrame from *input*, derives binary flags and
    text statistics from the goal-related answers, scales the numeric
    features, and TF-IDF-embeds the cleaned goal text.

    NOTE(review): the parameter shadows the builtin ``input``; kept for
    interface compatibility. Also note MinMaxScaler is fit on this one
    row, so every scaled feature collapses to 0 for single-row input —
    confirm whether that is intended.
    """
    # One answer dict -> one-row frame (keys become columns after .T).
    input_df = pd.DataFrame.from_dict(input, orient='index').T
    # Binary flags derived from the free-text answers.
    input_df['are_first_steps_known'] = input_df.loc[:, 'goal_first_step'].apply(lambda x: you_know_first_steps(x))
    input_df['is_time_certain'] = input_df['goal_time'].apply(lambda x: is_time_certain(x))
    input_df['is_certainly_imagined'] = input_df['goal_result'].apply(lambda x: certainly_imagined(x))
    input_df['are_obstackles_expected'] = input_df['goal_obstacle'].apply(lambda x: are_obstackles_expected(x))
    input_df.drop(columns=['goal_result', 'goal_first_step', 'goal_obstacle', 'goal_time'], inplace=True)
    # Concatenate name and type into a single "name_type" text column.
    input_df['space'] = ' '
    input_df['name_type'] = input_df['goal_name'] + input_df['space'] + input_df['goal_type']
    input_df.drop(columns=['goal_name', 'goal_type', 'space'], inplace=True)
    # Lowercase, strip punctuation, then normalize separators to spaces.
    input_df['goal_domain'] = input_df['goal_domain'].apply(lambda x: str(x).lower())
    input_df['name_type'] = input_df['name_type'].apply(lambda x: str(x).lower())
    input_df['goal_domain'] = input_df['goal_domain'].apply(lambda x: remove_special(x))
    input_df['name_type'] = input_df['name_type'].apply(lambda x: remove_special(x))
    input_df['goal_domain'] = input_df['goal_domain'].apply(lambda x: replace_special(x))
    input_df['name_type'] = input_df['name_type'].apply(lambda x: replace_special(x))
    # Tokenize and lemmatize (keeping only the key parts of speech).
    input_df['goal_domain_LoW'] = input_df['goal_domain'].apply(lambda x: create_list_of_words(x))
    input_df['name_type_LoW'] = input_df['name_type'].apply(lambda x: create_list_of_words(x))
    input_df['goal_domain_clean_NV_LoW'] = input_df['goal_domain_LoW'].apply(lambda x: clean_LoW_nv(x))
    input_df['name_type_clean_NV_LoW'] = input_df['name_type_LoW'].apply(lambda x: clean_LoW_nv(x))
    # Text-statistics features: counts, lengths, average word length.
    input_df['topic_words'] = input_df['goal_domain_LoW'].apply(lambda x: word_counter(x))
    input_df['goal_words'] = input_df['name_type_LoW'].apply(lambda x: word_counter(x))
    input_df['topic_letters'] = input_df['goal_domain_LoW'].apply(lambda x: letters_counter(x))
    input_df['goal_letters'] = input_df['name_type_LoW'].apply(lambda x: letters_counter(x))
    input_df['topic_aver_word_len'] = round(input_df['topic_letters'].div(input_df['topic_words']), 2)
    input_df['goal_aver_word_len'] = round(input_df['goal_letters'].div(input_df['goal_words']), 2)
    # Part-of-speech counters over the goal text.
    input_df['goal_verbs_counter'] = input_df['name_type_LoW'].apply(lambda x: pos_counter(x, ['VERB', 'INFN']))
    input_df['goal_nouns_counter'] = input_df['name_type_LoW'].apply(lambda x: pos_counter(x, ['NOUN']))
    input_df['goal_numr_counter'] = input_df['name_type_LoW'].apply(lambda x: pos_counter(x, ['NUMR']))
    input_df['goal_adj_counter'] = input_df['name_type_LoW'].apply(lambda x: pos_counter(x, ['ADJF', 'ADJS']))
    input_df['goal_digit_counter'] = input_df['name_type_LoW'].apply(lambda x: digit_counter(x))
    # Features
    df_features = input_df[['are_first_steps_known', 'is_time_certain',
                'is_certainly_imagined', 'are_obstackles_expected',
                'topic_words', 'goal_words', 'topic_letters',
                'goal_letters', 'topic_aver_word_len', 'goal_aver_word_len',
                'goal_verbs_counter', 'goal_nouns_counter', 'goal_numr_counter',
                'goal_adj_counter', 'goal_digit_counter']]
    mms = MinMaxScaler()
    features = mms.fit_transform(df_features.values)
    # Vectors
    df_vectors = input_df[['name_type_clean_NV_LoW']] # only name-type for now
    text = df_vectors['name_type_clean_NV_LoW']
    # tfidf is the pre-fitted vectorizer unpickled at module import time.
    vectors = tfidf.transform(text)
    return features, vectors, input_df
269b2b608f55030cd416d3cd10c67de0ffea7a94 | 2,175 | py | Python | rebound/rebound/tests/test_shearingsheet.py | rodluger/ttv-devil | e534e4f3cd75db951cba54441f7a5458c87e0cf9 | [
"MIT"
] | null | null | null | rebound/rebound/tests/test_shearingsheet.py | rodluger/ttv-devil | e534e4f3cd75db951cba54441f7a5458c87e0cf9 | [
"MIT"
] | null | null | null | rebound/rebound/tests/test_shearingsheet.py | rodluger/ttv-devil | e534e4f3cd75db951cba54441f7a5458c87e0cf9 | [
"MIT"
] | null | null | null | import rebound
import unittest
import math
import numpy as np
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| 33.984375 | 70 | 0.512644 | import rebound
import unittest
import math
import numpy as np
class TestShearingSheet(unittest.TestCase):
    """Integration test of REBOUND's shearing-sheet (SEI) setup."""

    def test_saturnsrings(self):
        """Simulate a Saturn's-rings patch for one orbit and check counts.

        Sets up a self-gravitating, colliding particle patch with shear
        boundaries, integrates one orbital period, then verifies that
        particles were added, collisions were logged, and removal with
        tree bookkeeping behaves as documented.
        """
        sim = rebound.Simulation()
        # Epicyclic frequency of the patch.
        OMEGA = 0.00013143527 # [1/s]
        sim.ri_sei.OMEGA = OMEGA
        surface_density = 400. # kg/m^2
        particle_density = 400. # kg/m^3
        sim.G = 6.67428e-11 # N m^2 / kg^2
        # Timestep: 1e-3 of an orbital period.
        sim.dt = 1e-3*2.*np.pi/OMEGA
        sim.softening = 0.2 # [m]
        boxsize = 50. # [m]
        sim.configure_box(boxsize)
        sim.configure_ghostboxes(2,2,0)
        # SEI integrator + shear boundaries = shearing-sheet approximation;
        # tree code for both gravity and collision search.
        sim.integrator = "sei"
        sim.boundary = "shear"
        sim.gravity = "tree"
        sim.collision = "tree"
        def cor_bridges(r, v):
            # Velocity-dependent coefficient of restitution, clamped to [0, 1].
            eps = 0.32*pow(abs(v)*100.,-0.234)
            if eps>1.:
                eps=1.
            if eps<0.:
                eps=0.
            return eps
        sim.coefficient_of_restitution = cor_bridges
        def powerlaw(slope, min_v, max_v):
            # Draw one sample from a bounded power-law distribution.
            y = np.random.uniform()
            pow_max = pow(max_v, slope+1.)
            pow_min = pow(min_v, slope+1.)
            return pow((pow_max-pow_min)*y + pow_min, 1./(slope+1.))
        # Fill the box with particles until the target surface density is met.
        total_mass = 0.
        while total_mass < surface_density*(boxsize**2):
            radius = powerlaw(slope=-3, min_v=1, max_v=4)  # [m]
            mass = particle_density*4./3.*np.pi*(radius**3)
            x = np.random.uniform(low=-boxsize/2., high=boxsize/2.)
            sim.add(
                m=mass,
                r=radius,
                x=x,
                y=np.random.uniform(low=-boxsize/2., high=boxsize/2.),
                z=np.random.normal(),
                vx = 0.,
                # Keplerian shear profile: vy = -(3/2) * Omega * x.
                vy = -3./2.*x*OMEGA,
                vz = 0.)
            total_mass += mass
        self.assertGreater(sim.N,50)
        # Integrate exactly one orbital period.
        sim.integrate(2.*np.pi/OMEGA)
        self.assertGreater(sim.collisions_Nlog,1000)
        # Removing with keepSorted=0 requires a manual tree update afterwards.
        Nbefore = sim.N
        sim.remove(0,keepSorted=0)
        sim.tree_update()
        self.assertEqual(Nbefore-1,sim.N)
        # keepSorted=1 is not supported with the tree code and must raise.
        with self.assertRaises(RuntimeError):
            sim.remove(0,keepSorted=1)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| 1,989 | 22 | 54 |
b2a466cb655eb0070b424a1359656da5287dd289 | 4,282 | py | Python | examples/self_supervised/common.py | sandutsar/catalyst | 55a3a557cb9276149cf2f70381878d87264e71c2 | [
"Apache-2.0"
] | 4 | 2019-12-14T07:27:09.000Z | 2021-03-23T14:34:37.000Z | examples/self_supervised/common.py | sandutsar/catalyst | 55a3a557cb9276149cf2f70381878d87264e71c2 | [
"Apache-2.0"
] | null | null | null | examples/self_supervised/common.py | sandutsar/catalyst | 55a3a557cb9276149cf2f70381878d87264e71c2 | [
"Apache-2.0"
] | null | null | null | from typing import Dict, Optional
from datasets import datasets
import torch
from torch.utils.data import DataLoader
from catalyst.contrib import nn
from catalyst.contrib.models.cv.encoders import ResnetEncoder
from catalyst.data.dataset.self_supervised import SelfSupervisedDatasetWrapper
def add_arguments(parser) -> None:
    """Add the common self-supervised training arguments to *parser*:
        feature_dim: Feature dim for latent vector
        temperature: Temperature used in softmax
        batch_size: Number of images in each mini-batch
        epochs: Number of sweeps over the dataset to train
        num_workers: Number of workers to process a dataloader
        logdir: Logs directory (tensorboard, weights, etc)
        dataset: CIFAR-10, CIFAR-100 or STL10
        learning-rate: Learning rate for optimizer
    Args:
        parser: argparser like object
    """
    parser.add_argument(
        "--feature_dim", default=128, type=int, help="Feature dim for latent vector"
    )
    parser.add_argument(
        "--temperature", default=0.5, type=float, help="Temperature used in softmax"
    )
    parser.add_argument(
        "--batch_size", default=512, type=int, help="Number of images in each mini-batch"
    )
    parser.add_argument(
        "--epochs", default=1000, type=int, help="Number of sweeps over the dataset to train"
    )
    # FIX: was type=float — torch DataLoader requires an integer worker
    # count (worker startup does range(num_workers)); integer CLI values
    # parse identically, so this is backward-compatible.
    parser.add_argument(
        "--num_workers", default=8, type=int, help="Number of workers to process a dataloader"
    )
    parser.add_argument(
        "--logdir",
        default="./logdir",
        type=str,
        help="Logs directory (tensorboard, weights, etc)",
    )
    parser.add_argument(
        "--dataset",
        default="CIFAR-10",
        type=str,
        choices=datasets.keys(),
        help="Dataset: CIFAR-10, CIFAR-100 or STL10",
    )
    parser.add_argument(
        "--learning-rate", default=0.001, type=float, help="Learning rate for optimizer"
    )
class ContrastiveModel(torch.nn.Module):
    """Contrastive model with projective head.

    Args:
        model: projective head for the train time
        encoder: model for the future uses
    """

    def __init__(self, model, encoder):
        """Store the projection head and encoder.

        FIX: this class had no constructor, so ``self.model`` and
        ``self.encoder`` were never assigned (forward raised
        AttributeError) and ``nn.Module.__init__`` was never run.
        """
        super().__init__()
        self.model = model
        self.encoder = encoder

    def forward(self, x):
        """Forward method.

        Args:
            x: input for the encoder

        Returns:
            (embeddings, projections)
        """
        emb = self.encoder(x)
        projection = self.model(emb)
        return emb, projection
def get_loaders(
    dataset: str, batch_size: int, num_workers: Optional[int]
) -> Dict[str, DataLoader]:
    """Build the train/valid dataloaders for the chosen experiment dataset.

    Args:
        dataset: dataset for the experiment
        batch_size: batch size for loaders
        num_workers: number of workers to process loaders

    Returns:
        {"train":..., "valid":...}
    """
    spec = datasets[dataset]
    augmentations = spec["train_transform"]
    base_transform = spec["valid_transform"]

    def wrap(is_train: bool) -> SelfSupervisedDatasetWrapper:
        # Download (if needed) one split and wrap it with the SSL transforms.
        raw = spec["dataset"](root="data", train=is_train, transform=None, download=True)
        return SelfSupervisedDatasetWrapper(
            raw,
            transforms=augmentations,
            transform_original=base_transform,
        )

    return {
        "train": DataLoader(wrap(True), batch_size=batch_size, num_workers=num_workers),
        "valid": DataLoader(wrap(False), batch_size=batch_size, num_workers=num_workers),
    }
def get_contrastive_model(feature_dim: int) -> ContrastiveModel:
    """Build a ResNet-50 contrastive model with a two-layer projection head.

    Args:
        feature_dim: dimensionality of the contrastive projection

    Returns:
        ContrastiveModel instance
    """
    backbone = ResnetEncoder(arch="resnet50", frozen=False)
    encoder = nn.Sequential(backbone, nn.Flatten())
    head_layers = [
        nn.Linear(2048, 512, bias=False),
        nn.ReLU(inplace=True),
        nn.Linear(512, feature_dim, bias=True),
    ]
    projection_head = nn.Sequential(*head_layers)
    return ContrastiveModel(projection_head, encoder)
| 31.255474 | 96 | 0.671649 | from typing import Dict, Optional
from datasets import datasets
import torch
from torch.utils.data import DataLoader
from catalyst.contrib import nn
from catalyst.contrib.models.cv.encoders import ResnetEncoder
from catalyst.data.dataset.self_supervised import SelfSupervisedDatasetWrapper
def add_arguments(parser) -> None:
    """Add the common self-supervised training arguments to *parser*:
        feature_dim: Feature dim for latent vector
        temperature: Temperature used in softmax
        batch_size: Number of images in each mini-batch
        epochs: Number of sweeps over the dataset to train
        num_workers: Number of workers to process a dataloader
        logdir: Logs directory (tensorboard, weights, etc)
        dataset: CIFAR-10, CIFAR-100 or STL10
        learning-rate: Learning rate for optimizer
    Args:
        parser: argparser like object
    """
    parser.add_argument(
        "--feature_dim", default=128, type=int, help="Feature dim for latent vector"
    )
    parser.add_argument(
        "--temperature", default=0.5, type=float, help="Temperature used in softmax"
    )
    parser.add_argument(
        "--batch_size", default=512, type=int, help="Number of images in each mini-batch"
    )
    parser.add_argument(
        "--epochs", default=1000, type=int, help="Number of sweeps over the dataset to train"
    )
    # FIX: was type=float — torch DataLoader requires an integer worker
    # count (worker startup does range(num_workers)); integer CLI values
    # parse identically, so this is backward-compatible.
    parser.add_argument(
        "--num_workers", default=8, type=int, help="Number of workers to process a dataloader"
    )
    parser.add_argument(
        "--logdir",
        default="./logdir",
        type=str,
        help="Logs directory (tensorboard, weights, etc)",
    )
    parser.add_argument(
        "--dataset",
        default="CIFAR-10",
        type=str,
        choices=datasets.keys(),
        help="Dataset: CIFAR-10, CIFAR-100 or STL10",
    )
    parser.add_argument(
        "--learning-rate", default=0.001, type=float, help="Learning rate for optimizer"
    )
class ContrastiveModel(torch.nn.Module):
    """Encoder plus projection head, trained contrastively.

    Args:
        model: projective head for the train time
        encoder: model for the future uses
    """

    def __init__(self, model, encoder):
        super().__init__()
        self.model = model
        self.encoder = encoder

    def forward(self, x):
        """Run the encoder, then project its embedding.

        Args:
            x: input for the encoder

        Returns:
            (embeddings, projections)
        """
        embedding = self.encoder(x)
        return embedding, self.model(embedding)
def get_loaders(
    dataset: str, batch_size: int, num_workers: Optional[int]
) -> Dict[str, DataLoader]:
    """Build the train/valid dataloaders for the chosen experiment dataset.

    Args:
        dataset: dataset for the experiment
        batch_size: batch size for loaders
        num_workers: number of workers to process loaders

    Returns:
        {"train":..., "valid":...}
    """
    spec = datasets[dataset]
    augmentations = spec["train_transform"]
    base_transform = spec["valid_transform"]

    def wrap(is_train: bool) -> SelfSupervisedDatasetWrapper:
        # Download (if needed) one split and wrap it with the SSL transforms.
        raw = spec["dataset"](root="data", train=is_train, transform=None, download=True)
        return SelfSupervisedDatasetWrapper(
            raw,
            transforms=augmentations,
            transform_original=base_transform,
        )

    return {
        "train": DataLoader(wrap(True), batch_size=batch_size, num_workers=num_workers),
        "valid": DataLoader(wrap(False), batch_size=batch_size, num_workers=num_workers),
    }
def get_contrastive_model(feature_dim: int) -> ContrastiveModel:
    """Build a ResNet-50 contrastive model with a two-layer projection head.

    Args:
        feature_dim: dimensionality of the contrastive projection

    Returns:
        ContrastiveModel instance
    """
    backbone = ResnetEncoder(arch="resnet50", frozen=False)
    encoder = nn.Sequential(backbone, nn.Flatten())
    head_layers = [
        nn.Linear(2048, 512, bias=False),
        nn.ReLU(inplace=True),
        nn.Linear(512, feature_dim, bias=True),
    ]
    projection_head = nn.Sequential(*head_layers)
    return ContrastiveModel(projection_head, encoder)
| 121 | 0 | 27 |
63a068b07249210cbb884b5404a8565cf36c1246 | 20,121 | py | Python | versgedropt.py | Kunstenpunt/versgedropt | b1cc889d601f3ad4b1e86e6c7a054cf09ad281bb | [
"Apache-2.0"
] | null | null | null | versgedropt.py | Kunstenpunt/versgedropt | b1cc889d601f3ad4b1e86e6c7a054cf09ad281bb | [
"Apache-2.0"
] | 1 | 2021-06-02T00:46:21.000Z | 2021-06-02T00:46:21.000Z | versgedropt.py | Kunstenpunt/versgedropt | b1cc889d601f3ad4b1e86e6c7a054cf09ad281bb | [
"Apache-2.0"
] | null | null | null | import spotipy
import sys
import musicbrainzngs
from spotipy.oauth2 import SpotifyClientCredentials
from requests import get, exceptions
from json import loads
from dateparser import parse
from pandas import DataFrame, read_excel
from bs4 import BeautifulSoup
from time import sleep
from datetime import datetime, timedelta
from re import compile, sub
from codecs import open
import pysftp
from glob import glob
import os
if __name__ == "__main__":
    # Any CLI argument switches the scraper into test mode (small artist list).
    tst = sys.argv[1] if len(sys.argv) > 1 else False
    vg = Versgedropt(test=tst)
    vg.set_mbids(mscbrnz_path="")
    if vg.test:
        # Test mode: run the scrape -> generate -> upload pipeline once.
        vg.get_drops_for_musicbrainz_belgians()
        vg.generate_website()
        vg.put_website_online()
    else:
        # Production mode: loop forever and rerun the pipeline while the
        # wall-clock hour is 14. NOTE(review): there is no sleep, so this
        # busy-spins and may run the pipeline repeatedly during that hour.
        while True:
            if datetime.now().hour == 14:
                vg.get_drops_for_musicbrainz_belgians()
                vg.generate_website()
                vg.put_website_online()
| 40.648485 | 196 | 0.551315 | import spotipy
import sys
import musicbrainzngs
from spotipy.oauth2 import SpotifyClientCredentials
from requests import get, exceptions
from json import loads
from dateparser import parse
from pandas import DataFrame, read_excel
from bs4 import BeautifulSoup
from time import sleep
from datetime import datetime, timedelta
from re import compile, sub
from codecs import open
import pysftp
from glob import glob
import os
class Platform(object):
    """Base class for a single streaming/download-platform scraper.

    Subclasses set ``self.platform`` to the substring identifying their
    URLs and override ``authenticate``/``get_drops_for_artist``.
    """

    def __init__(self):
        # Filled in by subclasses / by the orchestrating caller.
        self.test = None
        self.platform = None

    def do_it(self, current_url):
        """Return True when *current_url* belongs to this platform."""
        return self.platform in current_url

    def authenticate(self):
        """No credentials needed by default; subclasses may override."""

    def get_drops_for_artist(self, mbartist, current_url):
        """Return a list of release dicts; overridden per platform."""
class Youtube(Platform):
    """Scraper listing recent uploads for a YouTube user or channel URL."""

    def __init__(self):
        super().__init__()
        self.google_api_key = None
        self.platform = "youtube"

    def authenticate(self):
        """Load the Google API key from the local resources file."""
        with open('resources/google.txt', 'r') as f:
            self.google_api_key = f.read().strip()

    def get_drops_for_artist(self, mbartist, current_url):
        """Return release dicts for the artist's YouTube URL.

        The second-to-last URL path segment picks the lookup mode: any
        value other than "channel" is treated as a username whose uploads
        playlist is fetched; "channel" URLs use the search endpoint. All
        failures are logged and swallowed, returning what was collected.
        """
        data = []
        try:
            tiepeurl = current_url.strip('/').split('/')[-2]
            username = current_url.strip('/').split('/')[-1]
            if tiepeurl != "channel":
                # Username URL: resolve the channel's "uploads" playlist first.
                channel_url = 'https://www.googleapis.com/youtube/v3/channels?part=contentDetails&forUsername={0}&key={1}'.format(username, self.google_api_key)
                channel_info = loads(get(channel_url).text)
                playlist_id = channel_info['items'][0]['contentDetails']['relatedPlaylists']['uploads']
                items_url = 'https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId={0}&key={1}'.format(playlist_id, self.google_api_key)
            else:
                # Channel URL: list the channel's videos via the search endpoint.
                items_url = "https://www.googleapis.com/youtube/v3/search?part=snippet&channelId={0}&type=video&key={1}".format(username, self.google_api_key)
            items = loads(get(items_url).text)['items']
            for item in items:
                # NOTE(review): the id/url branches below assume tiepeurl is
                # either "channel" or "user"; other URL shapes may select the
                # wrong branch (possible KeyError) — confirm against real data.
                album_data = {
                    'band': mbartist['artist']['name'],
                    'band_id': username,
                    'platform': 'youtube',
                    'drop': item['snippet']['title'],
                    'drop_id': item['id']["videoId"] if tiepeurl == "channel" else item["id"],
                    'drop_url': 'https://www.youtube.com/watch?v=' + item['snippet']['resourceId']['videoId'] if tiepeurl == "user" else 'https://www.youtube.com/watch?v=' + item['id']['videoId'],
                    'drop_visual': item['snippet']['thumbnails']['default']['url'],
                    'release_date': parse(item['snippet']['publishedAt']).date()
                }
                print(album_data)
                data.append(album_data)
        except Exception as e:
            print("youtube says", e)
        return data
class Spotify(Platform):
    """Scraper listing albums, singles and compilations via the Spotify API."""

    def __init__(self):
        super().__init__()
        # spotipy client; created in authenticate().
        self.spotify = None
        self.platform = "spotify"

    def authenticate(self):
        """Create a spotipy client from credentials in resources/spotify.txt."""
        with open('resources/spotify.txt', 'r') as f:
            client_id, client_secret = f.read().strip().split('\n')
        client_credentials_manager = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)
        self.spotify = spotipy.Spotify(client_credentials_manager=client_credentials_manager)

    def get_drops_for_artist(self, mbartist, current_url):
        """Return release dicts for the Spotify artist id in *current_url*.

        Pages through the album, single and compilation listings and maps
        each entry into the shared release-dict shape.
        """
        data = []
        birdy_uri = 'spotify:artist:{0}'.format(current_url.split('/')[-1])
        # Albums: first page, then follow pagination via spotify.next().
        results = self.spotify.artist_albums(birdy_uri, album_type='album')
        albums = results['items']
        while results['next']:
            results = self.spotify.next(results)
            albums.extend(results['items'])
        # Singles, same pagination pattern.
        results = self.spotify.artist_albums(birdy_uri, album_type='single')
        albums.extend(results['items'])
        while results['next']:
            results = self.spotify.next(results)
            albums.extend(results['items'])
        # Compilations, same pagination pattern.
        results = self.spotify.artist_albums(birdy_uri, album_type='compilation')
        albums.extend(results['items'])
        while results['next']:
            results = self.spotify.next(results)
            albums.extend(results['items'])
        for album in albums:
            # NOTE(review): images[-2] assumes at least two artwork sizes are
            # present; releases with fewer images would raise IndexError —
            # confirm against the API responses in practice.
            album_data = {
                'band': mbartist['artist']['name'],
                'band_id': current_url.split('/')[-1],
                'platform': 'spotify',
                'drop': album['name'],
                'drop_id': album['id'],
                'drop_url': 'https://open.spotify.com/album/' + album['id'],
                'drop_visual': album['images'][-2]['url'],
                'release_date': parse(album['release_date']).date()
            }
            print(album_data)
            data.append(album_data)
        return data
class Itunes(Platform):
    """Scraper for iTunes artist release listings via the lookup API."""

    def __init__(self):
        super().__init__()
        self.platform = "itunes"

    def authenticate(self):
        """The public iTunes lookup API needs no credentials."""

    def get_drops_for_artist(self, mbartist, current_url):
        """Return release dicts for the iTunes artist id in *current_url*."""
        drops = []
        artist_id = current_url.split('/')[-1].lstrip('id')
        url = 'https://itunes.apple.com/lookup?id={0}&entity=album'.format(artist_id)
        d = get(url)
        payload = loads(d.text)
        if 'results' in payload:
            for item in payload['results']:
                # Only "collection" entries are releases; skip the artist row.
                if item['wrapperType'] != 'collection':
                    continue
                album_data = {
                    'band': mbartist['artist']['name'],
                    'band_id': artist_id,
                    'platform': 'itunes',
                    'drop': item['collectionName'],
                    'drop_id': item['collectionId'],
                    'drop_url': item['collectionViewUrl'],
                    'drop_visual': item['artworkUrl100'],
                    'release_date': parse(item['releaseDate']).date()
                }
                print(album_data)
                drops.append(album_data)
        return drops
class Deezer(Platform):
    """Scraper for Deezer artist album listings."""

    def __init__(self):
        super().__init__()
        self.platform = "deezer"

    def authenticate(self):
        """The public Deezer API needs no credentials."""

    def get_drops_for_artist(self, mbartist, current_url):
        """Return release dicts for the Deezer artist id in *current_url*."""
        drops = []
        deezer_id = current_url.split('/')[-1]
        response = get('https://api.deezer.com/artist/{0}/albums'.format(deezer_id))
        for item in loads(response.text)['data']:
            print(item)
            album_data = {
                'band': mbartist['artist']['name'],
                'band_id': deezer_id,
                'platform': 'deezer',
                'drop': item['title'],
                'drop_id': item['id'],
                'drop_url': item['link'],
                'drop_visual': item['cover_medium'],
                'release_date': parse(item['release_date']).date()
            }
            print(album_data)
            drops.append(album_data)
        return drops
class Bandcamp(Platform):
    """Scraper that walks a Bandcamp artist page and parses its releases."""

    def __init__(self):
        super().__init__()
        self.platform = "bandcamp"

    def authenticate(self):
        # Bandcamp is scraped over plain HTTP; no credentials needed.
        pass

    def get_drops_for_artist(self, mbartist, current_url):
        """Return release dicts scraped from the artist's /music page.

        A page with a play button is treated as a single-release page;
        otherwise every album/track link on the page is followed and
        parsed individually.
        """
        data = []
        current_url = current_url.rstrip('/')
        print("current_url", current_url)
        html = get(current_url + '/music', headers={'User-agent': 'Mozilla/5.0'}).text
        soup = BeautifulSoup(html, 'html.parser')
        done = []
        if soup.find("div", attrs={"class": "playbutton"}):
            # The /music page itself is one playable release.
            album_data = self.parse_bc_release(current_url + "/music")
            print(current_url, album_data)
            if album_data:
                album_data['band_id'] = current_url
                album_data['platform'] = 'bandcamp'
                print(album_data)
                data.append(album_data)
                done.append(current_url)
        else:
            # Discography page: follow each album/track link exactly once.
            for a in soup.find_all('a', attrs={'href': compile('album|track')}):
                href = a['href']
                if href not in done and (current_url in href or href.startswith('/album/') or href.startswith('/track/')):
                    album_url = href if href.startswith('http') else current_url + href
                    album_data = self.parse_bc_release(album_url)
                    if album_data:
                        album_data['band_id'] = current_url
                        album_data['platform'] = 'bandcamp'
                        print(album_data)
                        data.append(album_data)
                    done.append(href)
        return data

    def get_soup(self, url):
        """Fetch *url* and return parsed HTML, retrying on connection errors.

        NOTE(review): retries recursively with a 5s pause and no retry cap,
        so a persistently unreachable host recurses without bound — confirm
        whether a maximum retry count is wanted.
        """
        try:
            release_request = get(url, headers={'User-agent': 'Mozilla/5.0'})
            return BeautifulSoup(release_request.text, "html.parser")
        except exceptions.ConnectionError:
            sleep(5.0)
            return self.get_soup(url)

    def parse_bc_release(self, url):
        """Parse one release page's ld+json metadata into a release dict.

        NOTE(review): assumes the page contains an application/ld+json
        script tag with name/byArtist/datePublished/image fields; pages
        without it would fail here.
        """
        soup = self.get_soup(url)
        data = soup.find("script", attrs={"type": "application/ld+json"})
        d = loads(str(data.get_text()))
        title = d["name"]
        artist = d["byArtist"]
        releasedate = parse(d["datePublished"])
        visual = d["image"][0]
        return {
            "drop": title,
            "band": artist,
            "release_date": releasedate,
            "drop_url": url,
            "drop_id": url,
            "drop_visual": visual
        }
class Soundcloud(Platform):
    """Scraper that parses a SoundCloud profile page for recent tracks."""

    def __init__(self):
        super().__init__()
        self.platform = "soundcloud"

    def authenticate(self):
        # SoundCloud is scraped from its HTML; no credentials needed.
        pass

    def get_drops_for_artist(self, mbartist, current_url):
        """Return release dicts scraped from the artist's profile HTML.

        Each <article class="audible"> on the profile page becomes one
        release; a second request per track fetches its detail page for
        the artwork. All failures are logged and swallowed.
        """
        data = []
        current_url = current_url.rstrip('/')
        try:
            html = get(current_url, headers={
                'User-agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/44.0.2403.155 Safari/537.36'}).text
            soup = BeautifulSoup(html, 'html.parser')
            for item in soup.find_all('article', attrs={'class': 'audible'}):
                # Parse the human-readable date as a naive (timezone-free) date.
                release_date = parse(item.find("time").contents[0],
                                     settings={'RETURN_AS_TIMEZONE_AWARE': False}).date()
                drop = item.find('a', attrs={'itemprop': 'url'})
                # Fetch the track's own page to locate its 500px artwork.
                html_detail = get('https://soundcloud.com' + drop['href'], headers={
                    'User-agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML like Gecko) Chrome/44.0.2403.155 Safari/537.36'}).text
                soup_detail = BeautifulSoup(html_detail, 'html.parser')
                visual = soup_detail.find('img', attrs={'width': '500'})['src']
                album_data = {
                    'band': mbartist['artist']['name'],
                    'band_id': current_url,
                    'platform': 'soundcloud',
                    'drop': str(drop.contents[0]),
                    'drop_id': drop["href"],
                    'drop_url': 'https://soundcloud.com' + drop["href"],
                    'drop_visual': visual,
                    'release_date': release_date
                }
                print(album_data)
                data.append(album_data)
        except Exception as e:
            print("soundcloud says", e)
        return data
class Versgedropt(object):
    """Orchestrator: scrape releases per platform, build and upload the site."""

    def set_mbids(self, mscbrnz_path):
        """Populate ``self.mbids`` with the MusicBrainz artist ids to scrape.

        In test mode a fixed three-artist list is used. Otherwise the ids
        come from the bundled Excel sheet.
        NOTE(review): the *mscbrnz_path* argument is overwritten with the
        hard-coded path in production mode — confirm whether the parameter
        should be honoured.
        """
        if self.test:
            self.mbids = [
                "2d0ec174-2bff-4f46-ae1b-dfea0ed9391c", #deus
                "5481f951-8a58-4f13-8ad5-8f0dad360bda", #jamaican jazz orchestra
                "26895123-efb1-4b0b-9868-9fc2138d46b6", #poldoore
            ]
        else:
            mscbrnz_path = "resources/belgian_mscbrnz_artists.xlsx"
            mscbrnzs = read_excel(mscbrnz_path)
            self.mbids = mscbrnzs["mbid"].unique().tolist()

    def __init__(self, test=False):
        """Create and authenticate one scraper per supported platform."""
        self.test = test
        self.youtube = Youtube()
        self.youtube.authenticate()
        self.spotify = Spotify()
        self.spotify.authenticate()
        self.deezer = Deezer()
        self.deezer.authenticate()
        self.itunes = Itunes()
        self.itunes.authenticate()
        self.bandcamp = Bandcamp()
        self.bandcamp.authenticate()
        self.soundcloud = Soundcloud()
        self.soundcloud.authenticate()
        # Accumulators: mbids to scrape, collected release dicts, result frame.
        self.mbids = []
        self.data = []
        self.df = None

    def get_drops_for_musicbrainz_belgians(self):
        """Scrape every configured artist's platform URLs into ``self.df``.

        For each MusicBrainz artist, every streaming/purchase URL relation
        is dispatched to the matching platform scraper; per-artist failures
        are logged and skipped. The collected dicts are deduplicated on
        drop_url, sorted, and written to output/versgedropt.xlsx.
        """
        for mbid in self.mbids:
            try:
                musicbrainzngs.set_useragent("versgeperst", "0.1", contact=None)
                mbartist = musicbrainzngs.get_artist_by_id(mbid, includes=['url-rels'])
                if 'url-relation-list' not in mbartist['artist']:
                    mbartist['artist']['url-relation-list'] = []
                print(mbartist)
                types = ['streaming music', 'purchase for download', 'bandcamp', 'soundcloud', 'youtube', 'free streaming']
                for url in mbartist['artist']['url-relation-list']:
                    if url['type'] in types:
                        current_url = url['target']
                        for platform in ["youtube", "spotify", "deezer", "itunes", "bandcamp", "soundcloud"]:
                            print("platform", platform, current_url)
                            cls = getattr(self, platform)
                            print(cls.do_it(current_url))
                            if cls.do_it(current_url):
                                platform_data = cls.get_drops_for_artist(mbartist, current_url)
                                self.data.extend(platform_data)
            except Exception as e:
                print(e)
        self.df = DataFrame(self.data)
        self.df.sort_values(by=['release_date', 'band', 'drop'], ascending=False, inplace=True)
        self.df.drop_duplicates(subset=["drop_url"], inplace=True)
        self.df.to_excel('output/versgedropt.xlsx')

    def put_website_online(self):
        """Upload the generated HTML pages to the hosting account via SFTP.

        Existing remote .html files are removed first, then every local
        output/*.html is pushed. Host-key checking is disabled.
        """
        cnopts = pysftp.CnOpts()
        cnopts.hostkeys = None
        with open("resources/sftp.txt", "r") as f:
            user, pwd = tuple(f.read().split("\n"))
        with pysftp.Connection('sftp.dc2.gpaas.net', username=user, password=pwd, cnopts=cnopts) as sftp:
            with sftp.cd('/lamp0/web/vhosts/versgeperst.be/htdocs/versgedropt'):
                files = sftp.listdir()
                for file in files:
                    if file.endswith(".html"):
                        sftp.remove(file)
                for html_file in glob("output/*.html"):
                    print("pushing", html_file)
                    sftp.put(html_file, os.path.basename(html_file))

    def generate_website(self):
        """Render the collected releases into month-per-page HTML files.

        Walks ``self.df`` (filtered to 2010..now+120d) newest-first,
        appending one tile per release into the HTML template; when the
        month changes a page is written and a fresh template is loaded.
        The current month is written as index.html.
        """
        # purge previous version of html files
        fl = glob("output/*.html")
        for f in fl:
            os.remove(f)
        # Fallback artwork used when a release has no usable visual URL.
        soundcloud_logo = 'https://upload.wikimedia.org/wikipedia/commons/thumb/a/a2/Antu_soundcloud.svg/1024px-Antu_soundcloud.svg.png'
        deezer_logo = 'https://e-cdns-files.dzcdn.net/cache/slash/images/common/logos/deezer.c0869f01203aa5800fe970cf4d7b4fa4.png'
        default_logo = 'https://upload.wikimedia.org/wikipedia/commons/thumb/3/37/Vinyl_disc_icon.svg/240px-Vinyl_disc_icon.svg.png'
        with open("resources/template.html", "r", "utf-8") as f:
            html = f.read()
        template = BeautifulSoup(html, "html.parser")
        # set last updated line
        template.find("p", attrs={"id": "last_updated"}).insert(0, "Laatste update op: " + datetime.now().strftime(
            '%d/%m/%Y, %H:%M:%S'))
        # Keep plausible releases only: not older than 2010, not further than
        # 120 days in the future.
        filtered_df = self.df.loc[(self.df['release_date'] >= (datetime(2010, 1, 1).date())) & (self.df['release_date'] <= (datetime.now() + timedelta(days=120)).date())]
        rows = filtered_df.iterrows()
        previous_release_date = ''
        previous_page_name = ''
        previous_previous_page_name = ''
        for row in rows:
            row = row[1]
            release_date = row['release_date'].isoformat()
            release_date_str = row['release_date'].strftime('%d %b %Y')
            page_name = row['release_date'].strftime('%b %Y').replace(' ', '') + ".html"
            # Pick the artwork, falling back to a platform logo.
            if not str(row['drop_visual']).startswith('http'):
                if row['platform'] == 'soundcloud':
                    drop_visual = soundcloud_logo
                elif row['platform'] == 'deezer':
                    drop_visual = deezer_logo
                else:
                    drop_visual = default_logo
            else:
                drop_visual = row['drop_visual']
            # Absolutize relative SoundCloud URLs.
            if not str(row['drop_url']).startswith('http'):
                if row['platform'] == 'soundcloud':
                    drop_url = 'https://soundcloud.com' + row['drop_url']
                else:
                    drop_url = row["drop_url"]
            else:
                drop_url = row['drop_url']
            # Per-platform badge icon overlaid on the artwork.
            if row['platform'] == 'soundcloud':
                drop_icon = 'images/dropicon-soundcloud.png'
            elif row['platform'] == 'deezer':
                drop_icon = 'images/dropicon-deezer.png'
            elif row['platform'] == 'itunes':
                drop_icon = 'images/dropicon-apple.png'
            elif row['platform'] == 'bandcamp':
                drop_icon = 'images/dropicon-bandcamp.png'
            elif row['platform'] == 'spotify':
                drop_icon = 'images/dropicon-spotify.png'
            elif row['platform'] == 'youtube':
                drop_icon = 'images/dropicon-youtube.png'
            else:
                drop_icon = default_logo
            # Month rollover: flush the finished month's page and start fresh.
            if page_name != previous_page_name and previous_page_name != '':
                print("making", previous_page_name)
                template.find("a", attrs={"id": "vorige-maand"})["href"] = page_name if page_name != datetime.now().strftime('%b %Y').replace(' ', '') + ".html" else "index.html"
                template.find("a", attrs={"id": "volgende-maand"})["href"] = previous_previous_page_name
                template.find("b", attrs={"id": "deze-maand"}).string = previous_page_name.rstrip(".html")
                current_page_name = "index.html" if previous_page_name == datetime.now().strftime('%b %Y').replace(' ', '') + ".html" else previous_page_name
                with open("output/" + current_page_name, 'w', 'utf-8') as f:
                    f.write(template.prettify(formatter="html"))
                with open("resources/template.html", "r", "utf-8") as f:
                    html = f.read()
                template = BeautifulSoup(html, "html.parser")
                # set last updated line
                template.find("p", attrs={"id": "last_updated"}).insert(0, "Laatste update op:" + datetime.now().strftime('%d/%m/%Y, %H:%M:%S'))
                # NOTE(review): these pagination-state assignments sit inside a
                # branch whose guard requires previous_page_name to already be
                # non-empty, so as written the guard can never become true —
                # verify the intended indentation against the upstream repo.
                previous_previous_page_name = current_page_name
                previous_page_name = page_name
            # New date within the month: insert a date-header tile.
            if release_date != previous_release_date:
                new_date_tag = BeautifulSoup('<li class="dropdate"><span>{0}<br />→</span></li>'.format(release_date_str), "html.parser")
                template.find("ul", attrs={"id": "da-thumbs"}).append(new_date_tag)
            # One tile per release: artwork background, platform badge, caption.
            new_drop_tag_str = '<li><a href="{0}"><span class="hoes" style="background:url({1});background-size: 175px 175px;"><img src="{2}" /></span><div><span>{3}</span></div></a></li>'.format(
                drop_url,
                drop_visual,
                drop_icon,
                sub('', '', (row['band'] + ' - ' + row['drop'])[0:85])
            )
            new_drop_tag = BeautifulSoup(new_drop_tag_str, "html.parser")
            template.find("ul", attrs={"id": "da-thumbs"}).append(new_drop_tag)
            previous_release_date = release_date
if __name__ == "__main__":
    import time

    # Any command-line argument switches the scraper into one-shot test mode.
    tst = sys.argv[1] if len(sys.argv) > 1 else False
    vg = Versgedropt(test=tst)
    vg.set_mbids(mscbrnz_path="")
    if vg.test:
        # Test mode: run the whole pipeline once and exit.
        vg.get_drops_for_musicbrainz_belgians()
        vg.generate_website()
        vg.put_website_online()
    else:
        # Daemon mode: regenerate the site once per day at 14:00.
        while True:
            if datetime.now().hour == 14:
                vg.get_drops_for_musicbrainz_belgians()
                vg.generate_website()
                vg.put_website_online()
                # Sleep past the trigger hour so the pipeline is not
                # re-run repeatedly within the same hour (old behavior).
                time.sleep(60 * 60)
            # Avoid the original busy-wait, which spun the CPU at 100%.
            time.sleep(60)
| 18,225 | 27 | 959 |
c7f7911950c25ab495d084c77ad18ed923650d20 | 42 | py | Python | tests/components/pushbullet/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/pushbullet/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/pushbullet/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Tests for the pushbullet component."""
| 21 | 41 | 0.714286 | """Tests for the pushbullet component."""
| 0 | 0 | 0 |
1638445b983d8d6ecbff1ab862d979b7aba9bc68 | 5,321 | py | Python | sosia/processing/tests/test_querying.py | sosia-dev/sosia | d4d2d5edb0cd1d085b5a457eb6d19bf8e9fea7f5 | [
"MIT"
] | 14 | 2019-03-12T22:07:47.000Z | 2022-03-08T14:05:05.000Z | sosia/processing/tests/test_querying.py | sosia-dev/sosia | d4d2d5edb0cd1d085b5a457eb6d19bf8e9fea7f5 | [
"MIT"
] | 31 | 2018-10-15T16:02:44.000Z | 2021-04-09T08:13:44.000Z | sosia/processing/tests/test_querying.py | sosia-dev/sosia | d4d2d5edb0cd1d085b5a457eb6d19bf8e9fea7f5 | [
"MIT"
] | 2 | 2020-01-09T06:47:09.000Z | 2020-12-05T13:21:03.000Z | # -*- coding: utf-8 -*-
"""Tests for processing.querying module."""
from os.path import expanduser
from nose.tools import assert_equal, assert_true
from string import Template
from sosia.establishing import connect_database
from sosia.processing import base_query, count_citations, create_queries,\
query_pubs_by_sourceyear, stacked_query
test_cache = expanduser("~/.sosia/test.sqlite")
test_conn = connect_database(test_cache)
test_id = 53164702100
year = 2017
refresh = 30
| 39.708955 | 83 | 0.66698 | # -*- coding: utf-8 -*-
"""Tests for processing.querying module."""
from os.path import expanduser
from nose.tools import assert_equal, assert_true
from string import Template
from sosia.establishing import connect_database
from sosia.processing import base_query, count_citations, create_queries,\
query_pubs_by_sourceyear, stacked_query
test_cache = expanduser("~/.sosia/test.sqlite")
test_conn = connect_database(test_cache)
test_id = 53164702100
year = 2017
refresh = 30
def test_base_query():
    """A document search for the test author before 2017 should count 5 docs.

    NOTE(review): appears to query live Scopus data -- counts may drift.
    """
    q = f"AU-ID({test_id}) AND PUBYEAR BEF {year}"
    size = base_query("docs", q, size_only=True)
    assert_equal(size, 5)
def test_base_query_author():
    """An author search for the test ID should match exactly one profile."""
    query = f"AU-ID({test_id})"
    size = base_query("author", query, size_only=True)
    assert_equal(size, 1)
def test_count_citations():
    """count_citations() with author IDs, EIDs, and an oversized EID list."""
    # NOTE(review): the duplicated ID looks intentional -- presumably checks
    # that duplicates are not double-counted; confirm against implementation.
    identifier = ["55208373700", "55208373700"]
    count1 = count_citations(identifier, 2017)
    assert_equal(count1, 23)
    # Citation count of two documents, excluding the given author IDs.
    eids = ["2-s2.0-84959420483", "2-s2.0-84949113230"]
    count2 = count_citations(eids, 2017, exclusion_ids=identifier)
    assert_equal(count2, 1)
    # A long list forces chunked querying; the result must be unchanged.
    eids_long = eids * 100
    count3 = count_citations(eids_long, 2017, exclusion_ids=identifier)
    assert_equal(count3, 1)
def test_create_queries_long():
    """create_queries() must chunk a long group so no query exceeds maxlen,
    and each returned tuple pairs a query string with its sub-group."""
    # Set variables
    group = list(range(1, 2000))
    template = Template(f"SOURCE-ID($fill) AND PUBYEAR IS {year+1}")
    joiner = " OR "
    maxlen = 200
    # Run test
    received = create_queries(group, joiner, template, maxlen)
    query_maxlen = max([len(q[0]) for q in received])
    # Compare
    assert_true(isinstance(received, list))
    assert_true(isinstance(received[0], tuple))
    assert_true(query_maxlen <= maxlen)
    expected = 'SOURCE-ID(1 OR 10 OR 100 OR 1000 OR 1001 OR 1002 OR 1003 OR '\
               '1004 OR 1005 OR 1006 OR 1007 OR 1008 OR 1009 OR 101 OR 1010 '\
               'OR 1011 OR 1012 OR 1013 OR 1014 OR 1015 OR 1016 OR 1017) '\
               'AND PUBYEAR IS 2018'
    sub_group = ['1', '10', '100', '1000', '1001', '1002', '1003', '1004',
                 '1005', '1006', '1007', '1008', '1009', '101', '1010', '1011',
                 '1012', '1013', '1014', '1015', '1016', '1017']
    assert_equal(received[0][0], expected)
    assert_equal(received[0][1], sub_group)
def test_create_queries_short():
    """With maxlen=1 every chunk must hold a single group element."""
    # Set variables
    group = list(range(1, 2000))
    template = Template(f"SOURCE-ID($fill) AND PUBYEAR IS {year+1}")
    joiner = " OR "
    maxlen = 1
    # Run test
    received = create_queries(group, joiner, template, maxlen)
    group_maxlen = max([len(q[1]) for q in received])
    # Compare
    assert_true(isinstance(received, list))
    assert_true(isinstance(received[0], tuple))
    assert_true(group_maxlen <= maxlen)
    expected = 'SOURCE-ID(1) AND PUBYEAR IS 2018'
    sub_group = ['1']
    assert_equal(received[0][0], expected)
    assert_equal(received[0][1], sub_group)
def test_query_sources_by_year():
    """query_pubs_by_sourceyear() for one source-year, a year with no data,
    and a large (>5000 results) multi-source query.

    NOTE(review): depends on live Scopus data; counts may drift over time.
    """
    # Test a journal and year
    res = query_pubs_by_sourceyear([22900], 2010, refresh=refresh)
    assert_equal(res["source_id"].unique(), ['22900'])
    assert_equal(res["year"].unique(), [2010])
    assert_true(isinstance(res["auids"][0], str))
    assert_true(len(res["auids"][0]) > 0)
    # Test a journal and year that are not in Scopus
    res = query_pubs_by_sourceyear([22900], 1969, refresh=refresh)
    assert_true(res.empty)
    # Test a large query (>5000 results)
    source_ids = [13703, 13847, 13945, 14131, 14150, 14156, 14204, 14207,
                  14209, 14346, 14438, 14536, 14539, 15034, 15448, 15510, 15754]
    res = query_pubs_by_sourceyear(source_ids, 1984, refresh=refresh)
    assert_true(3380 < res.dropna(subset=["auids"]).shape[0] < 3400)
    # Bug fix: this line used assert_true(value, msg), whose second argument
    # is only a failure message, so it always passed; the intent is an
    # equality check on the column layout.
    assert_equal(res.columns.tolist(), ['source_id', 'year', 'afid', 'auids'])
    assert_true(isinstance(res["auids"][0], str))
    assert_true(len(res["auids"][0]) > 0)
def test_query_sources_by_year_stacked():
    """Same scenarios as test_query_sources_by_year, with stacked=True.

    NOTE(review): depends on live Scopus data; counts may drift over time.
    """
    # Test a journal and year
    res = query_pubs_by_sourceyear([22900], 2010, refresh=refresh, stacked=True)
    assert_equal(res["source_id"].unique(), ['22900'])
    assert_equal(res["year"].unique(), [2010])
    assert_true(isinstance(res["auids"][0], str))
    assert_true(len(res["auids"][0]) > 0)
    # Test a journal and year that are not in Scopus
    res = query_pubs_by_sourceyear([22900], 1969, refresh=refresh, stacked=True)
    assert_true(res.empty)
    # Test a large query (>5000 results)
    source_ids = [13703, 13847, 13945, 14131, 14150, 14156, 14204, 14207,
                  14209, 14346, 14438, 14536, 14539, 15034, 15448, 15510, 15754]
    res = query_pubs_by_sourceyear(source_ids, 1984, refresh=refresh, stacked=True)
    assert_true(3380 < res.dropna(subset=["auids"]).shape[0] < 3400)
    # Bug fix: this line used assert_true(value, msg), whose second argument
    # is only a failure message, so it always passed; the intent is an
    # equality check on the column layout.
    assert_equal(res.columns.tolist(), ['source_id', 'year', 'afid', 'auids'])
    assert_true(isinstance(res["auids"][0], str))
    assert_true(len(res["auids"][0]) > 0)
def test_stacked_query():
    """A stacked document query over ten sources in 2018 should return 797 hits.

    NOTE(review): appears to query live Scopus data -- counts may drift.
    """
    group = [18400156716, 19300157101, 19400157208, 19400157312, 19500157223,
             19600166213, 19700175482, 19700182353, 19800188009, 19900193211]
    template = Template(f"SOURCE-ID($fill) AND PUBYEAR IS {year+1}")
    res = stacked_query(group, template, joiner=" OR ", q_type="docs",
                        refresh=False, stacked=True, verbose=False)
    assert_equal(len(res), 797)
| 4,646 | 0 | 184 |
87cb912625c5f0172dccede1f05b6398960d3c64 | 3,004 | py | Python | data/scripts/weather/weather.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/weather/weather.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/weather/weather.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | # swgpy
from swgpy import app, utility, weather
from swgpy.weather import WeatherEvent, WeatherSequence
# modules
import random
service_mgr = kernel.serviceManager()
weather_svc = service_mgr.weatherService()
#This script is called every 30 minutes.
#Weather duration is now set in minutes: weather_event(duration(mins),weatherType,cloudVector(X,Y,Z)).
#Each of the following is a list of weather sequences. Each sequence runs until its duration has expired.
#At the end of the sequence, NOSTORM is used to set the weather back to clear. Whilst each weather sequence list
#is running, no other weather sequence can be used on the same scene until the sequences have expired.
lightStormSequence = WeatherSequence()
lightStormSequence.append(WeatherEvent(20, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
lightStormSequence.append(WeatherEvent(10, weather.WEATHER.LIGHTSTORM, utility.vector3(1.0, 0.0, 0.0)))
lightStormSequence.append(WeatherEvent(10, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
lightStormSequence.append(WeatherEvent(10, weather.WEATHER.NOSTORM, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence = WeatherSequence()
mediumStormSequence.append(WeatherEvent(20, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence.append(WeatherEvent(3, weather.WEATHER.LIGHTSTORM, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence.append(WeatherEvent(10, weather.WEATHER.MEDIUMSTORM, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence.append(WeatherEvent(3, weather.WEATHER.LIGHTSTORM, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence.append(WeatherEvent(10, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence.append(WeatherEvent(10, weather.WEATHER.NOSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence = WeatherSequence()
heavyStormSequence.append(WeatherEvent(20, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(3, weather.WEATHER.LIGHTSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(5, weather.WEATHER.MEDIUMSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(20, weather.WEATHER.HEAVYSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(5, weather.WEATHER.MEDIUMSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(3, weather.WEATHER.LIGHTSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(10, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(10, weather.WEATHER.NOSTORM, utility.vector3(1.0, 0.0, 0.0)))
#Crude random function with weight. Needs improving.
weatherChoice([(heavyStormSequence, 0.02), (mediumStormSequence, 0.15), (lightStormSequence,0.2)],weather.SCENE.CORELLIA)
| 61.306122 | 121 | 0.778296 | # swgpy
from swgpy import app, utility, weather
from swgpy.weather import WeatherEvent, WeatherSequence
# modules
import random
service_mgr = kernel.serviceManager()
weather_svc = service_mgr.weatherService()
#This script is called every 30 minutes.
#Weather duration is now set in minutes: weather_event(duration(mins),weatherType,cloudVector(X,Y,Z)).
#Each of the following is a list of weather sequences. Each sequence runs until its duration has expired.
#At the end of the sequence, NOSTORM is used to set the weather back to clear. Whilst each weather sequence list
#is running, no other weather sequence can be used on the same scene until the sequences have expired.
lightStormSequence = WeatherSequence()
lightStormSequence.append(WeatherEvent(20, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
lightStormSequence.append(WeatherEvent(10, weather.WEATHER.LIGHTSTORM, utility.vector3(1.0, 0.0, 0.0)))
lightStormSequence.append(WeatherEvent(10, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
lightStormSequence.append(WeatherEvent(10, weather.WEATHER.NOSTORM, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence = WeatherSequence()
mediumStormSequence.append(WeatherEvent(20, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence.append(WeatherEvent(3, weather.WEATHER.LIGHTSTORM, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence.append(WeatherEvent(10, weather.WEATHER.MEDIUMSTORM, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence.append(WeatherEvent(3, weather.WEATHER.LIGHTSTORM, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence.append(WeatherEvent(10, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
mediumStormSequence.append(WeatherEvent(10, weather.WEATHER.NOSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence = WeatherSequence()
heavyStormSequence.append(WeatherEvent(20, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(3, weather.WEATHER.LIGHTSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(5, weather.WEATHER.MEDIUMSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(20, weather.WEATHER.HEAVYSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(5, weather.WEATHER.MEDIUMSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(3, weather.WEATHER.LIGHTSTORM, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(10, weather.WEATHER.CLOUDY, utility.vector3(1.0, 0.0, 0.0)))
heavyStormSequence.append(WeatherEvent(10, weather.WEATHER.NOSTORM, utility.vector3(1.0, 0.0, 0.0)))
#Weighted random choice of a weather sequence for a scene.
def weatherChoice(wList, scene):
    """Randomly apply at most one weather sequence to `scene`.

    wList is a list of (WeatherSequence, weight) tuples.  Each entry is now
    chosen with probability equal to its weight (weights should sum to at
    most 1); with the remaining probability the scene weather is unchanged.
    """
    # Bug fix: the old code compared a single draw against each *raw* weight
    # in turn, so an entry's effective probability was its weight minus the
    # largest earlier weight (and could drop to zero).  Comparing against
    # the cumulative sum gives every entry exactly its configured weight.
    n = random.uniform(0, 1)
    cumulative = 0.0
    for item, weight in wList:
        cumulative += weight
        if n < cumulative:
            weather_svc.setSceneWeather(scene, item)
            break
weatherChoice([(heavyStormSequence, 0.02), (mediumStormSequence, 0.15), (lightStormSequence,0.2)],weather.SCENE.CORELLIA)
| 165 | 0 | 22 |
bad18646aa55721ad9b73e97436ebeaa678b78df | 3,353 | py | Python | run.py | tomplays/raspberry-car | 1458b0ed698232bc03133248b5ff616f3b6f3290 | [
"MIT"
] | 6 | 2015-07-13T10:19:55.000Z | 2018-11-08T13:16:55.000Z | run.py | tomplays/raspberry-car | 1458b0ed698232bc03133248b5ff616f3b6f3290 | [
"MIT"
] | null | null | null | run.py | tomplays/raspberry-car | 1458b0ed698232bc03133248b5ff616f3b6f3290 | [
"MIT"
] | 1 | 2021-02-13T19:49:16.000Z | 2021-02-13T19:49:16.000Z | #!/usr/bin/env python
# blou
# Raspberry Car v1.xx
# MIT Licence - Tom Wersinger https://github.com/tomplays/raspberry-car/
import RPi.GPIO as io
import time
# pins attribution
#motor A
in3_pin = 4
in4_pin = 17
#motor B
in1_pin = 27
in2_pin = 22
#Blinking orange leds
o_pin = 24
or_pin = 23
# always stop motors after xx seconds..
securetime = 10
#misc def
glowtime = .1
turntime = .04
#GPIO inits
io.setmode(io.BCM)
io.setup(in1_pin, io.OUT)
io.setup(in2_pin, io.OUT)
io.setup(in3_pin, io.OUT)
io.setup(in4_pin, io.OUT)
io.setup(o_pin, io.OUT)
io.setup(or_pin, io.OUT)
#demo mode for glow
#make the two orange leds blink
#params
# dir: left|right
#params
# dir: forward|backward
# long: how long (to check)
# turns all gpio off
# Main loop
# expects keyboard inputs:
# zw(drive) - ae(turn) - lm -hg(glows) - s(stop) p(demo) :
while True:
cmd = raw_input("zw - ae - lm - hg - s :")
direction = cmd[0]
if direction == "e":
turn('right')
elif direction == "a":
turn('left')
elif direction == "s":
stopall()
elif direction == "z":
drive('forward',1)
elif direction == "w":
drive('back', 1)
elif direction == "p":
demoa()
elif direction == "h":
glow(True, True, .1, 2, "no")
elif direction == "l":
glow(True, False, .1, 5, "yes")
elif direction == "g":
glow(True, True, .5, 5, "yes")
else:
stopall()
| 22.503356 | 72 | 0.554131 | #!/usr/bin/env python
# blou
# Raspberry Car v1.xx
# MIT Licence - Tom Wersinger https://github.com/tomplays/raspberry-car/
import RPi.GPIO as io
import time
# pins attribution
#motor A
in3_pin = 4
in4_pin = 17
#motor B
in1_pin = 27
in2_pin = 22
#Blinking orange leds
o_pin = 24
or_pin = 23
# always stop motors after xx seconds..
securetime = 10
#misc def
glowtime = .1
turntime = .04
#GPIO inits
io.setmode(io.BCM)
io.setup(in1_pin, io.OUT)
io.setup(in2_pin, io.OUT)
io.setup(in3_pin, io.OUT)
io.setup(in4_pin, io.OUT)
io.setup(o_pin, io.OUT)
io.setup(or_pin, io.OUT)
#demo mode for glow
def demoa():
    """Run a short LED demo: each LED alone, then both out of phase and in sync."""
    demo_steps = (
        (False, True, .2, 5, "yes"),   # or_pin LED alone
        (True, False, .2, 5, "yes"),   # o_pin LED alone
        (True, True, .1, 5, "yes"),    # both LEDs, out of phase
        (True, True, .1, 5, "no"),     # both LEDs, in sync
    )
    for a, b, delay, cycles, alternate in demo_steps:
        glow(a, b, delay, cycles, alternate)
    stopall()
#make the two orange leds blink
def glow(a, b, tim, ran, alternate):
    """Blink the two orange LEDs.

    a         -- blink the o_pin LED when True
    b         -- blink the or_pin LED when True
    tim       -- half-period of one blink cycle, in seconds
    ran       -- number of blink cycles
    alternate -- "yes" to blink the LEDs out of phase, anything else in sync

    Both LEDs are switched off afterwards.
    """
    # The original had two nearly identical branches whose only difference
    # was the phase of the or_pin LED; that phase is captured in one flag.
    # The stray Python-2 debug statement `print x` was removed.
    or_phase = (alternate == "yes")
    for _ in range(ran):
        if a:
            io.output(o_pin, False)
        if b:
            io.output(or_pin, or_phase)
        time.sleep(tim)
        if a:
            io.output(o_pin, True)
        if b:
            io.output(or_pin, not or_phase)
        time.sleep(tim)
    io.output(o_pin, False)
    io.output(or_pin, False)
#params
# dir: left|right
def turn(dir):
    """Pulse the steering motor in the given direction, flash the matching
    indicator LED, then release the motor.

    dir -- "right" or "left"; any other value just releases the motor.
    """
    # dir -> ((in3 state, in4 state), (glow a flag, glow b flag))
    actions = {
        "right": ((False, True), (False, True)),
        "left": ((True, False), (True, False)),
    }
    if dir in actions:
        (in3_state, in4_state), (glow_a, glow_b) = actions[dir]
        io.output(in3_pin, in3_state)
        io.output(in4_pin, in4_state)
        time.sleep(turntime)
        glow(glow_a, glow_b, .1, 5, "no")
    io.output(in3_pin, False)
    io.output(in4_pin, False)
#params
# dir: forward|backward
# long: how long (to check)
def drive(dir, long):
    """Run the drive motor for `long` seconds, then stop it.

    dir  -- "forward" or "back"; any other value just waits and stops
    long -- duration in seconds
    """
    # dir -> (in1 state, in2 state)
    motor_states = {"forward": (False, True), "back": (True, False)}
    if dir in motor_states:
        in1_state, in2_state = motor_states[dir]
        io.output(in1_pin, in1_state)
        io.output(in2_pin, in2_state)
    time.sleep(long)
    io.output(in1_pin, False)
    io.output(in2_pin, False)
# turns all gpio off
def stopall():
    """Drive every used GPIO output low (motors stopped, LEDs off)."""
    for pin in (in1_pin, in2_pin, in3_pin, in4_pin, o_pin, or_pin):
        io.output(pin, False)
# Main loop
# expects keyboard inputs:
# zw(drive) - ae(turn) - lm -hg(glows) - s(stop) p(demo) :
while True:
    cmd = raw_input("zw - ae - lm - hg - s :")
    # Bug fix: pressing enter on an empty line used to crash with an
    # IndexError on cmd[0]; ignore empty input instead.
    if not cmd:
        continue
    direction = cmd[0]
    if direction == "e":
        turn('right')
    elif direction == "a":
        turn('left')
    elif direction == "s":
        stopall()
    elif direction == "z":
        drive('forward', 1)
    elif direction == "w":
        drive('back', 1)
    elif direction == "p":
        demoa()
    elif direction == "h":
        glow(True, True, .1, 2, "no")
    elif direction == "l":
        glow(True, False, .1, 5, "yes")
    elif direction == "g":
        glow(True, True, .5, 5, "yes")
    else:
        # Unknown command: fail safe by stopping everything.
        stopall()
| 1,795 | 0 | 110 |
d716fb576f2df8487d65e77afa4a5b2379a0f174 | 1,821 | py | Python | api_recipes/serializers.py | szypkiwonsz/Recipes-Rest-Api | 8fa5c826f26a8160314d81f5f0e8df7407ee4c13 | [
"MIT"
] | null | null | null | api_recipes/serializers.py | szypkiwonsz/Recipes-Rest-Api | 8fa5c826f26a8160314d81f5f0e8df7407ee4c13 | [
"MIT"
] | null | null | null | api_recipes/serializers.py | szypkiwonsz/Recipes-Rest-Api | 8fa5c826f26a8160314d81f5f0e8df7407ee4c13 | [
"MIT"
] | null | null | null | from drf_writable_nested import WritableNestedModelSerializer, UniqueFieldsMixin
from rest_framework import serializers
from api_recipes.models import Recipe, Ingredient, Food, Step, User
class FoodSerializer(UniqueFieldsMixin, serializers.ModelSerializer):
"""Food serializer."""
class StepSerializer(UniqueFieldsMixin, serializers.ModelSerializer):
"""Recipe step serializer."""
class IngredientSerializer(WritableNestedModelSerializer):
"""Food ingredient serializer."""
food = FoodSerializer()
class UserSerializer(serializers.ModelSerializer):
"""User serializer."""
class RecipeSerializer(WritableNestedModelSerializer):
"""Recipe serializer."""
ingredients = IngredientSerializer(many=True)
steps = StepSerializer(many=True)
author = UserSerializer(read_only=True)
| 27.179104 | 80 | 0.674355 | from drf_writable_nested import WritableNestedModelSerializer, UniqueFieldsMixin
from rest_framework import serializers
from api_recipes.models import Recipe, Ingredient, Food, Step, User
class FoodSerializer(UniqueFieldsMixin, serializers.ModelSerializer):
    """Food serializer."""
    def create(self, validated_data):
        """Return an existing Food matching the payload, or create one.

        get_or_create keeps Food rows unique when the same food is sent
        in several nested ingredient payloads.
        """
        obj, created = Food.objects.get_or_create(**validated_data)
        return obj
    def update(self, instance, validated_data):
        """Delegate to the default ModelSerializer update."""
        # NOTE(review): this override looks redundant, but it may interact
        # with UniqueFieldsMixin -- confirm before removing.
        return super(FoodSerializer, self).update(instance, validated_data)
    class Meta:
        model = Food
        fields = '__all__'
class StepSerializer(UniqueFieldsMixin, serializers.ModelSerializer):
    """Recipe step serializer."""
    # NOTE(review): UniqueFieldsMixin is meant to let nested writes reuse
    # rows with unique fields -- confirm Step actually declares any.
    class Meta:
        model = Step
        fields = '__all__'
class IngredientSerializer(WritableNestedModelSerializer):
    """Food ingredient serializer."""
    # Nested food payload; deduplicated via FoodSerializer.create().
    food = FoodSerializer()
    class Meta:
        model = Ingredient
        fields = '__all__'
class UserSerializer(serializers.ModelSerializer):
    """User serializer.

    The password is write-only and is hashed via ``set_password`` on create.
    """
    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'password')
        # Bug fix: 'required' was a *top-level* key of extra_kwargs, where
        # DRF expects field names, so it was silently ignored.  It belongs
        # inside the password field's kwargs.
        extra_kwargs = {'password': {'write_only': True, 'required': True}}
    def create(self, validated_data):
        # Override needed to properly hash the password; the default
        # create() would store the raw value on the model field.
        user = User(
            email=validated_data['email'],
            username=validated_data['username']
        )
        user.set_password(validated_data['password'])
        user.save()
        return user
class RecipeSerializer(WritableNestedModelSerializer):
    """Recipe serializer."""
    # Writable nested serializers: ingredients and steps are created and
    # updated together with the recipe from a single request payload.
    ingredients = IngredientSerializer(many=True)
    steps = StepSerializer(many=True)
    # NOTE(review): author is read-only, so the view presumably sets it
    # (e.g. in perform_create) -- confirm against the view code.
    author = UserSerializer(read_only=True)
    class Meta:
        model = Recipe
        fields = '__all__'
| 482 | 304 | 216 |
0d575dfb068e866aa2a4d160851da5eda651fa00 | 1,971 | py | Python | jdcloud_sdk/services/monitor/models/UpdateCmAlarmSpec.py | jdcloud-demo/jdcloud-sdk-python | fddc2af24031c597948b8b8091978ac7e01a2695 | [
"Apache-2.0"
] | null | null | null | jdcloud_sdk/services/monitor/models/UpdateCmAlarmSpec.py | jdcloud-demo/jdcloud-sdk-python | fddc2af24031c597948b8b8091978ac7e01a2695 | [
"Apache-2.0"
] | null | null | null | jdcloud_sdk/services/monitor/models/UpdateCmAlarmSpec.py | jdcloud-demo/jdcloud-sdk-python | fddc2af24031c597948b8b8091978ac7e01a2695 | [
"Apache-2.0"
] | null | null | null | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
| 39.42 | 186 | 0.679351 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class UpdateCmAlarmSpec(object):
    """Request payload for updating a cloud-monitor alarm rule."""

    def __init__(self, calculation, metric, operation, period, serviceCode, threshold, times, contactGroups=None, contactPersons=None, downSample=None, noticePeriod=None, ruleName=None):
        """
        :param calculation: aggregation method: average=avg, maximum=max, minimum=min, sum=sum
        :param metric: metric name, as returned in the Metric field of the per-product metric list API
        :param operation: comparison operator: >=, >, <, <=, =, !=
        :param period: statistics period in minutes; currently supported values: 2, 5, 15, 30, 60
        :param serviceCode: product (service) code
        :param threshold: alarm threshold
        :param times: number of consecutive breaches before alerting; one of 1, 2, 3, 5
        :param contactGroups: (Optional) contact groups to notify, e.g. ["group1", "group2"]
        :param contactPersons: (Optional) contact persons to notify, e.g. ["person1", "person2"]
        :param downSample: (Optional) down-sampling frequency
        :param noticePeriod: (Optional) notification interval, in hours
        :param ruleName: (Optional) rule name, at most 42 characters; only CJK/Latin letters, digits, "-" and "_"
        """
        # Required fields, in signature order.
        self.calculation = calculation
        self.metric = metric
        self.operation = operation
        self.period = period
        self.serviceCode = serviceCode
        self.threshold = threshold
        self.times = times
        # Optional fields.
        self.contactGroups = contactGroups
        self.contactPersons = contactPersons
        self.downSample = downSample
        self.noticePeriod = noticePeriod
        self.ruleName = ruleName
| 0 | 1,599 | 23 |
e12340c19552627ebf352b0af090b6a4d4ab0aac | 728 | py | Python | ella/articles/admin.py | petrlosa/ella | 120eac56b46b7b79cdeea0582d2711f5a4b0bf51 | [
"BSD-3-Clause"
] | 75 | 2015-01-17T08:30:08.000Z | 2022-02-23T19:05:36.000Z | ella/articles/admin.py | petrlosa/ella | 120eac56b46b7b79cdeea0582d2711f5a4b0bf51 | [
"BSD-3-Clause"
] | 3 | 2016-04-21T22:16:37.000Z | 2021-07-08T12:47:37.000Z | ella/articles/admin.py | petrlosa/ella | 120eac56b46b7b79cdeea0582d2711f5a4b0bf51 | [
"BSD-3-Clause"
] | 26 | 2015-02-12T04:13:41.000Z | 2022-01-08T05:26:27.000Z | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from ella.core.admin import PublishableAdmin, ListingInlineAdmin, RelatedInlineAdmin
from ella.articles.models import Article
admin.site.register(Article, ArticleAdmin)
| 36.4 | 96 | 0.677198 | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from ella.core.admin import PublishableAdmin, ListingInlineAdmin, RelatedInlineAdmin
from ella.articles.models import Article
class ArticleAdmin(PublishableAdmin):
    """Django admin options for Article: newest-first listing and a grouped edit form."""
    # Newest articles first in the change list.
    ordering = ('-publish_from',)
    # Edit-form layout, grouped into translated sections.
    fieldsets = (
        (_("Article heading"), {'fields': ('title', 'slug')}),
        (_("Article contents"), {'fields': ('description', 'content')}),
        (_("Metadata"), {'fields': ('category', 'authors', 'source', 'photo')}),
        (_("Publication"), {'fields': (('publish_from', 'publish_to'), 'published', 'static')}),
    )
    inlines = [ListingInlineAdmin, RelatedInlineAdmin]
admin.site.register(Article, ArticleAdmin)
| 0 | 443 | 23 |