hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
96de8cfd18855bd3db54e341bcf82c6193cb52f1 | 1,213 | py | Python | web-server/webserver/urls.py | ApLight/groenlandicus | 61d230d2b1de0674424c2ea00d2d256f7a68db2e | [
"MIT"
] | null | null | null | web-server/webserver/urls.py | ApLight/groenlandicus | 61d230d2b1de0674424c2ea00d2d256f7a68db2e | [
"MIT"
] | 14 | 2018-09-01T07:41:10.000Z | 2018-09-01T19:41:11.000Z | web-server/webserver/urls.py | ApLight/groenlandicus | 61d230d2b1de0674424c2ea00d2d256f7a68db2e | [
"MIT"
] | null | null | null | """webserver URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from account import views as account_views
from quiz import views as quiz_views
from entry import views as entry_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^login/', account_views.login, name="login"),
url(r'^feed/', account_views.feed_seolgi, name="feed"),
url(r'^quizzes/', quiz_views.get_quizzes, name="get_quizzes"),
url(r'^index/', entry_views.get_index, name="get_index"),
]
quiz_views.update_quizzes() # When the project starts, execute "update_quizzes".
| 37.90625 | 80 | 0.718054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 782 | 0.644683 |
96df38e80d482351507d9e44ebf768c69656d95b | 958 | py | Python | pasteScripts.py | alexanderbeatson/electiondashboard | 8567e183a61c4e0b9e3f6ff0ee30acb61be93f15 | [
"MIT"
] | null | null | null | pasteScripts.py | alexanderbeatson/electiondashboard | 8567e183a61c4e0b9e3f6ff0ee30acb61be93f15 | [
"MIT"
] | null | null | null | pasteScripts.py | alexanderbeatson/electiondashboard | 8567e183a61c4e0b9e3f6ff0ee30acb61be93f15 | [
"MIT"
] | null | null | null | def main():
mainFile = open("index.html", 'r', encoding='utf-8')
writeFile = open("index_pasted.html", 'w+', encoding='utf-8')
classId = 'class="internal"'
cssId = '<link rel='
for line in mainFile:
if (classId in line):
pasteScript(line, writeFile)
elif (cssId in line):
pasteCSS(line, writeFile)
else:
writeFile.write(line)
writeFile.close()
def pasteCSS(line, writeFile):
    """Write the stylesheet referenced by *line* as an inline <style> block.

    The stylesheet path is the second-to-last double-quoted token on the
    line (e.g. <link rel="stylesheet" href="style.css">). The original
    never closed the opened stylesheet; the with-statement fixes the leak.
    """
    filename = line.split('"')[-2]
    writeFile.write("<style>\n")
    with open(filename, 'r', encoding='utf-8') as importFile:
        for row in importFile:
            writeFile.write(row)
    writeFile.write("</style>\n")
def pasteScript(line, writeFile):
    """Write the script referenced by *line* as an inline <script> block.

    The script path is taken from the quoted value inside the fourth
    space-separated token of the stripped line (the src="..." attribute in
    this project's HTML). The original never closed the opened script file;
    the with-statement fixes the leak.
    """
    filename = line.strip().split(" ")[3].split('"')[1]
    writeFile.write("<script>\n")
    with open(filename, 'r', encoding='utf-8') as importFile:
        for row in importFile:
            writeFile.write(row)
    writeFile.write("</script>\n")
main()
| 28.176471 | 65 | 0.591858 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.165971 |
96dfc543c2de8dddb6e747e4e8c3935460f47a78 | 1,016 | py | Python | apps/core/views.py | InfinityLoopA-Z/BigBoxChallenge | eb1b70412af6859032d78d23edfb4c588c17b8cd | [
"MIT"
] | null | null | null | apps/core/views.py | InfinityLoopA-Z/BigBoxChallenge | eb1b70412af6859032d78d23edfb4c588c17b8cd | [
"MIT"
] | null | null | null | apps/core/views.py | InfinityLoopA-Z/BigBoxChallenge | eb1b70412af6859032d78d23edfb4c588c17b8cd | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from django_filters.rest_framework import DjangoFilterBackend
from . import models, serializers, filtersets, pagination
class ActivityViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Activity objects, filterable and paginated.

    Instances are addressed by their slug rather than their numeric pk.
    """

    queryset = models.Activity.objects.all()
    serializer_class = serializers.ActivitySerializer
    permission_classes = []  # no access restrictions configured
    filter_backends = [DjangoFilterBackend]
    filterset_class = filtersets.ActivityFilterSet
    pagination_class = pagination.CustomPagination
    lookup_field = 'slug'
class BoxViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Box objects, addressed by slug."""

    queryset = models.Box.objects.all()
    serializer_class = serializers.BoxSerializer
    permission_classes = []  # no access restrictions configured
    lookup_field = 'slug'
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Category objects, addressed by slug."""

    queryset = models.Category.objects.all()
    serializer_class = serializers.CategorySerializer
    permission_classes = []  # no access restrictions configured
    lookup_field = 'slug'
| 28.222222 | 61 | 0.744094 | 850 | 0.836614 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.110236 |
96e0ca518913b396a050050562a88480c52f946d | 3,721 | py | Python | hearthbreaker/cards/spells/neutral.py | souserge/hearthbreaker | 481dcaa3ae13c7dc16c0e6b7f59f11c36fdb29a7 | [
"MIT"
] | 429 | 2015-01-01T16:07:20.000Z | 2022-03-16T22:30:50.000Z | hearthbreaker/cards/spells/neutral.py | souserge/hearthbreaker | 481dcaa3ae13c7dc16c0e6b7f59f11c36fdb29a7 | [
"MIT"
] | 47 | 2015-01-01T17:07:57.000Z | 2018-05-07T10:49:37.000Z | hearthbreaker/cards/spells/neutral.py | souserge/hearthbreaker | 481dcaa3ae13c7dc16c0e6b7f59f11c36fdb29a7 | [
"MIT"
] | 135 | 2015-01-12T21:52:17.000Z | 2022-02-25T21:18:08.000Z | from hearthbreaker.cards.base import SpellCard
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY
from hearthbreaker.tags.base import BuffUntil, Buff
from hearthbreaker.tags.event import TurnStarted
from hearthbreaker.tags.status import Stealth, Taunt, Frozen
import hearthbreaker.targeting
class TheCoin(SpellCard):
    """0-cost token spell: gain one mana crystal this turn (capped at 10)."""

    def __init__(self):
        super().__init__("The Coin", 0, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False)

    def use(self, player, game):
        super().use(player, game)
        if player.mana >= 10:
            return  # already at the mana cap; the coin grants nothing
        player.mana += 1
class ArmorPlating(SpellCard):
    """Spare part: give a targeted minion +1 Health."""

    def __init__(self):
        super().__init__(
            "Armor Plating", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
            target_func=hearthbreaker.targeting.find_minion_spell_target)

    def use(self, player, game):
        super().use(player, game)
        self.target.increase_health(1)
class EmergencyCoolant(SpellCard):
    """Spare part: Freeze a targeted minion."""

    def __init__(self):
        super().__init__(
            "Emergency Coolant", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
            target_func=hearthbreaker.targeting.find_minion_spell_target)

    def use(self, player, game):
        super().use(player, game)
        freeze = Buff(Frozen())
        self.target.add_buff(freeze)
class FinickyCloakfield(SpellCard):
    """Spare part: give a friendly minion Stealth until your next turn."""

    def __init__(self):
        super().__init__(
            "Finicky Cloakfield", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
            target_func=hearthbreaker.targeting.find_friendly_minion_spell_target)

    def use(self, player, game):
        super().use(player, game)
        cloak = BuffUntil(Stealth(), TurnStarted())
        self.target.add_buff(cloak)
class ReversingSwitch(SpellCard):
    """Spare part: swap a minion's Attack and Health.

    A minion whose Attack is 0 would become a 0-Health minion after the
    swap, so it dies instead of being swapped.
    """

    def __init__(self):
        super().__init__(
            "Reversing Switch", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
            target_func=hearthbreaker.targeting.find_minion_spell_target)

    def use(self, player, game):
        super().use(player, game)
        attack = self.target.calculate_attack()
        health = self.target.health
        if attack == 0:
            self.target.die(None)
            return
        self.target.set_attack_to(health)
        self.target.set_health_to(attack)
class RustyHorn(SpellCard):
    """Spare part: give a targeted minion Taunt."""

    def __init__(self):
        super().__init__(
            "Rusty Horn", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
            target_func=hearthbreaker.targeting.find_minion_spell_target)

    def use(self, player, game):
        super().use(player, game)
        taunt = Buff(Taunt())
        self.target.add_buff(taunt)
class TimeRewinder(SpellCard):
    """Spare part: return a friendly minion to its owner's hand."""

    def __init__(self):
        super().__init__(
            "Time Rewinder", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
            target_func=hearthbreaker.targeting.find_friendly_minion_spell_target)

    def use(self, player, game):
        super().use(player, game)
        self.target.bounce()
class WhirlingBlades(SpellCard):
    """Spare part: give a targeted minion +1 Attack."""

    def __init__(self):
        super().__init__(
            "Whirling Blades", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
            target_func=hearthbreaker.targeting.find_minion_spell_target)

    def use(self, player, game):
        super().use(player, game)
        self.target.change_attack(1)
# The seven mech "spare part" spells that can be generated during play.
spare_part_list = [ArmorPlating(), EmergencyCoolant(), FinickyCloakfield(), TimeRewinder(), ReversingSwitch(),
                   RustyHorn(), WhirlingBlades()]
class GallywixsCoin(SpellCard):
    """Gallywix's Coin: same effect as The Coin — gain one mana crystal
    this turn, never exceeding the 10-crystal cap."""

    def __init__(self):
        super().__init__("Gallywix's Coin", 0, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False)

    def use(self, player, game):
        super().use(player, game)
        if player.mana >= 10:
            return  # already at the mana cap
        player.mana += 1
| 34.775701 | 110 | 0.67025 | 3,226 | 0.866971 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.038431 |
96e125ec5aa02313a1615a9630dae1a661e5c4fd | 2,339 | py | Python | srgan_pytorch/utils/transform.py | nisargshah1999/SRGAN-PyTorch | 093fba8ee4e571d71ac9644350bdd03a1a547765 | [
"Apache-2.0"
] | 2 | 2021-08-22T06:27:48.000Z | 2021-08-22T06:36:43.000Z | srgan_pytorch/utils/transform.py | ekstra26/SRGAN-PyTorch | 3cc4c034362070ba1e02549acca2088572fd7ec2 | [
"Apache-2.0"
] | null | null | null | srgan_pytorch/utils/transform.py | ekstra26/SRGAN-PyTorch | 3cc4c034362070ba1e02549acca2088572fd7ec2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import PIL.BmpImagePlugin
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
# Public API of this module (conversion helpers between OpenCV, PIL and torch).
__all__ = [
    "opencv2pil", "opencv2tensor", "pil2opencv", "process_image"
]
def opencv2pil(image: np.ndarray) -> PIL.BmpImagePlugin.BmpImageFile:
    """ Convert an OpenCV image (BGR ``np.ndarray``) to PIL.Image format.

    Returns:
        PIL.Image.
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # OpenCV is BGR, PIL wants RGB
    return Image.fromarray(rgb)
def opencv2tensor(image: np.ndarray, gpu: int) -> torch.Tensor:
    """ Convert an OpenCV image (BGR) to a normalized NCHW ``torch.Tensor``.

    Returns:
        torch.Tensor.
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    tensor = torch.from_numpy(rgb).div(255.0)        # HWC, floats in [0, 1]
    tensor = tensor.unsqueeze(0).permute(0, 3, 1, 2)  # NHWC -> NCHW
    if gpu is not None:
        tensor = tensor.cuda(gpu, non_blocking=True)
    return tensor
def pil2opencv(image: PIL.BmpImagePlugin.BmpImageFile) -> np.ndarray:
    """ Convert a PIL image to OpenCV format (BGR ``np.ndarray``).

    Returns:
        np.ndarray.
    """
    rgb_array = np.asarray(image)
    return cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)  # PIL is RGB, OpenCV wants BGR
def process_image(image: PIL.BmpImagePlugin.BmpImageFile, gpu: int = None) -> torch.Tensor:
    """ Convert a PIL image to a batched PyTorch tensor.

    Args:
        image (PIL.BmpImagePlugin.BmpImageFile): File read by PIL.Image.
        gpu (int): Graphics card model.

    Returns:
        torch.Tensor.
    """
    batched = transforms.ToTensor()(image).unsqueeze(0)  # add batch dimension
    if gpu is not None:
        batched = batched.cuda(gpu, non_blocking=True)
    return batched
| 30.376623 | 91 | 0.68106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,193 | 0.510047 |
96e210cba8e910f4151833c51c27bccda1cc8790 | 12,828 | py | Python | dorado/ceres/ceresClass.py | Mucephie/DORADO | 20c1742af7eef0265e670b1f600aea0a8021135b | [
"BSD-3-Clause"
] | null | null | null | dorado/ceres/ceresClass.py | Mucephie/DORADO | 20c1742af7eef0265e670b1f600aea0a8021135b | [
"BSD-3-Clause"
] | 13 | 2020-11-23T06:08:46.000Z | 2021-09-10T03:24:57.000Z | dorado/ceres/ceresClass.py | Mucephie/DORADO | 20c1742af7eef0265e670b1f600aea0a8021135b | [
"BSD-3-Clause"
] | null | null | null | import warnings
warnings.filterwarnings('ignore')
# import sys
# import os
import numpy as np
import ccdprocx
from astropy.time import Time
from astropy.table import QTable, Table
import astroalign as aa
from astropy.wcs import WCS
# from astropy.utils.console import ProgressBar, ProgressBarOrSpinner
from tqdm import tqdm
# from astropy.coordinates import SkyCoord as acoord
# import astropy.units as un
from astropy.io import fits
from astropy.nddata.ccddata import CCDData
# photometry imports
# from photutils.psf import IntegratedGaussianPRF, DAOGroup
# from photutils.background import MMMBackground, MADStdBackgroundRMS
# from astropy.modeling.fitting import LevMarLSQFitter
# from astropy.stats import gaussian_sigma_to_fwhm
# from photutils.psf import IterativelySubtractedPSFPhotometry
from photutils.aperture import CircularAperture, aperture_photometry, CircularAnnulus
# from photutils import DAOStarFinder
# from astropy.stats import mad_std
# from astroquery.simbad import Simbad
from astropy.visualization import SqrtStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.stats import SigmaClip
from photutils import Background2D, MedianBackground
from ..timeseries import timeSeries
'''
Ceres is the handler of observational image series in Dorado.
'''
__all__ = ['Ceres']
class Ceres:
    '''
    The Ceres class encapsulates a set of astronomical data from a single night of observation.
    Ceres can handle multiple stacks of data in different filters and perform a variety of
    actions on them in an orgainized fashion.

    Attributes
    ----------

    filters: dictionary
        Maps a filter name to the index of its stack in ``data``.
    data: list of Stack
        One entry per filter stack.
    bias: CCDData
        Master bias frame used by ``calibrate``.
    time: 'astropy.Time'
        Observation time; used to derive ``date`` and ``datestr``.
    datestr: str
        Night string of the form 'YYYY-MM-DD+DD' (derived if not supplied).
    '''

    def __init__(self, filters = None, data = None, bias = None, time = None, datestr = None):
        # metadata
        # NOTE: 'filters' and 'data' previously defaulted to {} and [] --
        # mutable default arguments shared by every instance, so stacks added
        # to one Ceres silently appeared in all others. Defaulting to None
        # and creating fresh containers fixes that.
        self.filters = filters if filters is not None else {}
        self.data = data if data is not None else []
        self.bias = bias
        self.time = time
        self.datestr = datestr
        # location, weather, timezone, camera, observer array
        # refering instance of clippy to call and save later?
        # or call clippy directly and feed it a ceres object

        try:
            # Truncate to the integer MJD so 'date' marks the night, not the instant.
            self.date = Time(int(self.time.mjd), format = 'mjd')
        except Exception:
            self.date = None

        if datestr == None:
            try:
                # Build 'YYYY-MM-DD+DD' spanning this night and the next day,
                # zero-padding single-digit day/month values.
                day = str(self.date.ymdhms['day'])
                day2 = str(self.date.ymdhms['day'] + 1)
                month = str(self.date.ymdhms['month'])
                if self.date.ymdhms['day'] < 10:
                    day = '0' + str(self.date.ymdhms['day'])
                if self.date.ymdhms['day'] < 9:
                    day2 = '0' + str(self.date.ymdhms['day'] + 1)
                if self.date.ymdhms['month'] < 10:
                    month = '0' + str(self.date.ymdhms['month'])
                self.datestr = str(self.date.ymdhms['year']) + '-' + month + '-' + day + '+' + day2
            except Exception:
                self.datestr = datestr

    def add_stack(self, stack):
        """Register a stack under its filter name and append it to ``data``."""
        # eventually stacks themelves should have some metadata
        # to denote stuff like calibration status
        self.filters[stack.filter] = len(self.data)
        self.data.append(stack)

    def rem_stack(self, filter):
        """Remove the stack registered for *filter* from ``data``.

        NOTE(review): the 'filters' index map is not updated here, so indices
        for later stacks become stale after a removal -- confirm intent.
        """
        del self.data[self.filters[filter]]
        # delete time strings

    def calibrate(self, filter, use_med_cr_removal = False, rb = 0):
        """Bias- and flat-correct every image in the stack for *filter*.

        Optionally applies median cosmic-ray removal with box size *rb*.
        NOTE(review): data are cast to uint16 before and after processing --
        this truncates/overflows non-integer or negative results; confirm the
        camera data model expects this.
        """
        # for bla in series: add bias corrected = True to header
        stack = self.data[self.filters[filter]]
        flat = stack.flat
        bias = self.bias
        c_series = []
        print('Calibrating')
        for im in tqdm(stack.data, colour = 'green'):
            im.data = im.data.astype('uint16')
            flat.data = flat.data.astype('uint16')
            bias.data = bias.data.astype('uint16')
            im = ccdprocx.ccd_process(im, master_bias = bias, master_flat = flat)
            if use_med_cr_removal:
                im = ccdprocx.cosmicray_median(im, rbox = rb)
            im.data = im.data.astype('uint16')
            c_series.append(im)
        self.data[self.filters[filter]].data = c_series
        self.data[self.filters[filter]].calibrated = True

    def imarith(self, filter, operator, operand):
        """Apply a scalar arithmetic *operator* ('+', '-', '*', '/') with
        *operand* to every image in the stack for *filter*, in place."""
        # mod to check datatype using type()
        # mod to remove im_count and make possible to use single image
        # mod to accomodate CCDdata object
        series = self.data[self.filters[filter]]
        # NOTE(review): elsewhere images are accessed via series.data[i];
        # here 'series[i]' relies on the stack supporting indexing/len --
        # confirm against the Stack class.
        for i in range(len(series)):
            if (operator == '+'):
                series[i].data = series[i].data + operand
            elif (operator == '-'):
                series[i].data = series[i].data - operand
            elif (operator == '/'):
                series[i].data = series[i].data / operand
            elif (operator == '*'):
                series[i].data = series[i].data * operand
        self.data[self.filters[filter]] = series

    def getWCS(self, filter, filer, alignto = None, cache = True):
        """Obtain a WCS solution for the stack's alignment image.

        With cache=True, reads the previously plate-solved frame from the
        filer's cache; otherwise submits the alignment image for solving.
        """
        series = self.data[self.filters[filter]]
        if alignto == None:
            alignto = series.alignTo
        if cache:
            hdulist = fits.open(filer.dordir / 'cache' / 'astrometryNet' / 'solved.fits')
            self.data[self.filters[filter]].wcs = WCS(hdulist[0].header, hdulist)
            self.data[self.filters[filter]].solved = CCDData.read(filer.dordir / 'cache' / 'astrometryNet' / 'solved.fits')
            hdulist.close()
        else:
            toalign = series.data[alignto]
            fname, cachedir = filer.mkcacheObj(toalign, 'astrometryNet')
            path = [cachedir, fname]
            writearray = [cachedir, 'solved.fits']
            solved, wcs_header = filer.plate_solve(path, writearray = writearray)
            filer.delcacheObj( fname, 'astrometryNet')
            self.data[self.filters[filter]].wcs = WCS(wcs_header)
            self.data[self.filters[filter]].solved = solved

    def align(self, filter, filer, alignto = None, getWCS = True, cache = False, ds = 2, ma = 5):
        """Astro-align every image in the stack for *filter* to a reference.

        ds/ma are passed to astroalign as detection_sigma/min_area. Images
        that fail to register are skipped (and counted).
        """
        series = self.data[self.filters[filter]]
        if alignto == None:
            alignto = series.alignTo
        else:
            series.alignTo = alignto
        toalign = series.data[alignto]
        ## TODO :: make this use ceres.getWCS()
        if getWCS:
            if cache:
                toalign = CCDData.read(filer.dordir / 'cache' / 'astrometryNet' / 'solved.fits', unit = filer.unit)
                hdulist = fits.open(filer.dordir / 'cache' / 'astrometryNet' / 'solved.fits')
                self.data[self.filters[filter]].wcs = WCS(hdulist[0].header, hdulist)
                hdulist.close()
                self.data[self.filters[filter]].solved = toalign
            else:
                fname, cachedir = filer.mkcacheObj(toalign, 'astrometryNet')
                path = [cachedir, fname]
                writearray = [cachedir, 'solved.fits']
                solved, wcs_header = filer.plate_solve(path, writearray = writearray)
                toalign = solved
                filer.delcacheObj( fname, 'astrometryNet')
                self.data[self.filters[filter]].wcs = WCS(wcs_header)
                self.data[self.filters[filter]].solved = solved
                # delete cache object
                # save solved to target

        aa_series = []
        skipped = []
        print('Aligning')
        for image in tqdm(series.data, colour = 'green'):
            try:
                img, _ = aa.register(image.data, toalign.data, detection_sigma = ds, min_area = ma)
                aaim = image
                aaim.data = img
                aa_series.append(aaim)
            except Exception:
                # astroalign raises when it cannot find a transform; skip the frame
                skipped.append(image)

        if len(skipped) != 0:
            print(len(skipped), ' images skipped.')
            ## TODO :: need to redo times and such for less ims

        self.data[self.filters[filter]].data = aa_series
        self.data[self.filters[filter]].aligned = True

    def dorphot(self, filter, toi, control_toi = None, shape = 21, unc = 0.1):
        """Aperture photometry of target *toi* across the stack for *filter*.

        Uses a circular aperture of radius *shape* with a local background
        annulus; if *control_toi* is given its flux is subtracted
        (differential photometry). Results are appended to toi.ts as a
        timeSeries. Requires the stack to carry a WCS solution.
        """
        # get seeing from PSF
        stack = self.data[self.filters[filter]]
        # if no wcs, complain alot
        w = stack.wcs

        xy = w.wcs_world2pix(toi.coords.ra.deg, toi.coords.dec.deg, 1)
        ra = toi.coords.ra.deg
        dec = toi.coords.dec.deg
        pos = [(float(xy[0]), float(xy[1]))]
        aperture = CircularAperture(pos, r = shape)
        annulus_aperture = CircularAnnulus(pos, r_in = shape + 2, r_out = shape + 5)
        apers = [aperture, annulus_aperture]

        if control_toi != None:
            xyc = w.wcs_world2pix(control_toi.coords.ra.deg, control_toi.coords.dec.deg, 1)
            posc = [(float(xyc[0]), float(xyc[1]))]
            aperturec = CircularAperture(posc, r = shape)
            annulus_aperturec = CircularAnnulus(posc, r_in = shape + 2, r_out = shape + 5)
            apersc = [aperturec, annulus_aperturec]

        times = []
        exptimes = []
        ray = []
        decx = []
        x = []
        y = []
        flux = []
        fluxunc = []
        apsum = []
        apsum_unc = []
        print('Performing photometry')
        for image in tqdm(stack.data, colour = 'green'):
            # assumes per-pixel uncertainty is a constant fraction 'unc' of the signal
            error = unc * image.data
            results = aperture_photometry(image, apers, error = error)
            # background per pixel from the annulus, scaled to the aperture area
            bkg_mean = results['aperture_sum_1'] / annulus_aperture.area
            bkg_sum = bkg_mean * aperture.area
            results['flux_fit'] = results['aperture_sum_0'] - bkg_sum
            times.append(Time(image.header['DATE-OBS']))
            exptimes.append(image.header['EXPTIME'])
            ray.append(ra)
            decx.append(dec)
            x.append(results['xcenter'][0])
            y.append(results['ycenter'][0])
            if control_toi != None:
                resultsc = aperture_photometry(image, apersc, error = error)
                bkg_meanc = resultsc['aperture_sum_1'] / annulus_aperturec.area
                bkg_sumc = bkg_meanc * aperturec.area
                resultsc['flux_fit'] = resultsc['aperture_sum_0'] - bkg_sumc
                apsum.append(results['flux_fit'][0] - resultsc['flux_fit'][0])
                flux.append((results['flux_fit'][0] - resultsc['flux_fit'][0])/image.header['EXPTIME'])
            else:
                apsum.append(results['flux_fit'][0])
                flux.append(results['flux_fit'][0]/image.header['EXPTIME'])
            fluxunc.append(1) ## TODO:: modify this to account for exposure time and control
            apsum_unc.append(1)
        ts = timeSeries(times = times, flux = flux, exptimes = exptimes, x = x, y = y, ra = ray, dec = decx, flux_unc = fluxunc, apsum = apsum, apsum_unc = apsum_unc)
        toi.filters[filter] = len(toi.ts)
        toi.ts.append(ts)

    def mkBase(self, filter, sigClip = False, minmax = False):
        """Median-combine the stack for *filter* into a single base image."""
        ## TODO :: add the option to change the combination method. Right now default is
        # sigma clipped median combination.
        series = self.data[self.filters[filter]]
        c = ccdprocx.Combiner(series.data)
        if minmax:
            c.minmax_clipping(min_clip = 0.1)
        if sigClip:
            c.sigma_clipping()
        self.data[self.filters[filter]].base = c.median_combine()
        ## TODO :: sort out what is in the header of this base file.
        ## TODO :: Sort out how to save this to the filesystem

    def calBase(self, filter):
        """Divide the base image by its 2-D estimated background (flattening
        large-scale gradients); NaNs produced by the division become 0."""
        img = self.data[self.filters[filter]].base
        norm = ImageNormalize(stretch=SqrtStretch())
        sigma_clip = SigmaClip(sigma=3.)
        bkg_estimator = MedianBackground()
        bkg = Background2D(img, (50, 50), filter_size=(3, 3), sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)
        base = img
        base.data = img.data / bkg.background.value
        base.data[np.isnan(base.data)] = 0
        self.data[self.filters[filter]].base = base
## TODO :: figure out how to handle aligning and processing planetary images and image with low star counts
## TODO :: remove dorphot and shift it to its own class which will take the ceres object as an input.
| 40.0875 | 166 | 0.587933 | 11,286 | 0.879794 | 0 | 0 | 0 | 0 | 0 | 0 | 3,382 | 0.263642 |
96e2491a9adc0b19fb3614740c106c51b3b7bfed | 610 | py | Python | scrawl.py | I-mm/Lianjia-houseInfo | c3a22b200637ef65bd3c90e2fe20a7f2ee9144ea | [
"MIT"
] | null | null | null | scrawl.py | I-mm/Lianjia-houseInfo | c3a22b200637ef65bd3c90e2fe20a7f2ee9144ea | [
"MIT"
] | null | null | null | scrawl.py | I-mm/Lianjia-houseInfo | c3a22b200637ef65bd3c90e2fe20a7f2ee9144ea | [
"MIT"
] | null | null | null | import core
import model
import settings
def get_communitylist():
    """Return the title of every Community row stored in the database."""
    return [community.title for community in model.Community.select()]
if __name__ == "__main__":
    regionlist = settings.REGIONLIST  # region names must be given in pinyin (only pinyin supported)
    model.database_init()
    core.GetHouseByRegionlist(regionlist)
    core.GetRentByRegionlist(regionlist)
    # Initial scrape: fetch the community list for each region and insert it
    # into the database; only needs to run the first time.
    core.GetCommunityByRegionlist(regionlist)
    communitylist = get_communitylist()  # read the community list back from the database
    core.GetSellByCommunitylist(communitylist)
| 29.047619 | 114 | 0.74918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.208197 |
96e2e02958eb8b2f96b29096a9887913b5f2e8c3 | 733 | py | Python | monitoring/deployment_manager/systems/configuration.py | interuss/InterUSS-Platform | 099abaa1159c4c143f8f1fde6b88956c86608281 | [
"Apache-2.0"
] | null | null | null | monitoring/deployment_manager/systems/configuration.py | interuss/InterUSS-Platform | 099abaa1159c4c143f8f1fde6b88956c86608281 | [
"Apache-2.0"
] | 1 | 2021-11-29T21:53:39.000Z | 2021-11-29T21:53:39.000Z | monitoring/deployment_manager/systems/configuration.py | interuss/InterUSS-Platform | 099abaa1159c4c143f8f1fde6b88956c86608281 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
from monitoring.monitorlib.typing import ImplicitDict
from monitoring.deployment_manager.systems.dss.configuration import DSS
from monitoring.deployment_manager.systems.test.configuration import Test
class KubernetesCluster(ImplicitDict):
name: str
"""Name of the Kubernetes cluster containing this deployment.
Contained in the NAME column of the response to
`kubectl config get-contexts`.
"""
class DeploymentSpec(ImplicitDict):
cluster: Optional[KubernetesCluster]
"""Definition of Kubernetes cluster containing this deployment."""
test: Optional[Test]
"""Test systems in this deployment."""
dss: Optional[DSS]
"""DSS instance in this deployment."""
| 27.148148 | 73 | 0.758527 | 497 | 0.678035 | 0 | 0 | 0 | 0 | 0 | 0 | 299 | 0.407913 |
96e2f6791d358888a299b482a3e7912cf3a6c4c5 | 627 | py | Python | src/Broker_Instance/streamer.py | zxq0404/Raven | 398e208330619d76c0236a43493f217c1dd198be | [
"RSA-MD"
] | null | null | null | src/Broker_Instance/streamer.py | zxq0404/Raven | 398e208330619d76c0236a43493f217c1dd198be | [
"RSA-MD"
] | null | null | null | src/Broker_Instance/streamer.py | zxq0404/Raven | 398e208330619d76c0236a43493f217c1dd198be | [
"RSA-MD"
] | null | null | null | import zmq
def main():
    """Run a ZeroMQ STREAMER device bridging clients (PULL) to services (PUSH).

    Blocks inside zmq.device() until interrupted; sockets and context are
    released in the finally block. The original referenced the sockets
    unconditionally in 'finally' (NameError if Context/socket creation
    failed), contained a dead 'pass', and never terminated the context.
    """
    context = None
    frontend = None
    backend = None
    try:
        context = zmq.Context(1)

        # Socket facing clients
        frontend = context.socket(zmq.PULL)
        frontend.bind("tcp://*:5556")

        # Socket facing services
        backend = context.socket(zmq.PUSH)
        backend.bind("tcp://*:5557")

        zmq.device(zmq.STREAMER, frontend, backend)
    except Exception as e:
        print(e)
        print("bringing down zmq device")
    finally:
        # Only close what was actually created.
        if frontend is not None:
            frontend.close()
        if backend is not None:
            backend.close()
        if context is not None:
            context.term()
if __name__ == "__main__":  # run the streamer device when executed as a script
    main()
| 24.115385 | 79 | 0.503987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.177033 |
96e374927020d16c7dc665a056f81ae42d3dde0f | 1,724 | py | Python | tests/users/lists/items/test_people.py | mza921/trakt.py | 391b02a509d6ccc367711e66c8789eea3de89161 | [
"MIT"
] | null | null | null | tests/users/lists/items/test_people.py | mza921/trakt.py | 391b02a509d6ccc367711e66c8789eea3de89161 | [
"MIT"
] | null | null | null | tests/users/lists/items/test_people.py | mza921/trakt.py | 391b02a509d6ccc367711e66c8789eea3de89161 | [
"MIT"
] | null | null | null | # flake8: noqa: F403, F405
from tests.core import mock
from trakt import Trakt
from trakt.objects import Person
from datetime import datetime
from dateutil.tz import tzutc
from hamcrest import *
from httmock import HTTMock
def test_basic():
    def person(pk, name, keys):
        # Matcher for one Person list item; both fixtures share the same
        # listed_at timestamp.
        return all_of(
            instance_of(Person),
            has_properties({
                'pk': pk,
                'name': name,

                # Timestamps
                'listed_at': datetime(2014, 6, 17, 6, 52, 3, tzinfo=tzutc()),

                # Keys
                'keys': keys
            })
        )

    with HTTMock(mock.fixtures, mock.unknown):
        with Trakt.configuration.auth('mock', 'mock'):
            items = Trakt['users/me/lists/people'].items()

    # Ensure collection is valid
    assert_that(items, not_none())

    # Validate items
    assert_that(items, contains_exactly(
        # Bryan Cranston
        person(('tmdb', '17419'), 'Bryan Cranston', [
            ('tmdb', '17419'),
            ('imdb', 'nm0186505'),
            ('tvrage', '1797'),
            ('slug', 'bryan-cranston'),
            ('trakt', '1')
        ]),

        # Aaron Paul
        person(('tmdb', '84497'), 'Aaron Paul', [
            ('tmdb', '84497'),
            ('imdb', 'nm0666739'),
            ('tvrage', '1823'),
            ('slug', 'aaron-paul'),
            ('trakt', '415249')
        ])
    ))
| 26.523077 | 77 | 0.441415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.24942 |
96e3d2507b0d0ae8602498cb1566776722035d2c | 870 | py | Python | code-challanges/401/linked_list/ll_merge/ll_merge.py | schoentr/data-structures-and-algorithms | 535ac617a2ab32293014946b043bdb40a647d43b | [
"MIT"
] | null | null | null | code-challanges/401/linked_list/ll_merge/ll_merge.py | schoentr/data-structures-and-algorithms | 535ac617a2ab32293014946b043bdb40a647d43b | [
"MIT"
] | 1 | 2019-03-11T02:13:58.000Z | 2019-03-11T02:13:58.000Z | code-challanges/401_code_challenges/linked_list/ll_merge/ll_merge.py | schoentr/data-structures-and-algorithms | 535ac617a2ab32293014946b043bdb40a647d43b | [
"MIT"
] | null | null | null | from linked_list.linked_listf import LinkedList
def ll_merge(list_A, list_B):
    """Zip the nodes of list_B into list_A in alternating order, in place.

    Example: A = 1->2->3, B = 10->20 becomes 1->10->2->20->3.
    If B still has nodes when A's splice points are exhausted, the remainder
    of B is appended after A's last node. Returns the head of the merged
    list. (Removed the dead 'temp_C' variable and commented-out code; added
    a guard so an empty list_A no longer crashes.)
    """
    if list_A.head is None:
        # Nothing to interleave with: the merged list is just B.
        list_A.head = list_B.head
        return list_A.head

    curr_A = list_A.head
    while curr_A._next and list_B.head:
        curr_B = list_B.head
        after_A = curr_A._next
        list_B.head = curr_B._next   # detach curr_B from list B

        # splice curr_B between curr_A and its successor
        curr_A._next = curr_B
        curr_B._next = after_A

        curr_A = after_A             # advance past the spliced pair

    # A exhausted first: hang any remaining B nodes off the end.
    if list_B.head:
        curr_A._next = list_B.head
    return list_A.head
# Demo: interleave a list of ones-digits with a list of tens.
ones = LinkedList()
ones.insert('1')
ones.insert('2')
ones.insert('3')
ones.insert('4')
ones.insert('5')
print(ones.print())

tens = LinkedList()
tens.insert('10')
tens.insert('20')
print(tens.print())

# Fixed: the function is named ll_merge; the original called the
# undefined name 'll_merged', raising NameError at runtime.
ll_merge(ones, tens)
print(ones.print())
| 21.75 | 47 | 0.636782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.157471 |
96e3e6827970c8c66b107feb95851d4718ea2a5e | 6,159 | py | Python | wr_ks_reader.py | Kevin-Prichard/werobots-kickstarter-python | aa089f66dcd7defbfd7d06a1fb5821bb59ead10f | [
"BSD-2-Clause"
] | 1 | 2020-10-27T11:06:19.000Z | 2020-10-27T11:06:19.000Z | wr_ks_reader.py | Kevin-Prichard/werobots-kickstarter-python | aa089f66dcd7defbfd7d06a1fb5821bb59ead10f | [
"BSD-2-Clause"
] | null | null | null | wr_ks_reader.py | Kevin-Prichard/werobots-kickstarter-python | aa089f66dcd7defbfd7d06a1fb5821bb59ead10f | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json, sys, copy, os, re
import pprint
import locale
import csv
locale.setlocale(locale.LC_ALL, 'en_US')
from collections import defaultdict
from operator import itemgetter
pp = pprint.PrettyPrinter(indent=4)
# For MacPorts ... need to eliminate TODO
sys.path.append('/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages')
def dict_value_sort(d, d2=None):
    """Render dict *d* as "key<TAB>value" lines, sorted by value descending.

    Values are locale-grouped integers. When *d2* is given it is treated as
    a per-key denominator (e.g. project counts): each line gains the count
    and the per-item average, and the header gains matching column titles.
    """
    hdr = "\n\tUSD"
    if d2 is not None:
        hdr += "\t#Projects\tUSD/Proj\n"
    buf = ""
    for key in sorted(d, key=d.get, reverse=True):
        # locale.format() was removed in Python 3.12; format_string() is the
        # long-standing equivalent with identical output.
        buf += "%s\t%s" % (key, locale.format_string("%12d", d[key], grouping=True))
        if d2 is not None:
            buf += "\t%s\t%s" % (locale.format_string("%6d", d2[key], grouping=True),
                                 locale.format_string("%6d", d[key] / d2[key], grouping=True))
        buf += "\n"
    return hdr + buf
def read_usd_fx_table(usd_fx_csv_pathname):
    """Load a USD exchange-rate CSV into {currency_code: rate info}.

    Expected columns: Name, Code, currency->USD rate, USD->currency rate.
    The header row (first column == 'Currency') and empty rows are skipped.
    """
    fxusd = dict()
    # 'rb' was the Python 2 idiom; on Python 3 the csv module requires a
    # text-mode file (opened with newline='').
    with open(usd_fx_csv_pathname, 'r', newline='') as csvfile:
        fxreader = csv.reader(csvfile, delimiter=',')
        for row in fxreader:
            if len(row) > 0 and row[0] != 'Currency':
                fxusd[row[1]] = {
                    'Name': row[0],
                    'cur_buys_usd': float(row[2]),
                    'usd_buys_cur': float(row[3])
                }
    return fxusd
def prep_predicates(filters):
preds = []
for filter in filters:
(path,value) = re.split('\s*=\s*',filter)
path_els = path.split('/')
values = value.split(',')
preds.append({"path_els":path_els,"values":values})
return preds
def project_predicate_test(proj,predicates):
v = proj
match = False
for pred in predicates:
for path_el in pred['path_els']:
v = v[path_el]
sv=str(v)
if (sv==pred['values']) or (type(pred['values'])==list and sv in pred['values']):
match = True
break
return match
def main(wr_kickstarter_json_path,usd_fx_pathname,filter_predicates=[]):
predicates = prep_predicates(filter_predicates) if filter_predicates else []
fxusd = read_usd_fx_table(usd_fx_pathname)
report = gen_ks_report(wr_kickstarter_json_path,fxusd,predicates)
print report
def gen_ks_report(wr_kickstarter_json_path,fxusd,predicates=[]):
json_data = open(wr_kickstarter_json_path).read()
j = json.loads(json_data)
schema_tree = defaultdict(dict)
tots = defaultdict(dict)
corpus = []
template = {
"pled_ctry" : dict(),
"goal_ctry" : dict(),
"cnt_ctry" : dict(),
"pled_cat" : dict(),
"goal_cat" : dict(),
"cnt_cat" : dict(),
"pled_state" : 0,
"goal_state" : 0,
"cnt_state" : 0
}
"""
92562 failed
74635 successful
17296 canceled
6496 live
395 suspended
"""
cnt_all = 0
for block_of_projects in j:
proj_count = len(block_of_projects["projects"])
for i in range(proj_count):
proj = block_of_projects["projects"][i]
if predicates and not project_predicate_test(proj,predicates):
continue
# Grab project values
pled = proj["pledged"] * fxusd[proj["currency"]]["cur_buys_usd"]
#goal = proj["goal"] * fxusd[proj["currency"]]["cur_buys_usd"]
ctry = proj["country"]
cat = "%s (%s)" % (proj["category"]["name"],proj["category"]["id"])
state = proj["state"]
# Ingest descriptive text for TF-IDF
corpus.append( "%s %s" % (proj["blurb"].lower(), proj["name"].lower()) )
# Ensure accumulation skeleton exists
if state not in tots:
tots[state] = copy.deepcopy(template)
# Accumulate totals, increment counters
tots[state]["pled_ctry"][ctry] = tots[state]["pled_ctry"][ctry] + pled if ctry in tots[state]["pled_ctry"] else pled
#tots[state]["goal_ctry"][ctry] = tots[state]["goal_ctry"][ctry] + goal if ctry in tots[state]["goal_ctry"] else goal
tots[state]["cnt_ctry"][ctry] = tots[state]["cnt_ctry"][ctry] + 1 if ctry in tots[state]["cnt_ctry"] else 1
tots[state]["pled_cat"][cat] = tots[state]["pled_cat"][cat] + pled if cat in tots[state]["pled_cat"] else pled
#tots[state]["goal_cat"][cat] = tots[state]["goal_cat"][cat] + goal if cat in tots[state]["goal_cat"] else goal
tots[state]["cnt_cat"][cat] = tots[state]["cnt_cat"][cat] + 1 if cat in tots[state]["cnt_cat"] else 1
tots[state]["pled_state"] += pled
#tots[state]["goal_state"] += goal
tots[state]["cnt_state"] += 1
cnt_all += 1
# Generate the report
buf = ""
for state in tots:
buf += "Per country, %s: %s\n" % (state,dict_value_sort(tots[state]["pled_ctry"],tots[state]["cnt_ctry"]))
buf += "Per category, %s: %s\n" % (state,dict_value_sort(tots[state]["pled_cat"],tots[state]["cnt_cat"]))
buf += "Pledged overall for %s: %s\n" % (state,locale.format("%6d", tots[state]["pled_state"], grouping=True))
#buf += "Goal overall for %s: %s\n" % (state,locale.format("%6d", tots[state]["goal_state"], grouping=True))
buf += "Count overall for %s: %s\n" % (state,locale.format("%6d", tots[state]["cnt_state"], grouping=True))
buf += "Per project for %s: %s\n" % (state,locale.format("%6d", tots[state]["pled_state"]/tots[state]["cnt_state"], grouping=True))
buf += "'%s\n" % ("=" * 40)
buf += "Number of projects, overall: %d\n" % cnt_all
return buf
if __name__ == '__main__':
min_args = 3
if (len(sys.argv)<min_args) or (not os.path.exists(sys.argv[1]) or not os.path.exists(sys.argv[2])):
print "Usage: wr_ks_reader.py <webrobots_ks_data.json> <usd_fx_csv>"
print "e.g. ./wr_ks_reader.py sample-data/five_projects_from-2014-12-02.json sample-data/usd_all_2015-03-25.csv"
exit()
main(sys.argv[1],sys.argv[2],sys.argv[3:] if len(sys.argv)>min_args else None)
| 39.993506 | 139 | 0.599448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,013 | 0.326839 |
96e5665a895a1fb9a756f3fb69c43aacf59d81ff | 886 | py | Python | partialflow/utils.py | jakob-bauer/partialflow | bea1b46ca66fb5c10aefbcd1570aff922a903118 | [
"MIT"
] | 3 | 2017-02-03T15:59:10.000Z | 2020-05-23T07:26:10.000Z | partialflow/utils.py | jakob-bauer/partialflow | bea1b46ca66fb5c10aefbcd1570aff922a903118 | [
"MIT"
] | null | null | null | partialflow/utils.py | jakob-bauer/partialflow | bea1b46ca66fb5c10aefbcd1570aff922a903118 | [
"MIT"
] | 1 | 2018-08-02T02:16:34.000Z | 2018-08-02T02:16:34.000Z | import time
class Timer(object):
def __init__(self):
self._start = 0
self._end = 0
def __enter__(self):
self._start = time.time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._end = time.time()
@property
def duration(self):
if self._end == 0:
return time.time() - self._start
else:
return self._end - self._start
class VerboseTimer(Timer):
def __init__(self, name):
super(VerboseTimer, self).__init__()
self._name = name
def __enter__(self):
print('START: %s...' % self._name)
return super(VerboseTimer, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
super(VerboseTimer, self).__exit__(exc_type, exc_val, exc_tb)
print('DONE: %s took %.3f seconds.' % (self._name, self.duration)) | 24.611111 | 74 | 0.595937 | 869 | 0.980813 | 0 | 0 | 162 | 0.182844 | 0 | 0 | 43 | 0.048533 |
96e61a11791baafd8b6c85cae2fa69be8d6833f2 | 11,891 | py | Python | scripts/cloud/aws/ops-ec2-add-snapshot-tag-to-ebs-volumes.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | 164 | 2015-07-29T17:35:04.000Z | 2021-12-16T16:38:04.000Z | scripts/cloud/aws/ops-ec2-add-snapshot-tag-to-ebs-volumes.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | 3,634 | 2015-06-09T13:49:15.000Z | 2022-03-23T20:55:44.000Z | scripts/cloud/aws/ops-ec2-add-snapshot-tag-to-ebs-volumes.py | fahlmant/openshift-tools | dbb4f16ccde3404c36c23108c45ca7b67138ee12 | [
"Apache-2.0"
] | 250 | 2015-06-08T19:53:11.000Z | 2022-03-01T04:51:23.000Z | #!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
This is a script that can be used to tag EBS volumes in OpenShift v3.
This script assume that your AWS credentials are setup in ~/.aws/credentials like this:
[default]
aws_access_key_id = xxxx
aws_secret_access_key = xxxx
Or that environment variables are setup:
AWS_ACCESS_KEY_ID=xxxx
AWS_SECRET_ACCESS_KEY=xxxx
"""
# Ignoring module name
# pylint: disable=invalid-name
import argparse
import os
import sys
import logging
from logging.handlers import RotatingFileHandler
from openshift_tools.cloud.aws.ebs_snapshotter import SUPPORTED_SCHEDULES, EbsSnapshotter
from openshift_tools.cloud.aws.ebs_util import EbsUtil
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logFormatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
logFile = '/var/log/ec2-add-snapshot-tag-to-ebs-volumes.log'
logRFH = RotatingFileHandler(logFile, mode='a', maxBytes=2*1024*1024, backupCount=5, delay=0)
logRFH.setFormatter(logFormatter)
logRFH.setLevel(logging.INFO)
logger.addHandler(logRFH)
logConsole = logging.StreamHandler()
logConsole.setFormatter(logFormatter)
logConsole.setLevel(logging.WARNING)
logger.addHandler(logConsole)
TAGGER_SUPPORTED_SCHEDULES = ['never'] + SUPPORTED_SCHEDULES
ROOT_VOLUME_PURPOSE = "root volume"
DOCKER_VOLUME_PURPOSE = "docker storage volume"
PV_PURPOSE = "customer persistent volume"
class TaggerCli(object):
""" Implements the cli interface to the EBS snapshot tagger. """
def __init__(self):
self.args = None
self.parse_args()
if self.args.verbose:
logConsole.setLevel(logging.INFO)
if self.args.debug:
logConsole.setLevel(logging.DEBUG)
if self.args.skip_boto_logs:
logging.getLogger('boto').setLevel(logging.WARNING)
def parse_args(self):
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='EBS Volume Tagger')
parser.add_argument('--master-root-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that master root volumes ' + \
'should be tagged with.')
parser.add_argument('--node-root-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that node root volumes ' + \
'should be tagged with.')
parser.add_argument('--docker-storage-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that docker storage ' + \
'volumes should be tagged with.')
parser.add_argument('--autoprovisioned-pv-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that autoprovisioned pv ' + \
'volumes should be tagged with.')
parser.add_argument('--manually-provisioned-pv-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that manually provisioned pv ' + \
'volumes should be tagged with.')
parser.add_argument('--unidentified-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that unidentified ' + \
'volumes should be tagged with.')
parser.add_argument('--set-name-tag', action='store_true', default=False,
help='Add the Name tag to volumes of the host where this ' + \
'volume is attached.')
parser.add_argument('--set-purpose-tag', action='store_true', default=False,
help='Add the purpose tag to volumes')
parser.add_argument('--retag-volumes', action='store_true', default=False,
help='Retag volumes that already have a snapshot tag. ' + \
'DANGEROUS - Only do this if you know what you\'re doing!')
parser.add_argument('--aws-creds-profile', required=False,
help='The AWS credentials profile to use.')
parser.add_argument('--dry-run', action='store_true', default=False,
help='Say what would have been done, but don\'t actually do it.')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
parser.add_argument('--skip-boto-logs', action='store_true', default=False, help='Skip boto logs')
parser.add_argument('--region', required=True,
help='The region that we want to process snapshots in')
self.args = parser.parse_args()
def set_master_root_volume_tags(self, master_root_vol_ids, ebs_snapshotter, ebs_util):
""" Sets tags on master root volumes """
logger.debug("Setting master root volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(master_root_vol_ids, self.args.master_root_volumes,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_purpose_tag:
ebs_util.set_volume_purpose_tag(master_root_vol_ids, ROOT_VOLUME_PURPOSE,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_name_tag:
ebs_util.set_volume_name_tag(master_root_vol_ids, prefix=" ", dry_run=self.args.dry_run)
def set_node_root_volume_tags(self, node_root_vol_ids, ebs_snapshotter, ebs_util):
""" Sets tags on node root volumes """
logger.debug("Setting node root volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(node_root_vol_ids, self.args.node_root_volumes,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_purpose_tag:
ebs_util.set_volume_purpose_tag(node_root_vol_ids, ROOT_VOLUME_PURPOSE,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_name_tag:
ebs_util.set_volume_name_tag(node_root_vol_ids, prefix=" ", dry_run=self.args.dry_run)
def set_docker_storage_volume_tags(self, docker_storage_vol_ids, ebs_snapshotter, ebs_util):
""" Sets tags on docker storage volumes """
logger.debug("Setting docker storage volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(docker_storage_vol_ids, self.args.docker_storage_volumes,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_purpose_tag:
ebs_util.set_volume_purpose_tag(docker_storage_vol_ids, DOCKER_VOLUME_PURPOSE,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_name_tag:
ebs_util.set_volume_name_tag(docker_storage_vol_ids, prefix=" ", dry_run=self.args.dry_run)
def set_manually_provisioned_pv_volume_tags(self, manually_provisioned_pv_vol_ids, ebs_snapshotter, ebs_util):
""" Sets tags on manually provisioned pv volumes """
logger.debug("Setting manually provisioned pv volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(manually_provisioned_pv_vol_ids,
self.args.manually_provisioned_pv_volumes,
prefix=" ", dry_run=self.args.dry_run)
# NOTE: not setting Name tag because PVs don't belong to a specific host.
if self.args.set_purpose_tag:
ebs_util.set_volume_purpose_tag(manually_provisioned_pv_vol_ids, PV_PURPOSE,
prefix=" ", dry_run=self.args.dry_run)
def set_autoprovisioned_pv_volume_tags(self, autoprovisioned_pv_vol_ids, ebs_snapshotter, ebs_util):
""" Sets tags on autoprovisioned pv volumes """
logger.debug("Setting autoprovisioned pv volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(autoprovisioned_pv_vol_ids,
self.args.autoprovisioned_pv_volumes,
prefix=" ", dry_run=self.args.dry_run)
# NOTE: not setting Name tag because PVs don't belong to a specific host.
if self.args.set_purpose_tag:
ebs_util.set_volume_purpose_tag(autoprovisioned_pv_vol_ids, PV_PURPOSE,
prefix=" ", dry_run=self.args.dry_run)
def set_unidentified_volume_tags(self, unidentified_vol_ids, ebs_snapshotter):
""" Sets tags on unidentified pv volumes """
logger.debug("Setting unidentified volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(unidentified_vol_ids, self.args.unidentified_volumes,
prefix=" ", dry_run=self.args.dry_run)
# NOTE: not setting purpose tag because volumes are unidentified, so we don't know.
# NOTE: not setting Name tag because we don't know if it makes sense in this context.
def main(self):
""" Serves as the entry point for the CLI """
logger.info('Starting snapshot tagging')
if self.args.aws_creds_profile:
os.environ['AWS_PROFILE'] = self.args.aws_creds_profile
ebs_snapshotter = EbsSnapshotter(self.args.region, verbose=True)
if not ebs_snapshotter.is_region_valid(self.args.region):
logger.info("Invalid region")
sys.exit(1)
else:
logger.info("Region: %s:", self.args.region)
ebs_util = EbsUtil(self.args.region, verbose=True)
ebs_snapshotter = EbsSnapshotter(self.args.region, verbose=True)
# filter out the already tagged volumes
skip_volume_ids = []
if not self.args.retag_volumes:
# They don't want us to retag volumes that are already tagged, so
# add the already tagged volumes to the list of volume IDs to skip.
skip_volume_ids += ebs_snapshotter.get_already_tagged_volume_ids()
logger.info('Skipping this many volume ids: %s', len(skip_volume_ids))
vol_ids = ebs_util.get_classified_volume_ids(skip_volume_ids)
for id_name, id_list in vol_ids._asdict().iteritems():
logger.info('name: %s amount: %s', id_name, len(id_list))
## Actually create the snapshot tags now
if self.args.master_root_volumes and vol_ids.master_root:
self.set_master_root_volume_tags(vol_ids.master_root, ebs_snapshotter, ebs_util)
if self.args.node_root_volumes and vol_ids.node_root:
self.set_node_root_volume_tags(vol_ids.node_root, ebs_snapshotter, ebs_util)
if self.args.docker_storage_volumes and vol_ids.docker_storage:
self.set_docker_storage_volume_tags(vol_ids.docker_storage, ebs_snapshotter, ebs_util)
if self.args.manually_provisioned_pv_volumes and vol_ids.manually_provisioned_pv:
self.set_manually_provisioned_pv_volume_tags(vol_ids.manually_provisioned_pv,
ebs_snapshotter, ebs_util)
if self.args.autoprovisioned_pv_volumes and vol_ids.autoprovisioned_pv:
self.set_autoprovisioned_pv_volume_tags(vol_ids.autoprovisioned_pv, ebs_snapshotter,
ebs_util)
if self.args.unidentified_volumes and vol_ids.unidentified:
self.set_unidentified_volume_tags(vol_ids.unidentified, ebs_snapshotter)
if __name__ == "__main__":
TaggerCli().main()
| 50.6 | 114 | 0.641241 | 10,409 | 0.875368 | 0 | 0 | 0 | 0 | 0 | 0 | 3,256 | 0.273821 |
96e773f3eb8d9309f3f610d7802ef89439807320 | 3,679 | py | Python | rush00/rush00/settings.py | ppichier/moviemon_game | 556948b4763879c6c88d60037e4faf5f3555e432 | [
"MIT"
] | null | null | null | rush00/rush00/settings.py | ppichier/moviemon_game | 556948b4763879c6c88d60037e4faf5f3555e432 | [
"MIT"
] | null | null | null | rush00/rush00/settings.py | ppichier/moviemon_game | 556948b4763879c6c88d60037e4faf5f3555e432 | [
"MIT"
] | null | null | null | """
Django settings for rush00 project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "tqki)wptx*uv78p@l=b-(pd#-0e=c#*(1+^z((&9u8kxfx_f^d"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"titlescreen",
"worldmap",
"option",
"moviedex",
"battle"
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "rush00.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "rush00.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
# STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATIC_ROOT = os.path.join(BASE_DIR, "static")
API_KEY = "3cc63f26"
MOVIES_TITLE_LIST= [
"Parasite",
"Frozen Flesh",
"Disaster Movie",
"From Justin to Kelly",
"Catwoman",
"Hellboy",
"Pulp Fiction",
"Spice World",
"Suicide Squad",
"Aladdin"
]
GAME_INSTANCE = None
FILE_SAVE = "pickle_datas.bin"
PLAYER_START_POSITION = {'x': 0, 'y': 9}
PLAYER_START_STRENGTH = 0
MOVIEBALL_PLAYER_NBR = 3
MOVIEBALL_TOTAL = 50
GRID_SIZE = 10
## MAP RESPONSIVE GRID SIZE
| 24.203947 | 91 | 0.693395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,573 | 0.699375 |
96e7d1ddaf507ff49c193f3c3f481b154f3b0eda | 13,122 | py | Python | test/jpypetest/test_jchar.py | baztian/jpype | 034d44e6c719995c25e9cd61348ebc1860030a9b | [
"Apache-2.0"
] | null | null | null | test/jpypetest/test_jchar.py | baztian/jpype | 034d44e6c719995c25e9cd61348ebc1860030a9b | [
"Apache-2.0"
] | null | null | null | test/jpypetest/test_jchar.py | baztian/jpype | 034d44e6c719995c25e9cd61348ebc1860030a9b | [
"Apache-2.0"
] | null | null | null | import _jpype
import jpype
import common
from jpype.types import *
class JChar2TestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
def testCharRange(self):
self.assertEqual(ord(str(jpype.JChar(65))), 65)
self.assertEqual(ord(str(jpype.JChar(512))), 512)
@common.requireInstrumentation
def testJPChar_new(self):
_jpype.fault("PyJPChar_new")
with self.assertRaisesRegex(SystemError, "fault"):
JChar("a")
_jpype.fault("PyJPModule_getContext")
with self.assertRaisesRegex(SystemError, "fault"):
JChar("a")
JChar("a")
@common.requireInstrumentation
def testJPChar_str(self):
jc = JChar("a")
_jpype.fault("PyJPChar_str")
with self.assertRaisesRegex(SystemError, "fault"):
str(jc)
@common.requireInstrumentation
def testJCharGetJavaConversion(self):
_jpype.fault("JPCharType::findJavaConversion")
with self.assertRaisesRegex(SystemError, "fault"):
JChar._canConvertToJava(object())
@common.requireInstrumentation
def testJPJavaFrameCharArray(self):
ja = JArray(JChar)(5)
_jpype.fault("JPJavaFrame::NewCharArray")
with self.assertRaisesRegex(SystemError, "fault"):
JArray(JChar)(1)
_jpype.fault("JPJavaFrame::SetCharArrayRegion")
with self.assertRaisesRegex(SystemError, "fault"):
ja[0] = 0
_jpype.fault("JPJavaFrame::GetCharArrayRegion")
with self.assertRaisesRegex(SystemError, "fault"):
print(ja[0])
_jpype.fault("JPJavaFrame::GetCharArrayElements")
# Special case, only BufferError is allowed from getBuffer
with self.assertRaises(BufferError):
memoryview(ja[0:3])
_jpype.fault("JPJavaFrame::ReleaseCharArrayElements")
with self.assertRaisesRegex(SystemError, "fault"):
ja[0:3] = bytes([1, 2, 3])
# Not sure why this one changed.
_jpype.fault("JPJavaFrame::ReleaseCharArrayElements")
with self.assertRaisesRegex(SystemError, "fault"):
jpype.JObject(ja[::2], jpype.JObject)
_jpype.fault("JPJavaFrame::ReleaseCharArrayElements")
def f():
# Special case no fault is allowed
memoryview(ja[0:3])
f()
ja = JArray(JChar)(5) # lgtm [py/similar-function]
_jpype.fault("JPCharType::setArrayRange")
with self.assertRaisesRegex(SystemError, "fault"):
ja[1:3] = [0, 0]
def testFromObject(self):
ja = JArray(JChar)(5)
with self.assertRaises(TypeError):
ja[1] = object()
jf = JClass("jpype.common.Fixture")
with self.assertRaises(TypeError):
jf.static_char_field = object()
with self.assertRaises(TypeError):
jf().char_field = object()
def testCharArrayAsString(self):
t = JClass("jpype.array.TestArray")()
v = t.getCharArray()
self.assertEqual(str(v), 'avcd')
def testArrayConversionChar(self):
t = JClass("jpype.array.TestArray")()
v = t.getCharArray()
self.assertEqual(str(v[:]), 'avcd')
def testArrayEqualsChar(self):
contents = "abc"
array = jpype.JArray(jpype.JChar)(contents)
array2 = jpype.JArray(jpype.JChar)(contents)
self.assertEqual(array, array)
self.assertNotEqual(array, array2)
self.assertEqual(array, "abc")
def testArrayHash(self):
ja = JArray(JByte)([1, 2, 3])
self.assertIsInstance(hash(ja), int)
def testNone(self):
self.assertEqual(JChar._canConvertToJava(None), "none")
class JCharTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
self.nc = JChar('B')
def testStr(self):
self.assertEqual(type(str(self.nc)), str)
self.assertEqual(str(self.nc), 'B')
def testRepr(self):
self.assertEqual(type(repr(self.nc)), str)
self.assertEqual(repr(self.nc), "'B'")
def testOrd(self):
self.assertEqual(ord(self.nc), 66)
def testInt(self):
self.assertEqual(int(self.nc), 66)
def testFloat(self):
self.assertEqual(float(self.nc), 66.0)
def testSub(self):
self.assertEqual(len(self.nc), 1)
def testHash(self):
self.assertEqual(hash(self.nc), hash('B'))
def testAdd(self):
self.assertEqual(self.nc + 1, 67)
self.assertIsInstance(self.nc + 1, int)
self.assertEqual(self.nc + 1.1, 67.1)
self.assertIsInstance(self.nc + 1.1, float)
def testSub(self):
self.assertEqual(self.nc - 1, 65)
self.assertIsInstance(self.nc - 1, int)
self.assertEqual(self.nc - 1.1, 64.9)
self.assertIsInstance(self.nc - 1.1, float)
def testMult(self):
self.assertEqual(self.nc * 2, 132)
self.assertIsInstance(self.nc * 2, int)
self.assertEqual(self.nc * 0.25, 16.5)
self.assertIsInstance(self.nc * 2.0, float)
def testRshift(self):
self.assertEqual(self.nc >> 1, 33)
self.assertIsInstance(self.nc >> 2, int)
def testLshift(self):
self.assertEqual(self.nc << 1, 132)
self.assertIsInstance(self.nc << 2, int)
def testAnd(self):
self.assertEqual(self.nc & 244, 66 & 244)
self.assertIsInstance(self.nc & 2, int)
def testOr(self):
self.assertEqual(self.nc | 40, 66 | 40)
self.assertIsInstance(self.nc | 2, int)
def testXor(self):
self.assertEqual(self.nc ^ 1, 66 ^ 1)
self.assertIsInstance(self.nc ^ 1, int)
def testPass(self):
fixture = jpype.JClass('jpype.common.Fixture')()
self.assertEqual(type(fixture.callChar(self.nc)), JChar)
self.assertEqual(type(fixture.callObject(self.nc)), jpype.java.lang.Character)
def check(self, u, v0, v1, v2):
self.assertEqual(v1, u)
self.assertEqual(u, v1)
self.assertNotEqual(v0, u)
self.assertNotEqual(u, v0)
self.assertNotEqual(v2, u)
self.assertNotEqual(u, v2)
self.assertTrue(u > v0)
self.assertFalse(u > v2)
self.assertTrue(u < v2)
self.assertFalse(u < v0)
self.assertTrue(v0 < u)
self.assertFalse(v2 < u)
self.assertTrue(v2 > u)
self.assertFalse(v0 > u)
self.assertTrue(u >= v1)
self.assertFalse(u >= v2)
self.assertTrue(v1 <= u)
self.assertFalse(v2 <= u)
def testCompareInt(self):
self.check(self.nc, 65, 66, 67)
def testCompareFloat(self):
self.check(self.nc, 65.0, 66.0, 67.0)
def testCompareJInt(self):
self.check(self.nc, JInt(65), JInt(66), JInt(67))
def testCompareJFloat(self):
self.check(self.nc, JFloat(65.0), JFloat(66.0), JFloat(67.0))
class JCharBoxedTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
self.nc = jpype.java.lang.Character('B')
def testStr(self):
self.assertEqual(type(str(self.nc)), str)
self.assertEqual(str(self.nc), 'B')
def testRepr(self):
self.assertEqual(type(repr(self.nc)), str)
self.assertEqual(repr(self.nc), "'B'")
def testOrd(self):
self.assertEqual(ord(self.nc), 66)
def testInt(self):
self.assertEqual(int(self.nc), 66)
def testFloat(self):
self.assertEqual(float(self.nc), 66.0)
def testSub(self):
self.assertEqual(len(self.nc), 1)
def testHash(self):
self.assertEqual(hash(self.nc), hash('B'))
def testAdd(self):
self.assertEqual(self.nc + 1, 67)
self.assertIsInstance(self.nc + 1, int)
self.assertEqual(self.nc + 1.1, 67.1)
self.assertIsInstance(self.nc + 1.1, float)
def testSub(self):
self.assertEqual(self.nc - 1, 65)
self.assertIsInstance(self.nc - 1, int)
self.assertEqual(self.nc - 1.1, 64.9)
self.assertIsInstance(self.nc - 1.1, float)
def testMult(self):
self.assertEqual(self.nc * 2, 132)
self.assertIsInstance(self.nc * 2, int)
self.assertEqual(self.nc * 0.25, 16.5)
self.assertIsInstance(self.nc * 2.0, float)
def testRshift(self):
self.assertEqual(self.nc >> 1, 33)
self.assertIsInstance(self.nc >> 2, int)
def testLshift(self):
self.assertEqual(self.nc << 1, 132)
self.assertIsInstance(self.nc << 2, int)
def testAnd(self):
self.assertEqual(self.nc & 244, 66 & 244)
self.assertIsInstance(self.nc & 2, int)
def testOr(self):
self.assertEqual(self.nc | 40, 66 | 40)
self.assertIsInstance(self.nc | 2, int)
def testXor(self):
self.assertEqual(self.nc ^ 1, 66 ^ 1)
self.assertIsInstance(self.nc ^ 1, int)
def testFloorDiv(self):
self.assertEqual(self.nc // 3, 66 // 3)
self.assertEqual(3 // self.nc, 3 // 66)
def testDivMod(self):
self.assertEqual(divmod(self.nc, 3), divmod(66, 3))
self.assertEqual(divmod(3, self.nc), divmod(3, 66))
def testInv(self):
self.assertEqual(~self.nc, ~66)
def testPos(self):
self.assertEqual(+self.nc, +66)
def testPass(self):
fixture = jpype.JClass('jpype.common.Fixture')()
self.assertEqual(type(fixture.callObject(self.nc)), type(self.nc))
def check(self, u, v0, v1, v2):
self.assertEqual(v1, u)
self.assertEqual(u, v1)
self.assertNotEqual(v0, u)
self.assertNotEqual(u, v0)
self.assertNotEqual(v2, u)
self.assertNotEqual(u, v2)
self.assertTrue(u > v0)
self.assertFalse(u > v2)
self.assertTrue(u < v2)
self.assertFalse(u < v0)
self.assertTrue(v0 < u)
self.assertFalse(v2 < u)
self.assertTrue(v2 > u)
self.assertFalse(v0 > u)
self.assertTrue(u >= v1)
self.assertFalse(u >= v2)
self.assertTrue(v1 <= u)
self.assertFalse(v2 <= u)
def testCompareInt(self):
self.check(self.nc, 65, 66, 67)
def testCompareFloat(self):
self.check(self.nc, 65.0, 66.0, 67.0)
def testCompareJInt(self):
self.check(self.nc, JInt(65), JInt(66), JInt(67))
def testCompareJFloat(self):
self.check(self.nc, JFloat(65.0), JFloat(66.0), JFloat(67.0))
class JCharBoxedNullTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
self.nc = jpype.JObject(None, jpype.java.lang.Character)
def testStr(self):
self.assertEqual(type(str(self.nc)), str)
self.assertEqual(str(self.nc), 'None')
def testRepr(self):
self.assertEqual(type(repr(self.nc)), str)
self.assertEqual(repr(self.nc), 'None')
def testInt(self):
with self.assertRaises(TypeError):
int(self.nc)
def testFloat(self):
with self.assertRaises(TypeError):
float(self.nc)
def testSub(self):
with self.assertRaises(TypeError):
len(self.nc)
def testHash(self):
self.assertEqual(hash(self.nc), hash(None))
def testAdd(self):
with self.assertRaises(TypeError):
self.nc + 1
with self.assertRaises(TypeError):
1 + self.nc
def testSub(self):
with self.assertRaises(TypeError):
self.nc - 1
with self.assertRaises(TypeError):
1 - self.nc
def testMult(self):
with self.assertRaises(TypeError):
self.nc * 1
with self.assertRaises(TypeError):
1 * self.nc
def testRshift(self):
with self.assertRaises(TypeError):
self.nc >> 1
with self.assertRaises(TypeError):
1 >> self.nc
def testLshift(self):
with self.assertRaises(TypeError):
self.nc << 1
with self.assertRaises(TypeError):
1 << self.nc
def testAnd(self):
with self.assertRaises(TypeError):
self.nc & 1
with self.assertRaises(TypeError):
1 & self.nc
def testOr(self):
with self.assertRaises(TypeError):
self.nc | 1
with self.assertRaises(TypeError):
1 | self.nc
def testXor(self):
with self.assertRaises(TypeError):
self.nc ^ 1
with self.assertRaises(TypeError):
1 ^ self.nc
def testFloorDiv(self):
with self.assertRaises(TypeError):
self.nc // 1
with self.assertRaises(TypeError):
1 // self.nc
def testDivMod(self):
with self.assertRaises(TypeError):
divmod(self.nc, 1)
with self.assertRaises(TypeError):
divmod(1, self.nc)
def testInv(self):
with self.assertRaises(TypeError):
~self.nc
def testPos(self):
with self.assertRaises(TypeError):
+self.nc
def testPass(self):
fixture = jpype.JClass('jpype.common.Fixture')()
self.assertEqual(fixture.callObject(self.nc), None)
| 30.658879 | 86 | 0.601357 | 13,043 | 0.99398 | 0 | 0 | 2,216 | 0.168877 | 0 | 0 | 791 | 0.06028 |
96e84b02847af097942827575530d900a0b5412e | 5,279 | py | Python | pysensors.py | zakrzem1/pysensors | bfbef5f1442d845e5fa5febf1de5a35a81e436ee | [
"Apache-2.0"
] | null | null | null | pysensors.py | zakrzem1/pysensors | bfbef5f1442d845e5fa5febf1de5a35a81e436ee | [
"Apache-2.0"
] | null | null | null | pysensors.py | zakrzem1/pysensors | bfbef5f1442d845e5fa5febf1de5a35a81e436ee | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Google Spreadsheet DHT Sensor Data-logging Example
# Depends on the 'gspread' package being installed. If you have pip installed
# execute:
# sudo pip install gspread
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import sys
import getopt
import moskito
import json
import pytz
import dht_sensor as dht
import onewire_temp_sensor as ow
import airquality_sensor_serial as aqss
import sensor_serial_float as ssf
import time
import datetime
import influks
from log import warning, info
from config import conf
try:
import google_spreadsheet as gs
except ImportError:
warning('google_spreadsheet module cannot be loaded')
#from oauth2client.client import SignedJwtAssertionCredentials
# Timestamps are published in UTC (a previous deployment used Europe/Warsaw).
# targetTz = pytz.timezone('Europe/Warsaw')
targetTz = pytz.timezone('UTC')
# ISO-8601-like format used for every published timestamp
output_fmt = '%Y-%m-%dT%H:%M:%SZ'
# MQTT client used by main_loop to publish readings (created at import time)
client = moskito.start_client()
# loop counter; every 10th pass main_loop also appends to Google Sheets
i=0
#mqtt_topic_temp=conf['mqtt']['topic_temp']
# per-sensor configuration entries and the room label attached to every reading
sensors_cfg_arr = conf['sensors']
roomName = conf['roomName']
def main_loop():
    """Poll every configured sensor forever and publish the readings.

    For each entry in sensors_cfg_arr a reading is taken according to its
    'type' (ow / dht / air_quality_serial / serial_float), wrapped into a
    publishable dict, then pushed to MQTT (if a 'topic' is configured), to
    influx (if 'influx' is set) and, every 10th pass, to Google Sheets
    (if conf['gdocs'] is set). Sleeps sensor_read_freq_secs between passes.
    """
    sensor_read_freq_secs = conf.get('sensor_read_freq_secs', 30)
    while True:
        global i
        i+=1
        now = datetime.datetime.now(targetTz)
        for a in sensors_cfg_arr:
            reading = ()
            publishableDoc = None
            readingType = a.get('type')
            if(readingType == 'ow'):
                # one-wire temperature sensor at the configured address
                info('reading ow sensor')
                reading = ow.read(a.get('addr'))
                publishableDoc = readingObj(now, reading)
            elif(readingType == 'dht'):
                # DHT temperature/humidity sensor
                info('reading dht sensor')
                reading = dht.read(a.get('addr'))
                publishableDoc = readingObj(now, reading)
            elif(readingType == 'air_quality_serial'):
                # serial air-quality sensor; the port is opened lazily on first use
                if(not aqss.inited()):
                    serialDevice = a.get('serialDevice')
                    aqss.init(serialDevice)
                info('reading air quality sensor [serial]')
                reading = aqss.read(output_fmt, targetTz)
                if(reading):
                    publishableDoc = airquality_readingObj(reading)
                else:
                    publishableDoc = None
            elif(readingType == 'serial_float'):
                # generic float-over-serial sensor; port opened lazily on first use
                if(not ssf.inited()):
                    serialDevice = a.get('serialDevice')
                    ssf.init(serialDevice)
                info('reading sensor [serial] float')
                reading = ssf.read()
                publishableDoc = {'current':reading}
                info(publishableDoc)
            else:
                info('unsupported reading type ', readingType)
                continue
            if(not publishableDoc):
                warning('skipping malformed reading')
                continue
            topic = a.get('topic','')
            if(topic):
                info('publishing ', publishableDoc, ' to mqtt ', topic)
                client.publish(topic, json.dumps(publishableDoc))
            if(a.get('influx')):
                influks.write('readings',publishableDoc)
            # every 10th loop iteration, also append to the Google spreadsheet
            if(i%10==0 and conf['gdocs']):
                info("GDOCS object:", conf['gdocs'])
                gs.append_reading(reading)
        time.sleep(sensor_read_freq_secs)
def readingObj(now, reading):
    """Build a publishable dict from a (temp[, humidity]) reading tuple.

    Returns None (after logging a warning) for empty/malformed readings.
    """
    if not reading or len(reading) < 1:
        warning('Invalid reading data. Expected at least temp')
        return None
    doc = {
        'temp': reading[0],
        'tstamp': now.strftime(output_fmt),
        'roomName': roomName,
    }
    if len(reading) > 1:
        doc['hum'] = reading[1]
    return doc
def airquality_readingObj(reading):
    """Build a publishable dict from an (iso_timestamp, level) pair.

    Returns None implicitly (after logging a warning) when the pair does
    not have exactly two elements.
    """
    if len(reading) != 2:
        warning('invalid data format read from air quality sensor file')
        return
    doc = {'level': reading[1], 'tstamp': reading[0], 'roomName': roomName}
    info('airquality_readingObj\n', json.dumps(doc))
    return doc
def main(argv=None):
    """Entry point: parse command-line options, then run the sensor loop.

    argv defaults to sys.argv; exits with status 2 on bad options.
    main_loop() never returns.
    """
    #info('Logging sensor measurements to {0} every {1} seconds.'.format(conf['gdocs']['doc_name'], conf['sensor_read_freq']))
    if argv is None:
        argv = sys.argv
    try:
        opts, args = getopt.getopt(argv[1:], "h", ["help"])
    # BUG FIX: `except getopt.error, msg:` is Python-2-only syntax and a
    # SyntaxError on Python 3; `as` works on both 2.6+ and 3.
    except getopt.error as msg:
        print(msg)
        sys.exit(2)
    main_loop()
if __name__ == "__main__":
main()
| 37.176056 | 126 | 0.639326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,200 | 0.416746 |
96e9083445d26a4f829c2bf35d90cd2813d1671b | 4,287 | py | Python | compare/utils.py | l0rb/buzzword | 1f12875cfb883be07890b2da5482d3b53d878a09 | [
"MIT"
] | null | null | null | compare/utils.py | l0rb/buzzword | 1f12875cfb883be07890b2da5482d3b53d878a09 | [
"MIT"
] | null | null | null | compare/utils.py | l0rb/buzzword | 1f12875cfb883be07890b2da5482d3b53d878a09 | [
"MIT"
] | null | null | null | """
Random utilities this app needs
"""
import os
import re
from buzz import Corpus as BuzzCorpus
from buzz import Collection
from django.conf import settings
from explore.models import Corpus
from .models import OCRUpdate, PDF
# from django.core.exceptions import ObjectDoesNotExist
# when doing OCR, re.findall will be run on it using this regex, which sort of
# approximates a word. note that this will mean that "the end" will be marked
# as blank, but that is a decent tradeoff for marking blank a lot of junk pages.
MEANINGFUL = r"[A-Za-z]{3,}"
THRESHOLD = 3
def markdown_to_buzz_input(markdown, slug):
    """Convert user-corrected OCR markdown into buzz-parseable text.

    Headings (`# ...`, `## ...`, ...) become
    `<meta heading="true" depth="N">...</meta>` elements and bullet points
    (`* ...`) become `<meta point="true">...</meta>`; every other line
    passes through unchanged.

    Parameters:
        markdown: the user's markdown text
        slug: currently unused, kept for interface stability
    Returns the converted text as a single newline-joined string.
    Note: inline styling (bold/italic/code) is still TODO; the original's
    `for styler in [...]: pass` placeholder was a no-op and was removed.
    """
    fixed = []
    for line in markdown.splitlines():
        if line.startswith("#"):
            # note this doesn't put text inside respective headings as sections.
            # doing so would not work across pages anyway.
            pref, head = line.split(" ", 1)
            depth = len(pref.strip())
            line = f"<meta heading=\"true\" depth=\"{depth}\">{head.strip()}</meta>"
        elif line.startswith("* "):
            # BUG FIX: this replacement was a plain string missing the `f`
            # prefix, so the literal text "{line}" was emitted instead of the
            # bullet's content.
            content = line.lstrip("* ")
            line = f"<meta point=\"true\">{content}</meta>"
        fixed.append(line)
    return "\n".join(fixed)
def store_buzz_raw(raw, slug, pdf_path):
    """Write `raw` text under static/corpora/<slug>/txt/ for later parsing.

    The output filename is the PDF's basename with .pdf swapped for .txt.
    Returns the directory the file was written into.
    """
    out_dir = os.path.join("static", "corpora", slug, "txt")
    os.makedirs(out_dir, exist_ok=True)
    txt_name = os.path.basename(pdf_path).replace(".pdf", ".txt")
    out_path = os.path.join(out_dir, txt_name)
    with open(out_path, "w") as handle:
        handle.write(raw)
    return out_dir
def dump_latest():
    """
    Get the latest OCR corrections and build a parseable corpus.
    Maybe even parse it?
    """
    # NOTE(review): values_list("slug") yields 1-tuples, so each `slug` below
    # is a tuple rather than a string — confirm Corpus.objects.get(slug=slug)
    # and the path building inside store_buzz_raw behave as intended
    # (values_list("slug", flat=True) may be what was meant).
    slugs = OCRUpdate.objects.values_list("slug")
    slugs = set(slugs)
    for slug in slugs:
        corp = Corpus.objects.get(slug=slug)
        lang = corp.language.name
        # get the associated pdfs
        pdfs = PDF.objects.filter(slug=slug)
        for pdf in pdfs:
            # the most recent correction for this pdf wins
            updates = OCRUpdate.objects.filter(pdf=pdf, slug=slug)
            plaintext = updates.latest("timestamp").text
            corpus_path = store_buzz_raw(plaintext, slug, pdf.path)
        print(f"Parsing ({lang}): {corpus_path}")
        # NOTE(review): `corp` is rebound here from the Django model to a
        # BuzzCorpus, so the .save() below acts on the buzz object, not the
        # database row — confirm this is intentional.
        corp = BuzzCorpus(corpus_path)
        parsed = corp.parse(language=lang, multiprocess=1)
        corp.parsed = True
        corp.path = parsed.path
        corp.save()
    # returns the last corpus parsed (NameError if there were no slugs)
    return parsed
def _is_meaningful(plaintext, language):
    """Return True when an OCR page contains enough real words to keep.

    A "word" is a run of 3+ ASCII letters (MEANINGFUL); the page is kept
    when at least THRESHOLD such words are found. Pages in non-Latin-script
    languages are always kept, since the regex cannot match them.
    """
    # skip this check for non latin alphabet ... right now the parser doesn't
    # accept most non-latin languages, so it's mostly academic for now...
    # BUG FIX: this previously read `lang`, an undefined name (NameError);
    # the parameter is `language`.
    if language in {"zh", "ja", "fa", "iw", "ar"}:
        return True
    # BUG FIX: re.findall takes (pattern, string) — the arguments were
    # swapped, so the page text was being compiled as a regex.
    words = re.findall(MEANINGFUL, plaintext)
    return len(words) >= THRESHOLD
def _handle_page_numbers(text):
    """Detect a bare page number on the first or last non-blank line,
    strip it from the text and prepend a `<meta page=N />` tag.

    Returns the (possibly rewritten) text; the input is returned unchanged
    when handling is disabled, the text is blank, or no page number is
    found on a candidate line.
    """
    # if no handling, just return text
    if settings.COMPARE_HANDLE_PAGE_NUMBERS is False:
        return text
    raw_lines = text.splitlines()
    # candidate (original_index, stripped_line) pairs: first and last non-blank
    non_blank = [(i, line.strip()) for i, line in enumerate(raw_lines) if line.strip()]
    if not non_blank:
        return text
    if len(non_blank) == 1:
        candidates = [non_blank[0]]
    else:
        candidates = [non_blank[0], non_blank[-1]]
    page_number = None
    ix_to_delete = set()
    for raw_ix, line in candidates:
        if line.isnumeric():
            page_number = line
            # BUG FIX: record the index into the ORIGINAL line list — the
            # previous code stored the index into the two-element candidate
            # list and then deleted the wrong raw line.
            ix_to_delete.add(raw_ix)
            break
    if page_number is None:
        # BUG FIX: previously fell off the end and returned None implicitly.
        return text
    form = f"<meta page={page_number} />\n"
    # we also want to REMOVE the page number line from the actual text
    cut = [x for i, x in enumerate(raw_lines) if i not in ix_to_delete]
    return "\n".join([form] + cut)
| 31.291971 | 81 | 0.623513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,707 | 0.398181 |
96e90e29c3780580d721f8e007e8c29d7b8c4d6c | 55 | py | Python | apps/sitemap/__init__.py | MySmile/mysmile | 5abe4baa7970674d1f8365d875519283c2e29dae | [
"BSD-3-Clause"
] | 5 | 2015-05-03T09:51:32.000Z | 2019-05-21T14:19:02.000Z | apps/sitemap/__init__.py | MySmile/mysmile | 5abe4baa7970674d1f8365d875519283c2e29dae | [
"BSD-3-Clause"
] | 24 | 2015-04-05T16:28:08.000Z | 2022-03-11T23:36:56.000Z | apps/sitemap/__init__.py | MySmile/mysmile | 5abe4baa7970674d1f8365d875519283c2e29dae | [
"BSD-3-Clause"
] | 1 | 2017-01-23T23:00:11.000Z | 2017-01-23T23:00:11.000Z | default_app_config = 'apps.sitemap.apps.SitemapConfig'
| 27.5 | 54 | 0.836364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.6 |
96ead76583647a3211d18d56e012be5c362ce02d | 1,399 | py | Python | extra_tests/snippets/stdlib_subprocess.py | dbrgn/RustPython | 6d371cea8a62d84dbbeec5a53cfd040f45899211 | [
"CC-BY-4.0",
"MIT"
] | 11,058 | 2018-05-29T07:40:06.000Z | 2022-03-31T11:38:42.000Z | extra_tests/snippets/stdlib_subprocess.py | dbrgn/RustPython | 6d371cea8a62d84dbbeec5a53cfd040f45899211 | [
"CC-BY-4.0",
"MIT"
] | 2,105 | 2018-06-01T10:07:16.000Z | 2022-03-31T14:56:42.000Z | extra_tests/snippets/stdlib_subprocess.py | dbrgn/RustPython | 6d371cea8a62d84dbbeec5a53cfd040f45899211 | [
"CC-BY-4.0",
"MIT"
] | 914 | 2018-07-27T09:36:14.000Z | 2022-03-31T19:56:34.000Z | import subprocess
import time
import sys
import signal
from testutils import assert_raises
# True on any non-Windows platform; selects which shell commands to use below
is_unix = not sys.platform.startswith("win")
if is_unix:
    def echo(text):
        # argv for a command that prints `text` to stdout
        return ["echo", text]
    def sleep(secs):
        # argv for a command that sleeps `secs` seconds
        return ["sleep", str(secs)]
else:
    def echo(text):
        # Windows has no standalone echo binary; go through cmd.exe
        return ["cmd", "/C", f"echo {text}"]
    def sleep(secs):
        # TODO: make work in a non-unixy environment (something with timeout.exe?)
        return ["sleep", str(secs)]
# --- returncode stays None until poll()/wait() reaps the finished child ---
p = subprocess.Popen(echo("test"))
time.sleep(0.1)
assert p.returncode is None
assert p.poll() == 0
assert p.returncode == 0
# --- wait(timeout) raises TimeoutExpired while the child is still running ---
p = subprocess.Popen(sleep(2))
assert p.poll() is None
with assert_raises(subprocess.TimeoutExpired):
    assert p.wait(1)
p.wait()
assert p.returncode == 0
# --- stdout can be captured through PIPE ---
p = subprocess.Popen(echo("test"), stdout=subprocess.PIPE)
p.wait()
assert p.stdout.read().strip() == b"test"
# --- terminate(): on unix the child exits with -SIGTERM ---
p = subprocess.Popen(sleep(2))
p.terminate()
p.wait()
if is_unix:
    assert p.returncode == -signal.SIGTERM
else:
    assert p.returncode == 1
# --- kill(): on unix the child exits with -SIGKILL ---
p = subprocess.Popen(sleep(2))
p.kill()
p.wait()
if is_unix:
    assert p.returncode == -signal.SIGKILL
else:
    assert p.returncode == 1
# --- communicate() collects (stdout, stderr) after the child exits ---
p = subprocess.Popen(echo("test"), stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
assert stdout.strip() == b"test"
# --- communicate(timeout) also raises TimeoutExpired ---
p = subprocess.Popen(sleep(5), stdout=subprocess.PIPE)
with assert_raises(subprocess.TimeoutExpired):
    p.communicate(timeout=1)
| 19.985714 | 82 | 0.686919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.110079 |
96ece3d883b91ffe8949433754af0502b5ee3b91 | 341 | py | Python | pythonProject1/chap2/demo8.py | zhudi7/pythonAK | d52995a4c35a3c9aeb1542922e9d786f4dcc8d1c | [
"Apache-2.0"
] | null | null | null | pythonProject1/chap2/demo8.py | zhudi7/pythonAK | d52995a4c35a3c9aeb1542922e9d786f4dcc8d1c | [
"Apache-2.0"
] | null | null | null | pythonProject1/chap2/demo8.py | zhudi7/pythonAK | d52995a4c35a3c9aeb1542922e9d786f4dcc8d1c | [
"Apache-2.0"
] | null | null | null | # 公众号:MarkerJava
# 开发时间:2020/10/5 17:25
scores = {'kobe': 100, 'lebron': 99, 'AD': 88}
# 获取所有key
keys = scores.keys()
print(keys)
print(type(keys))
print(list(keys)) # 将所有key组成的视图转换层列表
# 获取所有的值
value = scores.values()
print(value)
print(type(value)) # 将所有value组成的视图转换层列表
# 获取所有键值对
items = scores.items()
print(items)
print(type(items))
| 17.05 | 46 | 0.695015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.503371 |
96ed4bcf4dda34f4a0193956be88e898b8ddc8c2 | 6,579 | py | Python | src/command_modules/azure-cli-storage/tests/test_storage_blob_scenarios.py | saurabsa/azure-cli-old | f77477a98c9aa9cb55daf5b0d2f410d1455a9225 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-storage/tests/test_storage_blob_scenarios.py | saurabsa/azure-cli-old | f77477a98c9aa9cb55daf5b0d2f410d1455a9225 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-storage/tests/test_storage_blob_scenarios.py | saurabsa/azure-cli-old | f77477a98c9aa9cb55daf5b0d2f410d1455a9225 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import re
from datetime import datetime, timedelta
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer,
JMESPathCheck)
class StorageBlobUploadTests(ScenarioTest):
    """Scenario tests for `az storage blob upload` across file sizes.

    Each test uploads a block blob of a given size (KB) and, for small
    files, downloads it back. verify_blob_upload_and_download also asserts
    how many Put Block requests were recorded, since uploads above the 64MB
    put-request limit must be split into blocks.
    """
    @ResourceGroupPreparer()
    @StorageAccountPreparer()
    def test_storage_blob_upload_small_file(self, resource_group, storage_account):
        # 1KB: a single put, no block requests expected
        self.verify_blob_upload_and_download(resource_group, storage_account, 1, 'block', 0)
    @ResourceGroupPreparer()
    @StorageAccountPreparer()
    def test_storage_blob_upload_midsize_file(self, resource_group, storage_account):
        # 4MB: still a single put
        self.verify_blob_upload_and_download(resource_group, storage_account, 4096, 'block', 0)
    @ResourceGroupPreparer()
    @StorageAccountPreparer()
    def test_storage_blob_upload_101mb_file(self, resource_group, storage_account):
        # 101MB: blocked upload, 26 Put Block requests expected
        self.verify_blob_upload_and_download(resource_group, storage_account, 101 * 1024, 'block',
                                             26, skip_download=True)
    @ResourceGroupPreparer()
    @StorageAccountPreparer()
    def test_storage_blob_upload_100mb_file(self, resource_group, storage_account):
        self.verify_blob_upload_and_download(resource_group, storage_account, 100 * 1024, 'block',
                                             25, skip_download=True)
    @ResourceGroupPreparer()
    @StorageAccountPreparer()
    def test_storage_blob_upload_99mb_file(self, resource_group, storage_account):
        self.verify_blob_upload_and_download(resource_group, storage_account, 99 * 1024, 'block',
                                             25, skip_download=True)
    @ResourceGroupPreparer()
    @StorageAccountPreparer()
    def test_storage_blob_upload_64mb_file(self, resource_group, storage_account):
        self.verify_blob_upload_and_download(resource_group, storage_account, 64 * 1024, 'block',
                                             16, skip_download=True)
    @ResourceGroupPreparer()
    @StorageAccountPreparer()
    def test_storage_blob_upload_63mb_file(self, resource_group, storage_account):
        # 64MB is the put request size limit
        self.verify_blob_upload_and_download(resource_group, storage_account, 63 * 1024, 'block',
                                             skip_download=True)
    def verify_blob_upload_and_download(self, group, account, file_size_kb, blob_type,
                                        block_count=0, skip_download=False):
        """Upload a temp file of `file_size_kb` KB as a `blob_type` blob,
        exercise exists/show/generate-sas/update, optionally download it
        back, and assert `block_count` Put Block requests were recorded."""
        container = self.create_random_name(prefix='cont', length=24)
        local_dir = self.create_temp_dir()
        local_file = self.create_temp_file(file_size_kb)
        blob_name = self.create_random_name(prefix='blob', length=24)
        account_key = self.get_account_key(group, account)
        self.set_env('AZURE_STORAGE_ACCOUNT', account)
        self.set_env('AZURE_STORAGE_KEY', account_key)
        self.cmd('storage container create -n {}'.format(container))
        # the blob must not exist before the upload, and must exist after
        self.cmd('storage blob exists -n {} -c {}'.format(blob_name, container),
                 checks=JMESPathCheck('exists', False))
        self.cmd('storage blob upload -c {} -f {} -n {} --type {}'
                 .format(container, local_file, blob_name, blob_type))
        self.cmd('storage blob exists -n {} -c {}'.format(blob_name, container),
                 checks=JMESPathCheck('exists', True))
        self.cmd('storage blob show -n {} -c {}'.format(blob_name, container),
                 checks=JMESPathCheck('name', blob_name))  # TODO: more checks
        # a read-only, https-only SAS token can be generated for the blob
        expiry = (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')
        sas = self.cmd('storage blob generate-sas -n {} -c {} --expiry {} '
                       '--permissions r --https-only'.format(blob_name, container, expiry)).output
        assert dict(pair.split('=') for pair in sas.split('&'))  # TODO: more checks
        self.cmd('storage blob update -n {} -c {} --content-type application/test-content'
                 .format(blob_name, container))
        self.cmd('storage blob show -n {} -c {}'.format(blob_name, container), checks=[
            JMESPathCheck('properties.contentSettings.contentType', 'application/test-content'),
            JMESPathCheck('properties.contentLength', file_size_kb * 1024)])
        self.cmd('storage blob service-properties show',
                 checks=JMESPathCheck('hourMetrics.enabled', True))
        if not skip_download:
            downloaded = os.path.join(local_dir, 'test.file')
            self.cmd('storage blob download -n {} -c {} --file {}'
                     .format(blob_name, container, downloaded))
            self.assertTrue(os.path.isfile(downloaded), 'The file is not downloaded.')
            self.assertEqual(file_size_kb * 1024, os.stat(downloaded).st_size,
                             'The download file size is not right.')
        # Verify the requests in cassette to ensure the count of the block requests is expected
        # This portion of validation doesn't verify anything during playback because the recording
        # is fixed.
        def is_block_put_req(request):
            # a Put Block request is a PUT on our container/blob URL carrying
            # both comp=block and a blockid query parameter
            if request.method != 'PUT':
                return False
            if not re.search('/cont[0-9]+/blob[0-9]+', request.path):
                return False
            comp_block = False
            has_blockid = False
            for key, value in request.query:
                if key == 'comp' and value == 'block':
                    comp_block = True
                elif key == 'blockid':
                    has_blockid = True
            return comp_block and has_blockid
        requests = self.cassette.requests
        put_blocks = [request for request in requests if is_block_put_req(request)]
        self.assertEqual(block_count, len(put_blocks),
                         'The expected number of block put requests is {} but the actual '
                         'number is {}.'.format(block_count, len(put_blocks)))
    def get_account_key(self, group, name):
        """Return the first access key of storage account `name` in `group`."""
        return self.cmd('storage account keys list -n {} -g {} --query "[0].value" -otsv'
                        .format(name, group)).output
if __name__ == '__main__':
import unittest
unittest.main()
| 47.673913 | 98 | 0.619851 | 5,961 | 0.906065 | 0 | 0 | 2,032 | 0.308862 | 0 | 0 | 1,596 | 0.24259 |
96ee1e1fed4d3684ce5d45ae63f74a644486fff2 | 29,363 | py | Python | fmtrack/post_process.py | elejeune11/FM-Track | ce255388872f4c1f500c89f6aaacba57562d45c2 | [
"MIT"
] | 3 | 2019-11-01T06:37:36.000Z | 2019-11-08T05:13:35.000Z | fmtrack/post_process.py | elejeune11/FM-Track | ce255388872f4c1f500c89f6aaacba57562d45c2 | [
"MIT"
] | 1 | 2019-11-01T06:44:54.000Z | 2019-11-01T15:34:00.000Z | fmtrack/post_process.py | elejeune11/FM-Track | ce255388872f4c1f500c89f6aaacba57562d45c2 | [
"MIT"
] | 2 | 2019-11-01T06:38:44.000Z | 2020-02-07T09:08:23.000Z | import fmtrack
import os
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pickle
import pyvista
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel)
from sklearn.neighbors import KernelDensity
from sklearn import preprocessing
##########################################################################################
# get filepath for matplotlib style
##########################################################################################
stylepath = os.path.dirname(os.path.abspath(fmtrack.__file__)) + '/el_papers.mplstyle'
##########################################################################################
# import data
##########################################################################################
def import_cell_info(file_prefix_1,file_prefix_2,root_directory):
	"""Load mesh, normals, center and volume for both cell configurations.

	Files are read from <root_directory>/Gel_cell_coords/<prefix>_cell_*.txt.
	Returns (mesh_1, normal_1, center_1, vol_1, mesh_2, normal_2, center_2, vol_2).
	"""
	base = root_directory + '/Gel_cell_coords/'
	suffixes = ('_cell_mesh.txt', '_cell_normals.txt', '_cell_center.txt', '_cell_volume.txt')
	loaded = []
	for prefix in (file_prefix_1, file_prefix_2):
		for suffix in suffixes:
			loaded.append(np.loadtxt(base + prefix + suffix))
	return tuple(loaded)
def import_bead_disps(folder):
	"""Load matched bead positions (X, Y, Z) and displacements (U, V, W).

	Each component is read from <folder>/<name>.txt; returns them as a
	6-tuple in X, Y, Z, U, V, W order.
	"""
	components = ('X', 'Y', 'Z', 'U', 'V', 'W')
	return tuple(np.loadtxt(folder + '/' + name + '.txt') for name in components)
##########################################################################################
# additional computations based on the data
##########################################################################################
# can be implemented as needed based on some rule for excluding outliers
def remove_outliers(X, Y, Z, U, V, W):
	"""Hook for excluding outlier bead displacements.

	The default implementation performs no filtering and returns the inputs
	unchanged (BUG FIX: the original stub returned undefined names and
	raised NameError whenever it was called). Replace the body with a
	project-specific rule, e.g.:
	  - drop displacements above a maximum plausible magnitude
	  - drop displacements with an extreme z-score
	  - more complex strategies (hot-spot / neighborhood analysis)
	"""
	X_new, Y_new, Z_new, U_new, V_new, W_new = X, Y, Z, U, V, W
	return X_new, Y_new, Z_new, U_new, V_new, W_new
# compare bead displacement to its neighbors
def color_point_neighbor_similarity(X, Y, Z, U, V, W, num_neigh):
	"""Score each bead by how similar its displacement is to its neighbors'.

	For every bead, the `num_neigh` nearest beads (by initial position,
	which includes the bead itself at distance 0) are found, and the mean
	Euclidean difference between its displacement vector and theirs is
	recorded. Low scores mean the bead moves like its neighborhood; high
	scores flag potential tracking outliers. Returns a list of scores.
	"""
	neigh_score = []
	for idx in range(X.shape[0]):
		# distance from bead idx to every bead (including itself)
		dist_all = ((X[idx] - X) ** 2.0 + (Y[idx] - Y) ** 2.0 + (Z[idx] - Z) ** 2.0) ** (1.0 / 2.0)
		nearest = np.argsort(dist_all)[:num_neigh]
		# displacement-vector mismatch with each of the nearest beads
		diffs = ((U[idx] - U[nearest]) ** 2.0
			+ (V[idx] - V[nearest]) ** 2.0
			+ (W[idx] - W[nearest]) ** 2.0) ** (1.0 / 2.0)
		neigh_score.append(np.mean(diffs))
	return neigh_score
# compare bead displacement direction to the initial cell configuration
def color_point_direction(X, Y, Z, U, V, W, cell_mesh, cell_normal):
	"""Compare each bead's displacement direction to the cell surface.

	A random subsample of the mesh (size min(num_beads, 10000)) is used for
	efficiency, so results can vary run to run. For each bead the closest
	sampled surface point is found and the dot product between the bead's
	unit displacement and that point's normal is recorded: positive means
	the bead moves along the normal, negative means against it.

	Returns (dir_score, dist_from_cell, mag_list) as python lists.
	"""
	dir_score = []
	dist_from_cell = []
	mag_list = []
	num_pts = X.shape[0]
	# --> down-sample the cell mesh (computational efficiency)
	samp = np.random.randint(cell_mesh.shape[0] - 1, size=np.min([num_pts, 10000]))
	mesh_s = cell_mesh[samp, :]
	norm_s = cell_normal[samp, :]
	for idx in range(num_pts):
		mag = (U[idx] ** 2.0 + V[idx] ** 2.0 + W[idx] ** 2.0) ** (1.0 / 2.0)
		unit = np.array([U[idx], V[idx], W[idx]]) / mag
		gaps = ((X[idx] - mesh_s[:, 0]) ** 2.0 + (Y[idx] - mesh_s[:, 1]) ** 2.0
			+ (Z[idx] - mesh_s[:, 2]) ** 2.0) ** (1.0 / 2.0)
		closest = np.argmin(gaps)
		dot = unit[0] * norm_s[closest, 0] + unit[1] * norm_s[closest, 1] + unit[2] * norm_s[closest, 2]
		dir_score.append(dot)
		dist_from_cell.append(gaps[closest])
		mag_list.append(mag)
	return dir_score, dist_from_cell, mag_list
# compute bead displacement to the domain edge
def compute_dist_from_edge(X, Y, Z, X_DIM, Y_DIM, Z_DIM):
	"""Distance from each bead to the nearest face of the imaging domain.

	The domain is the box [0, X_DIM] x [0, Y_DIM] x [0, Z_DIM]; the minimum
	of the six face distances is returned for each bead, as a list.
	"""
	dist_from_edge = []
	for idx in range(X.shape[0]):
		face_gaps = [np.abs(X[idx]), np.abs(X_DIM - X[idx]),
			np.abs(Y[idx]), np.abs(Y_DIM - Y[idx]),
			np.abs(Z[idx]), np.abs(Z_DIM - Z[idx])]
		dist_from_edge.append(np.min(face_gaps))
	return dist_from_edge
# bin data to assist with plotting
def mean_bins(data1, data2):
	"""Bin data2 by data1 into [0,20), [20,40), [40,60), [60,80) and average.

	Returns (bin_centers, bin_means) with bin_centers = [10, 30, 50, 70].
	Note two quirks preserved from the original implementation: each bin's
	accumulator is seeded with a 0 (so means are pulled toward zero and an
	empty bin reports 0.0), and values with data1 >= 80 are dropped.
	"""
	cent_mark = [10, 30, 50, 70]
	upper_edges = [20, 40, 60, 80]
	order = np.argsort(data1)
	sorted_d1 = np.sort(data1)
	sorted_d2 = data2[order]
	mean_val = []
	cursor = 0
	for edge in upper_edges:
		bucket = [0]
		while cursor < sorted_d1.shape[0] and sorted_d1[cursor] < edge:
			bucket.append(sorted_d2[cursor])
			cursor += 1
		mean_val.append(np.mean(bucket))
	return cent_mark, mean_val
##########################################################################################
# plot raw data (position)
##########################################################################################
# --> y axis is bead displacement magnitude, x axis is distance from cell surface
def plot_surface_disp(axi,cell_mesh,dist_from_edge,dist_from_cell, mag_list):
	"""Scatter displacement magnitude vs. distance from the cell surface.

	Beads within 5 um of the domain boundary are excluded; binned means
	(via mean_bins) are overlaid as large red dots.
	"""
	# keep only beads safely away from the image boundary
	keep = np.asarray([kk for kk in range(len(dist_from_edge)) if dist_from_edge[kk] > 5])
	surf_dist = np.asarray(dist_from_cell)[keep]
	mags = np.asarray(mag_list)[keep]
	cent_mark, mean_val = mean_bins(surf_dist, mags)
	axi.plot(surf_dist, mags, 'k.', markersize=0.75)
	axi.plot(cent_mark, mean_val, 'ro', markersize=10)
	axi.set_ylim((0, 10))
	axi.set_xlabel('distance to cell surface')
	axi.set_ylabel(r'displacement magnitude $\mu m$')
# --> 3D plot of the cell, configuration number influences title and color
def plot_cell_3D(ax,cell_num,cell_mesh, cell_center, cell_vol, X_DIM, Y_DIM, Z_DIM):
	"""Scatter one cell surface mesh on a 3D axis.

	Configuration 1 is drawn light grey, configuration 2 black; the title
	reports the configuration number and the cell volume.
	"""
	if cell_num == 1:
		col = (0.75,0.75,0.75)  # light grey
	elif cell_num == 2:
		col = (0,0,0)  # black
	ax.set_aspect('auto')
	ax.plot(cell_mesh[:,0], cell_mesh[:,1], cell_mesh[:,2], '.', color=col)
	ax.set_xlim((-1, X_DIM))
	ax.set_ylim((-1, Y_DIM))
	ax.set_zlim((-1, Z_DIM))
	if cell_num == 1:
		ax.set_title('cell config 1, %.1f $\mu m^3$'%(cell_vol))
	elif cell_num == 2:
		ax.set_title('cell config 2, %.1f $\mu m^3$'%(cell_vol))
# --> plot of scores (type 1 is similarity to neighbors, type 2 is direction relative to cell)
def plot_scores_subplot(data,title,axi,color_type):
	"""Plot a gaussian-KDE estimate of a score distribution on axi.

	The density curve is drawn in black and overlaid with 250 per-point
	markers colored by score value (color_type 1: blue-to-red ramp over the
	score range; color_type 2: red/purple for negative, cyan for positive).
	"""
	grid_size = 250
	lo = np.min(data)
	hi = np.max(data)
	grid = np.linspace(lo, hi, grid_size).reshape(-1, 1)
	kde = KernelDensity(kernel='gaussian', bandwidth=0.1).fit(data.reshape(-1, 1))
	density = np.exp(kde.score_samples(grid))
	axi.set_xlabel('score')
	axi.set_ylabel('probability density function')
	axi.set_title(title)
	axi.plot(grid[:, 0], density, 'k-', linewidth=0.5)
	for kk in range(grid_size):
		score = grid[kk, 0]
		if color_type == 1:  # --> all positive numbers, blue is 0, red is high
			frac = (score - lo) / (hi - lo)
			col = (frac, 0, 1.0 - frac)
		elif color_type == 2:
			if score < 0.0:
				col = (np.abs(score), 0, 0.5 * np.abs(score))
			else:
				col = (0, np.abs(score), np.abs(score))
		axi.plot(score, density[kk], '.', color=col)
	return
# --> helper function plots slice of cell
def plot_cell(cent,project_1,project_2,project_out,col,axi):
	"""Scatter a +/-0.5 um slab of projected cell-mesh points onto axi.

	Only points whose out-of-plane coordinate is strictly within 0.5 of
	`cent` are plotted, at (project_1, project_2) with color `col`.
	"""
	half_width = 0.5
	in_plane_1 = []
	in_plane_2 = []
	for kk in range(project_1.shape[0]):
		if cent - half_width < project_out[kk] < cent + half_width:
			in_plane_1.append(project_1[kk])
			in_plane_2.append(project_2[kk])
	axi.plot(in_plane_1, in_plane_2, '.', color=col)
	return
# --> helper function plots slice of vectors
def plot_vectors(color_type, color_info, project_1, project_2, project_1d, project_2d, cent, project_out, axi):
	"""Draw projected displacement arrows for beads within 10 um of a slice.

	color_type 1 ramps the (range-normalized) score from blue (low) to red
	(high); color_type 2 maps negative scores to red/purple and positive
	scores to cyan.
	"""
	ci_min = np.min(color_info)
	ci_max = np.max(color_info)
	slab_half_width = 10
	for kk in range(project_1.shape[0]):
		if not (cent - slab_half_width < project_out[kk] < cent + slab_half_width):
			continue
		score = color_info[kk]
		# --> color of this arrow
		if color_type == 1:  # --> all positive numbers, blue is 0, red is high
			frac = (score - ci_min) / (ci_max - ci_min)
			col = (frac, 0, 1.0 - frac)
		elif color_type == 2:
			if score < 0.0:
				col = (np.abs(score), 0, 0.5 * np.abs(score))
			else:
				col = (0, np.abs(score), np.abs(score))
		axi.arrow(project_1[kk], project_2[kk], project_1d[kk], project_2d[kk],
			color=col, linewidth=1.0, head_width=1.5)
	return
# --> plot a slice plot, each has beads and a cell
def plot_cell_vector_slice(color_type, color_info, X, Y, Z, U, V, W, cell_center_1,\
	cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM):
	"""Plot one 2D slice view: both cell outlines plus colored bead arrows.

	plane_type selects the projection: 1 = XZ, 2 = YZ, 3 = XY. The slice is
	taken through the midpoint of the two cell centers along the remaining
	axis; dotted cross-hairs mark the averaged cell center in the plane.
	"""
	num_beads = X.shape[0]
	positions = np.zeros((num_beads, 3))
	positions[:, 0] = X; positions[:, 1] = Y; positions[:, 2] = Z
	displacements = np.zeros((num_beads, 3))
	displacements[:, 0] = U; displacements[:, 1] = V; displacements[:, 2] = W
	center_avg = 0.5 * cell_center_1 + 0.5 * cell_center_2
	# (in-plane axis 1, in-plane axis 2, out-of-plane axis) for each view
	axis_map = {1: (0, 2, 1), 2: (1, 2, 0), 3: (0, 1, 2)}
	idx_1, idx_2, idx_out = axis_map[plane_type]
	dims = (X_DIM, Y_DIM, Z_DIM)
	labels = (r'x-position $\mu m$', r'y-position $\mu m$', r'z-position $\mu m$')
	cent = center_avg[idx_out]
	# slice of each cell outline (config 1 grey, config 2 black)
	plot_cell(cent, cell_mesh_1[:, idx_1], cell_mesh_1[:, idx_2], cell_mesh_1[:, idx_out], (0.75, 0.75, 0.75), axi)
	plot_cell(cent, cell_mesh_2[:, idx_1], cell_mesh_2[:, idx_2], cell_mesh_2[:, idx_out], (0.0, 0.0, 0.0), axi)
	# bead displacement arrows projected into this plane
	plot_vectors(color_type, color_info, positions[:, idx_1], positions[:, idx_2],
		displacements[:, idx_1], displacements[:, idx_2], cent, positions[:, idx_out], axi)
	# dotted cross-hairs through the averaged cell center
	axi.plot([-1, dims[idx_1]], [center_avg[idx_2], center_avg[idx_2]], 'k:', linewidth=1.0)
	axi.plot([center_avg[idx_1], center_avg[idx_1]], [-1, dims[idx_2]], 'k:', linewidth=1.0)
	axi.set_xlim((-1, dims[idx_1]))
	axi.set_ylim((-1, dims[idx_2]))
	axi.set_xlabel(labels[idx_1])
	axi.set_ylabel(labels[idx_2])
	return
def plot_vector_field(X,Y,Z,U,V,W,cell_init,cell_final,dir_score,should_show,should_save,foldername):
	"""Build a 3D glyph plot of bead displacements with pyvista.

	Arrows (fixed length, factor 5.0) are colored by dir_score and the
	final cell mesh is shown in maroon. If should_show, an interactive
	window is opened; if should_save, cell_init.vtk / cell_final.vtk /
	arrows.vtk are written into foldername.
	"""
	bead_xyz = np.vstack((X, Y, Z)).transpose()
	bead_uvw = np.vstack((U, V, W)).transpose()
	cloud = pyvista.PolyData(bead_xyz)
	cloud["dot(cell normal, displacement)"] = dir_score
	cloud['vectors'] = bead_uvw
	arrows = cloud.glyph(orient='vectors', scale=False, factor=5.0, geom=pyvista.Arrow())
	mesh_init = pyvista.PolyData(cell_init)
	mesh_final = pyvista.PolyData(cell_final)
	if should_show:
		scene = pyvista.Plotter()
		scene.add_mesh(cell_final, color='maroon')
		scene.add_mesh(arrows, cmap=plt.cm.get_cmap("viridis_r"))
		scene.remove_scalar_bar()
		scene.add_scalar_bar('Dot(Cell Normal, Vector)', title_font_size=20, label_font_size=15, position_y=0.05)
		scene.show_grid()
		scene.show(title='Bead Deformation around Cell')
	if should_save:
		mesh_init.save(os.path.join(foldername, 'cell_init.vtk'))
		mesh_final.save(os.path.join(foldername, 'cell_final.vtk'))
		arrows.save(os.path.join(foldername, 'arrows.vtk'))
# --> plot a cell-vector row
def plot_cell_vector_slice_row(ax_list,color_type,color_info,X,Y,Z,U,V,W,cell_center_1,cell_mesh_1,cell_center_2,cell_mesh_2,X_DIM,Y_DIM,Z_DIM):
	"""Draw the three orthogonal slice views (XZ, YZ, XY) on a row of axes.

	ax_list must hold three axes; plane types 1, 2, 3 are drawn in order.
	"""
	for axi, plane_type in zip(ax_list, (1, 2, 3)):
		plot_cell_vector_slice(color_type, color_info, X, Y, Z, U, V, W, cell_center_1,
			cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
	return
# --> plot cells
def plot_only_cells(cell_mesh_1,cell_center_1,cell_vol_1,cell_mesh_2,cell_center_2,cell_vol_2,X_DIM,Y_DIM,Z_DIM,folder,figtype_list):
	"""Render both cell configurations side by side in 3D and save the figure.

	One file named 'Cell_plots_3D' is written into `folder` for every
	extension in `figtype_list`.
	"""
	fig = plt.figure()
	plt.style.use(stylepath)
	plt.rc('text', usetex=True)
	plt.rc('font', family='serif')
	fig.set_figheight(5)
	fig.set_figwidth(10)
	# one 3D panel per configuration
	panels = [(1, cell_mesh_1, cell_center_1, cell_vol_1),
		(2, cell_mesh_2, cell_center_2, cell_vol_2)]
	for config_num, mesh, center, vol in panels:
		axi = fig.add_subplot(1, 2, config_num, projection='3d')
		plot_cell_3D(axi, config_num, mesh, center, vol, X_DIM, Y_DIM, Z_DIM)
	plt.tight_layout()
	for extension in figtype_list:
		plt.savefig(folder + '/Cell_plots_3D' + extension)
	return
# --> plot scores
def plot_only_scores(neigh_score,dir_score,folder,figtype_list):
	"""Plot both score distributions (neighbor similarity, direction) and save.

	Writes 'Score_plots<ext>' into `folder` for each extension in
	figtype_list.
	"""
	fig = plt.figure()
	plt.style.use(stylepath)
	plt.rc('text', usetex=True)
	plt.rc('font', family='serif')
	fig.set_figheight(5)
	fig.set_figwidth(10)
	# (subplot slot, raw scores, color scheme, panel title)
	panels = [
		(1, neigh_score, 1, 'neighbor distance score'),
		(2, dir_score, 2, r'$n_{cell} \cdot n_{vector}$'),
	]
	for slot, raw_scores, color_type, title in panels:
		axi = fig.add_subplot(1, 2, slot)
		plot_scores_subplot(np.asarray(raw_scores), title, axi, color_type)
	plt.tight_layout()
	for extension in figtype_list:
		plt.savefig(folder + '/Score_plots' + extension)
	return
# --> plot slice
def plot_only_slice(dir_score,X,Y,Z,U,V,W,cell_center_1,cell_mesh_1,cell_center_2,cell_mesh_2,X_DIM,Y_DIM,Z_DIM,folder,figtype_list):
    """Plot bead displacement slices through all three planes, colored by direction score, and save."""
    fig = plt.figure()
    plt.style.use(stylepath)
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    fig.set_figheight(5)
    fig.set_figwidth(15)
    color_type = 2
    color_info = dir_score
    # Subplot k shows the slice for plane_type k (1, 2, 3).
    for plane_type in (1, 2, 3):
        axi = fig.add_subplot(1, 3, plane_type)
        plot_cell_vector_slice(color_type, color_info, X, Y, Z, U, V, W, cell_center_1,
            cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
    plt.tight_layout()
    for end in figtype_list:
        plt.savefig(folder + '/Bead_disp_slice' + end)
    return
# --> plot distance
def plot_only_distance(cell_mesh,dist_from_edge,dist_from_cell,mag_list,folder,figtype_list):
    """Plot displacement magnitude against distance from the cell surface and save."""
    fig = plt.figure()
    plt.style.use(stylepath)
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    fig.set_figheight(5)
    fig.set_figwidth(5)
    # Single panel: magnitude vs distance, produced by the shared helper.
    plot_surface_disp(fig.add_subplot(1, 1, 1), cell_mesh, dist_from_edge,
                      dist_from_cell, mag_list)
    plt.tight_layout()
    for end in figtype_list:
        plt.savefig(folder + '/Disp_wrt_dist' + end)
    return
# --> plot all
def plot_all(folder, root_directory, file_prefix_1,file_prefix_2,dir_score,neigh_score,dist_from_edge,dist_from_cell,mag_list,\
    X,Y,Z,U,V,W,cell_center_1,cell_mesh_1,cell_vol_1,cell_center_2,cell_mesh_2,cell_vol_2,X_DIM,Y_DIM,Z_DIM,figtype_list):
    # Build the 2x4 summary figure: direction-score panel, three slice views,
    # neighbor-score panel, displacement-vs-distance panel, and the two 3D
    # cell renderings.  Saved once per extension in the per-track folder and
    # once in the shared Post_proc_summary folder.
    fig = plt.figure()
    plt.style.use(stylepath)
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    fig.set_figheight(10)
    fig.set_figwidth(20)
    # Panel 1: direction score (cell-normal dot displacement-vector).
    axi = fig.add_subplot(2,4,1)
    data = np.asarray(dir_score)
    color_type = 2
    title = r'$n_{cell} \cdot n_{vector}$'
    plot_scores_subplot(data,title,axi,color_type)
    # Panels 2-4: bead displacement slices through the three plane types.
    axi = fig.add_subplot(2,4,2)
    plane_type = 1
    plot_cell_vector_slice(color_type, dir_score, X, Y, Z, U, V, W, cell_center_1,\
        cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
    axi = fig.add_subplot(2,4,3)
    plane_type = 2
    plot_cell_vector_slice(color_type, dir_score, X, Y, Z, U, V, W, cell_center_1,\
        cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
    axi = fig.add_subplot(2,4,4)
    plane_type = 3
    plot_cell_vector_slice(color_type, dir_score, X, Y, Z, U, V, W, cell_center_1,\
        cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
    # Panel 5: neighbor-similarity score.
    axi = fig.add_subplot(2,4,5)
    data = np.asarray(neigh_score)
    color_type = 1
    title = 'neighbor distance score'
    plot_scores_subplot(data,title,axi,color_type)
    # Panel 6: displacement magnitude vs distance from the cell surface.
    axi = fig.add_subplot(2,4,6)
    plot_surface_disp(axi,cell_mesh_1,dist_from_edge,dist_from_cell, mag_list)
    # Panels 7-8: 3D renderings of the cell in both configurations.
    axi = fig.add_subplot(2,4,7, projection='3d')
    plot_cell_3D(axi,1,cell_mesh_1, cell_center_1, cell_vol_1, X_DIM, Y_DIM, Z_DIM)
    axi = fig.add_subplot(2,4,8, projection='3d')
    plot_cell_3D(axi,2,cell_mesh_2, cell_center_2, cell_vol_2, X_DIM, Y_DIM, Z_DIM)
    plt.tight_layout()
    # Save in the per-track folder ...
    for end in figtype_list:
        fname = folder + '/Summary_plot' + end
        plt.savefig(fname)
    # ... and again in the shared summary folder.
    for end in figtype_list:
        fname = root_directory + '/Post_proc_summary' + '/' + 'Summary_' + file_prefix_1 + '_to_' + file_prefix_2 + end
        plt.savefig(fname)
    return
# call individual plots, plus call multiple subplots
def call_plot_main(plot_type,file_prefix_1,file_prefix_2,num_feat,X_DIM,Y_DIM,Z_DIM,figtype_list,use_corrected_cell,root_directory,should_plot):
    """Compute per-bead scores for one tracking pair and dispatch the requested plots.

    plot_type selects which figure(s) to create (1=summary, 2=cells, 3=scores,
    4=slices, 5=magnitude-vs-distance, 6=all of the above).  The signature is
    unchanged; existing callers are unaffected.
    """
    folder = root_directory + '/Track_' + file_prefix_1 + '_to_' + file_prefix_2
    X, Y, Z, U, V, W = import_bead_disps(folder)
    cell_mesh_1, cell_normal_1, cell_center_1, cell_vol_1, cell_mesh_2, cell_normal_2, cell_center_2, cell_vol_2 = import_cell_info(file_prefix_1,file_prefix_2,root_directory)
    # BUG FIX: the corrected mesh must be loaded *after* import_cell_info.
    # Previously it was loaded first and then immediately overwritten by the
    # import_cell_info result, so the corrected mesh was never used (compare
    # the ordering in plot_gp_model).
    if use_corrected_cell:
        cell_mesh_2 = np.loadtxt(folder + '/cell_mesh_2_corrected.txt')
    neigh_score = color_point_neighbor_similarity(X, Y, Z, U, V, W, num_feat)
    dir_score, dist_from_cell, mag_list = color_point_direction(X, Y, Z, U, V, W, cell_mesh_1, cell_normal_1)
    dist_from_edge = compute_dist_from_edge(X, Y, Z, X_DIM, Y_DIM, Z_DIM)
    # plot_type 6 creates every plot below.
    if plot_type == 1 or plot_type == 6: # big plot with everything, saves it in two directories
        plot_all(folder, root_directory, file_prefix_1,file_prefix_2,dir_score,neigh_score,dist_from_edge,dist_from_cell,mag_list,\
            X,Y,Z,U,V,W,cell_center_1,cell_mesh_1,cell_vol_1,cell_center_2,cell_mesh_2,cell_vol_2,X_DIM,Y_DIM,Z_DIM,figtype_list)
    if plot_type == 2 or plot_type == 6: # plots cells in both configurations
        plot_only_cells(cell_mesh_1,cell_center_1,cell_vol_1,cell_mesh_2,cell_center_2,cell_vol_2,X_DIM,Y_DIM,Z_DIM,folder,figtype_list)
    if plot_type == 3 or plot_type == 6: # plots scores only
        plot_only_scores(neigh_score,dir_score,folder,figtype_list)
    if plot_type == 4 or plot_type == 6: # plots slice only
        plot_only_slice(dir_score,X,Y,Z,U,V,W,cell_center_1,cell_mesh_1,cell_center_2,cell_mesh_2,X_DIM,Y_DIM,Z_DIM,folder,figtype_list)
    if plot_type == 5 or plot_type == 6: # plots magnitude wrt distance from surface
        plot_only_distance(cell_mesh_1,dist_from_edge,dist_from_cell,mag_list,folder,figtype_list)
    if should_plot:
        plot_vector_field(X,Y,Z,U,V,W, cell_mesh_1, cell_mesh_2, dir_score,should_plot,True,folder)
    return
##########################################################################################
# displacement interpolation -- use GPR
##########################################################################################
# --> create GP model
def create_gp_model(X,Y,Z,QoI):
    """Fit a Gaussian-process model of QoI over the (X, Y, Z) bead positions.

    Returns the fitted GaussianProcessRegressor together with the
    StandardScaler used to normalize the training coordinates.
    """
    raw_coords = np.zeros((X.shape[0], 3))
    raw_coords[:, 0] = X
    raw_coords[:, 1] = Y
    raw_coords[:, 2] = Z
    # Standardize coordinates before fitting; the same scaler must be applied
    # at prediction time.
    scaler = preprocessing.StandardScaler().fit(raw_coords)
    gp = GaussianProcessRegressor(kernel=RationalQuadratic())
    gp.fit(scaler.transform(raw_coords), QoI)
    return gp, scaler
# --> create GP models
def create_GP_model(file_prefix_1,file_prefix_2,root_directory):
    """Fit GP models for the U, V and W displacements and pickle them to the track folder."""
    folder = root_directory + '/Track_' + file_prefix_1 + '_to_' + file_prefix_2
    X, Y, Z, U, V, W = import_bead_disps(folder)
    models = {}
    for name, qoi in (('U', U), ('V', V), ('W', W)):
        models[name], scaler = create_gp_model(X, Y, Z, qoi)
    for name, gp in models.items():
        pickle.dump(gp, open(folder + '/gp_' + name + '.sav','wb'))
    # All three scalers are fit on the same coordinates, so saving the last
    # one is sufficient.
    pickle.dump(scaler, open(folder + '/scaler.sav','wb'))
    return
# --> interpolate GP model
def interpolate_gp_model(plane_case, center, gp, scaler, X_DIM, Y_DIM, Z_DIM ):
    """Evaluate a fitted GP model on a regular 100x100 grid through one slice plane.

    plane_case selects the slicing plane: 1 holds x fixed, 2 holds y fixed,
    3 holds z fixed, each at the matching component of `center`.  Returns the
    two in-plane coordinate grids and the predicted field, all of shape
    (grid_pts, grid_pts).

    Raises:
        ValueError: if plane_case is not 1, 2 or 3.
    """
    x_min = -1; x_max = X_DIM
    y_min = -1; y_max = Y_DIM
    z_min = -1; z_max = Z_DIM
    grid_pts = 100
    # --> construct artificial grid for plotting
    if plane_case == 1: #x plane
        # BUG FIX: the fixed x coordinate previously used center[1] (the y
        # component); plot_gp_model_one_row slices the cell outlines and draws
        # its guide lines at center[0] for this plane, so use center[0] here.
        y = np.linspace(y_min, y_max, grid_pts)
        z = np.linspace(z_min, z_max, grid_pts)
        Y, Z = np.meshgrid(y, z)
        X = center[0] * np.ones((grid_pts, grid_pts))
    elif plane_case == 2: #y plane
        # BUG FIX: likewise, the fixed y coordinate previously used center[0].
        x = np.linspace(x_min, x_max, grid_pts)
        z = np.linspace(z_min, z_max, grid_pts)
        X, Z = np.meshgrid(x, z)
        Y = center[1] * np.ones((grid_pts, grid_pts))
    elif plane_case == 3: #z plane
        x = np.linspace(x_min, x_max, grid_pts)
        y = np.linspace(y_min, y_max, grid_pts)
        X, Y = np.meshgrid(x, y)
        Z = center[2] * np.ones((grid_pts, grid_pts))
    else:
        # Previously an invalid plane_case crashed with a NameError; fail fast
        # with a clear message instead.
        raise ValueError('plane_case must be 1, 2 or 3, got %r' % (plane_case,))
    # --> fit model grid: evaluate the whole grid with a single predict call
    # (instead of one call per grid row, which is much slower for GP models),
    # then reshape back to (grid_pts, grid_pts).  Row-major raveling keeps the
    # original RES[j, k] <-> (X[j, k], Y[j, k], Z[j, k]) correspondence.
    # (The coordinate array is not named `input` to avoid shadowing the builtin.)
    grid_coords = np.column_stack((X.ravel(), Y.ravel(), Z.ravel()))
    RES = gp.predict(scaler.transform(grid_coords)).reshape(grid_pts, grid_pts)
    if plane_case == 1:
        return Y, Z, RES
    elif plane_case == 2:
        return X, Z, RES
    return X, Y, RES
# --> create a single GP plot
def plot_gp_model_single_plot(axi,is_mag,data_1,data_2,result,title):
    """Draw one interpolated displacement field as a pcolor map with a labeled colorbar."""
    # Magnitude fields are non-negative, so they get a [0, 10] color scale;
    # signed components use a symmetric [-5, 5] scale.
    if is_mag:
        vmin, vmax = 0, 10
    else:
        vmin, vmax = -5, 5
    field_plot = axi.pcolor(data_1, data_2, result, cmap=plt.cm.coolwarm, vmin=vmin, vmax=vmax)
    cbar = plt.colorbar(field_plot, ax=axi)
    cbar.set_label(title, labelpad=-95, y=1.13, rotation=0)
    return
# --> plot GPR model, one row
def plot_gp_model_one_row(ax_list,is_mag,X_DIM,Y_DIM,Z_DIM,title,center,gp_model,scaler,cell_mesh_1,cell_mesh_2):
    """Plot one displacement component (or the magnitude) on three slice views.

    ax_list holds the three axes for the x-z, y-z and x-y views.  When is_mag
    is True, gp_model is a [gp_U, gp_V, gp_W] list and the Euclidean magnitude
    of the three predicted fields is plotted; otherwise gp_model is one
    fitted GP model.
    """
    def _slice_result(plane_case):
        # Evaluate either the single model or the component-wise magnitude on
        # the requested slice plane (hoists the duplicated interpolation code).
        if is_mag == False:
            return interpolate_gp_model(plane_case, center, gp_model, scaler, X_DIM, Y_DIM, Z_DIM)
        data_1, data_2, result_0 = interpolate_gp_model(plane_case, center, gp_model[0], scaler, X_DIM, Y_DIM, Z_DIM)
        data_1, data_2, result_1 = interpolate_gp_model(plane_case, center, gp_model[1], scaler, X_DIM, Y_DIM, Z_DIM)
        data_1, data_2, result_2 = interpolate_gp_model(plane_case, center, gp_model[2], scaler, X_DIM, Y_DIM, Z_DIM)
        result = (result_0**2.0 + result_1**2.0 + result_2**2.0)**(1.0/2.0)
        return data_1, data_2, result
    # --- panel 0: x-z view (plane_case 2) ---
    axi = ax_list[0]
    data_1, data_2, result = _slice_result(2)
    plot_gp_model_single_plot(axi,is_mag,data_1,data_2,result,title)
    idx0 = 0; idx1 = 2; idx2 = 1
    plot_cell(center[idx2],cell_mesh_1[:,idx0],cell_mesh_1[:,idx1],cell_mesh_1[:,idx2],(0.75,0.75,0.75),axi)
    plot_cell(center[idx2],cell_mesh_2[:,idx0],cell_mesh_2[:,idx1],cell_mesh_2[:,idx2],(0,0,0),axi)
    axi.plot([-1,X_DIM],[center[2],center[2]],'k:',linewidth=1.0)
    axi.plot([center[0],center[0]],[-1,Z_DIM],'k:',linewidth=1.0)
    axi.set_xlabel(r'x-position $\mu m$')
    axi.set_ylabel(r'z-position $\mu m$')
    axi.set_xlim((-1,X_DIM))
    axi.set_ylim((-1,Z_DIM))
    # --- panel 1: y-z view (plane_case 1) ---
    # BUG FIX: the original assigned `place_case = 1` (a typo), so plane_case
    # silently stayed 2 and this panel re-plotted the x-z slice.
    axi = ax_list[1]
    data_1, data_2, result = _slice_result(1)
    plot_gp_model_single_plot(axi,is_mag,data_1,data_2,result,title)
    idx0 = 1; idx1 = 2; idx2 = 0
    plot_cell(center[idx2],cell_mesh_1[:,idx0],cell_mesh_1[:,idx1],cell_mesh_1[:,idx2],(0.75,0.75,0.75),axi)
    plot_cell(center[idx2],cell_mesh_2[:,idx0],cell_mesh_2[:,idx1],cell_mesh_2[:,idx2],(0,0,0),axi)
    axi.plot([-1,Y_DIM],[center[2],center[2]],'k:',linewidth=1.0)
    axi.plot([center[1],center[1]],[-1,Z_DIM],'k:',linewidth=1.0)
    axi.set_xlabel(r'y-position $\mu m$')
    axi.set_ylabel(r'z-position $\mu m$')
    axi.set_xlim((-1,Y_DIM))
    axi.set_ylim((-1,Z_DIM))
    # --- panel 2: x-y view (plane_case 3) ---
    axi = ax_list[2]
    data_1, data_2, result = _slice_result(3)
    plot_gp_model_single_plot(axi,is_mag,data_1,data_2,result,title)
    idx0 = 0; idx1 = 1; idx2 = 2
    plot_cell(center[idx2],cell_mesh_1[:,idx0],cell_mesh_1[:,idx1],cell_mesh_1[:,idx2],(0.75,0.75,0.75),axi)
    plot_cell(center[idx2],cell_mesh_2[:,idx0],cell_mesh_2[:,idx1],cell_mesh_2[:,idx2],(0,0,0),axi)
    axi.plot([-1,X_DIM],[center[1],center[1]],'k:',linewidth=1.0)
    # BUG FIX: this panel's vertical axis is y, so the guide line and the axis
    # limit must span Y_DIM (previously Z_DIM).
    axi.plot([center[0],center[0]],[-1,Y_DIM],'k:',linewidth=1.0)
    axi.set_xlabel(r'x-position $\mu m$')
    axi.set_ylabel(r'y-position $\mu m$')
    axi.set_xlim((-1,X_DIM))
    axi.set_ylim((-1,Y_DIM))
# --> plot GPR model
def plot_gp_model(file_prefix_1,file_prefix_2,X_DIM,Y_DIM,Z_DIM,figtype_list,use_corrected_cell, root_directory):
    # Load the pickled GP displacement models for one tracking pair and plot a
    # 4x3 grid: rows are the x-, y- and z-displacement fields plus the
    # magnitude; columns are the three slice views through `center`.
    cell_mesh_1, cell_normal_1, cell_center_1, cell_vol_1, cell_mesh_2, cell_normal_2, cell_center_2, cell_vol_2 = import_cell_info(file_prefix_1,file_prefix_2,root_directory)
    # Slice planes pass through the midpoint of the two cell centers.
    center = 0.5*cell_center_1 + 0.5*cell_center_2
    folder = root_directory + '/Track_' + file_prefix_1 + '_to_' + file_prefix_2
    if use_corrected_cell:
        # Replace the second mesh with the corrected one when requested.
        cell_mesh_2 = np.loadtxt(folder + '/cell_mesh_2_corrected.txt')
    # Models and scaler were pickled by create_GP_model.
    gp_U = pickle.load(open(folder + '/gp_U.sav', 'rb'))
    gp_V = pickle.load(open(folder + '/gp_V.sav', 'rb'))
    gp_W = pickle.load(open(folder + '/gp_W.sav', 'rb'))
    scaler = pickle.load(open(folder + '/scaler.sav','rb'))
    fig = plt.figure()
    plt.style.use(stylepath)
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    fig.set_figheight(20)
    fig.set_figwidth(15)
    # Row 1: x-displacement.
    ax1 = fig.add_subplot(4, 3, 1); ax2 = fig.add_subplot(4, 3, 2);ax3 = fig.add_subplot(4, 3, 3)
    ax_list1 = [ax1,ax2,ax3]
    title = r'x-displacement $\mu m$'
    is_mag = False
    plot_gp_model_one_row(ax_list1,is_mag,X_DIM,Y_DIM,Z_DIM,title,center,gp_U,scaler,cell_mesh_1,cell_mesh_2)
    # Row 2: y-displacement.
    ax4 = fig.add_subplot(4, 3, 4); ax5 = fig.add_subplot(4, 3, 5); ax6 = fig.add_subplot(4, 3, 6)
    ax_list2 = [ax4,ax5,ax6]
    title = r'y-displacement $\mu m$'
    is_mag = False
    plot_gp_model_one_row(ax_list2,is_mag,X_DIM,Y_DIM,Z_DIM,title,center,gp_V,scaler,cell_mesh_1,cell_mesh_2)
    # Row 3: z-displacement.
    ax7 = fig.add_subplot(4, 3, 7); ax8 = fig.add_subplot(4, 3, 8); ax9 = fig.add_subplot(4, 3, 9)
    ax_list3 = [ax7,ax8,ax9]
    title = r'z-displacement $\mu m$'
    is_mag = False
    plot_gp_model_one_row(ax_list3,is_mag,X_DIM,Y_DIM,Z_DIM,title,center,gp_W,scaler,cell_mesh_1,cell_mesh_2)
    # Row 4: magnitude (passing all three models with is_mag=True switches
    # plot_gp_model_one_row into magnitude mode).
    ax10 = fig.add_subplot(4, 3, 10); ax11 = fig.add_subplot(4, 3, 11); ax12 = fig.add_subplot(4, 3, 12)
    ax_list4 = [ax10,ax11,ax12]
    title = r'mag-displacement $\mu m$'
    is_mag = True
    plot_gp_model_one_row(ax_list4,is_mag,X_DIM,Y_DIM,Z_DIM,title,center,[gp_U, gp_V, gp_W],scaler,cell_mesh_1,cell_mesh_2)
    plt.tight_layout()
    for end in figtype_list:
        fname = folder + '/Interpolate_plot' + end
        plt.savefig(fname)
96eebe16c5c97601b0056f0ec84be091e97855b8 | 1,592 | py | Python | chapter_6/C.py | staguchi0703/ALDS1 | bb3d5865fb0a3f459f7a7331190e146c0e817a33 | [
"MIT"
] | null | null | null | chapter_6/C.py | staguchi0703/ALDS1 | bb3d5865fb0a3f459f7a7331190e146c0e817a33 | [
"MIT"
] | null | null | null | chapter_6/C.py | staguchi0703/ALDS1 | bb3d5865fb0a3f459f7a7331190e146c0e817a33 | [
"MIT"
] | null | null | null | # %%
# VScodeで入力をテキストから読み込んで標準入力に渡す
import sys
import os
f=open(r'.\chapter_6\C_input.txt', 'r', encoding="utf-8")
# inputをフルパスで指定
# win10でファイルを作るとs-jisで保存されるため、読み込みをutf-8へエンコードする必要あり
# VScodeでinput file開くとutf8になってるんだけど中身は結局s-jisになっているらしい
sys.stdin=f
#
# 入力スニペット
# num = int(input())
# num_list = [int(item) for item in input().split()]
# num_list = [input() for _ in range(3)]
##################################
# %%
# 以下ペースト可
import sys
sys.setrecursionlimit(2000000000)
N = int(input())
origin_list = [input().split() for _ in range(N)]
card_list = origin_list[:]
card_list2 = sorted(card_list[:], key=lambda x:x[1])
def partition(A, p, r):
    """Lomuto partition of A[p..r] keyed on the integer at A[i][1]; returns the pivot index."""
    pivot = int(A[r][1])
    boundary = p - 1
    for scan in range(p, r):
        if int(A[scan][1]) <= pivot:
            boundary += 1
            A[boundary], A[scan] = A[scan], A[boundary]
    # Move the pivot element just past the <=-pivot region.
    A[boundary + 1], A[r] = A[r], A[boundary + 1]
    return boundary + 1


def quickSort(A, p, r):
    """In-place quicksort of A[p..r], ordering items by the integer stored at index 1."""
    if p >= r:
        return
    q = partition(A, p, r)
    quickSort(A, p, q - 1)
    quickSort(A, q + 1, r)
# Sort the working copy by integer value using the hand-written quicksort.
quickSort(card_list, 0, len(card_list)-1)
# sorce_list = ['' for _ in range(N)]
# test_list = ['' for _ in range(N)]
# temp_num = 0
# cnt = 0
# for i in range(N):
#     if test_list[i][1] > temp_num:
#         temp_num = test_list[i][1]
#         cnt += 1
#     else:
#         sorce_list[cnt] +=
#     index = int(origin_list[i][1])
#     sorce_list[index] += origin_list[i][0]+origin_list[i][1]
#     index2 = int(card_list[i][1])
#     test_list[index2] += card_list[i][0]+card_list[i][1]
# Stability check: card_list2 came from Python's stable sorted(); the
# quicksorted list matches it only if equal-valued cards kept their order.
if card_list == card_list2:
    print('Stable')
else:
    print('Not stable')
# Print the sorted cards, one "suit value" pair per line.
for item in card_list:
    print(*item)
| 21.226667 | 62 | 0.574121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,030 | 0.570953 |
96eece2555a7b7330f57153533780f7760ac2702 | 5,001 | py | Python | cloudformation/solution-assistant/src/lambda_function.py | NihalHarish/sagemaker-explaining-credit-decisions | e5965902d8901819a60f8c56517a82ddd17c1f95 | [
"Apache-2.0"
] | 80 | 2020-04-15T09:35:11.000Z | 2022-03-23T01:56:12.000Z | cloudformation/solution-assistant/src/lambda_function.py | IronOnet/sagemaker-explaining-credit-decisions | dbb8ea1a685412033c774c2a79cc1e5794438cf9 | [
"Apache-2.0"
] | 8 | 2020-04-16T16:53:09.000Z | 2022-02-06T17:07:02.000Z | cloudformation/solution-assistant/src/lambda_function.py | IronOnet/sagemaker-explaining-credit-decisions | dbb8ea1a685412033c774c2a79cc1e5794438cf9 | [
"Apache-2.0"
] | 28 | 2020-05-25T09:26:41.000Z | 2022-01-25T22:23:54.000Z | import boto3
from pathlib import Path
import sys
sys.path.append('./site-packages')
from crhelper import CfnResource
import datasets
helper = CfnResource()
@helper.update
@helper.create
def on_create(event, _):
folderpath = Path("/tmp")
solutions_s3_bucket = event["ResourceProperties"]["SolutionsS3BucketName"]
solutions_s3_object = (
"Explaining-credit-decisions/dataset/german.data" # noqa
)
filepaths = datasets.generate_datasets(
solutions_s3_bucket, solutions_s3_object, folderpath
)
s3_client = boto3.client("s3")
s3_bucket = event["ResourceProperties"]["S3BucketName"]
for filepath in filepaths:
object_name = filepath.relative_to(folderpath)
object_name = str(Path("datasets", object_name))
s3_client.upload_file(str(filepath), s3_bucket, object_name)
def on_update(_, __):
    """No-op placeholder; updates are routed to on_create via its @helper.update decorator."""
    pass
def delete_sagemaker_endpoint(endpoint_name):
    """Delete a SageMaker endpoint, tolerating its absence during teardown."""
    client = boto3.client("sagemaker")
    try:
        client.delete_endpoint(EndpointName=endpoint_name)
    except client.exceptions.ClientError as e:
        # Only swallow the specific "does not exist" failure.
        if "Could not find endpoint" not in str(e):
            raise e
        print(
            "Could not find endpoint called '{}'. "
            "Skipping delete.".format(endpoint_name)
        )
    else:
        print(
            "Successfully deleted endpoint "
            "called '{}'.".format(endpoint_name)
        )
def delete_sagemaker_endpoint_config(endpoint_config_name):
    """Delete a SageMaker endpoint configuration, tolerating its absence."""
    client = boto3.client("sagemaker")
    try:
        client.delete_endpoint_config(
            EndpointConfigName=endpoint_config_name
        )
    except client.exceptions.ClientError as e:
        # Only swallow the specific "does not exist" failure.
        if "Could not find endpoint configuration" not in str(e):
            raise e
        print(
            "Could not find endpoint configuration called '{}'. "
            "Skipping delete.".format(endpoint_config_name)
        )
    else:
        print(
            "Successfully deleted endpoint configuration "
            "called '{}'.".format(endpoint_config_name)
        )
def delete_sagemaker_model(model_name):
    """Delete a SageMaker model, tolerating its absence."""
    client = boto3.client("sagemaker")
    try:
        client.delete_model(ModelName=model_name)
    except client.exceptions.ClientError as e:
        # Only swallow the specific "does not exist" failure.
        if "Could not find model" not in str(e):
            raise e
        print(
            "Could not find model called '{}'. "
            "Skipping delete.".format(model_name)
        )
    else:
        print("Successfully deleted model called '{}'.".format(model_name))
def delete_s3_objects(bucket_name):
    """Empty an S3 bucket; a nonexistent bucket is skipped rather than treated as an error."""
    s3 = boto3.resource("s3")
    try:
        s3.Bucket(bucket_name).objects.all().delete()
    except s3.meta.client.exceptions.NoSuchBucket:
        print(
            "Could not find bucket called '{}'. "
            "Skipping delete.".format(bucket_name)
        )
    else:
        print(
            "Successfully deleted objects in bucket "
            "called '{}'.".format(bucket_name)
        )
def delete_ecr_images(repository_name):
    """Delete every image in an ECR repository; a missing repository is skipped."""
    ecr_client = boto3.client("ecr")
    try:
        # NOTE(review): describe_images returns a single page of results;
        # very large repositories may need pagination — confirm.
        images = ecr_client.describe_images(repositoryName=repository_name)
        image_details = images["imageDetails"]
        if len(image_details) > 0:
            # batch_delete_image takes image identifiers (digests), not the
            # full detail records.
            image_ids = [
                {"imageDigest": i["imageDigest"]} for i in image_details
            ]
            ecr_client.batch_delete_image(
                repositoryName=repository_name, imageIds=image_ids
            )
            print(
                "Successfully deleted {} images from repository "
                "called '{}'. ".format(len(image_details), repository_name)
            )
        else:
            print(
                "Could not find any images in repository "
                "called '{}' not found. "
                "Skipping delete.".format(repository_name)
            )
    except ecr_client.exceptions.RepositoryNotFoundException:
        print(
            "Could not find repository called '{}' not found. "
            "Skipping delete.".format(repository_name)
        )
@helper.delete
def on_delete(event, __):
    """Tear down solution resources on stack deletion: SageMaker endpoints, S3 objects, ECR images."""
    # remove sagemaker endpoint
    resource_name = event["ResourceProperties"]["SolutionPrefix"]
    endpoint_names = [
        "{}-explainer".format(resource_name),
        "{}-predictor".format(resource_name)
    ]
    for endpoint_name in endpoint_names:
        # Model, endpoint config and endpoint share the same name here.
        delete_sagemaker_model(endpoint_name)
        delete_sagemaker_endpoint_config(endpoint_name)
        delete_sagemaker_endpoint(endpoint_name)
    # remove sagemaker endpoint config
    # remove files in s3
    s3_bucket = event["ResourceProperties"]["S3BucketName"]
    delete_s3_objects(s3_bucket)
    # delete images in ecr repository
    ecr_repository = event["ResourceProperties"]["ECRRepository"]
    delete_ecr_images(ecr_repository)
def handler(event, context):
    """Lambda entry point: dispatch the CloudFormation event to the crhelper resource."""
    helper(event, context)
96ef264174a3778804b0a88b93099995faf0b946 | 1,209 | py | Python | rover/controls-systems/mobility/I2C_Test_Code_Enum.py | CSUFTitanRover/TitanRover2018 | 4926d377322a37ba644d7e852faa305fb8bb9b55 | [
"Apache-2.0"
] | 16 | 2017-09-01T23:33:17.000Z | 2021-01-04T02:41:19.000Z | rover/controls-systems/mobility/I2C_Test_Code_Enum.py | WesleyBaxter/TitanRover2018 | be69fa908ed0cbb1f4fe4708d0394881b3a4b105 | [
"Apache-2.0"
] | 56 | 2017-08-30T01:14:46.000Z | 2021-02-28T22:18:44.000Z | rover/controls-systems/mobility/I2C_Test_Code_Enum.py | WesleyBaxter/TitanRover2018 | be69fa908ed0cbb1f4fe4708d0394881b3a4b105 | [
"Apache-2.0"
] | 15 | 2017-09-14T19:55:55.000Z | 2020-05-03T19:44:39.000Z | import smbus
import time
from enum import Enum
# Sets enumerators and their values
class commands(Enum):
UNKNOWN_COMMAND = 0
LED = 1
SERVO = 2
# Initializes bus to smbus
bus = smbus.SMBus(1)
# This is the slave address we setup in the Arduino Program
address = 0x04
def writeCommand(cmd):
    """Send the command named by *cmd* ('led' or 'servo') to the Arduino over I2C.

    Returns -1 when the command name is not recognized (nothing is written);
    returns None on success.  Callers in this script ignore the result.
    """
    # Command names are case-insensitive.
    cmd = cmd.lower()
    # Writes the value that is equivalent to the enumerator to the bus.
    if cmd == 'led':
        bus.write_byte(address, commands.LED.value)
    elif cmd == 'servo':
        bus.write_byte(address, commands.SERVO.value)
    else:
        # BUG FIX: previously `return -1` executed unconditionally, so every
        # call reported failure even after a successful write; now it only
        # signals an unrecognized command, matching the original comment.
        return -1
def readCommand():
    """Read one byte back from the Arduino slave over I2C."""
    return bus.read_byte(address)
while True:
    # Prompt for a command name; re-prompt on empty input.
    var = input("Enter a valid command {led, servo}: ")
    if not var:
        continue
    # Send the Arduino a command byte based on the user input.
    writeCommand(var)
    print("RPI: Hi Arduino, I sent you", var)
    # Read back the byte the Arduino reports and echo it as an enum member.
    cmd = readCommand()
    print("Arduino: Hey RPI, I received", commands(cmd))
| 27.477273 | 78 | 0.679901 | 71 | 0.058726 | 0 | 0 | 0 | 0 | 0 | 0 | 591 | 0.488834 |
96f175f0b51602c5e97137cd52bc7e95509e5606 | 423 | py | Python | tests/test_matrix.py | avere001/dsplot | 89948c2f1b16e00bb3a240f73d0cb100b3eac847 | [
"MIT"
] | 8 | 2021-08-08T06:06:39.000Z | 2022-02-04T18:30:38.000Z | tests/test_matrix.py | avere001/dsplot | 89948c2f1b16e00bb3a240f73d0cb100b3eac847 | [
"MIT"
] | 1 | 2022-01-04T02:01:36.000Z | 2022-01-04T02:01:36.000Z | tests/test_matrix.py | avere001/dsplot | 89948c2f1b16e00bb3a240f73d0cb100b3eac847 | [
"MIT"
] | 2 | 2021-08-18T12:28:40.000Z | 2022-01-03T23:56:41.000Z | import os
import pytest
from dsplot.errors import InputException
from dsplot.matrix import Matrix
def test_matrix():
matrix = Matrix([[1, 2, 3], [4, 5, 6], [1, 2, 6]])
matrix.plot('tests/test_data/matrix.png')
assert 'matrix.png' in os.listdir('tests/test_data')
with pytest.raises(InputException) as e:
Matrix(nodes=[[]])
assert str(e.value) == 'Input list must have at least 1 element.'
| 23.5 | 69 | 0.669031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.234043 |
96f1aad1a0a08c20f4321ab24752eaca035e74c9 | 1,724 | py | Python | odoo-13.0/addons/board/controllers/main.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/board/controllers/main.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/board/controllers/main.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree as ElementTree
from odoo.http import Controller, route, request
class Board(Controller):
@route('/board/add_to_dashboard', type='json', auth='user')
def add_to_dashboard(self, action_id, context_to_save, domain, view_mode, name=''):
# Retrieve the 'My Dashboard' action from its xmlid
action = request.env.ref('board.open_board_my_dash_action')
if action and action['res_model'] == 'board.board' and action['views'][0][1] == 'form' and action_id:
# Maybe should check the content instead of model board.board ?
view_id = action['views'][0][0]
board = request.env['board.board'].fields_view_get(view_id, 'form')
if board and 'arch' in board:
xml = ElementTree.fromstring(board['arch'])
column = xml.find('./board/column')
if column is not None:
new_action = ElementTree.Element('action', {
'name': str(action_id),
'string': name,
'view_mode': view_mode,
'context': str(context_to_save),
'domain': str(domain)
})
column.insert(0, new_action)
arch = ElementTree.tostring(xml, encoding='unicode')
request.env['ir.ui.view.custom'].create({
'user_id': request.session.uid,
'ref_id': view_id,
'arch': arch
})
return True
return False
| 42.04878 | 109 | 0.533643 | 1,533 | 0.889211 | 0 | 0 | 1,503 | 0.87181 | 0 | 0 | 475 | 0.275522 |
96f23e2c5a6ab2a675fd38cb4e2d70f563a35f7e | 485 | py | Python | JSONFormatter.py | ejkim1996/Unity-JSON-Manager | 29f98247263f2bb64f6960d4aa42bfc448fda3dd | [
"MIT"
] | 2 | 2019-01-12T09:53:50.000Z | 2021-03-02T02:19:28.000Z | JSONFormatter.py | ejkim1996/Unity-JSON-Manager | 29f98247263f2bb64f6960d4aa42bfc448fda3dd | [
"MIT"
] | null | null | null | JSONFormatter.py | ejkim1996/Unity-JSON-Manager | 29f98247263f2bb64f6960d4aa42bfc448fda3dd | [
"MIT"
] | null | null | null | import json
from tkinter import Tk
from tkinter.filedialog import askopenfilename
# Python script that allows user to select JSON file using TKinter and format it properly.
root = Tk()
filename = askopenfilename()
root.destroy() # Close the window
read = open(filename, 'r')
parsed = json.load(read)
write = open(filename, 'w')
newstr = json.dumps(parsed, indent = 3, sort_keys =True)
write.write(newstr) # Overwrite the old unformatted json file
read.close()
write.close()
| 23.095238 | 90 | 0.74433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.319588 |
96f32ed1cb808038241c0062fb82667d9d2ca2d6 | 188 | py | Python | webs/douban/tasks/__init__.py | billvsme/videoSpider | e19111cc48d0a2a44c5245b0ddc9fad0c7a1824d | [
"MIT"
] | 216 | 2016-02-20T12:46:43.000Z | 2022-02-23T07:07:00.000Z | webs/douban/tasks/__init__.py | billvsme/tvCrawlers | e19111cc48d0a2a44c5245b0ddc9fad0c7a1824d | [
"MIT"
] | 3 | 2016-05-06T05:04:17.000Z | 2021-12-13T19:41:39.000Z | webs/douban/tasks/__init__.py | billvsme/tvCrawlers | e19111cc48d0a2a44c5245b0ddc9fad0c7a1824d | [
"MIT"
] | 99 | 2016-02-20T08:34:00.000Z | 2022-02-10T20:52:01.000Z | from . import get_main_movies_base_data
from . import get_main_movies_full_data
from . import get_celebrities_full_data
from . import down_video_images
from . import down_celebrity_images
| 31.333333 | 39 | 0.867021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
96f370301695ce2890172ceee68267335e737203 | 397 | py | Python | examples/self_bot.py | LimeProgramming/defectio | ff5bbd2964a66fe44f9f1e192d0574c8db4a7404 | [
"MIT"
] | null | null | null | examples/self_bot.py | LimeProgramming/defectio | ff5bbd2964a66fe44f9f1e192d0574c8db4a7404 | [
"MIT"
] | null | null | null | examples/self_bot.py | LimeProgramming/defectio | ff5bbd2964a66fe44f9f1e192d0574c8db4a7404 | [
"MIT"
] | null | null | null | import defectio
client = defectio.Client()
@client.event
async def on_ready():
print("We have logged in.")
@client.event
async def on_message(message: defectio.Message):
if message.author == client.user:
return
if message.content.startswith("$hello"):
await message.channel.send("Hello!")
client.run(
session_token="session_token",
user_id="user_id",
)
| 16.541667 | 48 | 0.680101 | 0 | 0 | 0 | 0 | 273 | 0.687657 | 245 | 0.617128 | 60 | 0.151134 |
96f38ecfbeecd1933cbe011b730c71f4c32f4990 | 301 | py | Python | rtl/tests/test_log.py | kelceydamage/raspi-tasks | 18aa323e3e2428c998b7472c226d05a00c8ae8c2 | [
"Apache-2.0"
] | 1 | 2019-08-10T00:27:45.000Z | 2019-08-10T00:27:45.000Z | rtl/tests/test_log.py | kelceydamage/raspi-tasks | 18aa323e3e2428c998b7472c226d05a00c8ae8c2 | [
"Apache-2.0"
] | null | null | null | rtl/tests/test_log.py | kelceydamage/raspi-tasks | 18aa323e3e2428c998b7472c226d05a00c8ae8c2 | [
"Apache-2.0"
] | null | null | null | from rtl.tasks.log import log
from dummy_data import KWARGS, CONTENTS3
def test_log():
KWARGS = {
'operations': [
{
'a': 'b',
'column': 'c'
}
]
}
r = log(KWARGS, CONTENTS3)
assert r['c'][2] == 2.5649493574615367
| 20.066667 | 42 | 0.465116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.106312 |
96f3a23cb8b0a87f238aa02340f2199afdec7644 | 1,970 | py | Python | CollaborativeFiltering/CollaborativeFiltering.py | darwin-b/MachineLearning | 834a07fcb5052053d7a7d6d9fe4fc5abbe3117d3 | [
"MIT"
] | null | null | null | CollaborativeFiltering/CollaborativeFiltering.py | darwin-b/MachineLearning | 834a07fcb5052053d7a7d6d9fe4fc5abbe3117d3 | [
"MIT"
] | null | null | null | CollaborativeFiltering/CollaborativeFiltering.py | darwin-b/MachineLearning | 834a07fcb5052053d7a7d6d9fe4fc5abbe3117d3 | [
"MIT"
] | null | null | null |
import numpy as np
train_ratings_path = "./../Data/netflix/TrainingRatings.txt"
test_ratings_path = "./../Data/netflix/TestingRatings.txt"
map_users={}
map_titles={}
data_matrix = np.empty((28978,1821),dtype=np.float32)
data_matrix[:] = np.nan
with open(train_ratings_path,'r') as reader:
counter_titles=0
counter_users = 0
for line in reader:
title,user_id,rating = line.split(',')
if not title in map_titles:
map_titles[title] = counter_titles
counter_titles +=1
if not user_id in map_users:
map_users[user_id]=counter_users
counter_users +=1
data_matrix[map_users[user_id]][map_titles[title]] = rating
del reader
mean_rating = np.nanmean(data_matrix,axis=1)
data_matrix[np.isnan(data_matrix)]=0
deviation = data_matrix - mean_rating[:,np.newaxis]
weights = {}
ratings={}
predicted = {}
squared_dev = (deviation**2).sum(axis=1)
act_ratings=[]
pred_ratings=[]
error_rating=[]
with open(test_ratings_path,'r') as reader:
c=0
for line in reader:
title,user_id,rating = line.split(',')
mapped_user = map_users[user_id]
mapped_title = map_titles[title]
if user_id not in weights:
n_correlation = np.abs((deviation[mapped_user] * deviation).sum(axis=1))
d_correlation = np.sqrt(squared_dev[mapped_user] * squared_dev)
weights[user_id]=n_correlation/d_correlation
normalising_constant = weights[user_id].sum()
weighted_sum = (weights[user_id]*(data_matrix[:,mapped_title] - mean_rating)).sum()
predicted[(mapped_title,user_id)] = mean_rating[mapped_user] + weighted_sum/normalising_constant
act_ratings.append(float(rating.replace("\n", "")))
error_rating.append(float(rating.replace("\n", ""))-predicted[(mapped_title,user_id)])
print(c," Acct : ",float(rating.replace("\n", "")), "Pred : ",predicted[(mapped_title,user_id)])
c+=1
| 29.402985 | 104 | 0.668528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.063959 |
96f41d0268d46bff1736443eb02dd162aabd3a36 | 2,784 | py | Python | sources/lectures.py | JhoLee/ecampus-manager | 9c56678ba06b3b92f539b746d7103798592ad1ac | [
"MIT"
] | 1 | 2021-09-05T06:34:01.000Z | 2021-09-05T06:34:01.000Z | sources/lectures.py | JhoLee/BBang-Shuttle | 9c56678ba06b3b92f539b746d7103798592ad1ac | [
"MIT"
] | null | null | null | sources/lectures.py | JhoLee/BBang-Shuttle | 9c56678ba06b3b92f539b746d7103798592ad1ac | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/lectures.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.Qt import QMessageBox, QSize, QIcon
from PyQt5.QtWidgets import QDialog, QListWidgetItem
from main import show_messagebox
from PyQt5.QtGui import QStandardItemModel, QStandardItem
class Ui_Lectures(QDialog):
    '''Modal dialog that lists the user's lectures and lets them pick one.

    Largely generated from ui/lectures.ui by pyuic (see the header warning);
    prefer regenerating from the .ui file over hand-editing the layout code.
    '''
    def __init__(self, manager):
        super().__init__()
        self.manager = manager
        self.lectures = self.manager.lectures
        self.items = QStandardItemModel()
        self.setFixedSize(QSize(272, 200))
        self.setWindowIcon(QIcon('../resources/breadzip.ico'))
        # Set to True by select(); lets callers distinguish "button clicked"
        # from "dialog closed some other way".
        self.is_clicked_selection = False

    def setupUi(self):
        # Build widgets with fixed geometry (no layout manager), as generated.
        self.setObjectName("Lectures")
        self.resize(272, 200)
        self.lst_lectures = QtWidgets.QListWidget(self)
        self.lst_lectures.setGeometry(QtCore.QRect(10, 30, 251, 121))
        font = QtGui.QFont()
        font.setFamily("Malgun Gothic")
        font.setPointSize(10)
        self.lst_lectures.setFont(font)
        self.lst_lectures.setObjectName("lst_lectures")
        self.btn_select_subject = QtWidgets.QPushButton(self)
        self.btn_select_subject.setGeometry(QtCore.QRect(180, 160, 81, 31))
        font = QtGui.QFont()
        font.setFamily("Malgun Gothic")
        font.setPointSize(10)
        self.btn_select_subject.setFont(font)
        self.btn_select_subject.setObjectName("btn_start")
        self.label_3 = QtWidgets.QLabel(self)
        self.label_3.setGeometry(QtCore.QRect(10, 10, 251, 16))
        font = QtGui.QFont()
        font.setFamily("Malgun Gothic")
        font.setPointSize(10)
        self.label_3.setFont(font)
        self.label_3.setText("<html><head/><body><p align=\"justify\">수강할 과목을 선택하십시오.</p></body></html>")
        self.label_3.setAlignment(QtCore.Qt.AlignCenter)
        self.label_3.setWordWrap(True)
        self.label_3.setObjectName("label_3")
        _translate = QtCore.QCoreApplication.translate
        # NOTE(review): the title set here is immediately overwritten below.
        self.setWindowTitle(_translate("Lectures", "Dialog"))
        self.btn_select_subject.setText(_translate("Lectures", "과목 선택"))
        self.setWindowTitle(_translate("Login", "수강과목 선택 :: KNUT 빵셔틀"))
        # self.items = QStandardItemModel()
        # for lecture in self.lectures:
        #     self.items.appendRow(QStandardItem(lecture.text))
        # self.lst_lectures.setModel(self.items)
        # Populate the list directly from the lecture objects' display text.
        for lec in self.lectures:
            self.lst_lectures.addItem(lec.text)
        QtCore.QMetaObject.connectSlotsByName(self)
        self.btn_select_subject.clicked.connect(self.select)

    def select(self):
        # Record that the button (not a window close) ended the dialog.
        self.is_clicked_selection = True
        self.close()
| 37.12 | 105 | 0.674569 | 2,396 | 0.845448 | 0 | 0 | 0 | 0 | 0 | 0 | 657 | 0.231828 |
96f748a0816f7543ed6484d05af8d17f971b4e2c | 662 | py | Python | my_cv/gesture_recognition/demo.py | strawsyz/straw | db313c78c2e3c0355cd10c70ac25a15bb5632d41 | [
"MIT"
] | 2 | 2020-04-06T09:09:19.000Z | 2020-07-24T03:59:55.000Z | my_cv/gesture_recognition/demo.py | strawsyz/straw | db313c78c2e3c0355cd10c70ac25a15bb5632d41 | [
"MIT"
] | null | null | null | my_cv/gesture_recognition/demo.py | strawsyz/straw | db313c78c2e3c0355cd10c70ac25a15bb5632d41 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# Centered placeholder text that call_back() overwrites with event details.
text = ax.text(0.5, 0.5, 'event', ha='center', va='center', fontdict={'size': 20})
def call_back(event):
    '''Render the mouse event's details in the center of the axes.

    Bug fix: for events that occur outside the axes, Matplotlib sets
    ``event.xdata``/``event.ydata`` to None, so ``int(event.xdata)``
    raised a TypeError; such events are now ignored.
    '''
    # print( event.xdata, event.ydata)
    if event.xdata is None or event.ydata is None:
        return
    info = 'name:{}\n button:{}\n x,y:{},{}\n xdata,ydata:{}{}'.format(event.name, event.button, event.x, event.y,
                                                                       int(event.xdata), int(event.ydata))
    text.set_text(info)
    fig.canvas.draw_idle()
# Route press, release and mouse-move events to the same handler.
fig.canvas.mpl_connect('button_press_event', call_back)
fig.canvas.mpl_connect('button_release_event', call_back)
fig.canvas.mpl_connect('motion_notify_event', call_back)
plt.show()
| 33.1 | 114 | 0.622356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.268882 |
96f7b9468167fcacd877c20a27a062a48eb88c3a | 1,828 | py | Python | day16/part1.py | bugra-yilmaz/adventofcode2021 | 136cb1d4fba42af4eea934a73714c93710c8741e | [
"MIT"
] | null | null | null | day16/part1.py | bugra-yilmaz/adventofcode2021 | 136cb1d4fba42af4eea934a73714c93710c8741e | [
"MIT"
] | null | null | null | day16/part1.py | bugra-yilmaz/adventofcode2021 | 136cb1d4fba42af4eea934a73714c93710c8741e | [
"MIT"
] | null | null | null | import os.path
import pytest
INPUT_TXT = os.path.join(os.path.dirname(__file__), 'input.txt')
def compute(s: str) -> int:
    """Sum the version numbers of all packets in the hex transmission *s*."""
    bits = "".join(hexadecimal_to_binary(ch) for ch in s.strip())
    collected_versions = []
    parse_packet(collected_versions, bits, 0)
    return sum(collected_versions)
def parse_packet(versions, packet, index):
    """Parse one BITS packet starting at *index* of the binary string *packet*.

    Appends every packet version encountered (including sub-packets) to
    *versions* and returns the index just past the parsed packet.
    Returns None only when *packet* contains no set bits at all (padding).
    """
    if "1" not in packet:
        return
    # Header: 3-bit version, 3-bit type id.
    version = int(packet[index:index + 3], 2)
    versions.append(version)
    index += 3
    type_id = int(packet[index:index + 3], 2)
    index += 3
    if type_id == 4:
        # Literal value: 5-bit groups, a leading 1 bit means "more groups".
        value = ""
        while packet[index] == "1":
            value += packet[index + 1:index + 5]
            index += 5
        value += packet[index + 1:index + 5]
        index += 5
        return index
    else:
        # Operator packet: length type 0 = total bit length, 1 = sub-packet count.
        length_type_id = int(packet[index:index + 1], 2)
        index += 1
        if length_type_id == 0:
            length = int(packet[index:index + 15], 2)
            index += 15
            end = index + length
            # Consume sub-packets until the declared bit span is exhausted.
            while index < end:
                index = parse_packet(versions, packet, index)
        else:
            count = int(packet[index:index + 11], 2)
            index += 11
            for _ in range(count):
                index = parse_packet(versions, packet, index)
        return index
def hexadecimal_to_binary(hexadecimal: str) -> str:
    """Convert a hex string to binary, zero-padded to at least 4 characters."""
    value = int(hexadecimal, 16)
    bits = format(value, "b")
    return bits.zfill(4)
INPUT_S = '''\
A0016C880162017C3686B18A3D4780
'''
EXPECTED = 31
@pytest.mark.parametrize(
('input_s', 'expected'),
(
(INPUT_S, EXPECTED),
),
)
def test(input_s: str, expected: int) -> None:
assert compute(input_s) == expected
def main() -> int:
    """Run the solver against the puzzle input file and print the answer."""
    with open(INPUT_TXT, "r") as f:
        print(compute(f.read()))
    return 0


if __name__ == '__main__':
    raise SystemExit(main())
| 20.311111 | 64 | 0.560722 | 0 | 0 | 0 | 0 | 185 | 0.101204 | 0 | 0 | 92 | 0.050328 |
96f8088f165a5d53d7971cb2268d0b92f24f5e33 | 1,193 | py | Python | tests/managers/test_equal_managers.py | microprediction/precise | 0aa7c69c3c280926cec03fb6fc0934a6193da440 | [
"MIT"
] | 40 | 2022-01-13T00:40:59.000Z | 2022-03-31T20:33:19.000Z | tests/managers/test_equal_managers.py | microprediction/precise | 0aa7c69c3c280926cec03fb6fc0934a6193da440 | [
"MIT"
] | 14 | 2022-01-08T16:00:12.000Z | 2022-03-16T00:12:04.000Z | tests/managers/test_equal_managers.py | microprediction/precise | 0aa7c69c3c280926cec03fb6fc0934a6193da440 | [
"MIT"
] | 9 | 2022-01-26T21:14:43.000Z | 2022-03-21T17:32:02.000Z | import random
from precise.skaters.managerutil.managertesting import manager_test_run
from precise.skaters.managers.equalmanagers import equal_daily_long_manager, equal_long_manager
from precise.skaters.managers.equalmanagers import equal_weekly_long_manager, equal_weekly_buy_and_hold_long_manager
from precise.skatertools.data.equityhistorical import random_cached_equity_dense
from numpy.testing import assert_array_almost_equal
def test_random_manager():
    # Smoke test: a randomly chosen long-only manager should run without error.
    from precise.skaters.managers.allmanagers import LONG_MANAGERS
    mgr = random.choice(LONG_MANAGERS)
    manager_test_run(mgr=mgr)
def test_daily_equal():
    # The daily equal-weight manager should track the plain equal-weight one.
    assert_equal_managing(equal_long_manager, equal_daily_long_manager)
def test_weekly_equal():
    # Weekly rebalancing should match weekly buy-and-hold for equal weights.
    assert_equal_managing(equal_weekly_long_manager, equal_weekly_buy_and_hold_long_manager)
def assert_equal_managing(mgr1,mgr2):
    # Feed the same cached equity series to both managers, stepping each one's
    # state independently, and require near-identical weights at every step.
    ys = random_cached_equity_dense(k=1, n_obs=50, n_dim=3, as_frame=False)
    s1 = {}
    s2 = {}
    for y in ys:
        w1, s1 = mgr1(y=y, s=s1)
        w2, s2 = mgr2(y=y, s=s2)
        assert_array_almost_equal(w1,w2, err_msg='managers are not the same')
# Allow running the comparisons directly without pytest.
if __name__=='__main__':
    test_daily_equal()
    test_weekly_equal()
96f81fd3d9d30944b376af400b2a416db01f6a6d | 7,852 | py | Python | model/fcgn/grasp_proposal_target.py | ZhangHanbo/Visual-Manipulation-Relationship-Network-Pytorch | 9dd24947db318f6e404918d4758f1d824eea3748 | [
"MIT"
] | 26 | 2019-10-31T08:21:46.000Z | 2022-03-11T13:58:43.000Z | model/fcgn/grasp_proposal_target.py | xjtAlgo/Visual-Manipulation-Relationship-Network-Pytorch | da7fffcc6bed062fa1a5dc12b4279f3456825664 | [
"MIT"
] | 12 | 2019-11-07T09:12:50.000Z | 2022-03-12T02:58:18.000Z | model/fcgn/grasp_proposal_target.py | xjtAlgo/Visual-Manipulation-Relationship-Network-Pytorch | da7fffcc6bed062fa1a5dc12b4279f3456825664 | [
"MIT"
] | 11 | 2019-10-30T08:44:47.000Z | 2022-03-11T13:58:48.000Z | # --------------------------------------------------------
# Visual Detection: State-of-the-Art
# Copyright: Hanbo Zhang
# Licensed under The MIT License [see LICENSE for details]
# Written by Hanbo Zhang
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
import torch.nn.init as init
from model.utils.config import cfg
import numpy as np
import pdb
import time
from .bbox_transform_grasp import labels2points, points2labels, \
grasp_encode, grasp_decode,jaccard_overlap
class _GraspTargetLayer(nn.Module):
    """Assigns ground-truth grasp targets to anchor priors for FCGN training.

    Produces encoded regression targets, classification labels, and the
    inside/outside loss weights, with hard-negative mining applied.
    Grasp boxes are 5-vectors; by the slicing below, index 0/1 are x/y
    center and index 4 is the angle (indices 2/3 presumably w/h — TODO confirm).
    """
    def __init__(self, feat_stride, ratios, scales, angles):
        super(_GraspTargetLayer, self).__init__()
        self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.FCGN.BBOX_NORMALIZE_MEANS)
        self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.FCGN.BBOX_NORMALIZE_STDS)
        # Desired ratio of mined negatives to positives.
        self.negpos_ratio = cfg.TRAIN.FCGN.NEG_POS_RATIO
        self._feat_stride = feat_stride

    def forward(self, conf, gt, priors, xthresh = None, ythresh = None, angle_thresh = None):
        """Match priors to ground truth and build training targets.

        :param conf: bs x K x 2 classification scores
        :param gt: bs x N x 5 ground-truth grasps (zero rows are padding)
        :param priors: bs x K x 5 anchor grasps
        :return: (loc_t, conf_t, iw, ow) regression targets, labels, weights
        """
        self.BBOX_NORMALIZE_MEANS = self.BBOX_NORMALIZE_MEANS.type_as(gt)
        self.BBOX_NORMALIZE_STDS = self.BBOX_NORMALIZE_STDS.type_as(gt)
        self.batch_size = gt.size(0)
        # Default matching thresholds: half a feature-map cell in x/y.
        if xthresh is None:
            xthresh = self._feat_stride / 2
        if ythresh is None:
            ythresh = self._feat_stride / 2
        if angle_thresh is None:
            angle_thresh = cfg.TRAIN.FCGN.ANGLE_THRESH
        # Two matching strategies: angle/center thresholds, or Jaccard overlap.
        if cfg.TRAIN.FCGN.ANGLE_MATCH:
            loc_t, conf_t = self._match_gt_prior(priors, gt, xthresh, ythresh, angle_thresh)
        else:
            loc_t, conf_t = self._match_gt_prior_IoUbased(priors, gt)
        iw, ow = self._mine_hard_samples(conf_t, conf)
        if cfg.TRAIN.COMMON.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
            # Optionally normalize targets by a precomputed mean and stdev
            loc_t = ((loc_t - self.BBOX_NORMALIZE_MEANS.expand_as(loc_t))
                     / self.BBOX_NORMALIZE_STDS.expand_as(loc_t))
        #if ((conf_t == 0).sum()/(conf_t == 1).sum()).item() != 3:
        #    pdb.set_trace()
        return loc_t, conf_t, iw, ow

    def _match_gt_prior(self, priors, gt, xthresh, ythresh, angle_thresh):
        """Threshold-based matching on x/y center distance and angle difference.

        :param priors: bs x K x 5
        :param gt: bs x N x 5
        :return: (encoded targets bs x K x 5, labels bs x K)
        """
        num_priors = priors.size(1)
        # Broadcast gt centers/angles against every prior: shapes bs x 1 x N.
        x_gt = gt[:, :, 0:1].transpose(2,1)
        y_gt = gt[:, :, 1:2].transpose(2,1)
        ang_gt = gt[:, :, 4:5].transpose(2,1)
        # A gt row is valid only if it is not entirely zeros (zero rows pad N).
        mask_gt = (torch.sum(gt==0, 2, keepdim = True) != gt.size(2)).transpose(2,1)
        xdiff = torch.abs(priors[:, : ,0:1] - x_gt)
        ydiff = torch.abs(priors[:, :, 1:2] - y_gt)
        angdiff = torch.abs(priors[:, :, 4:5] - ang_gt)
        mask = torch.zeros_like(xdiff) + mask_gt.float()
        # bs x K x N boolean match matrix.
        match_mat = (xdiff <= xthresh) \
                    & (ydiff <= ythresh) \
                    & (angdiff <= angle_thresh) \
                    & (mask != 0)
        match_num = torch.sum(match_mat, 2, keepdim = True)
        # Label 1 for priors matched to at least one gt, else 0.
        label = torch.zeros(self.batch_size, num_priors).type_as(gt).long()
        label[(torch.sum(match_mat, 2) > 0)] = 1
        # bs x N x K -> K x bs x N -> K x bs x N x 1
        match_mat = match_mat.permute(2,0,1).unsqueeze(3)
        # bs x K x 5 -> K x bs x 5 -> K x bs x 1 x 5
        gt = gt.permute(1,0,2).unsqueeze(2)
        # K x bs x N x 5 -> bs x N x 5
        # When a prior matches multi gts, it will use
        # the mean of all matched gts as its target.
        loc = torch.sum(match_mat.float() * gt, dim = 0) + cfg.EPS
        # make all nans zeros
        keep = (match_num > 0).squeeze()
        loc[keep] /= match_num[keep].float()
        loc_encode = grasp_encode(loc, priors)
        return loc_encode, label

    def _match_gt_prior_IoUbased(self, priors, gt):
        """Center-distance prefilter followed by a Jaccard-overlap check.

        :param priors: bs x K x 5
        :param gt: bs x N x 5
        :return: (encoded targets bs x K x 5, labels bs x K)
        """
        num_priors = priors.size(1)
        x_gt = gt[:, :, 0:1].transpose(2,1)
        y_gt = gt[:, :, 1:2].transpose(2,1)
        #ang_gt = gt[:, :, 4:5].transpose(2, 1)
        mask_gt = (torch.sum(gt==0, 2, keepdim = True) != gt.size(2)).transpose(2,1)
        xdiff = torch.abs(priors[:, : ,0:1] - x_gt)
        ydiff = torch.abs(priors[:, :, 1:2] - y_gt)
        #angdiff = torch.abs(priors[:, :, 4:5] - ang_gt)
        mask = torch.zeros_like(xdiff) + mask_gt.float()
        match_mat = (xdiff <= self._feat_stride / 2) \
                    & (ydiff <= self._feat_stride / 2) \
                    & (mask != 0)
        # Drop candidate matches whose Jaccard overlap is below threshold.
        # NOTE(review): this loops over matches on CPU per pair — slow but exact.
        iou_ind = torch.nonzero(match_mat).data.cpu()
        for i in iou_ind:
            rec1 = np.array(priors[i[0].item(),i[1].item(),:])
            rec2 = np.array(gt[i[0].item(),i[2].item(),:])
            if jaccard_overlap(rec1,rec2) < cfg.TRAIN.FCGN.JACCARD_THRESH:
                match_mat[i[0].item(),i[1].item(),i[2].item()] = 0
        match_num = torch.sum(match_mat, 2, keepdim = True)
        label = torch.zeros(self.batch_size, num_priors).type_as(gt).long()
        label[(torch.sum(match_mat, 2) > 0)] = 1
        # bs x N x K -> K x bs x N -> K x bs x N x 1
        match_mat = match_mat.permute(2,0,1).unsqueeze(3)
        # bs x K x 5 -> K x bs x 5 -> K x bs x 1 x 5
        gt = gt.permute(1,0,2).unsqueeze(2)
        # K x bs x N x 5 -> bs x N x 5
        # When a prior matches multi gts, it will use
        # the mean of all matched gts as its target.
        loc = torch.sum(match_mat.float() * gt, dim = 0) + cfg.EPS
        # make all nans zeros
        keep = (match_num > 0).squeeze()
        loc[keep] /= match_num[keep].float()
        loc_encode = grasp_encode(loc, priors)
        return loc_encode, label

    def _mine_hard_samples(self, conf_t, conf):
        """Hard-negative mining: keep the highest-loss negatives at a fixed
        negative:positive ratio, mark the rest as ignored (-1), and build
        inside/outside loss weights.

        :param conf_t: bs x N labels (modified in place: ignored set to -1)
        :param conf: bs x N x 2 predicted scores
        :return: (iw, ow) inside/outside weights, each bs x N x 5
        """
        pos = (conf_t > 0)
        batch_conf = conf.data.view(-1, 2)
        # Softmax cross-entropy per prior, used to rank negatives by difficulty.
        loss_c = self._log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
        loss_c = loss_c.view(self.batch_size, -1)
        loss_c[pos] = -1 # filter out pos boxes for now
        _, loss_idx = loss_c.sort(1, descending=True)
        # To find element indexes that indicate elements which have highest confidence loss
        _, idx_rank = loss_idx.sort(1)
        num_pos = pos.long().sum(1, keepdim=True)
        num_neg = self.negpos_ratio * num_pos
        # Negatives: top-ranked by loss, capped at negpos_ratio * num_pos.
        neg = (idx_rank < num_neg.expand_as(idx_rank)) & (pos != 1)
        # Priors that are neither positive nor mined negative are ignored.
        conf_t[neg.eq(0) & pos.eq(0)] = -1
        iw = pos.gt(0).float() * cfg.TRAIN.FCGN.BBOX_INSIDE_WEIGHTS[0]
        iw = iw.unsqueeze(2).expand(conf.size(0), -1, 5)
        # `| 1` guards the division against a zero sample count.
        if cfg.TRAIN.FCGN.BBOX_POSITIVE_WEIGHTS < 0:
            ow = (pos + neg).gt(0).float() / ((num_pos + num_neg)|1).float()
            ow = ow.unsqueeze(2).expand(conf.size(0), -1, 5)
        else:
            ow = (pos.gt(0).float() * cfg.TRAIN.FCGN.BBOX_POSITIVE_WEIGHTS \
                  + neg.gt(0).float()) / ((num_pos + num_neg)|1).float()
            ow = ow.unsqueeze(2).expand(conf.size(0), -1, 5)
        # Debug traps: NaN weights, or a prior marked both positive and negative.
        if (ow != ow).sum().item() > 0:
            pdb.set_trace()
        if (neg.gt(0) & pos.gt(0)).sum().item() > 0:
            pdb.set_trace()
        return iw, ow

    def _log_sum_exp(self,x):
        """Numerically stable log-sum-exp over dim 1 (max-subtraction trick).

        Used to compute the unaveraged confidence loss across all examples
        in a batch.

        Args:
            x (Variable(tensor)): conf_preds from conf layers
        """
        x_max, _ = x.data.max(dim = 1, keepdim = True)
        return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max
96f856b12bdb1da37c38499de024996b5d7da12b | 411 | py | Python | python-advanced/webscrap/wlog.py | Rokon-Uz-Zaman/thinkdiff_python_django | 5010c5f1dd8a028fb9e5235319bb6bb434831e6c | [
"MIT"
] | 92 | 2018-04-03T20:53:07.000Z | 2022-03-04T05:53:10.000Z | python-language/python-advanced/webscrap/wlog.py | mostafijur-rahman299/thinkdiff | b0e0c01fe38c406f4dfa8cc80b2f0c5654017079 | [
"MIT"
] | 11 | 2018-10-01T15:35:33.000Z | 2021-09-01T04:59:56.000Z | python-language/python-advanced/webscrap/wlog.py | mostafijur-rahman299/thinkdiff | b0e0c01fe38c406f4dfa8cc80b2f0c5654017079 | [
"MIT"
] | 98 | 2018-03-13T08:03:54.000Z | 2022-03-22T08:11:44.000Z | # author: Mahmud Ahsan
# code: https://github.com/mahmudahsan/thinkdiff
# blog: http://thinkdiff.net
# http://pythonbangla.com
# MIT License
# --------------------------
# Reporting Logs in text file
# --------------------------
import logging
def set_custom_log_info(filename):
    '''Configure root logging to write INFO-and-above records to *filename*.'''
    logging.basicConfig(level=logging.INFO, filename=filename)
def report(e:Exception):
    '''Log the given exception (message plus current traceback) at ERROR level.'''
    message = str(e)
    logging.exception(message)
| 21.631579 | 62 | 0.63017 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.549878 |
96fa22ff6672a9418b26fa05a3ae9f12341d9ea4 | 864 | py | Python | geoevents/core/management/commands/r3dumpdata.py | mcenirm/geoevents | f78cc09b8bcc8c6e4eee0d1de14e5becad7d49e6 | [
"MIT"
] | 25 | 2015-01-06T15:37:31.000Z | 2020-12-10T19:05:22.000Z | geoevents/core/management/commands/r3dumpdata.py | mcenirm/geoevents | f78cc09b8bcc8c6e4eee0d1de14e5becad7d49e6 | [
"MIT"
] | 2 | 2015-01-31T02:36:58.000Z | 2015-02-01T00:11:15.000Z | geoevents/core/management/commands/r3dumpdata.py | mcenirm/geoevents | f78cc09b8bcc8c6e4eee0d1de14e5becad7d49e6 | [
"MIT"
] | 5 | 2016-01-01T15:04:49.000Z | 2019-05-30T23:34:30.000Z | # This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
class Command(BaseCommand):
    '''Management command wrapping ``dumpdata`` with project defaults:
    natural keys, indented output, and exclusions that cannot be reloaded.'''
    #args = ''
    help = 'Runs a standard dumpdata process'

    def handle(self, *args, **options):
        # Tables dumpdata cannot reload cleanly are always excluded.
        models_to_exclude = ['auth.permission', 'contenttypes']
        options['exclude'] = [] if not options.get('exclude') else options.get('exclude')
        # Bug fix: map() is lazy in Python 3, so the original
        # map(options['exclude'].append, models_to_exclude) never executed
        # and the exclusions were silently dropped; extend() is eager.
        options['exclude'].extend(models_to_exclude)
        options['natural'] = True
        options['indent'] = options.get('indent') or 4
        call_command('dumpdata', *args, **options)
        self.stdout.write('Successfully dumped the data.\n')
| 41.142857 | 104 | 0.699074 | 541 | 0.626157 | 0 | 0 | 0 | 0 | 0 | 0 | 383 | 0.443287 |
96fac71ba63e9e0125078ad8dc31d6b80c225f02 | 1,343 | py | Python | src/MostrarGrafica.py | VictorVaquero/sentimentAnalysis | 947d3621db14fa28e05f328e80f1c98f6aa685ae | [
"MIT"
] | null | null | null | src/MostrarGrafica.py | VictorVaquero/sentimentAnalysis | 947d3621db14fa28e05f328e80f1c98f6aa685ae | [
"MIT"
] | null | null | null | src/MostrarGrafica.py | VictorVaquero/sentimentAnalysis | 947d3621db14fa28e05f328e80f1c98f6aa685ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[28]:
import matplotlib.pyplot as plt
import numpy as np
import csv
# In[32]:
# Read GOOGL prediction rows from CSV and plot the High/Low/Close series.
listaHigh = []
listaLow = []
listaClose = []
contador = 0
lineas = len(open('GOOGLPrediccion.csv').readlines())
# The user enters how many trailing rows to plot; 0 means "plot everything".
c = input()
if(int(c)!=0):
    c = int(c)
else:
    c = lineas
cantidad = lineas - c
with open('GOOGLPrediccion.csv', newline='') as File:
    reader = csv.reader(File)
    for row in reader:
        # Skip the first `cantidad` rows; keep the trailing ones.
        if(contador>cantidad):
            listaHigh.append(float(row[1]))
            listaLow.append(float(row[2]))
            listaClose.append(float(row[3]))
        contador = contador + 1
plt.plot(listaHigh) # Draw the chart
plt.xlabel("Fila") # X-axis title
plt.ylabel("Precio") # Y-axis title
plt.ioff() # Turn interactive drawing mode off
# NOTE(review): the next several plot calls redraw the same series multiple
# times (the final labelled calls below draw them again); the repeats look
# redundant — confirm intent before removing.
plt.plot(listaLow)
plt.ion() # Turn interactive drawing mode on
plt.plot(listaLow)
plt.plot(listaClose)
plt.ion()
plt.plot(listaClose)
# Final labelled draw of all three series for the legend.
plt.plot(listaHigh, label = "High", color="b")
plt.plot(listaLow, label = "Low", color="g")
plt.plot(listaClose, label = "Close", color="r")
plt.legend()
| 23.155172 | 74 | 0.658972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 484 | 0.359584 |
96fb71abe1597b3445be3baa69638e7080246d4f | 207 | py | Python | src/Blog/settings/production.py | sadmanbd/wagtailblog | adeccb29826200cb1bedc658a0f1c57c2f705d0e | [
"MIT"
] | 1 | 2020-04-20T05:38:01.000Z | 2020-04-20T05:38:01.000Z | src/Blog/settings/production.py | sadmanbd/wagtailblog | adeccb29826200cb1bedc658a0f1c57c2f705d0e | [
"MIT"
] | 8 | 2020-02-11T21:41:52.000Z | 2022-01-13T00:33:02.000Z | src/Blog/settings/production.py | sadmanbd/wagtailblog | adeccb29826200cb1bedc658a0f1c57c2f705d0e | [
"MIT"
] | null | null | null | from __future__ import absolute_import, unicode_literals
import os
from .base import *
# Production overrides: debugging off, secret key pulled from the environment.
DEBUG = False

SECRET_KEY = os.environ.get("SECRET_KEY")

# Optional machine-local overrides; a missing local.py is not an error.
try:
    from .local import *
except ImportError:
    pass
| 13.8 | 56 | 0.743961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.057971 |
96fbefce1bae3ccaee2952d5e7652edcc5609ce7 | 65,374 | py | Python | lookml/lookml.py | pythonruss/pylookml | 879c4c6325138295b4ce59c8663aa62da42837dc | [
"MIT"
] | null | null | null | lookml/lookml.py | pythonruss/pylookml | 879c4c6325138295b4ce59c8663aa62da42837dc | [
"MIT"
] | null | null | null | lookml/lookml.py | pythonruss/pylookml | 879c4c6325138295b4ce59c8663aa62da42837dc | [
"MIT"
] | null | null | null | import re, os, shutil
import lookml.config as conf
import lkml
import time, copy
from string import Template
from lookml.modules.project import *
import lkml, github
def snakeCase(string):
    '''Convert a camelCase/PascalCase string to snake_case.'''
    # First split an upper-case letter followed by lower-case from what precedes it,
    # then split a lower-case letter or digit from a following upper-case letter.
    partially_split = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
    fully_split = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partially_split)
    return fully_split.lower()
def splice(*args):
    '''Concatenate all positional arguments into a single string.'''
    return ''.join(args)
def removeSpace(string):
    '''Strip whitespace and special characters ( / [|]<>,.?}{#+=~!$%^&*()-: ) from *string*.'''
    specials = re.compile('(\s|/|\[|\]|\||\,|<|>|\.|\?|\{|\}|#|=|~|!|\+|\$|\%|\^|\&|\*|\(|\)|\-|\:)+')
    return specials.sub('', string)
def tidy(string):
    '''
    cleans a string to remove multiple linebreaks and spaces (trims excess whitespace)

    Collapses any run of 10+ whitespace characters into a newline plus indent.

    :return: returns input string, with excess whitespace removed
    :rtype: str
    '''
    return re.sub(r'\s{10,}', r'\n ', string)
    # return string
def lookCase(string):
    '''Normalize a string into a LookML-safe identifier: snake_case with specials removed.'''
    snaked = snakeCase(string)
    return removeSpace(snaked)
def sortMe(func):
    '''Return the supplied fields as a new list, sorted alphabetically by identifier.'''
    fields = list(func)
    fields.sort(key=lambda field: field.identifier)
    return fields
def stringify(collection,delim=conf.NEWLINEINDENT, prefix=True, postfix=False):
    '''
    Join the string form of every item in *collection* with *delim*,
    optionally prepending and/or appending one more delimiter.
    '''
    body = delim.join(str(item) for item in collection)
    head = delim if prefix else ''
    tail = delim if postfix else ''
    return head + body + tail
def parseReferences(inputString):
    '''
    Yield the LookML references found in a string via regular expressions.
    Each result has the shape
    {'raw':'${exact.field_reference}','field':'exact.field_reference', 'fully_qualified_reference':True}
    '''
    pattern = re.compile(r'(\$\{([a-z\._0-9]*)\}|\{\%\s{1,3}condition\s([a-z\._0-9]*)\s\%\}|\{\%\s{1,3}parameter\s([a-z\._0-9]*)\s\%\}|\{\{\s{0,10}([a-z\._0-9]*)\s{0,10}\}\}| \_filters\[\s{0,10}\'([a-z\._0-9]*)\'\])')
    for groups in pattern.findall(inputString):
        # Exactly one capture group beyond the outer one is non-empty.
        captured = ''.join(groups[1:])
        # Fully-qualified references contain a dot (checked before stripping ._value).
        is_fully_qualified = '.' in captured
        # Liquid value references drop their ._value suffix.
        if captured.endswith('._value'):
            captured = captured.replace('._value','')
        yield {'raw': groups[0], 'field': captured, 'fully_qualified_reference': is_fully_qualified}
class File:
    '''
    A file object represents a file within a LookML project. It can be several types, can contain views, explores
    or other properties such as include or data groups.
    It can be instantiated with a View, an Explore, a filepath on disk, or content from the Github API.
    '''
    class view_collection:
        '''
        A container for views which allows us to use .operator syntax
        '''
        def __init__(self,viewlist):
            self.views = {}
            for view in viewlist:
                self.add(view)

        def __getattr__(self,key):
            return self.views[key]

        def __getitem__(self,key):
            return self.__getattr__(key)

        def add(self, v):
            '''Add a View (or a raw lkml dict, which is wrapped in View) to the collection.'''
            if isinstance(v,dict):
                v = View(v)
            self.views.update({v.name:v})
            return self

        def remove(self, v):
            '''Remove a view by View instance or by name.'''
            if not isinstance(v,str):
                v = v.name
            self.views.pop(v)
            return self

        def __iter__(self):
            self.iterPointer = iter(self.views.values())
            return self

        def __next__(self):
            # next() on the underlying iterator already raises StopIteration;
            # the previous blanket try/except only masked unrelated errors.
            return next(self.iterPointer)

    class explore_collection:
        '''
        A container for explores which allows us to use .operator syntax
        '''
        def __init__(self,explorelist):
            self.explores = {}
            for explore in explorelist:
                self.add(explore)

        def __getattr__(self,key):
            return self.explores[key]

        def __getitem__(self,key):
            return self.__getattr__(key)

        def add(self, e):
            '''Add an Explore (or a raw lkml dict, which is wrapped) to the collection.'''
            if isinstance(e,dict):
                e = Explore(e)
            self.explores.update({e.name:e})
            return self

        def remove(self, e):
            '''Remove an explore by Explore instance or by name.'''
            if not isinstance(e,str):
                e = e.name
            self.explores.pop(e)
            return self

        def __iter__(self):
            self.iterPointer = iter(self.explores.values())
            return self

        def __next__(self):
            # See view_collection.__next__: let StopIteration propagate naturally.
            return next(self.iterPointer)

    def __init__(self, f):
        def githubBootstrap():
            #custom initialization for github_api type
            #Set Basic Attributes
            self.name = f._rawData['name']
            self.sha = f._rawData['sha']
            self.base_name = self.name.replace(".model.lkml", "").replace(".explore.lkml", "").replace(".view.lkml", "")
            self.path = f._rawData['path']
            #Parse Step: Github content is returned base64 encoded
            data = base64.b64decode(f.content).decode('ascii')
            self.json_data = lkml.load(data)

        def filepathBootstrap():
            #custom initialization for path type
            #Set Basic Attributes
            self.name = os.path.basename(f)
            self.name_components = self.name.split('.')
            # base_name strips the trailing ".<type>.lkml" components if present.
            if len(self.name_components) <= 1:
                self.base_name = self.name
            elif len(self.name_components) == 2:
                self.base_name = self.name_components[0]
            else:
                self.base_name = '.'.join(self.name_components[:-2])
            self.path = os.path.relpath(f)
            self.sha = ''
            #Parse Step: file is provided
            with open(self.path, 'r') as tmp:
                self.json_data = lkml.load(tmp)

        def viewBootstrap():
            #custom initialization for View type
            #Set Basic Attributes
            self.name = f.name + '.view.lkml'
            self.base_name = f.name
            self.path = self.name
            self.sha = ''
            #load as json_data for compatibility with the rest of the class
            #TODO: revisit if this is needed to convert back and forth or if another more direct method would be preferable
            self.json_data = lkml.load(str(f))

        def exploreBootstrap():
            #custom initialization for Explore type
            #Set Basic Attributes
            self.name = f.name + '.model.lkml' # What about explore filetypes?
            self.base_name = f.name
            self.path = self.name
            self.sha = ''
            #load as json_data for compatibility with the rest of the class
            #TODO: revisit if this is needed to convert back and forth or if another more direct method would be preferable
            self.json_data = lkml.load(str(f))

        #Step 1 -- Data Type introspection
        if isinstance(f, github.ContentFile.ContentFile):
            self.f_type = "github_api"
            githubBootstrap()
        elif isinstance(f, View):
            self.f_type = "view"
            viewBootstrap()
        elif isinstance(f, Explore):
            self.f_type = "explore"
            exploreBootstrap()
        elif os.path.isfile(f):
            self.f_type = "path"
            filepathBootstrap()

        #Step 2 -- set a lookml "file type" mostly only used for path info
        if self.name.endswith('lkml'):
            self.filetype = self.name.split('.')[-2]
        else:
            raise Exception("Unsupported filename " + self.name)

        # Split views/explores out of the parsed dict into typed collections;
        # whatever remains becomes plain file-level properties.
        if 'views' in self.json_data.keys():
            self.vws = self.view_collection(self.json_data['views'])
            self.json_data.pop('views')
        else:
            self.vws = self.view_collection({})

        if 'explores' in self.json_data.keys():
            self.exps = self.explore_collection(self.json_data['explores'])
            self.json_data.pop('explores')
        else:
            self.exps = self.explore_collection({})

        self.properties = Properties(self.json_data)
        self.props = self.properties.props()

    def __getattr__(self, key):
        if key in self.__dict__.keys():
            return self.__dict__[key]
        elif key == 'views':
            return self.vws
        elif key == 'explores':
            return self.exps
        #TODO: resolve attribute access issues
        elif key in ['datagroups', 'map_layers', 'named_value_formats']:
            return self.properties[key]
        else:
            # Bug fix: object.__getattr__ does not exist, so the old fallback
            # raised a confusing AttributeError about `object` itself. Raise
            # the conventional AttributeError for the missing key instead.
            raise AttributeError(key)

    def __getitem__(self,key):
        if key == 'views':
            return self.vws
        elif key == 'explores':
            return self.exps

    def __str__(self):
        # File-level properties, then explores, then views.
        return splice(
             conf.NEWLINE.join([str(p) for p in self.properties.getProperties()])
            ,conf.NEWLINE
            ,conf.NEWLINE.join([ str(e) for e in self.explores] ) if self.exps else ''
            ,conf.NEWLINE
            ,conf.NEWLINE.join([ str(v) for v in self.views]) if self.vws else ''
        )

    def setSha(self,sha):
        '''Record the git blob sha for this file (used by the github backend).'''
        self.sha = sha
        return self

    def addView(self,v):
        self.vws.add(v)
        return self

    def addExplore(self,e):
        self.exps.add(e)
        return self

    def _bind_lkml(self, lkmldictraw):
        '''Merge an lkml-parsed dict into this file without mutating the input.'''
        lkmldict = copy.deepcopy(lkmldictraw)
        if 'views' in lkmldict.keys():
            for view in lkmldict['views']:
                self.vws.add(View(view))
            lkmldict.pop('views')
        if 'explores' in lkmldict.keys():
            for explore in lkmldict['explores']:
                self.exps.add(Explore(explore))
            lkmldict.pop('explores')
        for k,v in lkmldict.items():
            self.setProperty(k,v)

    def __add__(self, other):
        # Accept a View, an Explore, or a raw LookML string to merge in.
        if isinstance(other, View):
            self.addView(other)
        elif isinstance(other, Explore):
            self.addExplore(other)
        else:
            self._bind_lkml(lkml.load(other))

    def getProperty(self, prop):
        ''' Get a property from the properties collection '''
        return self.properties[prop]

    def setProperty(self, name, value):
        ''' Set a property in the properties collection '''
        self.properties.addProperty(name, value)
        return self

    def setFolder(self,folder):
        '''Re-root the file's path inside *folder* (trailing slash optional).'''
        self.path = folder + self.name if folder.endswith('/') else folder + '/' + self.name
        return self

    def write(self,overWriteExisting=True):
        ''' Serialize the file to self.path. When overWriteExisting is False,
        an already existing file is left untouched. '''
        print("Writing to: %s" % (self.path) )
        if overWriteExisting:
            with open(self.path, 'w') as opened_file:
                # Bug fix: a bare `except: pass` here used to swallow any
                # serialization error and silently leave a truncated file;
                # failures now propagate to the caller.
                opened_file.write(self.__str__())
        else:
            try:
                fh = open(self.path, 'r')
                fh.close()
            except FileNotFoundError:
                with open(self.path, 'w') as opened_file:
                    opened_file.write(self.__str__())
class base(object):
    '''Common base for LookML objects: holds a name (identifier), a Properties
    collection, and renders itself through the template named by self.token.'''
    class _model:
        pass
    # Put it under a namespace in __dict__?
    # Define types of collections for special types. Fields for example should be unique (but lkml itself passes these split out -- how to define uniqueness across 3-4 dictionaries etc)
    class _view:
        pass
    # Bind model to __str__ (should be kept relatively simple)
    class _cont:
        ''' '''
        pass
    #
    #CU (much more at once?
    def __add__(self, other):
        # Merge a raw LookML string into this object's properties.
        self._bind_lkml(lkml.load(other))
    # def __sub__(self, other): #←- subtract a key from the model?
    #     pass
    # #R
    # def __getattr__(self, attr): #← model / property getting
    #     pass
    # # C,U
    # def __setattr__(self, attr, val):
    #     pass
    def __init__(self,input):
        self.identifier = ''
        self.properties = Properties({})
        self.message = ''
        self.token = ''
        self.indentLevel = 1
        # A string names the object; a dict is full lkml-parsed content.
        if isinstance(input,str):
            self.setName(input)
        elif isinstance(input,dict):
            self._bind_lkml(input)
        self.templateMap = {}
    def _bind_lkml(self, lkmldict):
        # self.setName(lkmldict.pop('name'))
        # 'name' becomes the identifier; everything else becomes a property.
        if 'name' in lkmldict.keys():
            self.setName(lkmldict.pop('name'))
        for k,v in lkmldict.items():
            self.setProperty(k,v)
    def setName(self, name):
        '''
        Set the object's identifier.

        :param name: the new identifier
        :type name: str
        :return: self, for chaining
        '''
        self.identifier = name
        return self
    def setLabel(self, label):
        '''Set the label property. Returns self for chaining.'''
        return self.setProperty('label', label)
    def hide(self):
        '''Add hidden: yes. Returns self for chaining.'''
        self.properties.addProperty('hidden', 'yes')
        return self
    def unHide(self):
        '''Remove the hidden property entirely. Returns self for chaining.'''
        self.properties.delProperty('hidden')
        return self
    def setMessage(self,message):
        # A message is rendered as a leading "#" comment line (see getMessage).
        self.message = message
        return self
    def getMessage(self):
        if self.message:
            return splice('#',self.message,conf.NEWLINE)
        else:
            return ''
    def getProperty(self, prop):
        ''' Get a property from the properties collection '''
        return self.properties[prop]
    def setProperty(self, name, value):
        ''' Set a property in the properties collection '''
        self.properties.addProperty(name, value)
        return self
    def unSetProperty(self, name):
        '''Delete a property by name. Returns self for chaining.'''
        self.properties.__del__(name)
        return self
    def getProperties(self):
        return self.properties.getProperties()
    def hasProp(self, property):
        return property in self.properties.props()
    def props(self):
        return self.properties.props()
    def rawProp(self,key):
        '''
        Return the raw property value. If dict type schema, needs a prop
        name. If list type schema needs a number index.
        '''
        return self.properties.rawPropValue(key)
    def __repr__(self):
        return "%s name: %s id: %s" % (self.__class__, self.identifier, hex(id(self)))
    def __len__(self):
        return len([f for f in self.getProperties()])
    def __iter__(self):
        # Iteration yields the object's properties.
        self.valueiterator = iter(self.getProperties())
        return self
    def __next__(self):
        try:
            return next(self.valueiterator)
        except:
            raise StopIteration
    def __str__(self):
        # Render via the template selected by self.token (conf.TEMPLATES).
        self.templateMap = {
            'message': self.getMessage()
            ,'identifier': self.identifier
            # ,'props': stringify([ conf.INDENT + str(p) for p in self.getProperties() if len(self) == 2])
            ,'props': stringify([ conf.INDENT + str(p) for p in self.getProperties()], prefix=(len(self) > 2))
            ,'token': self.token
        }
        return tidy(Template(getattr(conf.TEMPLATES,self.token)).substitute(**self.templateMap))
class View(base):
    '''
    Represents a LookML ``view:`` block — a named container of fields
    (dimensions, dimension groups, measures, filters, parameters) plus
    view-level properties and optional extending child views.

    Operator sugar:
        ``view + field``    add a Field object
        ``view + "short"``  strings under 10 chars become a new dimension
        ``view + "lkml"``   longer strings are parsed as LookML and bound
        ``view - field``    remove a field (or detach a child View)
        ``~view``           hide every non-measure field
        ``view.some_name``  attribute access falls through to fields/properties
    '''
    def __init__(self, input):
        self._fields = {}
        self.primaryKey = ''
        self.message = ''
        self.children = {}
        self.parent = None
        super(View, self).__init__(input)
        self.token = 'view'
    def __str__(self):
        '''Render the complete view block; fields are grouped and sorted by type.'''
        self.templateMap = {
            'message':self.getMessage()
            ,'token':self.token
            ,'identifier':self.identifier
            ,'props': stringify([str(p) for p in self.getProperties() if p.name != "sets"])
            ,'parameters':stringify(sortMe(self.parameters()))
            ,'filters': stringify(sortMe(self.filters()))
            ,'dimensions': stringify(sortMe(self.dims()))
            ,'dimensionGroups': stringify(sortMe(self.dimensionGroups()))
            ,'measures': stringify(sortMe(self.measures()))
            ,'sets': stringify([str(p) for p in self.getProperties() if p.name == "sets"])
            ,'children': stringify(self.children.values()) if self.children else ''
        }
        return tidy(Template(getattr(conf.TEMPLATES,self.token)).substitute(**self.templateMap))
    def _bind_lkml(self,jsonDict):
        '''Bind a parsed-lkml dict: peel off each typed field list, wrap each
        entry in its Field subclass, then hand the remainder to the base binder.'''
        t = 'measures'
        if t in jsonDict.keys():
            for field in jsonDict[t]:
                self + Measure(field)
            jsonDict.pop(t)
        else:
            pass
        t = 'dimensions'
        if t in jsonDict.keys():
            for field in jsonDict[t]:
                self + Dimension(field)
            jsonDict.pop(t)
        else:
            pass
        t = 'filters'
        if t in jsonDict.keys():
            for field in jsonDict[t]:
                self + Filter(field)
            jsonDict.pop(t)
        else:
            pass
        t = 'dimension_groups'
        if t in jsonDict.keys():
            for field in jsonDict[t]:
                self + DimensionGroup(field)
            jsonDict.pop(t)
        else:
            pass
        t = 'parameters'
        if t in jsonDict.keys():
            for field in jsonDict[t]:
                self + Parameter(field)
            jsonDict.pop(t)
        else:
            pass
        super()._bind_lkml(jsonDict)
    def getFieldsSorted(self):
        '''Return all fields sorted: dimensions/filters alphabetically first,
        then measures alphabetically (sort key prefixes ``isinstance(Measure)``).'''
        return sorted(self._fields.values(), key=lambda field: ''.join([str(isinstance(field, Measure)), field.identifier]))
    def __repr__(self):
        return "%s (%r) fields: %s id: %s" % (self.__class__, self.identifier, len(self), hex(id(self)))
    def __len__(self):
        # len(view) counts fields, not properties (overrides base.__len__)
        return len([f for f in self.fields()])
    def __add__(self,other):
        '''view + Field adds the field; view + str adds a dimension (short
        strings) or binds parsed LookML (longer strings). Returns self/result
        of addField so expressions chain.'''
        if isinstance(other, Field):
            return self.addField(other)
        elif isinstance(other, str):
            #TODO: decide if still want to support view + 'id' behavior, and if so check regex first. Maybe a regex string to just ask: is snake str -> dim
            if len(other) < 10:
                return self.addDimension(dbColumn=other)
            else:
                self._bind_lkml(lkml.load(other))
                # Bug fix: this branch previously fell through and returned
                # None, breaking chained ``view + lkml_string`` expressions.
                return self
        else:
            raise Exception(str(type(other)) + ' cannot be added to View')
    def __radd__(self,other):
        return self.__add__(other)
    def __sub__(self,other):
        '''view - field removes a field; view - childView detaches the child.'''
        if isinstance(other, Field):
            return self.removeField(other)
        elif isinstance(other, str):
            return self.removeField(other)
        elif isinstance(other,View):
            return self.children.pop(other.identifier,None)
        else:
            raise Exception(str(type(other)) + ' cannot be subtracted from View')
    def __rsub__(self,other):
        return self.__sub__(other)
    def __invert__(self):
        '''Hide every non-measure field: dimensions, dimension groups,
        parameters and filters. Measures stay visible. Returns self.'''
        for dim in self.dims():
            dim.hide()
        for dim in self.dimensionGroups():
            dim.hide()
        for dim in self.parameters():
            dim.hide()
        for dim in self.filters():
            dim.hide()
        return self
    def __contains__(self,item):
        return item in self._fields.keys()
    def __getitem__(self,identifier):
        return self.field(identifier)
    def __getattr__(self, key):
        # Attribute fall-through order: instance dict -> property -> special
        # names -> field lookup. NOTE(review): a miss ends in self.field(key),
        # which raises KeyError rather than AttributeError; confirm no caller
        # (e.g. copy/hasattr) depends on AttributeError before changing.
        if key in self.__dict__.keys():
            return self.__dict__[key]
        elif key in self.properties.props():
            return self.getProperty(key)
        elif key == 'name':
            return self.identifier
        elif key == 'pk':
            return self.getPrimaryKey()
        elif key == '__ref__':
            return splice('${',self.identifier,'}')
        else:
            return self.field(key)
    def __setattr__(self, name, value):
        # 'label', 'name', 'pk' and known view properties route through their
        # setters; everything else is a plain attribute assignment.
        if name == 'label':
            self.setLabel(value)
            return self
        elif name == 'name':
            self.setName(value)
            return self
        elif name == 'pk':
            self.setPrimaryKey(value)
            return self
        elif name in conf.language_rules.view_props:
            self.setProperty(name, value)
        else:
            object.__setattr__(self, name, value)
    def setExtensionRequired(self):
        '''Mark this view ``extension: required``; returns self.'''
        self.properties.addProperty('extension','required')
        return self
    def getFieldsByTag(self,tag):
        '''Yield every field whose ``tags`` property contains *tag*.'''
        for field in self.fields():
            if tag in field.tags:
                yield field
    def fields(self):
        '''Yield every field object attached to this view.'''
        for field, literal in self._fields.items():
            yield literal
    def fieldNames(self):
        '''Return the list of field identifiers.'''
        return list(self._fields.keys())
    def getFieldsByType(self, t):
        '''Return an iterator of fields whose rendered type equals ``type: <t>``.'''
        return filter(lambda field: str(field.type) == 'type: '+ t, list(self._fields.values()))
    def sumAllNumDimensions(self):
        '''Add a ``total_<name>`` sum measure for every number-typed dimension
        that does not already have one. Returns None.'''
        for field in self.getFieldsByType('number'):
            tmpFieldName = 'total_' + field.name
            if tmpFieldName not in self.fieldNames() and isinstance(field,Dimension):
                self + Measure({
                    'name': tmpFieldName
                    ,'type':'sum'
                    ,'sql':field.__refs__
                })
    def field(self, f):
        '''
        get a field (most commonly, will pass in a field name)
        :param f: Field to return
        :type f: str or Field (or Dimension, Measure...) object
        :return: Returns a subtype of Field
        :rtype: Dimension, Measure, Filter or Parameter
        '''
        if isinstance(f,str):
            # Propagate the natural KeyError (it carries the missing key);
            # the old try/except re-raised an *empty* KeyError.
            return self._fields[f]
        elif isinstance(f,Field):
            return self._fields[f.identifier]
        # NOTE(review): any other argument type silently returns None
    def search(self, prop, pattern):
        '''
        pass a regex expression and will return the fields whose sql match
        :param prop: name of property you'd like to search
        :param pattern: the regex pattern (or list of patterns, OR-ed together)
        :type prop: str
        :type pattern: a regex search string or list of them
        :return: a generator of fields whose property matches the pattern
        :rtype: Field
        '''
        if isinstance(pattern,list):
            pattern = '('+'|'.join(pattern)+')'
        searchString = r''.join([r'.*',pattern,r'.*'])
        for field in self.fields():
            if re.match(searchString,str(field.getProperty(prop))):
                yield field
    def addField(self, field):
        '''
        add a field to the view
        * if the field is a dimension and primary key it will be set as the view primary key
        * the field will have its view set so the view may be referenced from the field object
        :param field: Field
        :type field: Field (or subtype)
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
        # setView returns the field itself so it can be stored and back-reference us
        self._fields.update({field.identifier: field.setView(self)})
        # If a primary key is added it will overwrite the existing primary key
        if isinstance(field, Dimension):
            if field.isPrimaryKey():
                self.setPrimaryKey(field.identifier)
        return self
    def removeField(self,field):
        '''
        Removes a field from the View
        * also unsets primary key if the removed dimension held it
        :param field: field to remove
        :type field: Field object or str name of field
        :return: returns the removed field
        :rtype: Field or None
        '''
        def pk(k):
            # de-reference the view primary key if this dimension holds it
            if k.isPrimaryKey():
                self.unSetPrimaryKey()
        if isinstance(field,Field):
            if isinstance(field,Dimension):
                pk(field)
            pk(self.field(field.identifier))
            return self._fields.pop(field.identifier, None)
        elif isinstance(field,str):
            dimToDel = self.field(field)
            if isinstance(dimToDel,Dimension):
                pk(dimToDel)
            return self._fields.pop(field, None)
        else:
            raise Exception('Not a string or Field instance provided')
    def addFields(self, fields):
        '''
        Add multiple fields to a view; each element is passed to addField.
        :param fields: set or list of fields [field1, field2 ...]
        :type fields: iterable of Field
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
        for field in fields:
            self.addField(field)
        return self
    def setPrimaryKey(self, f, callFromChild=False):
        '''
        Set the view's primary key. Accepts a Dimension object or a field
        name; non-dimensions are ignored. ``callFromChild`` avoids the mutual
        recursion when the Dimension itself initiated the change.
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
        self.unSetPrimaryKey()
        if isinstance(f, Dimension):
            if not callFromChild:
                f.setPrimaryKey()
            self.primaryKey = f.identifier
        else:
            tmpField = self.field(f)
            if isinstance(tmpField, Dimension):
                self.primaryKey = tmpField.identifier
                if not callFromChild:
                    tmpField.setPrimaryKey()
        return self
    def getPrimaryKey(self):
        '''Return the primary key Dimension, or None when unset.'''
        if self.primaryKey:
            return self.field(self.primaryKey)
    def unSetPrimaryKey(self):
        '''Clear the view primary key (and the flag on the dimension); returns self.'''
        pk = self.getPrimaryKey()
        if isinstance(pk, Dimension):
            pk.unSetPrimaryKey()
        self.primaryKey = ''
        return self
    def dims(self):
        '''Return an iterable of Dimension fields (excludes DimensionGroups
        only if they are not Dimension subclasses).'''
        return filter(lambda dim: isinstance(dim, Dimension), self._fields.values())
    def dimensionGroups(self):
        '''Return an iterable of DimensionGroup fields.'''
        return filter(lambda dim: isinstance(dim, DimensionGroup), self._fields.values())
    def measures(self):
        '''returns iterable of Measure Fields'''
        return filter(lambda meas: isinstance(meas, Measure), self._fields.values())
    def filters(self):
        '''returns iterable of Filter Fields'''
        return filter(lambda fil: isinstance(fil, Filter), self._fields.values())
    def parameters(self):
        '''returns iterable of Parameter Fields'''
        return filter(lambda par: isinstance(par, Parameter), self._fields.values())
    def addDimension(self,dbColumn, type='string'):
        '''Add a new dimension named after *dbColumn* with the given type.
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
        dim = Dimension(dbColumn)
        dim.setType(type)
        self.addField(dim)
        return self
    def sum(self,f):
        ''' A Synonym for addSum
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
        return self.addSum(f)
    # def count(self):
    #     ''' A Synonym for addCount
    #     :return: return self (allows call chaining i.e. obj.method().method() )
    #     :rtype: self
    #     '''
    #     return self.addCout()
    def countDistinct(self,f):
        ''' A Synonym for addCountDistinct
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
        return self.addCountDistinct(f)
    def addCount(self):
        '''Add a count measure to the view, returns self
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
        measure = Measure( 'count' )
        measure.setType('count')
        self.addField(measure)
        return self
    def addCountDistinct(self, f):
        '''Add a count distinct to the view based on a field object or field name/identifier. returns self
        :return: return self (allows call chaining i.e. obj.method().method() )
        :rtype: self
        '''
        if isinstance(f, Field):
            field = f
        else:
            field = self.field(f)
        measure = Measure( 'count_distinct_' + field.identifier)
        measure.sql = field.__refs__
        measure.setType('count_distinct')
        self.addField(measure)
        return self
    def addSum(self, f):
        '''Add a sum to the view based on a field object or field name/identifier. returns self'''
        if isinstance(f, Field):
            field = f
        else:
            field = self.field(f)
        measure = Measure('total_' + field.identifier)
        measure.setType('sum')
        self.addField(measure)
        return self
    def addAverage(self, f):
        '''Add an average to the view based on a field object or field name/identifier. returns self'''
        if isinstance(f, Field):
            field = f
        else:
            field = self.field(f)
        measure = Measure(
            identifier=''.join(['average_', field.identifier]), schema={'sql': field.__refs__}
        )
        measure.setType('average')
        self.addField(measure)
        return self
    def addComparisonPeriod(self,field_to_measure,date, measure_type='count_distinct'):
        '''Add reporting/comparison period filters plus the paired measures,
        wiring the liquid date-range conditions against *date*'s raw timeframe.
        Returns self.'''
        self.addFields(
            [
                Filter('reporting_period').setName('reporting_period').setProperty('type','date')
                ,Filter('comparison_period').setName('comparison_period').setProperty('type','date')
                ,Measure('reporting_period_measure').setName('reporting_period_measure')
                ,Measure('comparison_period_measure').setName('comparison_period_measure')
            ]
        )
        assert isinstance(field_to_measure,Dimension)
        self.reporting_period_measure.setType(measure_type)
        self.comparison_period_measure.setType(measure_type)
        self.comparison_period.setProperty('sql',
            '''
            {0}>= {{% date_start comparison_period %}}
            AND {0} <= {{% date_end reporting_period %}}
            '''.format('${'+date.identifier+'_raw}')
        )
        self.reporting_period_measure.setProperty(
            'sql'
            ,'''CASE
            WHEN {{% condition reporting_period %}} {0} {{% endcondition %}} THEN {1}
            ELSE NULL
            END
            '''.format('${'+date.identifier+'_raw}',field_to_measure.__refs__)
        )
        self.comparison_period_measure.setProperty('sql',
            '''
            CASE
            WHEN {{% condition comparison_period %}} {0} {{% endcondition %}} THEN {1}
            ELSE NULL
            END
            '''.format('${'+date.identifier+'_raw}',field_to_measure.__refs__)
        )
        return self
    def extend(self, name='', sameFile=True, required=False, *args):
        ''' Creates an extended view, optionally within the same view file
        name (string) -> name of the extended / child view. Will default to the parent + _extended
        sameFile (boolean) -> default true, if true will result in the child being printed within the parent's string call / file print
        required (boolean) -> default false, if true will result in the parent being set to extension required
        returns the child view object
        '''
        if not name:
            # NOTE(review): args[0] is only consulted when len(args) > 1; a
            # single positional extra is ignored — looks like >= 1 was meant.
            # Left as-is to preserve existing behaviour; confirm with callers.
            if len(args) > 1:
                if isinstance(args[0],str):
                    child = View(args[0])
                else:
                    child = View('_'.join([self.identifier,'extended']))
            else:
                child = View('_'.join([self.identifier,'extended']))
        else:
            child = View(name)
        if required:
            self.setExtensionRequired()
        child.properties.addProperty('extends',self.identifier)
        child.parent = self
        if sameFile:
            self.children.update({child.identifier: child})
        return child
class Join(base):
    '''A LookML ``join:`` block as attached to an explore.'''
    def __init__(self, input):
        '''Seed join bookkeeping, then defer to the base parser/binder.'''
        self.properties = Properties({})
        self.identifier = ''
        self._from = ''
        self.to = ''
        super(Join, self).__init__(input)
        self.token = 'join'
    def setFrom(self, f):
        '''Record the ``from`` target of this join; returns self.'''
        self._from = f
        return self
    def setTo(self, t):
        '''Attach the destination View object (ignored unless a View); returns self.'''
        if isinstance(t, View):
            self.to = t
        return self
    def on(self, left, operand, right):
        '''Compose ``sql_on`` from two field references and an operator; returns self.'''
        self.setOn(splice(left.__ref__, operand, right.__ref__))
        return self
    def setOn(self, sql_on):
        '''Set the raw ``sql_on`` clause; returns self.'''
        self.properties.addProperty('sql_on', sql_on)
        return self
    def setSql(self, sql):
        '''Set the ``sql`` property; returns self.'''
        self.setProperty('sql', sql)
        return self
    def setType(self, joinType):
        '''Set the join type; asserts membership in conf.JOIN_TYPES; returns self.'''
        assert joinType in conf.JOIN_TYPES
        self.properties.addProperty('type', joinType)
        return self
    def setRelationship(self, rel):
        '''Set the relationship; asserts membership in conf.RELATIONSHIPS; returns self.'''
        assert rel in conf.RELATIONSHIPS
        self.properties.addProperty('relationship', rel)
        return self
    def hide(self):
        '''Hide the joined view by blanking its ``view_label``; returns self.'''
        self.properties.addProperty('view_label', '')
        return self
    def unHide(self):
        '''Restore visibility by dropping the blank ``view_label``; returns self.'''
        self.properties.delProperty('view_label')
        return self
class Explore(base):
    '''Represents a LookML ``explore:`` block and its collection of joins.

    Supports ``explore + Join`` / ``explore + View`` / ``explore + "lkml"``
    and attribute/index access to joins by name.
    '''
    def __init__(self, input):
        self.joins = {}
        self.base_view = ''
        super(Explore, self).__init__(input)
        self.token = 'explore'
    def _bind_lkml(self,jsonDict):
        '''Consume a parsed-lkml dict: name first, then joins, then the
        remaining keys as plain properties.'''
        if 'name' in jsonDict.keys():
            self.setName(jsonDict.pop('name'))
        if 'joins' in jsonDict.keys():
            for join in jsonDict['joins']:
                self + Join(join)
            jsonDict.pop('joins')
        for k,v in jsonDict.items():
            self.setProperty(k,v)
    def __len__(self):
        # len(explore) counts joins
        return len(self.joins)
    def __str__(self):
        '''Render the explore (properties then joins) via its template.'''
        self.templateMap = {
            'message': self.getMessage()
            ,'identifier':self.identifier
            ,'props': stringify([str(p) for p in self.getProperties()])
            ,'joins': stringify([str(j) for j in self.getJoins()])
            ,'token': self.token
        }
        return Template(getattr(conf.TEMPLATES,self.token)).substitute(**self.templateMap)
    def __add__(self,other):
        if isinstance(other,View) or isinstance(other,Join):
            self.addJoin(other)
        elif isinstance(other, str):
            self._bind_lkml(lkml.load(other))
        else:
            raise TypeError
        return self
    def __radd__(self,other):
        return self.__add__(other)
    def __getattr__(self, key):
        # fall-through: base view name -> 'name' -> join name -> getJoin
        if self.base_view and key == self.base_view.name:
            return self.base_view
        elif key == 'name':
            return self.identifier
        elif key in self.joins.keys():
            return self.joins[key]
        else:
            return self.__getitem__(key)
    def __setattr__(self, name, value):
        if name in self.__dict__.keys():
            self.__dict__[name] = value
        else:
            object.__setattr__(self, name, value)
    def __getitem__(self,identifier):
        return self.getJoin(identifier)
    def createNDT(self,explore_source='', name='',fields=[]):
        pass
        # TODO: re-impliment
        # if name:
        #     tmpView = View(name)
        # else:
        #     tmpView = View(self.identifier + 'ndt')
        # tmpndt = ndt(explore_source)
        # for field in fields:
        #     tmpndt.addColumn(field.__refrs__,field.__refr__)
        #     tmpView + field.__refrs__
        # tmpView.derived_table = tmpndt
        # tmpView.tableSource = False
        # return tmpView
    def setViewName(self,view):
        '''Set the ``view_name`` property; returns self (for chaining).'''
        self.properties.addProperty('view_name',view)
        return self
    def addJoin(self, join):
        '''Register a Join, or wrap a View in a new Join; returns the Join.'''
        if isinstance(join,Join):
            self.joins.update({join.identifier : join})
            return join
        elif isinstance(join,View):
            # Bug fix: this previously constructed ``Join(View)`` — passing
            # the *class object* rather than anything describing the view
            # being joined. Seed the join from the view's name instead.
            tmpjoin = Join(join.name)
            tmpjoin.setName(join.name)
            tmpjoin.setTo(join)
            self.joins.update({tmpjoin.identifier : tmpjoin})
            return tmpjoin
    def join(self,join):
        '''Synonym for addJoin.'''
        return self.addJoin(join)
    def getJoins(self):
        '''Yield every Join attached to this explore.'''
        for field, literal in self.joins.items():
            yield literal
    def getJoin(self, key):
        '''Return the named Join, or an empty dict when absent.'''
        return self.joins.get(key, {})
class Property(object):
    ''' A basic property / key value pair.
    If the value is a dict it will recursively instantiate Properties within itself.
    Plural/multi-value LookML keys (links, filters, tags, ...) are wrapped in a
    Properties collection with special rendering behaviour. '''
    def __init__(self, name, value):
        self.name = name
        # num: cursor used by the (partially implemented) iterator protocol below
        self.num = 0
        if isinstance(value, str):
            self.value = value
        # lkml.keys.PLURAL_KEYS
        # ('view', 'measure', 'dimension', 'dimension_group', 'filter', 'access_filter',
        # 'bind_filter', 'map_layer', 'parameter', 'set', 'column', 'derived_column', 'include',
        # 'explore', 'link', 'when', 'allowed_value', 'named_value_format', 'join', 'datagroup', 'access_grant',
        # 'sql_step', 'action', 'param', 'form_param', 'option', 'user_attribute_param', 'assert', 'test')
        elif name in ('links','filters','tags','suggestions',
        'actions', 'sets', 'options', 'form_params', 'access_grants','params',
        'allowed_values', 'named_value_formats', 'datagroups', 'map_layers', 'columns',
        'derived_columns', 'explore_source', 'includes', 'access_filters'):
        # elif name+'s' in lkml.keys.PLURAL_KEYS:
            # multi-value keys get a Properties wrapper that knows how to render them
            self.value = Properties(value, multiValueSpecialHandling=name)
        elif isinstance(value, dict) or isinstance(value, list):
            self.value = Properties(value)
        else:
            raise Exception('not a dict, list or string')
    def __len__(self):
        return len(self.value)
    def __add__(self, other):
        # NOTE(review): the last three branches reference self.schema and
        # self.properties, neither of which exists on Property (the
        # constructor only ever sets str or Properties into self.value), so
        # they would raise AttributeError if reached — confirm intent.
        if isinstance(self.value, str):
            raise Exception('`+ and - ` not supported for a single value property, try assigning via the `=` operator')
        elif isinstance(self.value, Properties):
            self.value.addProperty(self.name,other)
        elif isinstance(self.value, list):# and self.multiValueSpecialHandling in ('tags','suggestions'):
            self.schema.append(other)
        elif self.properties.multiValueSpecialHandling == 'filters':
            pass
        elif self.properties.multiValueSpecialHandling == 'links':
            pass
        else:
            pass
    # def __getattr__(self,key):
    #     if isinstance(self.value, Properties):
    #         return self.value[key]
    # def __setattr__(self,key, value):
    #     if isinstance(self.value, Properties):
    #         return self.value[key]
    def __sub__(self,other):
        # if isinstance(self.value, Properties) and self.value.multiValueSpecialHandling in ('tags','suggestions'):
        if isinstance(self.value, Properties):
            self.value.schema.remove(other)
        else:
            pass
    def __iter__(self):
        self.num = 0
        return self
    def __next__(self):
        # NOTE(review): self.num never advances, so the while condition is
        # effectively always true and this simply delegates to
        # next(self.value) — verify before relying on this iterator.
        num = self.num
        while num <= len(self.value):
            return next(self.value)
    def __str__(self):
        '''Render this property as LookML, dispatching on the property name to
        one of several formatting styles (quoted, expression block ;;,
        bracketed list, braces, plain key/value, or bare value).'''
        #TODO: multiinstance / plural
        #TODO: multivalue / list
        #TODO: brackets
        #TODO: braces
        #TODO: quoted
        #TODO: plain
        #TODO: SQL / HTML Block ;;
        #TODO
        def quote_pair():
            # name: "value"
            return splice(self.name, ': "', str(self.value), '"')
        def expression_block():
            # name: value ;;   (sql/html style)
            return splice(self.name, ': ', str(self.value), ' ;;')
        def brackets():
            # name: [value]
            return splice(self.name, ': [', str(self.value), ']')
        def svbrackets():
            # single-value brackets; NOTE(review): ''.join concatenates list
            # members with no separator — looks like it assumes one element.
            return splice(self.name, ': [', ''.join(self.value.schema), ']')
        def braces():
            return splice(self.name, ': {', str(self.value), '}')
        def default():
            # name: value
            return splice(self.name , ': ' , str(self.value))
        def list_member_training_comma():
            return splice(str(self.value),',')
        def simple():
            return str(self.value)
        # lkml.keys.PLURAL_KEYS
        # ('view', 'measure', 'dimension', 'dimension_group', 'filter', 'access_filter',
        # 'bind_filter', 'map_layer', 'parameter', 'set', 'column', 'derived_column', 'include',
        # 'explore', 'link', 'when', 'allowed_value', 'named_value_format', 'join', 'datagroup', 'access_grant',
        # 'sql_step', 'action', 'param', 'form_param', 'option', 'user_attribute_param', 'assert', 'test')
        # lkml.keys.KEYS_WITH_NAME_FIELDS
        # ('user_attribute_param', 'param', 'form_param', 'option')
        # lkml.keys.QUOTED_LITERAL_KEYS
        # ('label', 'view_label', 'group_label', 'group_item_label', 'suggest_persist_for',
        # 'default_value', 'direction', 'value_format', 'name', 'url', 'icon_url', 'form_url', 'default', '
        # tags', 'value', 'description', 'sortkeys', 'indexes', 'partition_keys', 'connection', 'include',
        # 'max_cache_age', 'allowed_values', 'timezone', 'persist_for', 'cluster_keys', 'distribution', 'extents_json_url',
        # 'feature_key', 'file', 'property_key', 'property_label_key', 'else')
        # lkml.keys.EXPR_BLOCK_KEYS
        # ('expression_custom_filter', 'expression', 'html', 'sql_trigger_value', 'sql_table_name', 'sql_distinct_key',
        # 'sql_start', 'sql_always_having', 'sql_always_where', 'sql_trigger', 'sql_foreign_key', 'sql_where', 'sql_end',
        # 'sql_create', 'sql_latitude', 'sql_longitude', 'sql_step', 'sql_on', 'sql')
        # replace with expression block
        # if self.name.startswith('sql') or self.name == 'html':
        #     return splice(self.name, ': ', str(self.value), ' ;;')
        if self.name in (
            'links','filters','actions','options',
            'form_params','sets', 'access_grants',
            'params', 'allowed_values', 'named_value_formats',
            'datagroups', 'map_layers', 'derived_columns','columns','access_filters'):
            return simple()
        elif self.name == 'explore_source':
            # deep copy so popping 'name' for the header doesn't mutate the live schema
            shadow = copy.deepcopy(self.value)
            return splice(self.name , ': ' + shadow.schema.pop('name') + ' ', str(shadow))
        elif self.name in ('tags'):
            # NOTE(review): ('tags') is a plain string, so this is a substring
            # test — 't', 'ags', etc. also match. Probably meant ('tags',);
            # confirm before changing.
            return default()
        elif self.name in lkml.keys.EXPR_BLOCK_KEYS:
            return expression_block()
        elif self.name in lkml.keys.QUOTED_LITERAL_KEYS:
            return quote_pair()
        #single Value brackets
        elif self.name in ('extends', 'alias'):
            return svbrackets()
        elif self.name == "includes":
            return splice('include: "',str(self.value),'"')
        elif self.name in conf.MULTIVALUE_PROPERTIES:
            return default()
        elif self.name == ('list_member') and isinstance(self.value,str):
            return list_member_training_comma()
        elif self.name == 'list_member':
            return simple()
        elif self.name == 'list_member_quoted':
            return simple()
        elif self.name == 'field':
            return (' '*4 + default())
        else:
            return default()
class Properties(object):
    '''
    Treats the collection of properties as a recursive dictionary over a raw
    ``schema`` (dict or list). Plural/multi-value LookML constructs
    (includes, links, filters, bind_filters, data_groups, named_value_format,
    sets, ...) are flagged via ``multiValueSpecialHandling`` and rendered
    with construct-specific string formatting.
    '''
    def __init__(self, schema, multiValueSpecialHandling=False):
        self.schema = schema
        self.num = 0
        self.valueiterator = iter(self.schema)
        self.multiValueSpecialHandling = multiValueSpecialHandling
    def __str__(self):
        '''Render the schema as LookML; branch on schema type and the
        multi-value handling mode. Falls through to None (never rendered)
        for unrecognized combinations.'''
        def process_plural_named_constructs():
            # e.g. sets/datagroups: "<singular>: <name> { ...body... }" per entry;
            # deep copy so popping 'name' doesn't mutate the live schema
            singular = self.multiValueSpecialHandling[:-1]
            buildString = ""
            schemaDeepCopy = copy.deepcopy(self.schema)
            for fset in schemaDeepCopy:
                buildString += conf.NEWLINEINDENT + conf.INDENT + singular + ': ' + fset.pop('name') + ' '
                buildString += str(Property('list_member',fset))
            return buildString
        def process_plural_unnamed_constructs():
            # e.g. links/actions: repeat "<singular>: { ... }" per entry;
            # 'filters' is already singular in LookML so it is not trimmed
            if not self.multiValueSpecialHandling == "filters":
                singular = conf.NEWLINE + self.multiValueSpecialHandling[:-1] + ': '
            else:
                singular = conf.NEWLINE + self.multiValueSpecialHandling + ': '
            return splice( singular , singular.join([str(p) for p in self.getProperties()]))
        def render(template,delim=' '):
            self.templateMap = {
                'data': stringify([str(p) for p in self.getProperties()], delim=delim, prefix=False)
            }
            return Template(getattr(conf.TEMPLATES,template)).substitute(self.templateMap)
        if isinstance(self.schema, dict):
            return render('array', delim=conf.NEWLINEINDENT)
        elif isinstance(self.schema, list) and not self.multiValueSpecialHandling:
            return render('_list', delim=' ')
        elif isinstance(self.schema, list) and self.multiValueSpecialHandling in ('tags','suggestions'):
            return splice(
                '[\n  ' ,
                '\n  '.join(['"' + str(p) + '",' for p in self.getProperties()]) ,
                '\n ]'
            )
        elif self.multiValueSpecialHandling in ('filters', 'links', 'actions', 'options', 'form_params','params', "access_filters"):
            return process_plural_unnamed_constructs()
        elif self.multiValueSpecialHandling in ("access_grants","datagroups","map_layers","named_value_formats","sets", "columns", "derived_columns", "explore_source"):
            return process_plural_named_constructs()
        elif self.multiValueSpecialHandling == 'allowed_values':
            # dict entries render as repeated allowed_value blocks; plain
            # strings render as a quoted bracket list
            if isinstance(self.schema[0],dict):
                return splice('allowed_value: ','\n allowed_value: '.join([str(p) for p in self.getProperties()]))
            elif isinstance(self.schema[0],str):
                return splice(
                    'allowed_values: [\n  ' ,
                    '\n  '.join(['"' + str(p) + '",' for p in self.getProperties()]) ,
                    '\n ]'
                )
        else:
            pass
    def __getitem__(self, key):
        '''
        TODO: fix ephemeral properties...
        TODO: Add property subtyping
        Returns a fresh Property wrapping the stored value (missing keys
        yield a Property over an empty list).
        '''
        # NOTE(review): the dict/list and sql/non-sql branches are currently
        # identical; list schemas have no .get and would raise AttributeError.
        if isinstance(self.schema, dict):
            if key == 'sql':
                # return sql_prop(identifier, self.schema.get(identifier, []))
                return Property(key, self.schema.get(key, []))
            else:
                return Property(key, self.schema.get(key, []))
        elif isinstance(self.schema, list):
            if key == 'sql':
                # return sql_prop(identifier, self.schema.get(identifier, []))
                return Property(key, self.schema.get(key, []))
            else:
                return Property(key, self.schema.get(key, []))
    def getProperties(self):
        '''Yield Property objects for every schema entry; non-unique keys
        (conf.NONUNIQUE_PROPERTIES) yield one Property per stored value.'''
        if isinstance(self.schema, dict):
            for k, v in self.schema.items():
                if k in conf.NONUNIQUE_PROPERTIES:
                    for n in v:
                        yield Property(k, n)
                else:
                    yield Property(k, v)
        elif isinstance(self.schema, list):
            for item in self.schema:
                if self.multiValueSpecialHandling in ('suggestions','tags','allowed_values'):
                    yield Property('list_member_quoted',item)
                else:
                    yield Property('list_member',item)
    def __iter__(self):
        self.valueiterator = iter(self.schema)
        return self
    def __next__(self):
        try:
            return next(self.valueiterator)
        except:
            raise StopIteration
    def __add__(self,other):
        # only tag/suggestion-style list schemas support `+`; all other
        # combinations are deliberate no-ops
        if isinstance(self.schema, dict):
            pass
        elif isinstance(self.schema, list) and not self.multiValueSpecialHandling:
            pass
        elif isinstance(self.schema, list) and self.multiValueSpecialHandling in ('tags','suggestions'):
            self.addProperty(self.multiValueSpecialHandling,other)
        elif self.multiValueSpecialHandling == 'filters':
            pass
        elif self.multiValueSpecialHandling == 'links':
            pass
        else:
            pass
    def addProperty(self, name, value):
        '''Insert/overwrite a value. Non-unique keys accumulate into a list;
        list schemas append (deduplicated); dict schemas assign.'''
        if name in conf.NONUNIQUE_PROPERTIES:
            index = self.schema.get(name,[])
            index.append(value)
            self.schema.update(
                {name: index}
            )
        elif isinstance(self.schema, list):
            if value not in self.schema:
                self.schema.append(value)
        else:
            self.schema.update({name: value})
    def delProperty(self, name):
        '''Remove *name* from the schema if present; silently no-op otherwise.

        Added fix: ``base.unHide`` and ``Join.unHide`` already call this
        method, but it did not exist — every un-hide raised AttributeError.
        '''
        self.__delete__(name)
    def __delete__(self, identifier):
        if isinstance(self.schema,dict):
            self.schema.pop(identifier, None)
        elif isinstance(self.schema,list):
            # Bug fix: list.remove() takes exactly one argument; the original
            # passed a bogus ``None`` default and raised TypeError. Guard the
            # membership instead to keep removal best-effort.
            if identifier in self.schema:
                self.schema.remove(identifier)
    def isMember(self, property):
        '''True when *property* is a key (dict schema) or element (list schema).'''
        if isinstance(self.schema,dict):
            return property in self.schema.keys()
        elif isinstance(self.schema,list):
            return property in self.schema
    def props(self):
        '''
        Returns a list of the property values. Mostly used for membership checking
        '''
        if isinstance(self.schema, dict):
            return self.schema.keys()
        elif isinstance(self.schema, list):
            return self.schema
    def rawPropValue(self,key):
        '''
        if dict type schema, needs a prop name. If list type schema needs a number index
        '''
        return self.schema[key]
    def __len__(self):
        return len(self.schema)
class Field(base):
    ''' Base class for fields in LookML, only derived/child types should be instantiated.

    Exposes magic reference attributes via __getattr__:
    __ref__ (${view.field}), __refs__ (${field}), __refr__ (view.field),
    __refrs__ (field), plus regex-escaped variants (__refre__, __refsre__,
    __refrre__) used by View.search for dependency tracking.
    '''
    def __init__(self, input):
        # db_column: raw database column backing this field (set by
        # Dimension.setDBColumn; empty until then)
        self.db_column = ''
        super(Field, self).__init__(input)
        self.templateMap = {
        }
    def children(self):
        # Yield sibling fields in the owning view whose sql references this
        # field (matched by both short and fully-qualified regex refs).
        if self.view:
            for dependent in self.view.search('sql',[self.__refsre__,self.__refre__]):
                yield dependent
    def setName_safe(self, newName):
        '''
        Change the name of the field and references to it in sql (does not yet perform the same for HTML / Links / Drill Fields / Sets / Actions etc)
        '''
        #TODO: complete checking all places for dependencies.
        # capture the old identifier and its regex ref patterns before renaming
        old = copy.deepcopy(self.name)
        oldrefsre = copy.deepcopy(self.__refsre__)
        oldrefre = copy.deepcopy(self.__refre__)
        self.setName(newName)
        # rewrite every dependent field's sql to point at the new name
        for f in self.view.search('sql',[oldrefsre,oldrefre]):
            f.sql = re.sub(oldrefsre, self.__refs__, str(f.sql.value))
            f.sql = re.sub(oldrefre, self.__ref__, str(f.sql.value))
        # re-register under the new key (old entry removed first)
        self.view.removeField(old)
        self.view + self
        return self
    def __getattr__(self, key):
        # Reference-attribute magic; anything unrecognized is treated as a
        # property lookup. NOTE(review): the ref branches guarded by
        # ``if self.view`` implicitly return None when no view is attached —
        # confirm callers handle that.
        if key == 'name':
            return self.identifier
        elif key == 'pk':
            return self.getPrimaryKey()
        #full reference
        elif key == '__ref__':
            if self.view:
                return splice('${' , self.view.identifier , '.' , self.identifier , '}')
        #Short Reference
        elif key == '__refs__':
            return splice('${' , self.identifier , '}')
        #full reference -- regex escaped
        # NOTE(review): '\$\{' etc. are invalid escape sequences in plain
        # strings (DeprecationWarning); they currently still yield literal
        # backslashes, which is what the regex usage relies on.
        elif key == '__refre__':
            if self.view:
                return splice('\$\{' , self.view.identifier , '\.' , self.identifier , '\}')
        #Short reference -- regex escaped
        elif key == '__refsre__':
            if self.view:
                return splice('\$\{' , self.identifier , '\}')
        #Raw Reference
        elif key == '__refr__':
            if self.view:
                return splice(self.view.identifier , '.' , self.identifier)
        #Raw refence short
        elif key == '__refrs__':
            if self.view:
                return splice(self.identifier)
        #Raw Reference regex
        elif key == '__refrre__':
            if self.view:
                return splice(self.view.identifier , '\.' , self.identifier)
        else:
            return self.getProperty(key)
    def __setattr__(self, name, value):
        # 'label'/'name' and recognized field properties route through
        # setters; everything else is a plain attribute assignment
        if name == 'label':
            self.setLabel(value)
            return self
        elif name == 'name':
            self.setName(value)
            return self
        # elif name in self.properties.props():
        elif name in conf.language_rules.field_props:
            return self.setProperty(name,value)
        else:
            object.__setattr__(self, name, value)
    def setDescription(self,value):
        '''Set the ``description`` property; returns self.'''
        return self.setProperty('description', value)
    def addTag(self,tag):
        # append to the existing tags list (deduplicated) or create it
        if self.properties.isMember('tags'):
            if tag not in self.tags:
                # self.tags.value.schema['tags'].append(tag)
                self.tags.value.schema.append(tag)
            #Else it's already a member
        else:
            self.setProperty('tags',[tag])
    def removeTag(self,tag):
        # NOTE(review): raises ValueError if 'tags' exists but *tag* is absent
        if self.properties.isMember('tags'):
            self.tags.value.schema.remove(tag)
        else:
            pass
    def setView(self, view):
        '''Attach the owning View so the field can build qualified references.'''
        self.view = view
        return self # satisfies a need to linkback (look where setView is called)
    def setSql(self, sql):
        '''Set the ``sql`` property; returns self.'''
        self.setProperty('sql', sql)
        return self
    def setType(self, type):
        '''Set the LookML ``type`` property; returns self.'''
        self.properties.addProperty('type', type)
        return self
    def setNumber(self):
        '''Shortcut for setType('number').'''
        return self.setType('number')
    def setString(self):
        '''Shortcut for setType('string').'''
        return self.setType('string')
    def setViewLabel(self, viewLabel):
        '''Set the ``view_label`` property; returns self.'''
        return self.setProperty('view_label', viewLabel)
    def sql_nvl(self,value_if_null):
        # wrap the current sql in NVL(...); assignment goes through
        # __setattr__ -> setProperty('sql', ...)
        self.sql = "NVL(" + str(self.sql.value) + "," + value_if_null + ")"
class Dimension(Field):
    '''A LookML ``dimension`` field.'''

    def __init__(self, input):
        super(Dimension, self).__init__(input)
        self.token = 'dimension'

    def isPrimaryKey(self):
        '''Return True when this dimension carries ``primary_key: yes``.'''
        return bool(self.hasProp('primary_key')
                    and self.getProperty('primary_key').value == 'yes')

    def setDBColumn(self, dbColumn, changeIdentifier=True):
        '''Point this dimension at dbColumn via a ${TABLE}. sql reference,
        optionally renaming the dimension to the lookCase'd column name.
        Returns self.'''
        self.db_column = dbColumn
        self.setProperty('sql', splice('${TABLE}.', conf.DB_FIELD_DELIMITER_START, self.db_column, conf.DB_FIELD_DELIMITER_END))
        if changeIdentifier:
            self.identifier = lookCase(self.db_column)
        return self

    def setAllLabels(self, group=None, item=None, label=None):
        '''Set any of group_label / group_item_label / label in one call.

        Bug fix: the arguments were previously *annotated* with None
        (``group: None``) instead of defaulted, which made all three
        required positional parameters; they are now optional. Returns self.
        '''
        if group:
            self.setProperty('group_label', group)
        if item:
            self.setProperty('group_item_label', item)
        if label:
            self.setProperty('label', label)
        return self

    def setPrimaryKey(self):
        '''Mark this dimension ``primary_key: yes``. Returns self.'''
        self.setProperty('primary_key', 'yes')
        return self

    def unSetPrimaryKey(self):
        '''Remove the primary_key property. Returns self.'''
        self.unSetProperty('primary_key')
        return self

    def setTier(self, tiers=None):
        '''Turn this dimension into a tier. A supplied ``tiers`` sequence is
        used as the boundaries; otherwise a default set is applied.

        Bug fix: the original branches were inverted -- a supplied list was
        ignored (the hard-coded default was written) while an empty call
        joined the empty list. Also replaces the mutable default argument
        and accepts numeric tier values. Returns self.
        '''
        if tiers:
            self.setProperty('tiers', '[' + ','.join(str(t) for t in tiers) + ']')
        else:
            self.setProperty('tiers', '[0,5,10,15,20]')
        return self.setType('tier')

    def addLink(self, url, label, icon_url='https://looker.com/favicon.ico'):
        '''Append a ``link`` property with url, label and icon. Returns self.'''
        self.properties.addProperty('link', {
            'url': url,
            'label': label,
            'icon_url': icon_url
        })
        return self
class DimensionGroup(Field):
    '''A LookML ``dimension_group`` (time-based) field. Guarantees sensible
    ``timeframes`` and ``type: time`` defaults when none were parsed.'''

    def __init__(self, input):
        super(DimensionGroup, self).__init__(input)
        props = self.properties
        if not props.isMember('timeframes'):
            default_timeframes = splice(
                '[', '{},'.format(conf.NEWLINEINDENT).join(conf.TIMEFRAMES), ']')
            props.addProperty('timeframes', default_timeframes)
        if not props.isMember('type'):
            props.addProperty('type', 'time')
        self.token = 'dimension_group'

    def setDBColumn(self, dbColumn, changeIdentifier=True):
        '''Bind the group to a database column through a ${TABLE}. sql
        reference; optionally rename the group after the column.'''
        self.db_column = dbColumn
        sql_ref = splice('${TABLE}.', conf.DB_FIELD_DELIMITER_START,
                         self.db_column, conf.DB_FIELD_DELIMITER_END)
        self.setProperty('sql', sql_ref)
        if changeIdentifier:
            self.identifier = lookCase(self.db_column)
        return self
class Measure(Field):
    '''Field subclass rendered with the ``measure`` LookML keyword.'''
    def __init__(self, input):
        super(Measure, self).__init__(input)
        self.token = 'measure'
class Filter(Field):
    '''Field subclass rendered with the ``filter`` LookML keyword.'''
    def __init__(self, input):
        super(Filter, self).__init__(input)
        self.token = 'filter'
class Parameter(Field):
    '''Field subclass rendered with the ``parameter`` LookML keyword.'''
    def __init__(self, input):
        super(Parameter, self).__init__(input)
        self.token = 'parameter'
#next Minor release::
# TODO: set configurations via command line and environment variable
# TODO: make __getatt__ / __setattr__ consistent across classes
# TODO: Implement remaining collections iteration, top level file attributes (data groups, named value format etc)
# TODO: ensure the top level stuff for file works, i.e. accessors for plurals like data groups etc
# Dependency Graphing:
    # TODO: Ancestor functions?
    # TODO: Child function support renaming across all properties (html, links, etc)
    # TODO: Multi-generation dependency tracing (ancestor / descendant)
# TODO: cross file / whole project?
# Code Cleanliness / pip:
# TODO: rationally break up the megafile...
# TODO: use the _variable name for all private variables
# TODO: change "identifier" to _name
# Unit Testing:
# TODO: Redesign / modularize test suite
#* Basic parsing loop,
#* network enabled loop, github / shell
# TODO: test iteration behaviors
######### V3+ #########
# TODO: Implement MVC?
    # * model -> could eliminate the "phantom property" in that a class instance is only created on get / observation.... (getters and setters should mutate the underlying json at all times to ensure consistency)
    # TODO: Rationalize View rendering
    # TODO: eliminate property / properties classes? -> replace with model? Think through getter / setter / render
# TODO: Integrate Tom's script for dependency graphing OO
    # TODO: Common Sql Functions added to the SQL parameter
    # TODO: Common html Functions added to the html parameter
    # TODO: Manifest
    # TODO: constants
    # TODO: locale
    # TODO: slots / performance optimization
# TODO: Interactive CLI
# TODO: Update LKML to support new filters syntax
# TODO: additional documentation
    # Finish documenting every function for the autodocs
# Usecase oriented documentation (move to the.rst file):
# loop through all the files in a project make a change and update
# Auto - tune your model
# Looker API Query the database and create a new view file / EAV unnest (superview & multi-model approach)
# BQ Unnest
# Use dependency tracing
# BQML
# DONE: Top N
# Aggregate Awareness Macro (materialization + refinements)
# Calendar Table
# SFDC Waterfall
# Multi Grain period over period
# Drill to vis with constants
# Incremental PDTs? --> This breaks as of Looker 7?
# Negative Intervals Hacking
# Linking macro, Intel linking block?
# Fancy Conditional Formatting examples
# Something with slowly changing dimensions
# lambda / cloud function example?
# TODO: Write a test that would use materialization and refinements
# Materialization:
# explore: event {
# aggregate_table: monthly_orders {
# materialization: {
# datagroup_trigger: orders_datagroup
# }
# query: {
# dimensions: [orders.created_month]
# measures: [orders.count]
# #filters: [orders.created_date: "1 year", orders.status: "fulfilled"]
# filters: {
# field: orders.created_date
# value: "1 year"
# }
# filters: {
# field: orders.status
# value: "fulfilled"
# }
# timezone: "America/Los_Angeles"
# }
# }
# } | 35.318206 | 233 | 0.574647 | 59,596 | 0.91156 | 3,357 | 0.051348 | 0 | 0 | 0 | 0 | 23,831 | 0.364511 |
96fc72968950824d61acc3ae7701e624cdb6308f | 1,933 | py | Python | chapter03/python/situation.py | coco-in-bluemoon/building-recommendation-engines | b337b2ba75b6c9b08612ab1720a2858e64e9de09 | [
"MIT"
] | null | null | null | chapter03/python/situation.py | coco-in-bluemoon/building-recommendation-engines | b337b2ba75b6c9b08612ab1720a2858e64e9de09 | [
"MIT"
] | null | null | null | chapter03/python/situation.py | coco-in-bluemoon/building-recommendation-engines | b337b2ba75b6c9b08612ab1720a2858e64e9de09 | [
"MIT"
] | null | null | null | from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import pandas as pd
# 1. Load Data and Item Profile
ratings = pd.read_csv('chapter03/data/movie_rating.csv')
movie_ratings = pd.pivot_table(
ratings,
values='rating',
index='title',
columns='critic'
)
print(movie_ratings)
# 2. Consturct profiles
profile_dict = {
'Romance': [0.3, 0.4, 0],
'Thriller': [0, 0, 0.5],
'Action': [0.2, 0, 0],
'Sci-fi': [0, 0.3, 0.4],
'Mystery': [0, 0, 0],
'Comedy': [0.5, 0, 0],
'Fantasy': [0, 0.3, 0.1],
'Crime': [0, 0, 0]
}
situation_profile = pd.DataFrame(
profile_dict,
index=['weekday', 'weekend', 'holiday']
)
print(situation_profile)
profile_dict = {
'Romance': [1, 0, 0, 0, 0, 1],
'Thriller': [0, 1, 1, 0, 0, 0],
'Action': [0, 0, 1, 0, 0, 0],
'Sci-fi': [0, 0, 0, 1, 0, 0],
'Mystery': [0, 0, 0, 0, 1, 0],
'Comedy': [0, 0, 0, 0, 0, 1],
'Fantasy': [1, 1, 0, 1, 0, 0],
'Crime': [0, 0, 0, 0, 1, 0]
}
item_profile = pd.DataFrame(profile_dict, index=movie_ratings.index)
idf = dict()
N = len(item_profile)
for feature in item_profile.columns:
df = item_profile[feature].value_counts()[1]
idf[feature] = np.log(N / df)
item_profile.loc[:, feature] =\
[tf * idf[feature] for tf in item_profile.loc[:, feature]]
print(item_profile)
user_profile = np.dot(np.transpose(movie_ratings.fillna(0)), item_profile)
user_profile = pd.DataFrame(
user_profile,
index=movie_ratings.columns,
columns=item_profile.columns
)
print(user_profile)
user_id = 5
user_5_profile = user_profile.loc[user_profile.index[5]]
user_situation_profile = situation_profile * user_5_profile
print(user_situation_profile)
# 3. calculate similarity
result = cosine_similarity(user_situation_profile, item_profile)
result = pd.DataFrame(
result,
index=user_situation_profile.index,
columns=item_profile.index
)
print(result)
| 24.782051 | 74 | 0.649767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 298 | 0.154165 |
96fd1f2b486144df968599ce7e69efe106bab45c | 1,025 | py | Python | tic_tac_toe/TD_lambda_run.py | Luca-Dambra/TD_lambda_board_games | 31febd4987fe882c559bfb40b2bd9aef3326dd3f | [
"MIT"
] | null | null | null | tic_tac_toe/TD_lambda_run.py | Luca-Dambra/TD_lambda_board_games | 31febd4987fe882c559bfb40b2bd9aef3326dd3f | [
"MIT"
] | null | null | null | tic_tac_toe/TD_lambda_run.py | Luca-Dambra/TD_lambda_board_games | 31febd4987fe882c559bfb40b2bd9aef3326dd3f | [
"MIT"
] | null | null | null | import functools
import numpy as np
import pandas as pd
from NN_base import load_network, save_network, create_network
from tic_tac_toe import TicTacToeGameSpec, play_game
from TD_lambda import TD_train
# Checkpoint to resume training from (None => train from scratch).
# NOTE(review): the TD_train call below passes network_file_path=None
# literally instead of this constant -- confirm which is intended.
NETWORK_FILE_PATH = None
NUMBER_OF_GAMES_TO_RUN = 500
NUMBER_OF_TEST = 200
NUMBER_OF_ROUNDS = 100
# presumably the epsilon-greedy exploration rate -- confirm against TD_train
EPSILON = 0.1
TAU = 0.8
# TD(lambda) trace-decay parameter
LAMBDA = 0.3
DECAY_RATE = 0.95
DECAY_STEP = 1000
ALPHA = 0.04 ## starting learning rate
game_spec = TicTacToeGameSpec()
# Network factory: 9 board-cell inputs, two hidden layers, scalar value output.
create_network_func = functools.partial(create_network, input_nodes=9, hidden_nodes=(20,30), output_nodes=1, output_softmax=False)
results = TD_train(game_spec,
                   create_network_func,
                   network_file_path = None,
                   opp_func = None,
                   number_of_games = NUMBER_OF_GAMES_TO_RUN,
                   number_of_test = NUMBER_OF_TEST,
                   number_of_rounds = NUMBER_OF_ROUNDS,
                   epsilon = EPSILON,
                   tau = TAU,
                   lamda = LAMBDA,
                   decay_rate = DECAY_RATE,
                   decay_steps = DECAY_STEP,
                   alpha_start = ALPHA)
96fd8456b0b3a88b436bed5097c56c54ff0c3e18 | 7,808 | py | Python | FinalProject_SANet/demo/net.py | lev1khachatryan/ASDS_CV | c9f0c0412002e929bcb7cc2fc6e5392977a9fa76 | [
"MIT"
] | 5 | 2019-12-13T16:26:10.000Z | 2020-01-10T07:44:05.000Z | FinalProject_SANet/PyTorch/net.py | lev1khachatryan/ASDS_CV | c9f0c0412002e929bcb7cc2fc6e5392977a9fa76 | [
"MIT"
] | 1 | 2020-01-07T16:48:21.000Z | 2020-03-18T18:43:37.000Z | FinalProject_SANet/demo/net.py | lev1khachatryan/ASDS_CV | c9f0c0412002e929bcb7cc2fc6e5392977a9fa76 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch
from function import normal
from function import calc_mean_std
decoder = nn.Sequential(
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 256, (3, 3)),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 128, (3, 3)),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 64, (3, 3)),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 3, (3, 3)),
)
# Fixed VGG-19 convolutional stack used as the encoder; the inline comments
# name each relu activation. The leading 1x1 conv matches the layout of the
# commonly distributed pre-trained VGG checkpoint (presumably channel
# preprocessing -- confirm against the weights loaded elsewhere).
vgg = nn.Sequential(
    nn.Conv2d(3, 3, (1, 1)),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(3, 64, (3, 3)),
    nn.ReLU(),  # relu1-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 64, (3, 3)),
    nn.ReLU(),  # relu1-2
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 128, (3, 3)),
    nn.ReLU(),  # relu2-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 128, (3, 3)),
    nn.ReLU(),  # relu2-2
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 256, (3, 3)),
    nn.ReLU(),  # relu3-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),  # relu3-2
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),  # relu3-3
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, (3, 3)),
    nn.ReLU(),  # relu3-4
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 512, (3, 3)),
    nn.ReLU(),  # relu4-1, this is the last layer used
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu4-2
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu4-3
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu4-4
    nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu5-1
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu5-2
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU(),  # relu5-3
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, (3, 3)),
    nn.ReLU()  # relu5-4
)
class SANet(nn.Module):
    """Style-attentional block: attends content features over style features
    and adds the fused result back onto the content features."""

    def __init__(self, in_dim):
        super(SANet, self).__init__()
        # 1x1 projections for the query (f), key (g) and value (h) paths.
        self.f = nn.Conv2d(in_dim, in_dim, (1, 1))
        self.g = nn.Conv2d(in_dim, in_dim, (1, 1))
        self.h = nn.Conv2d(in_dim, in_dim, (1, 1))
        self.softmax = nn.Softmax(dim=-1)
        self.out_conv = nn.Conv2d(in_dim, in_dim, (1, 1))

    def forward(self, content_feat, style_feat):
        b, c, h, w = content_feat.size()
        # Query from mean/std-normalized content: (B, Hc*Wc, C).
        query = self.f(normal(content_feat)).view(b, -1, h * w).permute(0, 2, 1)
        sb, sc, sh, sw = style_feat.size()
        # Key from normalized style: (B, C, Hs*Ws).
        key = self.g(normal(style_feat)).view(sb, -1, sh * sw)
        # Attention over style positions for every content position.
        attention = self.softmax(torch.bmm(query, key))
        # Value path uses the *un*-normalized style features.
        value = self.h(style_feat).view(sb, -1, sh * sw)
        fused = torch.bmm(value, attention.permute(0, 2, 1))
        fused = fused.view(b, c, h, w)
        fused = self.out_conv(fused)
        fused += content_feat  # residual connection
        return fused
class Self_Attention_Module(nn.Module):
    """Runs SANet attention at the two deepest encoder stages and merges
    them: the deeper (relu5_1-level) result is upsampled 2x and summed with
    the relu4_1-level result before a padded 3x3 merge convolution."""

    def __init__(self, in_dim):
        super(Self_Attention_Module, self).__init__()
        self.SAN1 = SANet(in_dim)
        self.SAN2 = SANet(in_dim)
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.merge_conv_pad = nn.ReflectionPad2d((1, 1, 1, 1))
        self.merge_conv = nn.Conv2d(in_dim, in_dim, (3, 3))

    def forward(self, content_feats, style_feats):
        deep = self.upsample(self.SAN1(content_feats[-1], style_feats[-1]))
        shallow = self.SAN2(content_feats[-2], style_feats[-2])
        merged = self.merge_conv_pad(shallow + deep)
        return self.merge_conv(merged)
class Net(nn.Module):
    '''Training wrapper: a frozen VGG encoder split at each relu*_1
    boundary, the self-attention transform, and the decoder. forward()
    returns the four training losses (content, style, and the two identity
    losses), not a stylized image.'''
    def __init__(self, encoder, decoder):
        super(Net, self).__init__()
        # Split the flat VGG Sequential into per-stage chunks ending at
        # each relu*_1 activation.
        enc_layers = list(encoder.children())
        self.enc_1 = nn.Sequential(*enc_layers[:4])  # input -> relu1_1
        self.enc_2 = nn.Sequential(*enc_layers[4:11])  # relu1_1 -> relu2_1
        self.enc_3 = nn.Sequential(*enc_layers[11:18])  # relu2_1 -> relu3_1
        self.enc_4 = nn.Sequential(*enc_layers[18:31])  # relu3_1 -> relu4_1
        self.enc_5 = nn.Sequential(*enc_layers[31:44])  # relu4_1 -> relu5_1
        # transform
        self.sa_module = Self_Attention_Module(512)
        self.decoder = decoder
        self.mse_loss = nn.MSELoss()
        # fix the encoder: VGG weights are never trained
        for name in ['enc_1', 'enc_2', 'enc_3', 'enc_4', 'enc_5']:
            for param in getattr(self, name).parameters():
                param.requires_grad = False
    # extract relu1_1, relu2_1, relu3_1, relu4_1, relu5_1 from input image
    def encode_with_intermediate(self, input):
        '''Return the five intermediate feature maps for ``input``.'''
        results = [input]
        for i in range(5):
            func = getattr(self, 'enc_{:d}'.format(i + 1))
            results.append(func(results[-1]))
        return results[1:]
    def calc_content_loss(self, input, target):
        '''MSE between same-shaped feature maps; ``target`` must be detached.'''
        assert (input.size() == target.size())
        assert (target.requires_grad is False)
        return self.mse_loss(input, target)
    def calc_style_loss(self, input, target):
        '''MSE between channel-wise mean/std statistics of the two maps.'''
        assert (input.size() == target.size())
        assert (target.requires_grad is False)
        input_mean, input_std = calc_mean_std(input)
        target_mean, target_std = calc_mean_std(target)
        return self.mse_loss(input_mean, target_mean) + \
               self.mse_loss(input_std, target_std)
    def forward(self, content, style):
        '''Compute (loss_c, loss_s, loss_lambda1, loss_lambda2) for one
        content/style batch pair.'''
        style_feats = self.encode_with_intermediate(style)
        content_feats = self.encode_with_intermediate(content)
        # Stylized image Ics and its encoder features.
        Ics = self.decoder(self.sa_module(content_feats, style_feats))
        Ics_feats = self.encode_with_intermediate(Ics)
        # Content loss on normalized relu4_1 and relu5_1 features.
        loss_c = self.calc_content_loss(normal(Ics_feats[-1]), normal(content_feats[-1]))+self.calc_content_loss(normal(Ics_feats[-2]), normal(content_feats[-2]))
        # Style loss summed over all five stages.
        loss_s = self.calc_style_loss(Ics_feats[0], style_feats[0])
        for i in range(1, 5):
            loss_s += self.calc_style_loss(Ics_feats[i], style_feats[i])
        # Identity loss lambda 1: an image styled with itself must reconstruct itself.
        Icc = self.decoder(self.sa_module(content_feats, content_feats))
        Iss = self.decoder(self.sa_module(style_feats, style_feats))
        loss_lambda1 = self.calc_content_loss(Icc,content)+self.calc_content_loss(Iss,style)
        # Identity loss lambda 2: same constraint in feature space, all stages.
        Icc_feats=self.encode_with_intermediate(Icc)
        Iss_feats=self.encode_with_intermediate(Iss)
        loss_lambda2 = self.calc_content_loss(Icc_feats[0], content_feats[0])+self.calc_content_loss(Iss_feats[0], style_feats[0])
        for i in range(1, 5):
            loss_lambda2 += self.calc_content_loss(Icc_feats[i], content_feats[i])+self.calc_content_loss(Iss_feats[i], style_feats[i])
        return loss_c, loss_s, loss_lambda1, loss_lambda2
| 38.653465 | 162 | 0.597848 | 4,909 | 0.628714 | 0 | 0 | 0 | 0 | 0 | 0 | 525 | 0.067239 |
96ff65371ea5fac719ab021f858179bf05875269 | 607 | py | Python | blaze/expr/scalar/interface.py | chdoig/blaze | caa5a497e1ca1ceb1cf585483312ff4cd74d0bda | [
"BSD-3-Clause"
] | 1 | 2015-01-18T23:59:57.000Z | 2015-01-18T23:59:57.000Z | blaze/expr/scalar/interface.py | chdoig/blaze | caa5a497e1ca1ceb1cf585483312ff4cd74d0bda | [
"BSD-3-Clause"
] | null | null | null | blaze/expr/scalar/interface.py | chdoig/blaze | caa5a497e1ca1ceb1cf585483312ff4cd74d0bda | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
from ..core import Expr
from datashape import dshape
from .boolean import BooleanInterface
from .numbers import NumberInterface
class ScalarSymbol(NumberInterface, BooleanInterface):
    """A free scalar terminal identified by a name, with a datashape dtype."""

    __slots__ = '_name', 'dtype'

    def __init__(self, name, dtype='real'):
        self.dtype = dshape(dtype)
        self._name = name

    @property
    def name(self):
        """The symbol's identifier string."""
        return self._name

    @property
    def dshape(self):
        """Datashape of this scalar (simply its dtype)."""
        return dshape(self.dtype)

    def __str__(self):
        return str(self._name)

    # Reuse Expr's hash so equal expressions hash consistently.
    __hash__ = Expr.__hash__
| 21.678571 | 64 | 0.693575 | 410 | 0.675453 | 0 | 0 | 120 | 0.197694 | 0 | 0 | 20 | 0.032949 |
96ff89e422270b31c5889bf32192c54fcd3d3495 | 817 | py | Python | 2019/08-kosen/rev-favorites/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 25 | 2019-03-06T11:55:56.000Z | 2021-05-21T22:07:14.000Z | 2019/08-kosen/rev-favorites/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 1 | 2020-06-25T07:27:15.000Z | 2020-06-25T07:27:15.000Z | 2019/08-kosen/rev-favorites/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 1 | 2019-02-14T00:42:28.000Z | 2019-02-14T00:42:28.000Z | def main():
seed = 0x1234
e = [0x62d5, 0x7b27, 0xc5d4, 0x11c4, 0x5d67, 0xa356, 0x5f84,
0xbd67, 0xad04, 0x9a64, 0xefa6, 0x94d6, 0x2434, 0x0178]
flag = ""
for index in range(14):
for i in range(0x7f-0x20):
c = chr(0x20+i)
res = encode(c, index, seed)
if res == e[index]:
print(c)
flag += c
seed = encode(c, index, seed)
print("Kosen{%s}" % flag)
def encode(p1, p2, p3):
    """Mix one character (p1), an index (p2) and a seed (p3) into a 16-bit
    checksum. Low byte combines the nibble-swapped character with an index
    mask; the high bits mix two rotations of the seed."""
    ch = ord(p1) & 0xff
    idx = p2 & 0xffffffff
    state = p3 & 0xffffffff
    # Nibble-swap the character, add one, then XOR with the index mask.
    low = ((((ch >> 4) | ((ch & 0xf) << 4)) + 1) ^
           (((idx >> 4) | (~idx << 4)) & 0xff))
    # XOR two shifted views of the seed, aligned into the high byte.
    high = (((state >> 4) << 8) ^
            (((state >> 0xc) | (state << 4)) << 8))
    return (low | high) & 0xffff
if __name__ == "__main__":
main()
| 29.178571 | 120 | 0.440636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.028152 |
96ff8d29cb8034da8d295fc63e68e0bc696f15c1 | 1,095 | py | Python | WidgetsUnlimited/model/customer_address.py | AlanHorowitz/open-ended-capstone | 80590af5b09c2245f124cec20ed7594d62cff30e | [
"MIT"
] | null | null | null | WidgetsUnlimited/model/customer_address.py | AlanHorowitz/open-ended-capstone | 80590af5b09c2245f124cec20ed7594d62cff30e | [
"MIT"
] | null | null | null | WidgetsUnlimited/model/customer_address.py | AlanHorowitz/open-ended-capstone | 80590af5b09c2245f124cec20ed7594d62cff30e | [
"MIT"
] | null | null | null | from psycopg2.extensions import TRANSACTION_STATUS_IDLE
from .metadata import Table, Column
from .customer import CustomerTable
class CustomerAddressTable(Table):
NAME = "customer_address"
def __init__(self):
super().__init__(
CustomerAddressTable.NAME,
Column(
"customer_id",
"INTEGER",
parent_table=CustomerTable.NAME,
parent_key="customer_id",
),
Column("customer_address_id", "INTEGER", primary_key=True),
Column(
"customer_address",
"VARCHAR",
255,
default="First Middle Last\n123 Snickersnack Lane\nBrooklyn, NY 11229",
update=True,
),
# Column("customer_temp_updateable", "VARCHAR", update=True),
Column("customer_address_type", "VARCHAR", default="S"),
Column("customer_address_inserted_at", "TIMESTAMP", inserted_at=True),
Column("customer_address_updated_at", "TIMESTAMP", updated_at=True),
)
| 34.21875 | 87 | 0.581735 | 964 | 0.880365 | 0 | 0 | 0 | 0 | 0 | 0 | 349 | 0.318721 |
8c005a68252d143c8872adac4f5fee4210c1275d | 1,439 | py | Python | src/data/spark/domain_length_sql.py | sheikhomar/mako | 3bf0e183e63bd86c2df4eb90486726ca4194bd9b | [
"MIT"
] | null | null | null | src/data/spark/domain_length_sql.py | sheikhomar/mako | 3bf0e183e63bd86c2df4eb90486726ca4194bd9b | [
"MIT"
] | null | null | null | src/data/spark/domain_length_sql.py | sheikhomar/mako | 3bf0e183e63bd86c2df4eb90486726ca4194bd9b | [
"MIT"
] | null | null | null |
# Command to run this script on the CTIT cluster:
# $ spark-submit --master yarn --deploy-mode cluster --packages com.databricks:spark-csv_2.10:1.5.0 src/data/spark/domain_length_sql.py
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import udf
from pyspark.sql.types import *
from pyspark.sql import functions as func
import time
sc = SparkContext("local", "Alexa Domain Length")
sc.setLogLevel("ERROR")
sqlContext = SQLContext(sc)
schema = StructType([
StructField("domain", StringType(), False),
StructField("count", IntegerType(), False),
StructField("min_rank", IntegerType(), False),
StructField("max_rank", IntegerType(), False),
StructField("avg_rank", FloatType(), False),
StructField("stddev", FloatType(), False),
StructField("variance", FloatType(), False),
StructField("skewness", FloatType(), False),
StructField("kurtosis", FloatType(), False),
])
df = sqlContext.read.format("com.databricks.spark.csv").option("header", "false").option("inferSchema", "false").schema(schema)
df = df.load('file:///home/s1962523/agg_alexa/part-*')
def str_len(s):
    """Return the number of characters in ``s`` (a domain name).

    The parameter was renamed from ``str`` to avoid shadowing the builtin;
    the UDF registration below calls it positionally, so callers are
    unaffected.
    """
    return len(s)
# Register the Python length function as a Spark UDF, then count how many
# domains share each name length and write the histogram out as CSV.
str_len_udf = udf(str_len, IntegerType())
df = df.withColumn('length', str_len_udf(df['domain']))
df = df.groupby('length').agg(
    func.count('domain').alias('len_count')
)
df.write.format('com.databricks.spark.csv').save('file:///home/s1962523/alexa_domainlen')
| 35.975 | 135 | 0.721334 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 511 | 0.355108 |
8c00e685aea15a92f852072e47de4dda8adf42a0 | 3,305 | py | Python | api/programs/result.py | ca2315/PlasmoCount | b3c776ead5f436fa6e3ed0aca7d8f1683f5f4e49 | [
"BSD-3-Clause"
] | null | null | null | api/programs/result.py | ca2315/PlasmoCount | b3c776ead5f436fa6e3ed0aca7d8f1683f5f4e49 | [
"BSD-3-Clause"
] | null | null | null | api/programs/result.py | ca2315/PlasmoCount | b3c776ead5f436fa6e3ed0aca7d8f1683f5f4e49 | [
"BSD-3-Clause"
] | null | null | null | from programs.viz import plot_labels, make_crop
import pandas as pd
from pathlib import Path
import time
class Result:
    '''Per-image detection result: computes infection counts, life-stage
    statistics and crops, and renders the annotated overview plot.

    NOTE(review): color_dict and cutoffs are mutable default arguments --
    shared across instances if ever mutated; confirm downstream code treats
    them as read-only.
    '''
    def __init__(
        self,
        id,
        fname,
        img,
        pred,
        n_digits=2,
        color_dict={
            'uninfected': '#808080',
            'ring': '#f77189',
            'trophozoite': '#50b131',
            'schizont': '#3ba3ec',
            'gametocyte': '#ffd92f'
        },
        cutoffs=[1.5, 2.5]):
        # pred is a DataFrame-like object with at least 'classes',
        # 'life_stage_c', 'life_stage' and 'boxes' columns (inferred from
        # the accesses below -- confirm against the model output).
        self.id = id
        self.fname = fname
        self.img = img
        self.pred = pred
        self.n_digits = n_digits
        self.color_dict = color_dict
        self.cutoffs = cutoffs

    def __len__(self):
        # Number of detected cells.
        return len(self.pred)

    def run(self, upload_folder):
        '''Compute all summary statistics and write plots/crops under
        upload_folder/<id>/.'''
        self.path = upload_folder / self.id
        self.path.mkdir(exist_ok=True)
        # infected cells
        self.counts = self.pred['classes'].value_counts()
        self.n_infected = self.counts[
            'infected'] if 'infected' in self.counts.keys() else 0
        self.n_uninfected = self.counts[
            'uninfected'] if 'uninfected' in self.counts.keys() else 0
        self.parasitemia = round(self.n_infected / len(self), self.n_digits)
        # life stages
        self.life_stage_counts = self.pred['life_stage_c'].value_counts()
        self.asex = self.get_asexuals(upload_folder)
        # plotting (stored path is relative to upload_folder)
        self.plot = Path(self.id) / 'full.png'
        self.plot_prediction(save_to=upload_folder / self.plot)

    def to_output(self):
        '''Serialize the computed statistics to a JSON-friendly dict.
        Must be called after run().'''
        return {
            'id': int(self.id),
            'name': str(self.fname),
            'n_cells': int(len(self)),
            'n_infected': int(self.n_infected),
            'n_uninfected': int(self.n_uninfected),
            'parasitemia': float(self.parasitemia),
            'plot': str(self.plot),
            'n_ring': int(self.life_stage_counts.get('ring', 0)),
            'n_troph': int(self.life_stage_counts.get('trophozoite', 0)),
            'n_schizont': int(self.life_stage_counts.get('schizont', 0)),
            'n_gam': int(self.life_stage_counts.get('gametocyte', 0)),
            'asex_stages': list(self.asex['life_stage']),
            'asex_images': list(self.asex['filename'])
        }

    def get_asexuals(self,
                     upload_folder,
                     stages=['ring', 'trophozoite', 'schizont']):
        '''Crop every asexual-stage cell to its own image file and return a
        (filename, life_stage) frame sorted by stage value.'''
        asex = self.pred.loc[self.pred['life_stage_c'].isin(
            stages)].reset_index()
        asex['filename'] = asex['index'].apply(
            lambda x: str(Path(self.id) / ('%s.png' % x)))
        # Side effect: writes one crop per row under upload_folder.
        asex.apply(lambda x: make_crop(self.img, x['boxes'], upload_folder / x[
            'filename']),
                   axis=1)
        asex.sort_values('life_stage', inplace=True)
        asex['life_stage'] = asex['life_stage'].apply(
            lambda x: round(x, self.n_digits))
        return asex[['filename', 'life_stage']]

    def plot_prediction(self, save_to, **kwargs):
        '''Render the full image with stage-colored bounding boxes to
        save_to and return that path.'''
        plot_labels(self.img, {
            'boxes': self.pred['boxes'].tolist(),
            'labels': self.pred['life_stage_c'].tolist()
        },
                    color_dict=self.color_dict,
                    save_to=save_to,
                    **kwargs)
        return save_to
| 34.789474 | 79 | 0.540393 | 3,197 | 0.967322 | 0 | 0 | 0 | 0 | 0 | 0 | 582 | 0.176097 |
8c00e8ea0f7d20261110718e7a74f0319ddcdebd | 1,107 | py | Python | tpp_twitter/twitter.py | SuperSonicHub1/twitter_plays_pyboy | 2818245fac2faeb10fca593bdbc13d211ab7e807 | [
"Unlicense"
] | null | null | null | tpp_twitter/twitter.py | SuperSonicHub1/twitter_plays_pyboy | 2818245fac2faeb10fca593bdbc13d211ab7e807 | [
"Unlicense"
] | null | null | null | tpp_twitter/twitter.py | SuperSonicHub1/twitter_plays_pyboy | 2818245fac2faeb10fca593bdbc13d211ab7e807 | [
"Unlicense"
] | null | null | null | from dotenv import load_dotenv
import tweepy
from os import getenv
from typing import BinaryIO
BASE_BIO = "Inspired by @screenshakes. Powered by PyBoy: http://github.com/Baekalfen/PyBoy\n"
load_dotenv()
auth = tweepy.OAuthHandler(getenv('TWITTER_KEY'), getenv('TWITTER_SECRET'))
auth.set_access_token(getenv('TWITTER_ACCESS'), getenv('TWITTER_ACCESS_TOKEN'))
api = tweepy.API(auth)
def get_replies_from_latest():
    """Return an iterator over recent replies to the account's latest Tweet."""
    newest = api.user_timeline(count=1, exclude_replies=True)[0]
    replies = tweepy.Cursor(
        api.search,
        q="to:TextOnlyGameBoy",
        since_id=newest.id,
        result_type="recent",
    )
    return replies.items()
def update(tweet_image: BinaryIO, profile_image: BinaryIO, text: str = "Image", bio: str = ""):
    """Post *tweet_image* with *text*, refresh the avatar from
    *profile_image*, and (when *bio* is non-empty) update the profile bio."""
    media = api.media_upload("screenshot.jpg", file=tweet_image)
    api.update_profile_image("screenshot.jpg", file_=profile_image)
    api.update_status(text, media_ids=[media.media_id])
    if bio:
        api.update_profile(description=BASE_BIO + bio)
| 41 | 117 | 0.754291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.31075 |
8c0125ee7bfc57b4ed925dd722dd6fef3d05f11f | 1,805 | py | Python | ListNode.py | xinming365/LeetCode | e56097a60ddd1b5ddba7f15a726661c2aa6633e7 | [
"Apache-2.0"
] | null | null | null | ListNode.py | xinming365/LeetCode | e56097a60ddd1b5ddba7f15a726661c2aa6633e7 | [
"Apache-2.0"
] | null | null | null | ListNode.py | xinming365/LeetCode | e56097a60ddd1b5ddba7f15a726661c2aa6633e7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/2/2 10:08 上午
# @Author : xinming
# @File : ListNode.py
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class MyLinkedList:
def __init__(self):
self.size = 0
self.dummy_head = ListNode(0)
def get(self, index: int) -> int:
"""
Get the value of the i-th node in the linked list.
:param index:
:return:
"""
if index < 0 or index >= self.size:
return -1
curr = self.dummy_head
for i in range(index+1):
curr = curr.next
return curr.val
def print_all(self):
curr = self.dummy_head
while curr:
print(curr.val)
curr=curr.next
def addAtHead(self, val: int) -> None:
self.addAtIndex(0, val)
def addAtTail(self, val: int) -> None:
self.addAtIndex(self.size, val)
def addAtIndex(self, index: int, val: int) -> None:
if index > self.size:
return None
# Make it meaningful
if index < 0:
index= 0
self.size+=1 # update the size.
predecessor = self.dummy_head
for _ in range(index):
predecessor = predecessor.next
to_add = ListNode(val)
to_add.next = predecessor.next
predecessor.next = to_add
def deleteAtIndex(self, index: int) -> None:
# meaningless if index<0 or larger than the size.
if index < 0 or index >= self.size:
return None
# update the size of linked list.
self.size -= 1
predecessor = self.dummy_head
for _ in range(index):
predecessor = predecessor.next
predecessor.next = predecessor.next.next | 23.75 | 58 | 0.547368 | 1,678 | 0.927584 | 0 | 0 | 0 | 0 | 0 | 0 | 355 | 0.196241 |
8c0191528bb4a7ba7c5f50c6c5ad4f4b78812839 | 8,295 | py | Python | samples/waitforupdates.py | ArpitSharma2800/pyvmomi-community-samples | 68a0ef912cbb48f2fa498c14fbeb1bcda2ede9d8 | [
"Apache-2.0"
] | 931 | 2015-01-13T09:18:06.000Z | 2022-03-29T00:08:40.000Z | samples/waitforupdates.py | ArpitSharma2800/pyvmomi-community-samples | 68a0ef912cbb48f2fa498c14fbeb1bcda2ede9d8 | [
"Apache-2.0"
] | 494 | 2015-01-01T02:42:04.000Z | 2022-03-10T10:00:44.000Z | samples/waitforupdates.py | ArpitSharma2800/pyvmomi-community-samples | 68a0ef912cbb48f2fa498c14fbeb1bcda2ede9d8 | [
"Apache-2.0"
] | 976 | 2015-01-04T19:50:37.000Z | 2022-03-18T15:14:04.000Z | #!/usr/bin/env python
#
# VMware vSphere Python SDK
# Copyright (c) 2008-2021 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample Python program for monitoring property changes to objects of one
or more types
"""
import atexit
import collections
import sys
from pyVmomi import vim, vmodl
from tools import cli, service_instance, serviceutil
def parse_propspec(propspec):
    """
    Parse property specifications of the form ``Type:prop1,prop2``.

    Returns a sequence of 2-tuples, each containing a managed object type
    and the list of properties applicable to that type.

    :type propspec: collections.Sequence
    :rtype: collections.Sequence
    """
    parsed = []
    for spec in propspec:
        if ':' not in spec:
            raise Exception("property specification '%s' does not contain "
                            "property list" % spec)
        type_name, prop_names = spec.split(':', 1)
        managed_type = getattr(vim, type_name, None)
        if managed_type is None:
            raise Exception("referenced type '%s' in property specification "
                            "does not exist,\nconsult the managed object type "
                            "reference in the vSphere API documentation" % type_name)
        parsed.append((managed_type, prop_names.split(','),))
    return parsed
def make_wait_options(max_wait_seconds=None, max_object_updates=None):
    """Build a PropertyCollector.WaitOptions, setting only the limits given."""
    options = vmodl.query.PropertyCollector.WaitOptions()
    if max_wait_seconds is not None:
        options.maxWaitSeconds = max_wait_seconds
    if max_object_updates is not None:
        options.maxObjectUpdates = max_object_updates
    return options
def make_property_collector(prop_collector, from_node, props):
    """
    Create and register a PropertyCollector filter watching *props* under *from_node*.

    The filter is destroyed automatically at interpreter exit via atexit.

    :type prop_collector: pyVmomi.VmomiSupport.vmodl.query.PropertyCollector
    :type from_node: pyVmomi.VmomiSupport.ManagedObject
    :type props: collections.Sequence
    :rtype: pyVmomi.VmomiSupport.vmodl.query.PropertyCollector.Filter
    """
    # Make the filter spec
    filter_spec = vmodl.query.PropertyCollector.FilterSpec()
    # Make the object spec: traverse the whole inventory below from_node
    traversal = serviceutil.build_full_traversal()
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(obj=from_node, selectSet=traversal)
    obj_specs = [obj_spec]
    filter_spec.objectSet = obj_specs
    # Add the property specs: one PropertySpec per (type, properties) pair
    prop_set = []
    for motype, proplist in props:
        prop_spec = \
            vmodl.query.PropertyCollector.PropertySpec(type=motype, all=False)
        prop_spec.pathSet.extend(proplist)
        prop_set.append(prop_spec)
    filter_spec.propSet = prop_set
    try:
        # partialUpdates=True: deliver updates as they trickle in
        pc_filter = prop_collector.CreateFilter(filter_spec, True)
        atexit.register(pc_filter.Destroy)
        return pc_filter
    except vmodl.MethodFault as ex:
        if ex._wsdlName == 'InvalidProperty':
            print("InvalidProperty fault while creating PropertyCollector filter : %s"
                  % ex.name, file=sys.stderr)
        else:
            print("Problem creating PropertyCollector filter : %s"
                  % str(ex.faultMessage), file=sys.stderr)
        raise
def monitor_property_changes(si, propspec, iterations=None):
    """
    Poll the PropertyCollector and print property changes as they arrive.

    :type si: pyVmomi.VmomiSupport.vim.ServiceInstance
    :type propspec: collections.abc.Sequence
    :type iterations: int or None -- number of update batches to process
        before returning; ``None`` means run until interrupted.
    """
    prop_collector = si.content.propertyCollector
    make_property_collector(prop_collector, si.content.rootFolder, propspec)
    # Block for at most 30 seconds per server round-trip.
    waitopts = make_wait_options(30)
    version = ''
    while True:
        if iterations is not None:
            if iterations <= 0:
                print('Iteration limit reached, monitoring stopped')
                break
        result = prop_collector.WaitForUpdatesEx(version, waitopts)
        # timeout, call again
        if result is None:
            continue
        # process results
        for filter_set in result.filterSet:
            for object_set in filter_set.objectSet:
                moref = getattr(object_set, 'obj', None)
                assert moref is not None, \
                    'object moref should always be present in objectSet'
                moref = str(moref).strip('\'')
                kind = getattr(object_set, 'kind', None)
                assert (
                    kind is not None
                    and kind in ('enter', 'modify', 'leave',)), \
                    'objectSet kind must be valid'
                if kind in ('enter', 'modify'):
                    change_set = getattr(object_set, 'changeSet', None)
                    # BUGFIX: collections.Sequence was removed in Python 3.10;
                    # the ABC lives in collections.abc.
                    assert (change_set is not None
                            and isinstance(change_set, collections.abc.Sequence)
                            and len(change_set) > 0), \
                        'enter or modify objectSet should have non-empty changeSet'
                    changes = []
                    for change in change_set:
                        name = getattr(change, 'name', None)
                        assert (name is not None), \
                            'changeset should contain property name'
                        val = getattr(change, 'val', None)
                        changes.append((name, val,))
                    print("== %s ==" % moref)
                    print('\n'.join(['%s: %s' % (n, v,) for n, v in changes]))
                    print('\n')
                elif kind == 'leave':
                    print("== %s ==" % moref)
                    print('(removed)\n')
        # Remember the server-side version so only new updates are returned.
        version = result.version
        if iterations is not None:
            iterations -= 1
def main():
    """
    Sample Python program for monitoring property changes to objects of
    one or more types to stdout
    """
    parser = cli.Parser()
    parser.set_epilog("""
Example usage:
waitforupdates.py -k -s vcenter -u root -p vmware -i 1 -P
VirtualMachine:name,summary.config.numCpu,runtime.powerState,config.uuid -P
-P Datacenter:name -- This will fetch and print a few VM properties and the
name of the datacenters
""")
    parser.add_custom_argument('--iterations', type=int, default=None,
                               action='store',
                               help="""
The number of updates to receive before exiting
, default is no limit. Must be 1 or more if specified.
""")
    parser.add_custom_argument('--propspec', dest='propspec', required=True,
                               action='append',
                               help='Property specifications to monitor, e.g. '
                                    'VirtualMachine:name,summary.config. Repetition '
                                    'permitted')
    args = parser.get_args()
    # --iterations, when supplied, must be a positive count.
    if args.iterations is not None and args.iterations < 1:
        parser.print_help()
        print('\nInvalid argument: Iteration count must be omitted or greater than 0',
              file=sys.stderr)
        sys.exit(-1)
    try:
        si = service_instance.connect(args)
        propspec = parse_propspec(args.propspec)
        print("Monitoring property changes. Press ^C to exit")
        monitor_property_changes(si, propspec, args.iterations)
    except vmodl.MethodFault as ex:
        print("Caught vmodl fault :\n%s" % str(ex), file=sys.stderr)
    except Exception as ex:
        print("Caught exception : " + str(ex), file=sys.stderr)
if __name__ == '__main__':
    # Exit 0 on success or Ctrl-C; other exceptions are reported on stderr.
    try:
        main()
        sys.exit(0)
    except Exception as ex:
        print("Caught exception : " + str(ex), file=sys.stderr)
    except KeyboardInterrupt:
        # KeyboardInterrupt derives from BaseException, so the Exception
        # clause above does not swallow it.
        print("Exiting", file=sys.stderr)
        sys.exit(0)
# vim: set ts=4 sw=4 expandtab filetype=python:
| 33.857143 | 91 | 0.599879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,221 | 0.388306 |
8c021ebbd6408b7ecf94c20aa0fa0bab32c3828d | 1,159 | py | Python | src/tests/cli/hypergol_create_test_case.py | hypergol/hypergol | 0beee71c8f72d517ef376030baff9c840a2f7eeb | [
"MIT"
] | 49 | 2020-07-09T10:22:25.000Z | 2022-02-21T16:55:34.000Z | src/tests/cli/hypergol_create_test_case.py | hypergol/hypergol | 0beee71c8f72d517ef376030baff9c840a2f7eeb | [
"MIT"
] | 16 | 2020-08-18T17:06:05.000Z | 2022-02-19T16:30:04.000Z | src/tests/cli/hypergol_create_test_case.py | hypergol/hypergol | 0beee71c8f72d517ef376030baff9c840a2f7eeb | [
"MIT"
] | 3 | 2020-07-16T08:42:09.000Z | 2021-03-06T15:09:13.000Z | import os
import glob
from pathlib import Path
from unittest import TestCase
from hypergol.name_string import NameString
def delete_if_exists(filePath):
if os.path.exists(filePath):
if os.path.isdir(filePath):
os.rmdir(filePath)
else:
os.remove(filePath)
class HypergolCreateTestCase(TestCase):
    """Base TestCase for project-creation tests.

    Subclasses register the paths a test is expected to create in
    ``self.allPaths``; they are removed before and after every test.
    """

    def __init__(self, projectName, methodName):
        super(HypergolCreateTestCase, self).__init__(methodName=methodName)
        self.projectName = projectName
        # Snake-case project directory derived from the project name.
        self.projectDirectory = NameString(self.projectName).asSnake
        # Paths to clean up around each test; filled in by subclasses.
        self.allPaths = []

    def clean_up(self):
        # Delete every tracked path; deleting a non-empty directory raises
        # OSError, in which case its (unexpected) contents are removed first.
        for filePath in self.allPaths:
            try:
                delete_if_exists(filePath)
            except OSError:
                for unexpectedFilePath in glob.glob(str(Path(filePath, '*'))):
                    print(f'deleting unexpected files {unexpectedFilePath}')
                    delete_if_exists(unexpectedFilePath)
                delete_if_exists(filePath)

    def setUp(self):
        super().setUp()
        self.clean_up()

    def tearDown(self):
        super().tearDown()
        self.clean_up()
| 27.595238 | 78 | 0.633305 | 854 | 0.736842 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.044866 |
8c05b81bf47ea60ca797bd79f44d7a048d6eca52 | 223 | py | Python | examples/load_from_file/main.py | viniciuschiele/configd | f9b405cd2254f79152a13e57e2be907550ef83ed | [
"MIT"
] | 3 | 2017-03-17T15:44:26.000Z | 2021-08-31T02:45:52.000Z | examples/load_from_file/main.py | viniciuschiele/configd | f9b405cd2254f79152a13e57e2be907550ef83ed | [
"MIT"
] | 1 | 2017-07-28T23:20:15.000Z | 2017-07-29T11:13:49.000Z | examples/load_from_file/main.py | viniciuschiele/configd | f9b405cd2254f79152a13e57e2be907550ef83ed | [
"MIT"
] | 2 | 2021-06-26T20:57:24.000Z | 2021-11-21T19:29:39.000Z | from central.config.file import FileConfig
# Load the JSON-backed configuration and print a few values.
config = FileConfig('config.json')
config.load()
# Top-level key lookup.
print(config.get('timeout'))
# Nested sections can be fetched whole or via dotted paths.
print(config.get('database'))
print(config.get('database.host'))
print(config.get('database.port'))
| 22.3 | 42 | 0.753363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.278027 |
8c08b48a4d80e079c5239a8398068125f720f5ce | 2,637 | py | Python | fbapp/views.py | shashank-sharma/facebook-comments | bed74d488cfbada00445d840027de89e5f005c84 | [
"MIT"
] | null | null | null | fbapp/views.py | shashank-sharma/facebook-comments | bed74d488cfbada00445d840027de89e5f005c84 | [
"MIT"
] | 3 | 2017-08-14T11:21:14.000Z | 2017-08-26T12:57:33.000Z | fbapp/views.py | shashank-sharma/facebook-comments | bed74d488cfbada00445d840027de89e5f005c84 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from getpage import *
from fbapp.models import Search, Clap
from django.http import Http404, HttpResponse
import json
# Create your views here.
def clap(request):
    """Persist the clap counter sent by an AJAX request (singleton Clap row)."""
    if not request.is_ajax():
        raise Http404
    keyword = request.GET['keyword']
    rows = Clap.objects.all()
    if len(rows) == 0:
        # First clap ever: create the singleton row.
        row = Clap(clap=int(keyword))
        row.save()
        clapCount = int(keyword)
    else:
        # len() above evaluated the queryset, so indexing hits the cache.
        row = rows[0]
        row.clap = int(keyword)
        row.save()
    return HttpResponse({}, content_type="application/json")
def getclap(request):
    """Return the stored clap counter (0 if none yet) as JSON; AJAX only."""
    if request.is_ajax():
        clap = Clap.objects.all()
        if(len(clap) == 0):
            clap = 0
        else:
            clap = clap[0].clap
        data = json.dumps(clap)
        print(data)
        return HttpResponse(data, content_type = "application/json")
    else:
        raise Http404
def home(request):
    """Render the landing page."""
    return render(request, 'home.html', {})
def getuser(request):
    """Resolve a Facebook user for an AJAX query and remember the search term.

    Returns the user payload as JSON; non-AJAX requests get a 404.
    """
    if request.is_ajax():
        keyword = request.GET['keyword']
        try:
            user = getFacebookUser(keyword)
        # BUGFIX: narrowed from a bare 'except:' which also swallowed
        # SystemExit and KeyboardInterrupt.
        except Exception:
            user = 'no'
        if user != 'no':
            # Record the successful search term once.
            searchModel = Search.objects.filter(user = keyword)
            if(not searchModel):
                searchModel = Search(user = keyword)
                searchModel.save()
        data = json.dumps(user)
        return HttpResponse(data, content_type = "application/json")
    else:
        raise Http404
def getpost(request):
    """Return a user's post as JSON [message, image, post id]; AJAX only."""
    if request.is_ajax():
        userid = request.GET['keyword']
        message, image, postid = getFacebookPost(userid)
        data = json.dumps([message, image, postid])
        return HttpResponse(data, content_type = "application/json")
    else:
        raise Http404
def getcomments(request):
    """Return comment count and summary for a post as JSON; AJAX only."""
    if not request.is_ajax():
        raise Http404
    post_id = request.GET['keyword']
    number, summary = getFacebookComments(post_id)
    payload = json.dumps([number, summary])
    return HttpResponse(payload, content_type="application/json")
def getreplies(request):
    """Return replies, likes and users for a comment as JSON; AJAX only."""
    if request.is_ajax():
        commentid = request.GET['keyword']
        replies, likes, users = getFacebookReplies(commentid)
        data = json.dumps([replies, likes, users])
        return HttpResponse(data, content_type = "application/json")
    else:
        raise Http404
def suggest(request):
    """Return the de-duplicated list of previously searched users; AJAX only."""
    if not request.is_ajax():
        raise Http404
    names = [row['user'] for row in Search.objects.all().values()]
    # De-duplicate (order is not preserved, as before).
    names = list(set(names))
    print(names)
    data = json.dumps(names)
    return HttpResponse(data, content_type="application/json")
| 27.185567 | 68 | 0.630641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.083807 |
8c08c4a6650e9ba67c915b77a4490106eafe823c | 1,017 | py | Python | Text_B_ocr_crnn_model_file/crnn/network_dnn.py | HAIbingshuai/chinese_ocr | 36c06226b3762b2e516427579f2c2614770e60ae | [
"MIT"
] | 21 | 2019-11-16T15:12:17.000Z | 2022-02-24T03:08:44.000Z | Text_B_ocr_crnn_model_file/crnn/network_dnn.py | HAIbingshuai/chinese_ocr | 36c06226b3762b2e516427579f2c2614770e60ae | [
"MIT"
] | 1 | 2019-12-03T00:41:09.000Z | 2020-10-16T13:46:11.000Z | Text_B_ocr_crnn_model_file/crnn/network_dnn.py | HAIbingshuai/chinese_ocr | 36c06226b3762b2e516427579f2c2614770e60ae | [
"MIT"
] | 3 | 2019-12-10T02:43:20.000Z | 2021-05-12T07:31:41.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import cv2
from Text_B_ocr_crnn_model_file.crnn.util import resizeNormalize, strLabelConverter
class CRNN:
    """Thin wrapper around an OpenCV-DNN CRNN text-recognition model."""

    def __init__(self, alphabet=None):
        # Character set used by strLabelConverter to decode network output.
        self.alphabet = alphabet

    def load_weights(self, path):
        # Loads a TensorFlow frozen graph (.pb) plus its .pbtxt config,
        # which is expected to sit next to it with the same stem.
        ocrPath = path
        ocrPathtxt = path.replace('.pb', '.pbtxt')
        self.model = cv2.dnn.readNetFromTensorflow(ocrPath, ocrPathtxt)

    def predict(self, image):
        """Run recognition on a single line image and return the decoded text."""
        # resizeNormalize(image, 32): 32 is presumably the target height -- confirm.
        image = resizeNormalize(image, 32)
        image = image.astype(np.float32)
        # Wrap into a (1, 1, H, W) batch for the network.
        image = np.array([[image]])
        self.model.setInput(image)
        preds = self.model.forward()
        # NCHW -> NHWC, then drop the batch dimension.
        preds = preds.transpose(0, 2, 3, 1)
        preds = preds[0]
        # Greedy decoding: best class index per time step.
        preds = np.argmax(preds, axis=2).reshape((-1,))
        raw = strLabelConverter(preds, self.alphabet)
        return raw

    def predict_job(self, boxes):
        # Fill each box dict's 'text' field from its 'img' crop.
        n = len(boxes)
        for i in range(n):
            boxes[i]['text'] = self.predict(boxes[i]['img'])
        return boxes
| 29.057143 | 83 | 0.613569 | 852 | 0.837758 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.067847 |
8c094ff4339e6253f78275942a72ff3cc8c9f8a5 | 1,724 | py | Python | venta/admin.py | darkdrei/Inventario | dc2dcc830be5a49ba602c242d8c7d5d9c24c7b5c | [
"MIT"
] | null | null | null | venta/admin.py | darkdrei/Inventario | dc2dcc830be5a49ba602c242d8c7d5d9c24c7b5c | [
"MIT"
] | null | null | null | venta/admin.py | darkdrei/Inventario | dc2dcc830be5a49ba602c242d8c7d5d9c24c7b5c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
import models
import forms
from inventario import models as inventario
# Register your models here.
class DetalleInline(admin.StackedInline):
    """Inline editor for invoice line items (Detalle) on the Factura page."""
    model = models.Detalle
    form = forms.DetalleForm
    # Show one extra empty line-item form by default.
    extra = 1
# end class
class FacturaAdmin(admin.ModelAdmin):
    """Admin for invoices: updates stock and recomputes the invoice total on save."""
    list_display = ['comprador','fecha','subtotal','iva','impoconsumo','total','creador','paga']
    search_fields = ['comprador','fecha','subtotal','iva','impoconsumo','total','creador','paga']
    form = forms.FacturaForm
    inlines = [DetalleInline,]
    icon = '<i class="material-icons">receipt</i>'

    def save_model(self, request, obj, form, change):
        # Persist the invoice first, then walk its line items.
        obj.save()
        total = 0
        for s in models.Detalle.objects.filter(factura__id=obj.id):
            articulo = inventario.Activo.objects.filter(id=s.articulo.id).first()
            # Clamp the requested quantity to the available stock.
            # NOTE(review): the clamped value is never persisted (no s.save())
            # -- confirm this is intended.
            if s.cantidad > s.articulo.existencias:
                s.cantidad = s.articulo.existencias
            if articulo:
                articulo.existencias = articulo.existencias - s.cantidad
                articulo.save()
                # BUGFIX: accumulate line amounts; plain '=' kept only the
                # value of the last line item.
                total += s.cantidad * articulo.precio_venta
        obj.total = total
        obj.save()
# end if
#end class
class DetalleAdmin(admin.ModelAdmin):
    """Standalone admin for invoice line items."""
    list_display = ['factura','articulo','cantidad','valor_unitario','total']
    search_fields = ['factura','articulo','cantidad','valor_unitario','total']
    form = forms.DetalleForm
    icon = '<i class="material-icons">assignment</i>'
#end class
# Expose both models in the Django admin with their customized ModelAdmins.
admin.site.register(models.Factura, FacturaAdmin)
admin.site.register(models.Detalle, DetalleAdmin)
| 32.528302 | 97 | 0.654872 | 1,383 | 0.802204 | 0 | 0 | 0 | 0 | 0 | 0 | 434 | 0.25174 |
8c09cfaddc4d9d32bda49a28af781c40e36a6a23 | 83 | py | Python | Recipes/rcps/apps.py | ADKosm/Recipes | 39b81091d8c5b91eeabdb99412c437a62ca5e1aa | [
"MIT"
] | null | null | null | Recipes/rcps/apps.py | ADKosm/Recipes | 39b81091d8c5b91eeabdb99412c437a62ca5e1aa | [
"MIT"
] | 1 | 2016-12-17T07:08:16.000Z | 2016-12-17T07:08:16.000Z | Recipes/rcps/apps.py | ADKosm/Recipes | 39b81091d8c5b91eeabdb99412c437a62ca5e1aa | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class RcpsConfig(AppConfig):
    """Django application configuration for the ``rcps`` app."""
    name = 'rcps'
| 13.833333 | 33 | 0.73494 | 46 | 0.554217 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.072289 |
8c0a58b707f6e1b5cb769898d9e99e268749f4f1 | 1,324 | py | Python | tests/test_dotdict.py | datakortet/dkbuild-apacheconf | 740bed45a33631144c967af5e5fc4288b33d537d | [
"Apache-2.0"
] | null | null | null | tests/test_dotdict.py | datakortet/dkbuild-apacheconf | 740bed45a33631144c967af5e5fc4288b33d537d | [
"Apache-2.0"
] | 1 | 2020-05-06T13:51:24.000Z | 2020-05-10T10:46:46.000Z | tests/test_dotdict.py | datakortet/dkbuild-apacheconf | 740bed45a33631144c967af5e5fc4288b33d537d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import textwrap
import pytest
from dkbuild_apacheconf.dotdict import dotdict
def test_add_depth1():
    # A plain (dot-free) key is stored as a single top-level entry.
    dd = dotdict()
    dd['hello'] = 42
    print(dd)
    assert dd.ctx == { 'hello': 42 }
def test_add_depth2():
    # A dotted key creates one level of nesting in the backing dict.
    dd = dotdict()
    dd['hello.world'] = 42
    print(dd)
    assert dd.ctx == {
        'hello': {
            'world': 42
        }
    }
def test_add_depth3():
    # Sibling dotted keys share the common nested prefix.
    dd = dotdict()
    dd['hello.beautiful.world'] = 42
    dd['hello.beautiful.moon'] = 43
    assert dd.ctx == {
        'hello': {
            'beautiful': {
                'world': 42,
                'moon': 43
            }
        }
    }
def test_add_err():
    # Non-string keys are rejected with a TypeError.
    dd = dotdict()
    with pytest.raises(TypeError):
        dd[42] = 'hello world'
def test_get():
    # Both item access and .get() resolve dotted keys.
    dd = dotdict()
    dd['hello.world'] = 42
    assert dd['hello.world'] == 42
    assert dd.get('hello.world') == 42
def test_get_default():
    # .get() honours an explicit default and otherwise returns None.
    dd = dotdict()
    assert dd.get('hello.world', 42) == 42
    assert dd.get('hello.world') is None
def test_serialization():
    # str() renders the nested dict; repr() renders the flat dotted keys.
    dd = dotdict()
    dd['hello.world'] = 42
    assert str(dd) == textwrap.dedent("""\
    <dotdict {
        "hello": {
            "world": 42
        }
    }>""")
    assert repr(dd) == textwrap.dedent("""\
    <dotdict {
        "hello.world": 42
    }>""")
| 18.136986 | 46 | 0.508308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 368 | 0.277946 |
8c0a62c8b3bc8944696a2c3153f41db04a0511f7 | 1,056 | py | Python | EM_GUI/parser5(void).py | AmirKavousi/EMspice | 6799981783afa410b39f5227e7c435d106c61e6c | [
"BSD-3-Clause"
] | 2 | 2020-07-15T21:20:32.000Z | 2021-11-24T08:26:19.000Z | EM_GUI/parser5(void).py | AmirKavousi/EMspice | 6799981783afa410b39f5227e7c435d106c61e6c | [
"BSD-3-Clause"
] | 1 | 2020-11-30T06:33:23.000Z | 2020-11-30T06:33:23.000Z | EM_GUI/parser5(void).py | AmirKavousi/EMspice | 6799981783afa410b39f5227e7c435d106c61e6c | [
"BSD-3-Clause"
] | 3 | 2020-07-03T19:49:36.000Z | 2021-04-07T20:32:16.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Convert the resistor ('R') records of the void report into a compact
# whitespace-separated table: layer/tree/branch ids, node coordinates
# (divided by 1000 -- presumably nm -> um, confirm) and the branch value.
# BUGFIX: files are now closed deterministically via context managers and
# the unused 'count' accumulator was dropped.
with open('data/u_Lvoid_20.txt', encoding='utf-8') as file1, \
        open('temp2/void.txt', 'w', encoding='utf-8') as file2:
    for line in file1:
        if not line.startswith('R'):
            continue
        line_list = line.split()
        # 'R<layer>-<tree>-<branch>' -> [layer_id, tree_id, branch_id]
        branch = line_list[0].split('-')
        branch[0] = branch[0].split('R')[1]
        for i in range(3):
            file2.write(str(branch[i]))
            file2.write(' ')
        # Node name 'n1_<x>_<y>': emit the two coordinates scaled by 1/1000.
        branch1 = line_list[1].split('_')
        for i in range(2):
            file2.write(str(int(branch1[i + 1]) / 1000))
            file2.write(' ')
        # Strip the 'um' unit suffix from the value column.
        a = float(line_list[3].split('um')[0])
        file2.write(str(a))
        file2.write('\n')
| 34.064516 | 133 | 0.578598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.395833 |
8c0deaf91737a4e2d3e5ee024b0d61a03cfcd0d1 | 5,230 | py | Python | ebi_eva_common_pyutils/variation/contig_utils.py | itsroops/eva-common-pyutils | 4226e3800a95e8a50ff2735792382fe1a6ca4efc | [
"Apache-2.0"
] | null | null | null | ebi_eva_common_pyutils/variation/contig_utils.py | itsroops/eva-common-pyutils | 4226e3800a95e8a50ff2735792382fe1a6ca4efc | [
"Apache-2.0"
] | 4 | 2020-09-11T12:02:49.000Z | 2021-06-23T14:44:36.000Z | ebi_eva_common_pyutils/variation/contig_utils.py | itsroops/eva-common-pyutils | 4226e3800a95e8a50ff2735792382fe1a6ca4efc | [
"Apache-2.0"
] | 6 | 2020-07-18T21:06:10.000Z | 2022-01-11T08:00:26.000Z | # Copyright 2020 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import requests
from retry import retry
# TODO: Might be a good idea to re-visit this after a production implementation
# of the contig-alias resolution project is available
@retry(tries=10, delay=5, backoff=1.2, jitter=(1, 3))
def resolve_contig_accession_to_chromosome_name(contig_accession, line_limit=100):
    """
    Given a Genbank contig accession, get the corresponding chromosome name from the ENA Text API
    which returns results in a EMBL Flatfile format
    :param contig_accession: Genbank contig accession (ex: CM003032.1)
    :param line_limit: number of lines to parse in the EMBL Flatfile result to find the chromosome before giving up
    :return: Chromosome name (ex: 12 when given an accession CM003032.1), or '' when not found
    """
    ENA_TEXT_API_URL = "https://www.ebi.ac.uk/ena/browser/api/text/{0}?lineLimit={1}&annotationOnly=true"
    response = requests.get(ENA_TEXT_API_URL.format(contig_accession, line_limit))
    response_lines = response.content.decode("utf-8").split("\n")
    num_lines = len(response_lines)
    features_section_found, source_line_found = False, False
    chosen_response = []
    line_index = 0
    # Look for the "source" feature under the "Features" section in the text response
    while line_index < num_lines:
        line = response_lines[line_index]
        # Skip everything until the feature header ('FH Key ...') is seen.
        if not (features_section_found or line.lower().startswith("fh key")):
            line_index += 1
            continue
        features_section_found = True
        # Based on "Data item positions" described here, http://www.insdc.org/files/feature_table.html#3.4.2
        # the sixth character represents the start of the feature key
        if not (source_line_found or line[5:].lower().startswith("source")):
            line_index += 1
            continue
        source_line_found = True
        # Column 22 is where qualifiers ('/name="value"') start.
        if line[21:].startswith("/"):
            assembled_line = line.strip()
            line_index += 1
            # Assemble text spread across multiple lines until
            # we hit the next qualifier (starts with /) or the next section
            while line_index < num_lines and \
                    not (response_lines[line_index][21:].startswith("/")
                         or response_lines[line_index][5:6].strip() != ''):
                line = response_lines[line_index]
                assembled_line += " " + line[21:].strip()
                line_index += 1
            # Fall back to organelle in case of MT/Chloroplast accessions
            # and the reference notes in case of Linkage Group molecules
            chosen_response = re.findall('.*/chromosome=".+"', assembled_line) or \
                              re.findall('.*/organelle=".+"', assembled_line) or \
                              re.findall('.*/note=".+"', assembled_line)
            # If we have a response to give, no need to continue further
            # If the sixth character is not empty, we have reached the next feature, so no need to continue further
            if chosen_response or line[5:6].strip() != '':
                break
        else:
            line_index += 1
    if not chosen_response:
        return ""
    # The qualifier value sits between the first pair of double quotes.
    return str.split(chosen_response[0], '"')[1].strip()
def is_wgs_accession_format(contig_accession):
    """
    Check if a Genbank contig is part of WGS (Whole Genome Shotgun) sequence.

    :param contig_accession: Genbank contig accession (ex: CM003032.1)
    :return: True if the provided contig is in the WGS format
    """
    # WGS accessions look like 4 alphabetic characters followed by digits
    # (with an optional version dot), e.g. AABR07050911.1.
    prefix = contig_accession[:4]
    digits = contig_accession[4:].replace(".", "")
    return prefix.isalpha() and digits.isnumeric()
def get_chromosome_name_for_contig_accession(contig_accession):
    """
    Given a Genbank contig accession, get the corresponding chromosome name.

    :param contig_accession: Genbank contig accession (ex: CM003032.1)
    :return: chromosome name (ex: 12), '' when not found, None for WGS accessions
    """
    # Don't bother calling the ENA web service for WGS accessions: the API
    # would proceed to download the entire WGS dataset, which can run to
    # hundreds of MBs or even GBs. See e.g.
    # https://www.ebi.ac.uk/ena/browser/api/text/AABR07050911.1?lineLimit=100&annotationOnly=true
    if is_wgs_accession_format(contig_accession):
        return None
    # Retry with progressively larger line limits; mirrors the original
    # `a or b or c` (the last result is returned even when falsy).
    name = None
    for limit in (1000, 10000, 100000):
        name = resolve_contig_accession_to_chromosome_name(contig_accession, limit)
        if name:
            break
    return name
| 45.877193 | 115 | 0.68413 | 0 | 0 | 0 | 0 | 3,017 | 0.576864 | 0 | 0 | 2,753 | 0.526386 |
8c0e97f1acefec6e12b9b45266f9d330feb11b47 | 355 | py | Python | pswalker/sim/sim.py | ZLLentz/pswalker | 9642489e6add37906998f09cc88290bc95686ac9 | [
"BSD-3-Clause-LBNL"
] | null | null | null | pswalker/sim/sim.py | ZLLentz/pswalker | 9642489e6add37906998f09cc88290bc95686ac9 | [
"BSD-3-Clause-LBNL"
] | 8 | 2018-04-20T21:26:02.000Z | 2021-01-11T19:31:08.000Z | pswalker/sim/sim.py | ZLLentz/pswalker | 9642489e6add37906998f09cc88290bc95686ac9 | [
"BSD-3-Clause-LBNL"
] | 2 | 2018-06-26T00:09:27.000Z | 2018-11-29T19:40:04.000Z | """
Simulated device classes
"""
from ophyd.device import Device, Component
from .signal import FakeSignal
class SimDevice(Device):
    """
    Class to house components and methods common to all simulated devices.
    """
    # Simulated x/y/z positions, each backed by a FakeSignal starting at 0.
    sim_x = Component(FakeSignal, value=0)
    sim_y = Component(FakeSignal, value=0)
    sim_z = Component(FakeSignal, value=0)
| 22.1875 | 74 | 0.712676 | 244 | 0.687324 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.332394 |
8c11280fc4fa1a1f5cf0b54ef39c72d2dd65d7b4 | 7,824 | py | Python | clearKeys.py | kylephan/Utilities | 21b025a1cf71b4ced95243cc0cf1a8be4d316f5e | [
"MIT"
] | null | null | null | clearKeys.py | kylephan/Utilities | 21b025a1cf71b4ced95243cc0cf1a8be4d316f5e | [
"MIT"
] | null | null | null | clearKeys.py | kylephan/Utilities | 21b025a1cf71b4ced95243cc0cf1a8be4d316f5e | [
"MIT"
] | null | null | null | import maya.cmds as mc
def letsClear(*args):
    # Button callback: clear redundant keys on every selected object.
    # The 'all' and 'type' module globals are driven by the checkBox and
    # optionMenu callbacks defined below (they shadow Python builtins).
    obj = mc.ls(sl=True)
    if all == True:
        # "All Attribute" checked: sweep every translate/rotate channel.
        for o in obj:
            clearTX(o)
            clearTY(o)
            clearTZ(o)
            clearRX(o)
            clearRY(o)
            clearRZ(o)
    else:
        # Only the attribute currently chosen in the option menu.
        for o in obj:
            clear(o,type)
def allOn(*args):
    # checkBox on-callback: process all channels and grey out the menu.
    global all
    all = True
    mc.optionMenu(opt, e = True, enable = False)
def allOff(*args):
    # checkBox off-callback: re-enable the single-attribute menu.
    global all
    all = False
    mc.optionMenu(opt, e = True, enable = True)
def changeAttr(item):
    # optionMenu callback: translate the menu label into a Maya attribute
    # name. Intentionally writes the module-global 'type' (shadowing the
    # builtin) because letsClear() reads it; unknown labels reset it to ''.
    global type
    labels = {
        'Rotate X': 'rotateX',
        'Rotate Y': 'rotateY',
        'Rotate Z': 'rotateZ',
        'Translate X': 'translateX',
        'Translate Y': 'translateY',
        'Translate Z': 'translateZ',
    }
    type = labels.get(item, '')
def clear(o, type):
    # Delete keys on attribute 'type' of object 'o' whose value equals both
    # neighbours, i.e. keys in the middle of a flat section of the curve.
    # The first and last keys are always kept.
    count = mc.keyframe(o, query=True,attribute=type, keyframeCount = True)
    transX = mc.keyframe(o, query=True,attribute=type)
    c = 0
    emptyList = []
    while c < count:
        test = 0
        up = c + 1
        down = c - 1
        value = mc.keyframe(o, query=True,attribute = type, valueChange=True)
        if c == 0 or c == (len(value)-1):
            test = test + 1
        else:
            if value[c] == value[up] and value[c] == value[down]:
                emptyList.append(c)
        c = c + 1
        print value
    if len(emptyList) != 0:
        # Cut from the highest index downwards so earlier indices stay valid.
        emptyList.reverse()
        indexRange=[(index,) for index in emptyList]
        mc.cutKey(o, option = 'keys', attribute = type, index = indexRange)
def clearTX(o):
    # Identical to the generic clear() with attribute 'translateX'; the
    # duplicated 20-line body was collapsed into a delegation.
    clear(o, 'translateX')
def clearTY(o):
    # Identical to the generic clear() with attribute 'translateY'; the
    # duplicated 20-line body was collapsed into a delegation.
    clear(o, 'translateY')
def clearTZ(o):
    # Identical to the generic clear() with attribute 'translateZ'; the
    # duplicated 20-line body was collapsed into a delegation.
    clear(o, 'translateZ')
def clearRX(o):
    # Identical to the generic clear() with attribute 'rotateX'; the
    # duplicated 20-line body was collapsed into a delegation.
    clear(o, 'rotateX')
def clearRY(o):
    # Identical to the generic clear() with attribute 'rotateY'; the
    # duplicated 20-line body was collapsed into a delegation.
    clear(o, 'rotateY')
def clearRZ(o):
    # Identical to the generic clear() with attribute 'rotateZ'; the
    # duplicated 20-line body was collapsed into a delegation.
    clear(o, 'rotateZ')
# Build the "Delete Key" window: an attribute selector, an "All Attribute"
# toggle, and the button that triggers the clean-up. An existing instance
# of the window is deleted first so the script can be re-run.
windowID = 'deleteKeys'
if mc.window( windowID, exists = True):
    mc.deleteUI(windowID)
mc.window( windowID, title = 'Delete Key' )
mc.rowColumnLayout(numberOfColumns=3)
opt = mc.optionMenu( changeCommand = changeAttr)
mc.menuItem(label = 'Choose an attribute')
mc.menuItem(label = 'Rotate X')
mc.menuItem(label = 'Rotate Y')
mc.menuItem(label = 'Rotate Z')
mc.menuItem(label = 'Translate X')
mc.menuItem(label = 'Translate Y')
mc.menuItem(label = 'Translate Z')
mc.checkBox(label = 'All Attribute', onCommand = allOn, offCommand = allOff)
mc.button(label = 'Lets clear', command = letsClear)
mc.showWindow()
| 32.330579 | 107 | 0.484279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 569 | 0.072725 |
8c12046b606eeaab0b8c35a25777e9fe98b1e5b5 | 1,067 | py | Python | server.py | boyuhou/security-data | 31bd1f0e9577d46124512102c4c4bf995cf36f95 | [
"Apache-2.0"
] | null | null | null | server.py | boyuhou/security-data | 31bd1f0e9577d46124512102c4c4bf995cf36f95 | [
"Apache-2.0"
] | 3 | 2017-11-30T01:22:12.000Z | 2017-12-02T22:47:39.000Z | server.py | boyuhou/security-data | 31bd1f0e9577d46124512102c4c4bf995cf36f95 | [
"Apache-2.0"
] | null | null | null | import click
import logging
import datetime
import pandas as pd
from security_data import SecurityService
# Dates are passed on the command line as YYYYMMDD strings.
DATE_FORMAT = '%Y%m%d'
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %I:%M:%S')
logger = logging.getLogger(__name__)
@click.command()
@click.option('--input_file', default=r'.\config\FrogBoxList.csv', help='Trade data input folder')
# BUGFIX: the pd.datetime alias was deprecated in pandas 0.25 and removed
# in pandas 1.0 -- use the stdlib datetime module directly.
@click.option('--start_date', default=datetime.datetime.today().strftime(DATE_FORMAT))
def main(input_file, start_date):
    """Update intraday security data for every ticker listed in *input_file*."""
    logger.info('input file: {0}'.format(input_file))
    logger.info('start date: {0}'.format(start_date))
    # Extend the YYYYMMDD date to midnight before parsing.
    start_date = datetime.datetime.strptime(start_date+'000000', '%Y%m%d%H%M%S')
    ticker_list = get_ticker_list(input_file).tolist()
    security_service = SecurityService()
    # security_service.update_daily_data(ticker_list, start_date)
    security_service.update_intraday_data(ticker_list, start_date)
def get_ticker_list(input_file):
df = pd.read_csv(input_file)
return df['ticker']
if __name__ == '__main__':
main() | 34.419355 | 117 | 0.738519 | 0 | 0 | 0 | 0 | 648 | 0.60731 | 0 | 0 | 281 | 0.263355 |
8c1228dad2ddec0f512667f55e56e57c66f03a49 | 3,731 | py | Python | chemlib.py | nano-bio/fitlib | 299703dbc9ecafa528b965ccac538173801923b2 | [
"BSD-3-Clause"
] | null | null | null | chemlib.py | nano-bio/fitlib | 299703dbc9ecafa528b965ccac538173801923b2 | [
"BSD-3-Clause"
] | null | null | null | chemlib.py | nano-bio/fitlib | 299703dbc9ecafa528b965ccac538173801923b2 | [
"BSD-3-Clause"
] | null | null | null | from suds.client import Client
import suds
import time
import helplib as hl
#be aware that you need a chemspider_token.txt in the directory for the app to work
#the chemspider_token.txt should only contain the token (available online for free)
class ChemicalObject():
def __init__(self, name = '', cas = '', inchi = '', inchikey = '', csid = ''):
#first define the SOAP service for searching
searchurl = 'http://www.chemspider.com/Search.asmx?WSDL'
try:
self.searchclient = Client(searchurl)
except Exception as e:
print(e)
#define the soap service for inchi-conversion
inchiurl = 'http://www.chemspider.com/InChI.asmx?WSDL'
try:
self.inchiclient = Client(inchiurl)
except Exception as e:
print(e)
#set all properties to the ones from initiating call
self.cas = cas
self.inchi = inchi
self.inchikey = inchikey
self.name = name
self.csid = csid
#no transaction id for now
self.transaction_id = ''
#how quickly should we ask for results? in seconds
self.timetick = 0.2
self.maxtime = 15
#read chemspider token from config file 'chemspider_token'
try:
f = hl.openfile('chemspider_token.txt')
except IOError:
raise IOError
self.token = f.readline()
def complete(self):
"""Fills all other properties of an instance"""
#first retrieve the Chemspider ID
self.retrieve_csid()#
#fill up the other fields
self.fill_forms_with_csid()
def status(self):
#if we don't have a transaction id, we are free
if self.transaction_id != '':
return 'busy'
else:
return 'free'
def retrieve_csid(self):
#for what should we search?
if self.inchi != '':
searchterm = self.inchi
else:
searchterm = self.name
#it's a good idea to only search for ascii:
searchterm = searchterm.decode('utf8', 'replace').encode('ascii', 'replace')
#try connecting
try:
self.transaction_id = self.searchclient.service.AsyncSimpleSearch(searchterm, self.token)
except suds.WebFault as detail:
self.errorstatus = detail
self.transaction_id = ''
#don't run too long in the following loop
i = 0
#if successful we can check whether the results are already available
if self.transaction_id != '':
while self.searchclient.service.GetAsyncSearchStatus(self.transaction_id, self.token) != 'ResultReady':
#wait a little
time.sleep(self.timetick)
i = i + 1
if i > (self.maxtime / self.timetick):
print('No result, aborting')
break
#ready! the [0] is because it basically gives a list and we use the first one
result = self.searchclient.service.GetAsyncSearchResult(self.transaction_id, self.token)
if result != '':
self.csid = result[0][0]
#transaction over. set transaction id to empty for proper status displays
self.transaction_id = ''
def fill_forms_with_csid(self):
"""Retrieve all data from Chemspider service using a CS ID"""
if self.csid != '':
try:
tmp = self.searchclient.service.GetCompoundInfo(self.csid, self.token)
except suds.WebFault as detail:
print(detail)
self.inchi = tmp[1]
self.inchikey = tmp[2]
| 33.918182 | 115 | 0.582418 | 3,483 | 0.93353 | 0 | 0 | 0 | 0 | 0 | 0 | 1,226 | 0.328598 |
8c12a700a732ea2822524f81a70b50cd9e629e1d | 4,115 | py | Python | src/lib/hxPy/py/hxpy/hxpy/hxpy.py | jamesdgessel/hxpy_adjustments | 25010dd023bdc36cc08e1afc08ac16159aba5646 | [
"AFL-3.0"
] | 35 | 2021-05-04T18:50:16.000Z | 2022-02-27T05:15:35.000Z | src/lib/hxPy/py/hxpy/hxpy/hxpy.py | jamesdgessel/hxpy_adjustments | 25010dd023bdc36cc08e1afc08ac16159aba5646 | [
"AFL-3.0"
] | 7 | 2021-07-12T08:06:14.000Z | 2022-03-19T08:46:08.000Z | src/lib/hxPy/py/hxpy/hxpy/hxpy.py | jamesdgessel/hxpy_adjustments | 25010dd023bdc36cc08e1afc08ac16159aba5646 | [
"AFL-3.0"
] | 4 | 2021-05-11T18:53:16.000Z | 2022-03-17T04:15:48.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2021, SkyFoundry LLC
# Licensed under the Academic Free License version 3.0
#
# History:
# 23 Jul 2021 Matthew Giannini Creation
#
import socket
import struct
import traceback
from . import brio
from .haystack import Marker
from .haystack import GridBuilder
class HxPy:
"""HxPy"""
def __init__(self, address, api_key, timeout, log):
self._address = address
self._api_key = api_key
self._timeout = timeout
self._log = log
def __enter__(self):
self._log.debug(f'Listen on {self._address} with key {self._api_key}')
self._listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._listener.settimeout(self._timeout)
self._listener.bind(self._address)
self._listener.listen()
(self._socket, addr) = self._listener.accept()
self._log.debug(f'Accepted connection')
return self
def __exit__(self, type, value, traceback):
self._socket.close()
self._listener.close()
##########################################################
# Run
##########################################################
def run(self):
log = self._log
# handle auth packet
auth = self._recv_brio()
log.debug(f'Auth {auth}')
if auth["ver"] != "0":
raise self._fail_auth(f'Unsupported version {auth["ver"]}')
if auth["key"] != self._api_key:
raise self._fail_auth(f'Invalid api key')
self._send_brio({"ok": Marker()})
# handle instructions
local_vars = {}
try:
instrs = self._recv_brio()
while instrs:
for instr in instrs:
log.debug(f'process {instr}')
if "def" in instr:
self._define(instr, local_vars)
if "exec" in instr:
self._exec(instr, local_vars)
elif "eval" in instr:
result = self._eval(instr, local_vars)
self._send_brio(result)
log.debug(f'eval => {result}')
# for
instrs = self._recv_brio()
# while
except Exception as e:
g = GridBuilder().set_meta({'err': Marker(), 'errMsg': str(e), 'errTrace': traceback.format_exc()}).to_grid()
self._send_brio(g)
raise e
def _fail_auth(self, msg):
self._send_brio({"err": Marker(), "errMsg": msg})
return IOError(msg)
##########################################################
# Instructions
##########################################################
def _define(self, instr, local_vars):
name = instr["def"]
val = instr.get("v")
local_vars[name] = val
def _exec(self, instr, local_vars):
code = instr["exec"]
return exec(code, local_vars, local_vars)
def _eval(self, instr, local_vars):
expr = instr["eval"]
return eval(expr, local_vars, local_vars)
##########################################################
# IO Util
##########################################################
def _recv_brio(self):
frame = self._recv_frame()
if not frame:
return None
val = brio.NativeBrioReader(frame).read_val()
return val
def _recv_frame(self):
lenbuf = self._recvall(4)
if not lenbuf:
return None
# raise IOError('No frame data received. Remote server closed the connection')
frame_len, = struct.unpack('!I', lenbuf)
return self._recvall(frame_len)
def _recvall(self, count):
buf = bytearray()
while count:
newbuf = self._socket.recv(count)
if not newbuf:
return None
buf.extend(newbuf)
count -= len(newbuf)
return bytes(buf)
def _send_brio(self, val):
frame = brio.NativeBrioWriter.to_bytes(val)
self._socket.sendall(frame)
# HxPy
| 30.481481 | 121 | 0.509356 | 3,799 | 0.923208 | 0 | 0 | 0 | 0 | 0 | 0 | 966 | 0.234751 |
8c139b694a21d0428d056ef4776c840e99b1f37e | 10,727 | py | Python | util/legacypgsql.py | twonds/palaver | fcaa1884bc206e0aba7c88d9614e38b492c59285 | [
"MIT"
] | 4 | 2015-01-20T17:25:12.000Z | 2020-02-12T08:24:05.000Z | util/legacypgsql.py | twonds/palaver | fcaa1884bc206e0aba7c88d9614e38b492c59285 | [
"MIT"
] | 1 | 2016-01-27T16:13:18.000Z | 2016-01-27T19:11:21.000Z | util/legacypgsql.py | twonds/palaver | fcaa1884bc206e0aba7c88d9614e38b492c59285 | [
"MIT"
] | null | null | null | # Copyright (c) 2007 Christopher Zorn, OGG, LLC
# See LICENSE.txt for details
# Converts the legacy muc spool to the new dirDBM one
import sys
from twisted.words.xish import domish, xpath
from twisted.words.protocols.jabber import jid
from twisted.enterprise import adbapi
from palaver import palaver, pgsql_storage
from pyPgSQL import PgSQL
class RoomParser:
"""
A simple stream parser for configuration files.
"""
def __init__(self):
# Setup the parser
self.stream = domish.elementStream()
self.stream.DocumentStartEvent = self.onDocumentStart
self.stream.ElementEvent = self.onElement
self.stream.DocumentEndEvent = self.onDocumentEnd
self.hash = {}
self.files = {}
self.room = {}
def parse(self, file, room):
self.room = room
f = open(file)
buf = f.read()
f.close()
self.stream.parse(buf)
return self.room
def serialize(self, obj):
if isinstance(obj, domish.Element):
obj = obj.toXml()
return obj
def onDocumentStart(self, rootelem):
pass
def onElement(self, element):
if element.name == 'room':
for c in element.elements():
if c.name == 'name':
self.room['roomname'] = str(c)
elif c.name=='notice':
for n in c.elements():
self.room[n.name] = str(n)
else:
if str(c) == '0':
self.room[c.name] = False
elif str(c) == '1':
self.room[c.name] = True
else:
self.room[c.name] = str(c)
elif element.name == 'list':
if element.hasAttribute('xdbns'):
if element['xdbns'] == 'muc:list:owner':
for i in element.elements():
self.room['owner'].append(i['jid'])
elif element['xdbns'] == 'muc:list:admin':
for i in element.elements():
self.room['admin'].append(i['jid'])
elif element['xdbns'] == 'muc:list:member':
for i in element.elements():
self.room['member'].append(i['jid'])
elif element['xdbns'] == 'muc:list:outcast':
for i in element.elements():
self.room['outcast'].append(i['jid'])
def onDocumentEnd(self):
pass
def _reset(self):
# Setup the parser
self.stream = domish.elementStream()
self.stream.DocumentStartEvent = self.onDocumentStart
self.stream.ElementEvent = self.onElement
self.stream.DocumentEndEvent = self.onDocumentEnd
class RoomsParser(RoomParser):
def parse(self, file):
f = open(file)
buf = f.read()
f.close()
self.stream.parse(buf)
return self.hash, self.files
def onElement(self, element):
if element.name == 'registered':
for i in element.elements():
name = i.getAttribute('name')
j = i.getAttribute('jid')
njid = jid.JID(name)
room = unicode(njid.user)
file = jid.JID(j).user
self.files[room] = file
self.hash[room] = {}
self.hash[room]['name'] = room
self.hash[room]['roomname'] = room
self.hash[room]['subject'] = ''
self.hash[room]['subject_change']= True
self.hash[room]['persistent'] = True
self.hash[room]['moderated'] = False
self.hash[room]['private'] = True
self.hash[room]['history'] = 10
self.hash[room]['game'] = False
self.hash[room]['hidden'] = False
self.hash[room]['locked'] = False
self.hash[room]['subjectlocked'] = False
self.hash[room]['description'] = room
self.hash[room]['leave'] = ''
self.hash[room]['join'] = ''
self.hash[room]['rename'] = ''
self.hash[room]['maxusers'] = 30
self.hash[room]['privmsg'] = True
self.hash[room]['change_nick'] = True
self.hash[room]['owner'] = []
self.hash[room]['member'] = []
self.hash[room]['admin'] = []
self.hash[room]['outcast'] = []
self.hash[room]['roster'] = []
def fetch_user(cursor, user):
cursor.execute("""SELECT * FROM muc_users WHERE username = %s""",(user,))
return cursor.fetchone()
def create_user(cursor, user):
dbuser = fetch_user(cursor,user)
# TODO - add other values
if not dbuser:
cursor.execute("""INSERT INTO muc_users (username)
VALUES (%s)
""", (user,))
dbuser = fetch_user(cursor,user)
return dbuser
def do_room(conn, room, hostname):
cursor = conn.cursor()
cursor.execute("""INSERT INTO muc_rooms (name,
roomname,
subject,
subject_change,
persistent,
moderated,
private,
history,
game,
\"hidden\",
\"locked\",
subjectlocked,
description,
\"leave\",
\"join\",
rename,
maxusers,
privmsg,
change_nick,
hostname
)
VALUES (%s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s, %s)""" ,
(room['name'],
room['roomname'],
room['subject'],
room['subject_change'],
room['persistent'],
room['moderated'],
room['private'],
room['history'],
room['game'],
room['hidden'],
room['locked'],
room['subjectlocked'],
room['description'],
room['leave'],
room['join'],
room['rename'],
room['maxusers'],
room['privmsg'],
room['change_nick'],
hostname
))
cursor.execute("""SELECT * FROM muc_rooms WHERE name = %s AND hostname = %s""", (room['name'],hostname))
dbroom = cursor.fetchone()
cursor.close()
# do admins , members, owners, etc
for u in room['admin']:
cursor = conn.cursor()
# create a user if not in he user table
dbuser = create_user(cursor, u)
cursor.execute("""INSERT INTO muc_rooms_admins (user_id, room_id)
VALUES (%s, %s)
""", (dbuser[0],dbroom[0]))
cursor.close()
for u in room['member']:
cursor = conn.cursor()
# create a user if not in he user table
dbuser = create_user(cursor, u)
cursor.execute("""INSERT INTO muc_rooms_members (user_id, room_id)
VALUES (%s, %s)
""", (dbuser[0],dbroom[0]))
cursor.close()
for u in room['owner']:
cursor = conn.cursor()
# create a user if not in he user table
dbuser = create_user(cursor, u)
cursor.execute("""INSERT INTO muc_rooms_owners (user_id, room_id)
VALUES (%s, %s)
""", (dbuser[0],dbroom[0]))
cursor.close()
for u in room['outcast']:
cursor = conn.cursor()
# create a user if not in he user table
dbuser = create_user(cursor, u)
cursor.execute("""INSERT INTO muc_rooms_outcasts (user_id, room_id)
VALUES (%s, %s)
""", (dbuser[0],dbroom[0]))
cursor.close()
def main(sdir, conf):
print 'Convert : %s ' % sdir
# parse conf file
cf = None
p = palaver.ConfigParser()
cf = p.parse(conf)
config = {}
backend = getattr(cf.backend,'type',None)
if backend:
config['backend'] = str(backend)
if config['backend'] == 'pgsql':
user = getattr(cf.backend,'dbuser',None)
database = str(getattr(cf.backend,'dbname',''))
if getattr(cf.backend,'dbpass',None):
password = str(getattr(cf.backend,'dbpass',''))
else:
password = ''
if getattr(cf.backend,'dbhostname',None):
hostname = str(getattr(cf.backend,'dbhostname',''))
else:
hostname = ''
for elem in cf.elements():
if elem.name == 'name':
host = str(elem)
_dbpool = PgSQL.connect(
database=database,
user=user,
password=password,
dsn=hostname,
client_encoding='utf-8'
)
rsp = RoomsParser()
rp = RoomParser()
rooms, files = rsp.parse(sdir+'/rooms.xml')
for f in files:
r = files[f]
print sdir+'/'+str(r)+'.xml'
room = rp.parse(sdir+'/'+str(r)+'.xml',rooms[f])
do_room(_dbpool, room, host)
rp._reset()
_dbpool.commit()
if __name__ == '__main__':
if len(sys.argv)==2:
main(sys.argv[1])
elif len(sys.argv)==3:
main(sys.argv[1], sys.argv[2])
else:
print "Usage : %s <old spool dir> <palaver config file>\n" % sys.argv[0]
| 34.38141 | 108 | 0.43535 | 4,389 | 0.409154 | 0 | 0 | 0 | 0 | 0 | 0 | 3,336 | 0.310991 |
8c1485cf8e092dcb4f7018714e075d4893a7a7a6 | 11,425 | py | Python | gems/simple_args.py | Beatnukem/python-gems | fa9f2c66d969e8ca7d9913c772d0c4ea3e45f930 | [
"MIT"
] | null | null | null | gems/simple_args.py | Beatnukem/python-gems | fa9f2c66d969e8ca7d9913c772d0c4ea3e45f930 | [
"MIT"
] | null | null | null | gems/simple_args.py | Beatnukem/python-gems | fa9f2c66d969e8ca7d9913c772d0c4ea3e45f930 | [
"MIT"
] | null | null | null | # Copyright (c) 2018 Paul Reindell
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#############################################################
# Interface
#############################################################
from collections import namedtuple
# Command/Argument definition
Cmd = namedtuple('Command', 'desc cb args cmds', defaults=(None,None,None,None,))
Arg = namedtuple('Arg', 'name flags short desc default exmpl convert', defaults=(None,None,None,None,None,))
#Flags
OPTION = 0 # simple 'flag' argument; example: '--foo' or '-f'
VALUE = 1 # value argument; example: '--foo=value' '-f=value'
UNNAMED = 2 # unamed argument; example; 'foo'
REQUIRED = 4 # required argument; omitting argument will print the help text
# print help
def print_help(cmd_name, cmd):
_print_help(cmd, [cmd_name])
# execute command based on arguments
def exec_command(cmd_name, cmd, argv):
return _execute_command(cmd, argv[1:], [cmd_name])
#############################################################
# Implementation
#############################################################
_PrintCmd = namedtuple('PrintCmd', 'name text desc cmds args mla_name mla_text mla_short')
_PrintArg = namedtuple('PrintArg', 'name text short desc')
_PRE_UNAMED = 0
_PRE_SHORT = 1
_PRE_NAME = 2
def _execute_command(cmd, argv, commands):
help_args = ['help', '-help', '--help', '?']
if (len(argv) == 0 and not cmd.cb) or (len(argv) > 0 and argv[0] in help_args):
_print_help(cmd, commands)
if len(argv) == 0:
print('Error: Please specify Command!')
print('')
return -1
return 0
if cmd.cb:
args = {}
if cmd.args:
for x in range(0, len(argv)):
arg_name = argv[x]
pre = _PRE_UNAMED
if arg_name.find('--') == 0:
pre = _PRE_NAME
elif arg_name.find('-') == 0:
pre = _PRE_SHORT
found = False
for arg in cmd.args:
cc = arg_name[pre:].split('=')
if (pre == _PRE_NAME and arg.name == cc[0]) or (pre == _PRE_SHORT and arg.short == cc[0]) or (pre == _PRE_UNAMED and arg.flags & UNNAMED and arg.name not in args):
found = True
if arg.flags & VALUE or pre == _PRE_UNAMED:
idx = 0 if pre == _PRE_UNAMED else 1
val = ''.join(cc[idx:]) if len(cc) > idx else ''
if val == '':
_print_help(cmd, commands)
print('Error: Argument \'{}\': Expects to have a value!'.format(arg.name))
if arg.flags & UNNAMED:
print(' Example: {} <{}>'.format(' '.join(commands), arg.name))
else:
print(' Example: {} --{}=<{}>'.format(' '.join(commands), arg.name, arg.exmpl if arg.exmpl else 'foo'))
print('')
return -1
v_str = val.strip('\'')
if arg.convert:
try:
args[arg.name] = arg.convert(v_str)
except:
_print_help(cmd, commands)
print('Error: Argument \'{}\': Value not expected type!'.format(arg.name))
if arg.exmpl:
if arg.flags & UNNAMED:
print(' Example: {} <{}>'.format(' '.join(commands), arg.exmpl))
else:
print(' Example: {} --{}=<{}>'.format(' '.join(commands), arg.name, arg.exmpl))
print('')
return -1
else:
args[arg.name] = v_str
else:
args[arg.name] = True
break
if not found:
_print_help(cmd, commands)
print('Error: Argument \'{}\': Unknown Argument!'.format(arg_name))
print('')
return -1
for arg in cmd.args:
if not arg.name in args:
if arg.default is not None:
args[arg.name] = arg.default
elif arg.flags & REQUIRED:
_print_help(cmd, commands)
if arg.flags & UNNAMED:
print('Error: Argument \'{}\': Required Argument not set!'.format(arg.name))
print(' Example: {} <{}>'.format(' '.join(commands), arg.exmpl if arg.exmpl else arg.name))
print('')
else:
print('Error: Argument \'{}\': Required Argument not set!'.format(arg.name))
print(' Example: {} --{}=<{}>'.format(' '.join(commands), arg.name, arg.exmpl if arg.exmpl else 'foo'))
print('')
return -1
else:
args[arg.name] = None
res = cmd.cb(args)
return res if res else 0
if cmd.cmds:
if not argv[0] in cmd.cmds:
_print_help(cmd, commands)
print(' Error: Command \'{}\': Not a valid command!'.format(argv[0]))
print('')
return -1
commands.append(argv[0])
return _execute_command(cmd.cmds[argv[0]], argv[1:], commands)
return -2
def _print_help(cmd, commands, pre_len=0, post_len=0):
lines = []
n = _collect_help(cmd, commands, 0, 0, lines, 0)
for l in lines:
print('{}{}'.format(l[0].ljust(n), ' : {}'.format(l[1]) if l[1] else ''))
def _collect_help(cmd, commands, pre_len, post_len, lines, n):
if pre_len == 0:
prefix = ' '
else:
prefix = ''.ljust(pre_len)
names_args = []
unamed_args = []
arg_name_maxlen = 0
arg_text_maxlen = 0
arg_short_maxlen = 0
if cmd.cb:
if cmd.args:
for arg in cmd.args:
if arg.short:
arg_short = ' (-{})'.format(arg.short)
else:
arg_short = ''
if arg.flags & UNNAMED:
arg_text = '<{}>'.format(arg.name)
else:
arg_text = '--{}{}'.format(arg.name, '=<{}>'.format(arg.exmpl if arg.exmpl else 'foo') if arg.flags & VALUE else '')
if arg.default is not None:
arg_desc = '{} (default: {})'.format(arg.desc, arg.default)
elif arg.flags & REQUIRED:
arg_desc = arg.desc
else:
arg_desc = '{} (optional)'.format(arg.desc)
l = len(arg_text)
if l > arg_text_maxlen:
arg_text_maxlen = l
l = len(arg_short)
if l > arg_short_maxlen:
arg_short_maxlen = l
l = len(arg.name)
if l > arg_name_maxlen:
arg_name_maxlen = l
pa = _PrintArg(
name=arg.name,
text=arg_text,
short=arg_short,
desc=arg_desc)
if arg.flags & UNNAMED:
unamed_args.append(pa)
else:
names_args.append(pa)
cmd_text_maxlen = 0
cmdlist = []
if cmd.cmds:
for cmd_name in cmd.cmds:
cmdlist.append(cmd_name)
l = len(cmd_name)
if l > cmd_text_maxlen:
cmd_text_maxlen = l
if pre_len == 0:
cmd_name = ' '.join(commands).ljust(post_len)
#cmd_list_str = ' {{{}}}'.format('|'.join(cmdlist)) if cmd.cmds else ''
else:
cmd_name = commands[len(commands)-1].ljust(post_len)
#cmd_list_str = ' <Command>' if cmd.cmds else ''
cmd_text = '{}{}{}'.format(
#cmd_list_str,
' <Command>' if cmd.cmds else '',
' <Arguments>' if len(unamed_args) > 0 else '',
' [Options]' if len(names_args) > 0 else '')
cmd_desc = cmd.desc if cmd.desc else commands[len(commands)-1]
if pre_len == 0:
n = _add_line(lines, 'Usage:', None, n)
n = _add_line(lines, '{}{}{}'.format(
prefix,
cmd_name,
cmd_text),
cmd_desc, n)
if len(unamed_args) > 0 and pre_len == 0:
n = _add_line(lines, '', None, n)
n = _add_line(lines, 'Arguments:', None, n)
for arg in unamed_args:
n = _add_line(lines, '{}{}{}{}'.format(
prefix,
''.ljust(post_len + 1),
'{}'.format(arg.text).ljust(arg_text_maxlen),
'{}'.format(arg.short).ljust(arg_short_maxlen)),
arg.desc if arg.desc else arg.name, n)
if len(names_args) > 0 and pre_len == 0:
n = _add_line(lines, '', None, n)
n = _add_line(lines, 'Options:', None, n)
names_args = sorted(names_args, key=lambda x: x.name)
for arg in names_args:
n = _add_line(lines, '{}{}{}{}'.format(
prefix,
''.ljust(post_len + 1),
'{}'.format(arg.text).ljust(arg_text_maxlen),
'{}'.format(arg.short).ljust(arg_short_maxlen)),
arg.desc if arg.desc else arg.name, n)
if cmd.cmds:
if len(cmd.cmds) > 0 and pre_len == 0:
pre_len = 3
n = _add_line(lines, '', None, n)
n = _add_line(lines, 'Commands:', None, n)
else:
pre_len = pre_len + len(cmd_name) + 1
for cmd_name, cmd in cmd.cmds.items():
n = _collect_help(cmd, commands + [cmd_name], pre_len, cmd_text_maxlen, lines, n)
n = _add_line(lines, '', None, n)
elif pre_len == 0:
n = _add_line(lines, '', None, n)
return n
def _add_line(lines, ll, lr, n):
lines.append([ll, lr])
return max(n, len(ll))
| 39.261168 | 183 | 0.482451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,742 | 0.24 |
8c148fde88203756452378cb9e839f75242a5e32 | 16,209 | py | Python | teetool/visual_2d.py | sfo/teetool | 24fa8691449b733f9d07d9a8f96bd40b0e50c2a5 | [
"MIT"
] | 9 | 2017-04-26T13:08:07.000Z | 2021-01-29T08:52:15.000Z | teetool/visual_2d.py | sfo/teetool | 24fa8691449b733f9d07d9a8f96bd40b0e50c2a5 | [
"MIT"
] | 1 | 2017-04-12T22:31:41.000Z | 2017-04-14T16:05:30.000Z | teetool/visual_2d.py | sfo/teetool | 24fa8691449b733f9d07d9a8f96bd40b0e50c2a5 | [
"MIT"
] | 7 | 2016-10-25T13:48:52.000Z | 2022-02-27T06:38:40.000Z | ## @package teetool
# This module contains the Visual_2d class
#
# See Visual_2d class for more details
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
import teetool as tt
## Visual_2d class generates the 2d output using Matplotlib
#
# Even 3-dimensional trajectories can be output in 2d (sliced)
class Visual_2d(object):
## Constructor for Visual_2d
# @param self object pointer
# @param thisWorld World object, filled with trajectory data and models
# @param kwargs additional parameters for plt.figure()
def __init__(self, thisWorld, **kwargs):
"""
<description>
"""
## figure object
self._fig = plt.figure(facecolor="white", **kwargs)
## axis object
self._ax = self._fig.gca()
# set colour of axis
#self._ax.set_axis_bgcolor('white')
#self._ax.set_facecolor('white')
## World object
self._world = thisWorld
## Labels of plots
self._labels = []
## Plot mean of trajectories
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotMean(self, list_icluster=None, colour=None, **kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract data
clusters = self._world.getCluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, this_cluster) in enumerate(clusters):
# pass clusters
Y = this_cluster["model"].getMean()
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[list_icluster[i]],
**kwargs)
## Plot trajectories of cluster
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param ntraj maximum number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotTrajectories(self,
list_icluster=None,
ntraj=50,
colour=None,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract data
clusters = self._world.getCluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, this_cluster) in enumerate(clusters):
# pass clusters
for itraj, (x, Y) in enumerate(this_cluster["data"]):
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[i],
**kwargs)
# limit number of trajectories
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot trajectories of cluster
# @param self object pointer
# @param x1 point from [0,1] to visualise
# @param list_icluster list of clusters to plot
# @param ntraj maximum number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotTrajectoriesPoints(self,
x1,
list_icluster=None,
ntraj=50,
colour=None,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# obtain points
clustersP = self._world.getClusterPoints(x1, list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, A) in enumerate(clustersP):
# pass clusters
for itraj, a in enumerate(A):
a_line, = self._ax.plot(a[0],
a[1],
color=colours[i],
**kwargs)
# limit number of trajectories
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot time-series of trajectories
# @param self object pointer
# @param icluster select cluster to plot
# @param idim select dimension to plot
# @param ntraj maximum number of trajectories
    # @param colour specify colour of trajectories
# @param kwargs additional parameters for plotting
def plotTimeSeries(self, icluster=0, idim=0, ntraj=50,
colour='k', **kwargs):
# number of subplots, 2 or 3
ndim = self._world._ndim
# subplot
#f, axarr = plt.subplots(ndim, sharex=True)
# check validity
[icluster] = self._world._check_list_icluster([icluster])
# extract data
clusters = self._world.getCluster([icluster])
for (i, this_cluster) in enumerate(clusters):
# pass clusters
for itraj, (x, Y) in enumerate(this_cluster["data"]):
#for d in range(ndim):
x_norm = (x - x.min()) / (x.max() - x.min())
a_line, = self._ax.plot(x_norm,
Y[:,idim],
color=colour, **kwargs)
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot a box based on two coordinates
# @param self object pointer
# @param coord_lowerleft lower-left coordinate (x,y)
# @param coord_upperright upper-right coordinate (x,y)
# @param kwargs additional parameters for plotting
def plotBox(self, coord_lowerleft, coord_upperright, **kwargs):
x_lo = coord_lowerleft[0]
x_hi = coord_upperright[0]
y_lo = coord_lowerleft[1]
y_hi = coord_upperright[1]
coords = np.array([[x_lo, y_lo],
[x_hi, y_lo],
[x_hi, y_hi],
[x_lo, y_hi],
[x_lo, y_lo]])
coords_x = coords[:,0]
coords_y = coords[:,1]
self._ax.plot(coords_x, coords_y, **kwargs)
## standard plotting function for Matplotlib
# @param self object pointer
# @param args additional arguments for plotting
# @param kwargs additional labeled parameters for plotting
def plot(self, *args, **kwargs):
# plot
self._ax.plot(*args, **kwargs)
## Plot samples of model
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param ntraj number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotSamples(self, list_icluster=None, ntraj=50, colour=None, **kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, icluster) in enumerate(list_icluster):
these_samples = self._world.getSamples(icluster,
nsamples=ntraj)
for (x, Y) in these_samples:
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[i],
linestyle=":",
**kwargs)
self._labels.append((a_line, "samples"))
## Add legend to plot
# @param self object pointer
def plotLegend(self):
list_lines = []
list_label = []
for (a_line, a_label) in self._labels:
list_lines.append(a_line)
list_label.append(a_label)
plt.legend(handles=list_lines, labels=list_label)
## Plots a confidence region of variance sigma
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param sdwidth variance to evaluate
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
# @param resolution sets resolution for which to calculate the tube, can be a single integer, or an actual measurement [dim1 dim2] (2d) [dim1 dim2 dim3] (3d)
# @param colour if specified, overwrites distinct colours
# @param alpha opacity for the confidence region
# @param kwargs additional parameters for plotting
def plotTube(self,
list_icluster=None,
sdwidth=1,
z=None,
resolution=None,
colour=None,
alpha=.1,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract
(ss_list, [xx, yy, zz]) = self._world.getTube(list_icluster,
sdwidth,
z=z,
resolution=resolution)
# unique colours
lcolours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for i, ss1 in enumerate(ss_list):
#plt.contourf(xx, yy, 1.*ss1, levels=[-np.inf, 1., np.inf], colors=(lcolours[i],), alpha=alpha, **kwargs)
# plot an iso surface line
plt.contour(xx,
yy,
ss1,
levels=[.5],
colors=(lcolours[list_icluster[i]], 'w'),
**kwargs)
## Plots the difference confidence region of variance sigma for two models
# @param self object pointer
# @param list_icluster list of 2 clusters to compare
# @param sdwidth variance to evaluate
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
# @param resolution specify resolution of region
# @param colour if specified, overwrites distinct colours
# @param alpha opacity for the confidence region
# @param kwargs additional parameters for plotting
def plotTubeDifference(self,
list_icluster=None,
sdwidth=1,
z=None,
resolution=None,
colour=None,
alpha=.1,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract first two only!
list_icluster = list_icluster[:2]
# extract
(ss_list, [xx, yy, zz]) = self._world.getTube(list_icluster,
sdwidth, z=z,
resolution=resolution)
# to plot
ss_plot = - np.inf * np.ones_like(ss_list[0])
# 1 :: blocks added
ss_added = ((ss_list[0] - ss_list[1])==-1)
# 2 :: blocks removed
ss_removed = ((ss_list[0] - ss_list[1])==1)
# 3 :: present in both
ss_neutral = ((ss_list[0] + ss_list[1])==2)
ss_plot[ss_added] = 1.
ss_plot[ss_removed] = -1.
ss_plot[ss_neutral] = 0.
#plt.contourf(xx, yy, ss_plot, levels=[-np.inf, -1., 0., 1., np.inf], colors='none', hatches=['//', '.', '/'], **kwargs)
plt.contourf(xx,
yy,
ss_plot,
levels=[-np.inf, -1., 0., 1., np.inf],
colors=('r','b','g'),
alpha=alpha,
**kwargs)
for i in [1, 2, 3]:
if i == 1:
ss1 = 1.*ss_removed
color = 'r'
elif i == 2:
ss1 = 1.*ss_added
color = 'g'
elif i == 3:
ss1 = 1.*ss_neutral
color = 'b'
# plot an iso surface
plt.contour(xx, yy, ss1, levels=[0.5], colors=color)
## Plot the log-likehood of confidence regions -- which can be related to traffic complexity in the future
# @param self object pointer
# @param list_icluster list of clusters to compare
# @param pmin minimum value on a normalised scale
# @param pmax maximum value on a normalised scale
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
# @param resolution specify resolution of region
def plotLogLikelihood(self,
list_icluster=None,
pmin=0, pmax=1,
z=None,
resolution=None):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
(ss_list, [xx, yy, zz]) = self._world.getLogLikelihood(list_icluster,
resolution,
z)
ss = ss_list[0] # initialise
for ss1 in ss_list:
# find those greater
mask = np.greater(ss1, ss)
# replace
ss[mask] = ss1[mask]
# normalise
ss_norm = (ss - np.min(ss)) / (np.max(ss) - np.min(ss))
# plot contours
self._ax.pcolor(xx,
yy,
ss_norm,
cmap="viridis",
vmin=pmin,
vmax=pmax)
    def plotComplexityMap(self,
                          list_icluster=None,
                          complexity=1,
                          pmin=0, pmax=1,
                          z=None,
                          resolution=None, cmap1="Reds"):
        """Plot a normalised complexity map of the selected clusters.

        :param list_icluster: clusters to evaluate (None -> all)
        :param complexity: complexity measure forwarded to the world model
        :param pmin: lower colour limit on the normalised [0, 1] scale
        :param pmax: upper colour limit on the normalised [0, 1] scale
        :param z: altitude at which to evaluate 3d trajectories, if given
        :param resolution: grid resolution passed to the model
        :param cmap1: matplotlib colormap name
        :return: the pcolor mappable (can be passed to plotColourBar)
        """
        ss, xx, yy, zz = self._world.getComplexityMap(list_icluster,
                                                      complexity,
                                                      resolution,
                                                      z)
        # normalise
        ss_norm = (ss - np.min(ss)) / (np.max(ss) - np.min(ss))
        # plot contours
        cax = self._ax.pcolor(xx,
                              yy,
                              ss_norm,
                              cmap=cmap1,
                              vmin=pmin,
                              vmax=pmax)
        return cax
## add colorbar
    def plotColourBar(self, *args, **kwargs):
        """Attach a colour bar to the figure.

        All arguments are forwarded to matplotlib's ``Figure.colorbar``
        (typically the mappable returned by plotComplexityMap).

        :return: the created colorbar
        """
        cbar = self._fig.colorbar(*args, **kwargs)
        # horizontal colorbar
        # cbar.ax.set_xticklabels(['Low', 'Medium', 'High'])
        return cbar
## Plots the title or worldname
# @param self object pointer
def _plotTitle(self):
# add title
world_name = self._world.getName()
if not (world_name == None):
plt.title(world_name)
## saves the figure to a file in the output folder
# @param self object pointer
# @param add additional identifier for file
def save(self, add=None):
if (add==None):
saveas = self._world.getName()
else:
saveas = "{0}_{1}".format(self._world.getName(), add)
plt.savefig("output/2d_{0}.png".format(saveas))
## shows the figure (pop-up or inside notebook)
# @param self object pointer
    def show(self):
        """Show the figure (pop-up window, or inline inside a notebook)."""
        plt.show()
## closes all figures
# @param self object pointer
    def close(self):
        """Close all open matplotlib figures."""
        plt.close("all")
| 36.02 | 161 | 0.516256 | 15,862 | 0.978592 | 0 | 0 | 0 | 0 | 0 | 0 | 5,338 | 0.329323 |
8c16f46acf3c76645280b368240ee645781f645e | 248 | py | Python | Algorithms/2. Implementation/18 - Climbing the Leaderboard.py | rosiejh/HackerRank | bfb07b8add04d3f3b67a61754db483f88a79e5a5 | [
"Apache-2.0"
] | null | null | null | Algorithms/2. Implementation/18 - Climbing the Leaderboard.py | rosiejh/HackerRank | bfb07b8add04d3f3b67a61754db483f88a79e5a5 | [
"Apache-2.0"
] | null | null | null | Algorithms/2. Implementation/18 - Climbing the Leaderboard.py | rosiejh/HackerRank | bfb07b8add04d3f3b67a61754db483f88a79e5a5 | [
"Apache-2.0"
def climbingLeaderboard(scores, alice):
    """Return Alice's dense rank on the leaderboard after each of her games.

    Generalisation/bug fix: the original two-pointer version only moved its
    pointer downwards, so it silently required `alice` to be non-decreasing;
    binary search over the distinct scores is correct for any order and runs
    in O((n + m) log n).

    :param scores: existing leaderboard scores (any order, duplicates OK)
    :param alice: Alice's scores per game
    :return: list of dense ranks, one per entry of `alice`
    """
    from bisect import bisect_right
    unique = sorted(set(scores))  # ascending distinct scores
    # rank = 1 + number of distinct scores strictly greater than a
    return [len(unique) - bisect_right(unique, a) + 1 for a in alice]
8c16f8302199537857c9b354ec984440477f9e4f | 1,417 | py | Python | endsem/component_1/initial_parameters_estimator.py | maher460/cmu10601 | 108811a648aa5128a1e44e269a1e6e8802e3f100 | [
"MIT"
] | 1 | 2019-04-17T14:05:35.000Z | 2019-04-17T14:05:35.000Z | endsem/component_1/initial_parameters_estimator.py | maher460/cmu10601 | 108811a648aa5128a1e44e269a1e6e8802e3f100 | [
"MIT"
] | null | null | null | endsem/component_1/initial_parameters_estimator.py | maher460/cmu10601 | 108811a648aa5128a1e44e269a1e6e8802e3f100 | [
"MIT"
import kmeans
import json
import numpy as np

# Number of Gaussian mixture components to initialise.
NUM_GAUSSIANS = 32
# Recompute centroids with k-means instead of loading precomputed ones.
DO_KMEANS = False
DEBUG = True

# Uniform initial mixture weights, one per component.
mixture_weights = [1.0/NUM_GAUSSIANS] * NUM_GAUSSIANS
if DEBUG:
    print ("mixture_weights: ", mixture_weights)

print("Loading parsed data...")
# `with` guarantees the handle is closed even if parsing raises.
with open("parsed_data/data1.universalenrollparsed", "r") as traindata_processed_file:
    data = json.loads(traindata_processed_file.read())
print("Done loading parsed data!")

means = []
if DO_KMEANS:
    # consistency fix: use NUM_GAUSSIANS instead of the magic constant 32
    means = kmeans.do_kmeans(data, NUM_GAUSSIANS)
else:
    print("Loading centroids...")
    with open("parsed_data/data1.kmeanspartialcentroids",
              "r") as traindata_processed_file:
        means = json.loads(traindata_processed_file.read())
    print("Done loading centroids!")

# Per-dimension variance of the whole dataset, shared by every component.
data_np = np.array(data)
variances_np = np.var(data_np, axis=0)
if DEBUG:
    print ("variances_np: ", variances_np)
variances = [variances_np.tolist()] * NUM_GAUSSIANS

initial_params = {
    'mixture_weights': mixture_weights,
    'means': means,
    'variances': variances
}

print("writing inital parameters to file...")
with open("parsed_data/data1.initialparameters", "w") as traindata_processed_file:
    traindata_processed_file.write(json.dumps(initial_params))
print("Done writing inital parameters to file")
| 27.784314 | 80 | 0.694425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 375 | 0.264644 |
8c17fa8708bb9e41eb65c6fabc304aa1f4620e52 | 492 | py | Python | backend/server/apps/endpoints/tests.py | BetikuOluwatobi/tweets_sentiment-analysis | b6a86ebbd281c1e4550211c6ff2788006728af8f | [
"MIT"
] | 1 | 2021-09-23T20:20:27.000Z | 2021-09-23T20:20:27.000Z | backend/server/apps/endpoints/tests.py | BetikuOluwatobi/tweets_sentiment-analysis | b6a86ebbd281c1e4550211c6ff2788006728af8f | [
"MIT"
] | null | null | null | backend/server/apps/endpoints/tests.py | BetikuOluwatobi/tweets_sentiment-analysis | b6a86ebbd281c1e4550211c6ff2788006728af8f | [
"MIT"
] | null | null | null | from django.test import TestCase
from .algorithm import Logistic_Regression
# Create your tests here.
class TestModelPredictions(TestCase):
    """Smoke tests for the Logistic_Regression tweet-sentiment model."""

    def testPositive(self):
        """A clearly happy tweet is classified as positive."""
        # `tweet` instead of `input`: do not shadow the builtin input()
        tweet = 'I am very happy today :)'
        model = Logistic_Regression()
        pred = model.predict_tweet(tweet)
        self.assertEqual('positive', pred)

    def testNegative(self):
        """A clearly sad tweet is classified as negative."""
        tweet = 'I am very sad today :('
        model = Logistic_Regression()
        pred = model.predict_tweet(tweet)
        self.assertEqual('negative', pred)
| 28.941176 | 42 | 0.719512 | 388 | 0.788618 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.193089 |
8c1b1007e7ce83e67bed0e618b9207e4eb8ea15f | 9,369 | py | Python | githubly.py | kumaranvpl/githubly | 0261af92f375ad06105e2969c5e62a0db6d4b095 | [
"MIT"
] | null | null | null | githubly.py | kumaranvpl/githubly | 0261af92f375ad06105e2969c5e62a0db6d4b095 | [
"MIT"
] | 4 | 2016-10-24T18:40:54.000Z | 2016-10-25T02:34:44.000Z | githubly.py | kumaranvpl/githubly | 0261af92f375ad06105e2969c5e62a0db6d4b095 | [
"MIT"
] | null | null | null | import csv
import getpass
import json
import requests
import sys
from requests.auth import HTTPBasicAuth
class GithublyException(Exception):
    """Application error wrapping the value/exception that caused it."""

    def __init__(self, value):
        # keep the offending value around for later inspection
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class Githubly:
    """Interactive command-line client for the GitHub issues API (Python 2).

    On construction it authenticates with username/password, obtains an
    OAuth token (cached per user in tokens.csv), optionally lists repos,
    and prompts for the repository to operate on.
    """
    def __init__(self, username, password):
        # NOTE(review): the constructor performs network and interactive
        # I/O (token request, repo listing, raw_input prompts).
        self.username = username
        self.password = password
        self.GITHUB_API = "https://api.github.com/"
        self.auth_token = self._get_auth_token()
        self.headers = {'Authorization': 'token %s' % self.auth_token}
        self.user = self._need_another_user()
        self._print_repos(self.user)
        self.repo = raw_input("Please enter a repo name: ")
    def _get_response_from_api(self, url, need_links=None):
        """GET *url*; return parsed JSON (and next/last page links if asked)."""
        # print url
        response = requests.get(url, auth=HTTPBasicAuth(self.username, self.password), headers=self.headers)
        if need_links:
            # pagination links are only present when there is more than one page
            if "next" in response.links and "last" in response.links:
                return json.loads((response.text).encode('utf-8')), response.links["next"], response.links["last"]
            return json.loads((response.text).encode('utf-8')), None, None
        return json.loads((response.text).encode('utf-8'))
    def _post_to_api(self, url, data):
        """POST *data* as JSON to *url*; return the parsed JSON response."""
        # print url, data
        response = requests.post(url, data=json.dumps(data), auth=HTTPBasicAuth(self.username, self.password), headers=self.headers)
        return json.loads((response.text).encode('utf-8'))
    def _get_auth_token(self):
        """Return a cached OAuth token for this user, creating one if needed.

        Tokens are cached as username,token rows in tokens.csv.
        """
        with open('tokens.csv', 'rb') as f:
            reader = csv.reader(f)
            dict_from_csv = dict(reader)
            if self.username in dict_from_csv:
                return dict_from_csv[self.username]
        # no cached token: request a new one with 'repo' scope
        url = self.GITHUB_API + "authorizations"
        data = {"scopes": ["repo"], "note": "Auth token for Githubly"}
        response = requests.post(url, data=json.dumps(data), auth=HTTPBasicAuth(self.username, self.password))
        resp_dict = json.loads((response.text).encode('utf-8'))
        # append the fresh token to the cache
        with open('tokens.csv', 'a') as f:
            writer = csv.writer(f)
            writer.writerow([self.username, resp_dict["token"]])
        return resp_dict["token"]
    def _need_another_user(self):
        """Ask whose issues to browse; default to the authenticated user."""
        user = raw_input("Please enter username of another user to list issues, else enter no: ")
        if user in ["no", "n", "N", "NO", "No"]:
            user = self.username
        return user
    def _get_repos(self, user):
        """Return the list of all repos (any visibility) of *user*."""
        url = self.GITHUB_API + "users/" + user + "/repos?visibility=all&type=all"
        repos_list = self._get_response_from_api(url)
        return repos_list
    def _print_issues(self, user, repo, url=None):
        """Print one page of issues; offer next/last page navigation.

        Returns False when the repo has no issues, True otherwise.
        """
        if not url:
            url = self.GITHUB_API + "repos/" + user + "/" + repo + "/issues"
        issues_list, next_url, last_url = self._get_response_from_api(url, need_links=True)
        if not issues_list:
            print "No issues found. Please open one first"
            return False
        for issue in issues_list:
            print str(issue["number"]) + "-" + issue["title"]
        if next_url and last_url:
            # more than one page: let the user walk through them
            print "Next - %s" % next_url["url"]
            print "Last - %s" % last_url["url"]
            new_choice = raw_input("Please enter next/last to navigate to next/last page. Enter exit to quit: ")
            if new_choice not in ["Exit", "Quit", "quit", "exit", "q"]:
                if new_choice in ["Next", "next", "NEXT"]:
                    new_url = next_url["url"]
                elif new_choice in ["Last", "last", "LAST"]:
                    new_url = last_url["url"]
                else:
                    print "Bad choice :("
                    return True
                # recurse into the chosen page
                self._print_issues(user=user, repo=repo, url=new_url)
        return True
    def _print_repos(self, user):
        """Optionally print the names of all of *user*'s repositories."""
        need_repos = raw_input("Do you want to see all repos?(y/n) ")
        if need_repos in ["yes", "Yes", "y", "Y", "YES"]:
            repos_list = self._get_repos(user)
            for repo in repos_list:
                print repo["name"]
    def list_issues(self):
        """List the issues of the chosen repository."""
        issues_present = self._print_issues(self.user, self.repo)
    def issue_in_detail(self):
        """Prompt for an issue number and print its details."""
        issues_present = self._print_issues(self.user, self.repo)
        if not issues_present: return
        issue_num = raw_input("Please enter issue's number to check its details: ")
        url = self.GITHUB_API + "repos" + "/" + self.user + "/" + self.repo + "/issues/" + issue_num
        try:
            # NOTE(review): uses a POST with empty data to fetch the issue;
            # a GET would be the conventional call here -- confirm intended.
            response = self._post_to_api(url=url, data={})
            #print response
            print "Issue details:"
            print "Issue id - %s" % response["id"]
            print "Issue number - %s" % response["number"]
            print "Issue title - %s" % response["title"]
            print "Issue body - %s" % response["body"]
            print "Issue state - %s" % response["state"]
            print "Issue url - %s" % response["url"]
            print "Issue repository_url - %s" % response["repository_url"]
            print "Issue html_url - %s" % response["html_url"]
            print "Issue comments - %s" % response["comments"]
            print "Issue created_at - %s" % response["created_at"]
            print "Issue closed_at - %s" % response["closed_at"]
        except Exception as e:
            print "Error occured - %s" % str(e)
            raise GithublyException(e)
    def open_issue(self):
        """Prompt for title/body and create a new issue."""
        title = raw_input("Please enter title for new issue: ")
        body = raw_input("Please enter body for new issue: ")
        data = {"title": title, "body": body}
        url = self.GITHUB_API + "repos" + "/" + self.user + "/" + self.repo + "/issues"
        try:
            response = self._post_to_api(url=url, data=data)
            print "Issue created successfully"
            print "Issue id - %s" % response["id"]
            print "Issue number - %s" % response["number"]
            print "Issue created_at - %s" % response["created_at"]
        except Exception as e:
            print "Error occured - %s" % str(e)
            raise GithublyException(e)
    def close_issue(self):
        """Prompt for an issue number and set its state to closed."""
        issues_present = self._print_issues(self.user, self.repo)
        if not issues_present: return
        issue_num = raw_input("Please enter issue's number to close: ")
        data = {"state": "closed"}
        url = self.GITHUB_API + "repos" + "/" + self.user + "/" + self.repo + "/issues/" + issue_num
        try:
            response = self._post_to_api(url=url, data=data)
            print response
            print "Issue closed successfully"
            print "Issue id - %s" % response["id"]
            print "Issue number - %s" % response["number"]
            print "Issue state - %s" % response["state"]
            print "Issue created_at - %s" % response["created_at"]
            print "Issue closed_at - %s" % response["closed_at"]
        except Exception as e:
            print "Error occured - %s" % str(e)
            raise GithublyException(e)
    def add_comment(self):
        """Prompt for an issue number and add a comment to it."""
        issues_present = self._print_issues(self.user, self.repo)
        if not issues_present: return
        issue_num = raw_input("Please enter issue's number to add comment: ")
        comment = raw_input("Please enter your comment: ")
        data = {"body": comment}
        url = self.GITHUB_API + "repos" + "/" + self.user + "/" + self.repo + "/issues/" + issue_num + "/comments"
        try:
            response = self._post_to_api(url=url, data=data)
            print response
            print "Comment added successfully"
            print "Comment id - %s" % response["id"]
            print "Comment message - %s" % response["body"]
            print "Comment html_url - %s" % response["html_url"]
            print "Comment created_at - %s" % response["created_at"]
        except Exception as e:
            print "Error occured - %s" % str(e)
            raise GithublyException(e)
if __name__ == "__main__":
    # Interactive entry point: collect credentials, build the client,
    # then loop over a simple text menu until the user exits.
    print "Githublyyyyyyyyyyyyyyyyyyyyy"
    print "Please enter your github username, password below. This is needed to avoid github's rate limitation. "
    print "Don't worry I am not saving your credentials ;)"
    username = raw_input("Username: ")
    # getpass keeps the password out of the terminal echo
    password = getpass.getpass(prompt='Password: ', stream=None)
    try:
        githubly = Githubly(username=username, password=password)
    except Exception as e:
        print "Something broke :("
        print "Exception for geeks - %s" % str(e)
        raise GithublyException(e)
    while True:
        print "Menu"
        print "1. List issues"
        print "2. Issue in detail"
        print "3. Open new issue"
        print "4. Close issue"
        print "5. Add comment to an issue"
        print "Exit or Ctrl + C to quit"
        user_input = raw_input("Please enter your choice: ")
        if user_input == "1":
            githubly.list_issues()
        elif user_input == "2":
            githubly.issue_in_detail()
        elif user_input == "3":
            githubly.open_issue()
        elif user_input == "4":
            githubly.close_issue()
        elif user_input == "5":
            githubly.add_comment()
        elif user_input in ["Exit", "Quit", "quit", "exit", "q"]:
            print "Bye Bye Bye!!!"
            sys.exit()
        else:
            print "Wrong choice... Try again please"
| 41.455752 | 132 | 0.582026 | 7,846 | 0.837443 | 0 | 0 | 0 | 0 | 0 | 0 | 2,471 | 0.263742 |
8c1b30fbf9e603403e818a12412369bae43a8ccb | 5,562 | py | Python | heat/engine/resources/neutron/floatingip.py | NeCTAR-RC/heat | b152817f192a7b46514793633ddc968c1fe1ebf8 | [
"Apache-2.0"
] | 1 | 2015-02-26T03:23:23.000Z | 2015-02-26T03:23:23.000Z | heat/engine/resources/neutron/floatingip.py | NeCTAR-RC/heat | b152817f192a7b46514793633ddc968c1fe1ebf8 | [
"Apache-2.0"
] | null | null | null | heat/engine/resources/neutron/floatingip.py | NeCTAR-RC/heat | b152817f192a7b46514793633ddc968c1fe1ebf8 | [
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine import clients
from heat.engine import properties
from heat.engine.resources.neutron import neutron
from heat.engine.resources.neutron import router
from heat.openstack.common import log as logging
if clients.neutronclient is not None:
from neutronclient.common.exceptions import NeutronClientException
logger = logging.getLogger(__name__)
class FloatingIP(neutron.NeutronResource):
PROPERTIES = (
FLOATING_NETWORK_ID, VALUE_SPECS, PORT_ID, FIXED_IP_ADDRESS,
) = (
'floating_network_id', 'value_specs', 'port_id', 'fixed_ip_address',
)
properties_schema = {
FLOATING_NETWORK_ID: properties.Schema(
properties.Schema.STRING,
_('ID of network to allocate floating IP from.'),
required=True
),
VALUE_SPECS: properties.Schema(
properties.Schema.MAP,
_('Extra parameters to include in the "floatingip" object in the '
'creation request. Parameters are often specific to installed '
'hardware or extensions.'),
default={}
),
PORT_ID: properties.Schema(
properties.Schema.STRING,
_('ID of an existing port with at least one IP address to '
'associate with this floating IP.')
),
FIXED_IP_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('IP address to use if the port has multiple addresses.')
),
}
attributes_schema = {
'router_id': _('ID of the router used as gateway, set when associated '
'with a port.'),
'tenant_id': _('The tenant owning this floating IP.'),
'floating_network_id': _('ID of the network in which this IP is '
'allocated.'),
'fixed_ip_address': _('IP address of the associated port, if '
'specified.'),
'floating_ip_address': _('The allocated address of this IP.'),
'port_id': _('ID of the port associated with this IP.'),
'show': _('All attributes.')
}
def add_dependencies(self, deps):
super(FloatingIP, self).add_dependencies(deps)
# depend on any RouterGateway in this template with the same
# network_id as this floating_network_id
for resource in self.stack.itervalues():
if (resource.has_interface('OS::Neutron::RouterGateway') and
resource.properties.get(router.RouterGateway.NETWORK_ID) ==
self.properties.get(self.FLOATING_NETWORK_ID)):
deps += (self, resource)
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
fip = self.neutron().create_floatingip({
'floatingip': props})['floatingip']
self.resource_id_set(fip['id'])
def _show_resource(self):
return self.neutron().show_floatingip(self.resource_id)['floatingip']
def handle_delete(self):
client = self.neutron()
try:
client.delete_floatingip(self.resource_id)
except NeutronClientException as ex:
self._handle_not_found_exception(ex)
class FloatingIPAssociation(neutron.NeutronResource):
    """Associates an existing floating IP with a port.

    Registered as ``OS::Neutron::FloatingIPAssociation``.  The resource ID
    is the composite string '<floatingip_id>:<port_id>'.
    """
    PROPERTIES = (
        FLOATINGIP_ID, PORT_ID, FIXED_IP_ADDRESS,
    ) = (
        'floatingip_id', 'port_id', 'fixed_ip_address',
    )
    properties_schema = {
        FLOATINGIP_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the floating IP to associate.'),
            required=True
        ),
        PORT_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of an existing port with at least one IP address to '
              'associate with this floating IP.')
        ),
        FIXED_IP_ADDRESS: properties.Schema(
            properties.Schema.STRING,
            _('IP address to use if the port has multiple addresses.')
        ),
    }
    def handle_create(self):
        """Point the floating IP at the given port."""
        props = self.prepare_properties(self.properties, self.name)
        floatingip_id = props.pop(self.FLOATINGIP_ID)
        self.neutron().update_floatingip(floatingip_id, {
            'floatingip': props})['floatingip']
        # composite ID so handle_delete can recover both halves later
        self.resource_id_set('%s:%s' % (floatingip_id, props[self.PORT_ID]))
    def handle_delete(self):
        """Detach the floating IP from its port (best effort)."""
        if not self.resource_id:
            return
        client = self.neutron()
        (floatingip_id, port_id) = self.resource_id.split(':')
        try:
            # clearing port_id dissolves the association without deleting
            # the floating IP itself
            client.update_floatingip(
                floatingip_id,
                {'floatingip': {'port_id': None}})
        except NeutronClientException as ex:
            self._handle_not_found_exception(ex)
def resource_mapping():
    """Expose the Neutron floating IP resource types to Heat.

    Returns an empty mapping when python-neutronclient is unavailable, so
    the types are simply not registered.
    """
    if clients.neutronclient is None:
        return {}
    mapping = {
        'OS::Neutron::FloatingIP': FloatingIP,
        'OS::Neutron::FloatingIPAssociation': FloatingIPAssociation,
    }
    return mapping
| 36.116883 | 79 | 0.627292 | 4,399 | 0.790903 | 0 | 0 | 0 | 0 | 0 | 0 | 1,876 | 0.337289 |
8c1b7857206e2478990b4e2756556816497cca42 | 1,537 | py | Python | netgpibdata/netgpibcmd.py | daccordeon/summerSHG | 4e9254b408bb04ddd7b4b5ae1c8db0d23c72f88f | [
"BSD-3-Clause"
] | 1 | 2020-12-06T23:40:21.000Z | 2020-12-06T23:40:21.000Z | netgpibdata/netgpibcmd.py | daccordeon/summerSHG | 4e9254b408bb04ddd7b4b5ae1c8db0d23c72f88f | [
"BSD-3-Clause"
] | null | null | null | netgpibdata/netgpibcmd.py | daccordeon/summerSHG | 4e9254b408bb04ddd7b4b5ae1c8db0d23c72f88f | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
"""Issue a command or query to a network-connected GPIB device."""
import sys
import optparse
import netgpib

# Usage text
usage = """usage: %prog [options] CMD
Issue a command or query from a network-connected GPIB device.
example:
%prog -i 192.168.113.105 -d AG4395A -a 10 'POIN?'"""

# Parse options
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--address",
                  dest="gpibAddress", type="int", default=10,
                  help="GPIB device address (default: 10)")
parser.add_option("-i", "--ip",
                  dest="ipAddress", default="gpib01",
                  help="IP address/Host name (default: gpib01)")
parser.add_option("-l", "--log",
                  dest="log", action="store_true",
                  help="Log GPIB commands")
(options, args) = parser.parse_args()

if not args:
    print('Must supply command argument.', file=sys.stderr)
    sys.exit(1)

##################################################
# Create/connect to netGPIB object
gpibObj = netgpib.netGPIB(options.ipAddress,
                          options.gpibAddress,
                          '\004', 0,
                          log=options.log)

# Each line of the CMD argument is executed as a separate command/query.
for cmd_string in args[0].split('\n'):
    if not cmd_string:
        continue
    cmd = cmd_string.split(' ')
    if cmd[0].find('?') > 0:
        # A '?' in the mnemonic (e.g. 'POIN?') marks a query: print the reply.
        # NOTE(review): a bare '?' is treated as a command (find() must be
        # > 0, i.e. not at index 0) -- confirm this is intended.
        print(gpibObj.query(cmd_string).strip())
    elif cmd[0] == 'refresh':
        # Bug fix: `cmd` is a list, so the original `cmd == 'refresh'`
        # comparison was always False and refresh was unreachable.
        gpibObj.refresh()
    else:
        gpibObj.command(cmd_string)

gpibObj.close()
| 26.964912 | 65 | 0.56864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 623 | 0.405335 |
8c1bd682f752834d77fb93966c012c2997c2b56d | 784 | py | Python | microcosm_postgres/tests/test_toposort.py | globality-corp/microcosm-postgres | 5530a0cb3e022dfb6a93808e3397bf8beaa45b66 | [
"Apache-2.0"
] | 2 | 2018-08-17T14:52:41.000Z | 2018-09-20T01:39:21.000Z | microcosm_postgres/tests/test_toposort.py | globality-corp/microcosm-postgres | 5530a0cb3e022dfb6a93808e3397bf8beaa45b66 | [
"Apache-2.0"
] | 37 | 2016-03-08T05:34:28.000Z | 2021-04-21T03:20:18.000Z | microcosm_postgres/tests/test_toposort.py | globality-corp/microcosm-postgres | 5530a0cb3e022dfb6a93808e3397bf8beaa45b66 | [
"Apache-2.0"
] | 6 | 2016-11-16T07:30:35.000Z | 2019-06-24T19:27:33.000Z | """
Test topological sort.
"""
from hamcrest import assert_that, contains
from microcosm_postgres.dag import Edge
from microcosm_postgres.toposort import toposorted
class Node:
    """Minimal vertex fixture for the topological-sort tests.

    Equality stays identity-based on purpose, so the matcher checks that
    the very same objects come out in order.
    """
    def __init__(self, id):
        # `id` shadows the builtin, but renaming would break Node(id=...) callers
        self.id = id
def test_toposort():
    """A diamond graph one->{two,three}, two->three, three->four sorts in order."""
    order = ("one", "two", "three", "four")
    nodes = {name: Node(id=name) for name in order}
    dependency_pairs = [
        ("one", "two"),
        ("one", "three"),
        ("two", "three"),
        ("three", "four"),
    ]
    edges = [Edge(from_id=src, to_id=dst) for src, dst in dependency_pairs]
    assert_that(
        toposorted(nodes, edges),
        contains(*(nodes[name] for name in order)),
    )
| 20.102564 | 50 | 0.540816 | 60 | 0.076531 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.158163 |
8c1dba83b34a1c4ed784910f891d161f756f6131 | 905 | py | Python | examples/accessing_variables.py | Rory-Sullivan/yrlocationforecast | 26b66834cac4569704daf0009a9d2bba39dbfb75 | [
"MIT"
] | 13 | 2020-07-28T17:47:42.000Z | 2022-03-30T13:35:12.000Z | examples/accessing_variables.py | Rory-Sullivan/yrlocationforecast | 26b66834cac4569704daf0009a9d2bba39dbfb75 | [
"MIT"
] | 5 | 2020-10-14T11:10:13.000Z | 2022-01-01T17:35:19.000Z | examples/accessing_variables.py | Rory-Sullivan/yrlocationforecast | 26b66834cac4569704daf0009a9d2bba39dbfb75 | [
"MIT"
] | 6 | 2020-10-16T12:30:07.000Z | 2022-02-18T07:13:21.000Z | """An example of accessing individual forecast variables."""
from metno_locationforecast import Place, Forecast

# Identify this client to the met.no API, as its terms of service require.
USER_AGENT = "metno_locationforecast/1.0 https://github.com/Rory-Sullivan/yrlocationforecast"

# Place(name, latitude, longitude, altitude) -- altitude presumably in
# metres; confirm against the metno_locationforecast docs.
new_york = Place("New York", 40.7, -74.0, 10)
new_york_forecast = Forecast(new_york, USER_AGENT, "complete")
# NOTE: performs a live network request to the met.no service.
new_york_forecast.update()

# Access a particular interval.
first_interval = new_york_forecast.data.intervals[0]
print(first_interval)

# Access the interval's duration attribute.
print(f"Duration: {first_interval.duration}")
print()  # Blank line

# Access a particular variable from the interval.
rain = first_interval.variables["precipitation_amount"]
print(rain)

# Access the variables value and unit attributes.
print(f"Rain value: {rain.value}")
print(f"Rain units: {rain.units}")

# Get a full list of variables available in the interval.
print(first_interval.variables.keys())
| 30.166667 | 93 | 0.781215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 515 | 0.569061 |
8c1fd8a545034c6706874c719b97ff2d1685083f | 1,216 | py | Python | photometry/run_simulateFITS.py | aditya-sengupta/tesscomp-prototyping | 93da86cd139c240ba5f59f187bcbc47e899c3519 | [
"MIT"
] | null | null | null | photometry/run_simulateFITS.py | aditya-sengupta/tesscomp-prototyping | 93da86cd139c240ba5f59f187bcbc47e899c3519 | [
"MIT"
] | null | null | null | photometry/run_simulateFITS.py | aditya-sengupta/tesscomp-prototyping | 93da86cd139c240ba5f59f187bcbc47e899c3519 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Command-line utility to simulate TESS FITS images for the photometry pipeline.
Structure inspired by `run_tessphot` by Rasmus Handberg.
.. codeauthor:: Jonas Svenstrup Hansen <jonas.svenstrup@gmail.com>
"""
#import os
import argparse
#import logging
from simulation.simulateFITS import simulateFITS
if __name__ == '__main__':
    # Command-line interface for generating simulated TESS FITS images.
    parser = argparse.ArgumentParser(description='Simulate FITS images to be used by the photometry pipeline.')
    parser.add_argument('-s', '--Nstars', type=int, default=5,
                        help='Number of stars in image')
    parser.add_argument('-t', '--Ntimes', type=int, default=5,
                        help='Number of time steps and FITS images')
    parser.add_argument('-d', '--debug', action='store_true',
                        help='Print debug messages.')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Only report warnings and errors.')
    args = parser.parse_args()

    # NOTE(review): --debug/--quiet are parsed but currently unused (the
    # logging setup was commented out); kept for CLI compatibility.
    simulateFITS(Nstars=args.Nstars, Ntimes=args.Ntimes,
                 save_images=True, overwrite_images=True)
| 33.777778 | 108 | 0.739309 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 753 | 0.619243 |
8c1ffb243fbd46c27878c213979b249291696cf1 | 7,083 | py | Python | src/pythonScripts/DrivePart/download.py | sahilbest999/FVS-GIT-CLONE | 428ad5726a8b04d4fcd79eac22bec4395f3e2422 | [
"MIT"
] | null | null | null | src/pythonScripts/DrivePart/download.py | sahilbest999/FVS-GIT-CLONE | 428ad5726a8b04d4fcd79eac22bec4395f3e2422 | [
"MIT"
] | 1 | 2021-02-11T15:47:55.000Z | 2021-03-22T03:35:46.000Z | src/pythonScripts/DrivePart/download.py | sahilbest999/FVS-GIT-CLONE | 428ad5726a8b04d4fcd79eac22bec4395f3e2422 | [
"MIT"
] | 1 | 2020-08-12T18:32:38.000Z | 2020-08-12T18:32:38.000Z | import pickle
import os
import re
import io
import response
import upload
import authenticate
from googleapiclient.errors import HttpError
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
import requests
from tqdm import tqdm
from tabulate import tabulate
import re
def download(file_id, destination, saveName="", service=None):
    """Download a Drive file into the *destination* directory.

    :param file_id: Google Drive file ID
    :param destination: existing directory to save into
    :param saveName: optional local file name (defaults to the Drive name;
        an extension is appended when missing)
    :param service: an authenticated Drive service; built on demand when
        omitted
    :return: 1 on success, 0 on failure
    """
    if not os.path.exists(destination):
        print(f"DESTINATION {destination} not found")
        return 0
    # Bug fix: the service used to be created in the default argument,
    # which triggered the OAuth flow at import time; build it lazily.
    if service is None:
        service = authenticate.get_gdrive_service()
    try:
        file = service.files().get(fileId=file_id, fields="name, size, mimeType").execute()
        print(file)
    except HttpError as e:
        err_code = int(e.resp['status'])
        print("Error Code ", err_code)
        if err_code == 404:
            print(f"FILE WITH ID {file_id} NOT FOUND")
        # Bug fix: previously only 404 returned here; any other HTTP error
        # fell through and crashed below on the unbound `file` variable.
        return 0
    if not saveName:
        saveName = file['name']
    mime = file['mimeType']
    if mime in ('application/vnd.google-apps.document',
                'application/vnd.google-apps.spreadsheet',
                'application/vnd.google-apps.presentation'):
        # Google Workspace files cannot be fetched raw; export them instead.
        return 1 if downloadGsuit(file_id, destination, saveName, mime, service) == 1 else 0
    if not os.path.splitext(saveName)[1]:
        # Bug fix: test the name we actually save under (not the remote
        # name) before appending a guessed extension.
        saveName += resolveExtension(file['mimeType'])
    print("Save name->", saveName)
    request = service.files().get_media(fileId=file_id)
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while not done:
        status, done = downloader.next_chunk()
        print(f"Download {int(status.progress()*100)}")
    fh.seek(0)
    # Bug fix: os.path.join instead of `destination += saveName`, which
    # misplaced the file when destination lacked a trailing separator.
    newPath = checkFileAlreadyExists(os.path.join(destination, saveName))
    print("New File Destination -> ", newPath)
    with open(newPath, "wb") as f:
        f.write(fh.read())
    return 1
def checkFileAlreadyExists(path, i=1, iter=0):
    """Return *path*, or a non-clashing 'name(N).ext' variant if it exists.

    :param path: candidate file path
    :param i: first counter value to try
    :param iter: unused; kept for backward compatibility with the old
        recursive signature
    :return: a path that does not currently exist on disk
    """
    if not os.path.exists(path):
        print("Returned Value", path)
        return path
    # Bug fix: the old string surgery assumed a single-digit counter and a
    # '.' in the name; rebuilding via splitext handles '(10)' and beyond,
    # and extension-less names.
    root, ext = os.path.splitext(path)
    n = i
    candidate = "{0}({1}){2}".format(root, n, ext)
    while os.path.exists(candidate):
        n += 1
        candidate = "{0}({1}){2}".format(root, n, ext)
    print("Returned Value", candidate)
    return candidate
def downloadGsuit(file_id, destination, saveName, mime, service):
    """Export a Google Workspace document to its Office equivalent.

    :param file_id: Drive file ID of the Docs/Sheets/Slides file
    :param destination: existing directory to save into
    :param saveName: local file name (extension appended when missing)
    :param mime: the native Google mime type of the file
    :param service: an authenticated Drive service
    :return: 1 on success
    """
    print("IT IS A Gsuit Document")
    # Map the native Google mime type to the Office export format.
    newMime = {
        "application/vnd.google-apps.document": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        "application/vnd.google-apps.spreadsheet": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        "application/vnd.google-apps.presentation": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
    }[mime]
    request = service.files().export_media(fileId=file_id, mimeType=newMime)
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while not done:
        status, done = downloader.next_chunk()
        print(f"Dowloaded {int(status.progress()*100)}")
    fh.seek(0)
    if not os.path.splitext(saveName)[1]:
        saveName += resolveExtension(mime)
    # Bug fix: os.path.join instead of string concatenation, so a
    # destination without a trailing separator still resolves to a file
    # inside that directory.
    destination = checkFileAlreadyExists(os.path.join(destination, saveName))
    print(destination)
    with open(destination, "wb") as f:
        f.write(fh.read())
    return 1
def resolveExtension(mimeType):
return {
"audio/aac": ".aac",
"application/x-abiword": ".abw",
"application/x-freearc": ".arc",
"video/x-msvideo": ".avi",
"application/vnd.amazon.ebook": ".azw",
"application/octet-stream": ".bin",
"image/bmp": ".bmp",
"application/x-bzip": ".bz",
"application/x-bzip2": ".bz2",
"application/x-csh": ".csh",
"text/css": ".css",
"text/csv": ".csv",
"application/msword": ".doc",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
"application/vnd.ms-fontobject": ".eot",
"application/epub+zip": ".epub",
"application/gzip": ".gz",
"image/gif": ".gif",
"text/html": ".html",
"image/vnd.microsoft.icon": ".ico",
"text/calendar": ".ics",
"application/java-archive": ".jar",
"image/jpeg": ".jpg",
"text/javascript": ".js",
"application/json": ".json",
"application/ld+json": ".jsonld",
"audio/midi audio/x-midi": ".mid",
"text/javascript": ".mjs",
"audio/mpeg": ".mp3",
"video/mpeg": ".mpeg",
"application/vnd.apple.installer+xml": ".mpkg",
"application/vnd.oasis.opendocument.presentation": ".odp",
"application/vnd.oasis.opendocument.spreadsheet": ".ods",
"application/vnd.oasis.opendocument.text": ".odt",
"audio/ogg": ".oga",
"video/ogg": ".ogv",
"application/ogg": ".ogx",
"audio/opus": ".opus",
"font/otf": ".otf",
"image/png": ".png",
"application/pdf": ".pdf",
"application/x-httpd-php": ".php",
"application/vnd.ms-powerpoint": ".ppt",
"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
"application/vnd.rar": ".rar",
"application/rtf": ".rtf",
"application/x-sh": ".sh",
"image/svg+xml": ".svg",
"application/x-shockwave-flash": ".swf",
"application/x-tar": ".tar",
"image/tiff": ".tif.tiff",
"video/mp2t": ".ts",
"font/ttf": ".ttf",
"text/plain": ".txt",
"application/vnd.visio": ".vsd",
"audio/wav": ".wav",
"audio/webm": ".weba",
"video/webm": ".webm",
"image/webp": ".webp",
"font/woff": ".woff",
"font/woff2": ".woff2",
"application/xhtml+xml": ".xhtml",
"application/vnd.ms-excel": ".xls",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
"application/xml": "xml",
"application/vnd.mozilla.xul+xml": ".xul",
"application/zip": ".zip",
"video/3gpp": ".3gp",
"video/3gpp2": ".3g2",
"application/x-7z-compressed": ".7z",
"application/vnd.google-apps.document": ".docx",
"application/vnd.google-apps.spreadsheet": ".xlsx",
"application/vnd.google-apps.presentation": ".pptx",
}[mimeType]
download("1ikrvmXBcn_ZeXR3a8A6bdSQb_PG0DXGK", "/home/uttkarsh/Pictures/Test/ht/")
#MIME TYPE:-
# google doc - application/vnd.google-apps.document
# google sheets - application/vnd.google-apps.spreadsheet
# google slides - application/vnd.google-apps.presentation
# normalFile - 1FlMtPenfHeB5GyLHwKKmrQ9omla2kk-R ------------ working
# docs - 1TLvU8TSmHONcQttjnvufVeMhr83z-FrHOhexHNdJM-Q ------------ working
# sheets - 1D6h9YFmZCdjY8WryicXODjOqSoHuJdjYBhH8DeKkn-A ------------ working
# slides - 1Eg2BjGuZG15mu-Rd_e0mZVlVv6nD9IxbVORu0KzQucU ------------ working
# folder - 1ggGA6H5ztS5IdgRxMAiQxhzDdFDSLh2A ------------ ask
# photo - 1XKNBHQG-Alq1m3kypPq0d0s9WBvvmpgV ------------ working
# video - 1ikrvmXBcn_ZeXR3a8A6bdSQb_PG0DXGK ------------ working
| 33.410377 | 159 | 0.652266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,752 | 0.529644 |
8c22ac32d9e86be7204e958a46ee2baa1ce5b423 | 602 | py | Python | MakeRelativePaths.py | Ccantey/ArcGIS-Scripting | 464ed20e4365c5518ff6e226fb9b209de422f0dc | [
"Unlicense"
] | 11 | 2015-06-08T08:35:46.000Z | 2021-07-29T22:45:23.000Z | MakeRelativePaths.py | Ccantey/ArcGIS-Scripting | 464ed20e4365c5518ff6e226fb9b209de422f0dc | [
"Unlicense"
] | null | null | null | MakeRelativePaths.py | Ccantey/ArcGIS-Scripting | 464ed20e4365c5518ff6e226fb9b209de422f0dc | [
"Unlicense"
] | 5 | 2016-11-22T19:24:59.000Z | 2018-05-02T13:03:43.000Z | import arcpy, os
#walk through all subdirectories and change mxd to store relative paths
for root, dirs, files in os.walk(r"Q:\Geodata\shape"):
for f in files:
if f.endswith(".mxd"):
filepath = root + '\\' + f
print filepath
try:
mxd = arcpy.mapping.MapDocument(filepath)
#set relative paths property
mxd.relativePaths = True
mxd.save()
except:
print filepath + ' failed'
pass
| 28.666667 | 71 | 0.463455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.227575 |
8c23261bdfd7bedf3419eb6c6b69a2ddfc65fe75 | 376 | py | Python | ind3.py | Nebula139/Sky3 | 16ece8db297966bc016ae6e0bbfee4e2998d1e16 | [
"MIT"
] | null | null | null | ind3.py | Nebula139/Sky3 | 16ece8db297966bc016ae6e0bbfee4e2998d1e16 | [
"MIT"
] | null | null | null | ind3.py | Nebula139/Sky3 | 16ece8db297966bc016ae6e0bbfee4e2998d1e16 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import math
if __name__ == '__main__':
a = input('Введите время: ')
t = 0
A = 1
V = int(int(a)/3)
if V == 0:
print('Ошибка')
else:
while t < int(a):
t = t + 3
A *= 2
print('Через ' + a + ' часов будет ' + str(A) + ' амёбы')
| 18.8 | 66 | 0.420213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.364734 |
8c23962ece1baf380f89499b984bcb0464883878 | 9,971 | py | Python | src/k8s-extension/azext_k8s_extension/custom.py | anagg929/azure-cli-extensions | 5f9338bdbea4d127cde442ca1d60ee5c892aac0a | [
"MIT"
] | null | null | null | src/k8s-extension/azext_k8s_extension/custom.py | anagg929/azure-cli-extensions | 5f9338bdbea4d127cde442ca1d60ee5c892aac0a | [
"MIT"
] | null | null | null | src/k8s-extension/azext_k8s_extension/custom.py | anagg929/azure-cli-extensions | 5f9338bdbea4d127cde442ca1d60ee5c892aac0a | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
from knack.util import CLIError
from knack.log import get_logger
from azext_k8s_extension.vendored_sdks.models import ExtensionInstanceForCreate
from azext_k8s_extension.vendored_sdks.models import ExtensionInstanceUpdate
from azext_k8s_extension.vendored_sdks.models import ErrorResponseException
from azext_k8s_extension.vendored_sdks.models import ScopeCluster
from azext_k8s_extension.vendored_sdks.models import ScopeNamespace
from azext_k8s_extension.vendored_sdks.models import Scope
from .containerinsights import _get_container_insights_settings
logger = get_logger(__name__)
def show_k8s_extension(client, resource_group_name, cluster_name, name, cluster_type):
"""Get an existing K8s Extension.
"""
# Determine ClusterRP
cluster_rp = __get_cluster_type(cluster_type)
try:
extension = client.get(resource_group_name,
cluster_rp, cluster_type, cluster_name, name)
return extension
except ErrorResponseException as ex:
# Customize the error message for resources not found
if ex.response.status_code == 404:
# If Cluster not found
if ex.message.__contains__("(ResourceNotFound)"):
message = "{0} Verify that the --cluster-type is correct and the resource exists.".format(
ex.message)
# If Configuration not found
elif ex.message.__contains__("Operation returned an invalid status code 'Not Found'"):
message = "(ExtensionNotFound) The Resource {0}/{1}/{2}/Microsoft.KubernetesConfiguration/" \
"extensions/{3} could not be found!".format(
cluster_rp, cluster_type, cluster_name, name)
else:
message = ex.message
raise CLIError(message)
def create_k8s_extension(cmd, client, resource_group_name, cluster_name, name, cluster_type,
extension_type, scope='cluster', auto_upgrade_minor_version=None, release_train=None,
version=None, target_namespace=None, release_namespace=None, configuration_settings=None,
configuration_protected_settings=None, configuration_settings_file=None,
configuration_protected_settings_file=None, location=None, tags=None):
"""Create a new Extension Instance.
"""
# Determine ClusterRP
cluster_rp = __get_cluster_type(cluster_type)
# Validate scope and namespace
__validate_scope_and_namespace(scope, release_namespace, target_namespace, name)
# Validate version, release_train
__validate_version_and_release_train(
version, release_train, auto_upgrade_minor_version)
# Configuration Settings & Configuration Protected Settings
if configuration_settings is not None and configuration_settings_file is not None:
raise CLIError('Error! Both configuration_settings and configuration_settings_file cannot be provided.')
if configuration_protected_settings is not None and configuration_protected_settings_file is not None:
raise CLIError('Error! Both configuration_protected_settings and configuration_protected_settings_file '
'cannot be provided.')
config_settings = {}
config_protected_settings = {}
# Get Configuration Settings from file
if configuration_settings_file is not None:
config_settings = __get_config_settings_from_file(configuration_settings_file)
if configuration_settings is not None:
for dicts in configuration_settings:
for key, value in dicts.items():
config_settings[key] = value
# Get Configuration Protected Settings from file
if configuration_protected_settings_file is not None:
config_protected_settings = __get_config_settings_from_file(configuration_protected_settings_file)
if configuration_protected_settings is not None:
for dicts in configuration_protected_settings:
for key, value in dicts.items():
config_protected_settings[key] = value
# ExtensionType specific conditions
if extension_type.lower() == 'azuremonitor-containers':
# hardcoding name, release_namespace and scope since ci only supports one instance and cluster scope
# and platform doesnt have support yet extension specific constraints like this
logger.warning('Ignoring name, release_namespace and scope parameters since azuremonitor-containers '
'only supports cluster scope and single instance of this extension')
name = 'azuremonitor-containers'
release_namespace = 'azuremonitor-containers'
scope = 'cluster'
if not config_settings:
config_settings = {}
if not config_protected_settings:
config_protected_settings = {}
_get_container_insights_settings(cmd, resource_group_name,
cluster_name, config_settings, config_protected_settings)
# Determine namespace name
if scope == 'cluster':
if release_namespace is None:
release_namespace = name
scope_cluster = ScopeCluster(release_namespace=release_namespace)
ext_scope = Scope(cluster=scope_cluster, namespace=None)
else:
if target_namespace is None:
target_namespace = name
scope_namespace = ScopeNamespace(target_namespace=target_namespace)
ext_scope = Scope(namespace=scope_namespace, cluster=None)
# Create Extension Instance object
extension_instance = ExtensionInstanceForCreate(extension_type=extension_type,
auto_upgrade_minor_version=auto_upgrade_minor_version,
release_train=release_train,
version=version,
scope=ext_scope,
configuration_settings=config_settings,
configuration_protected_settings=config_protected_settings)
# Try to create the resource
return client.create(resource_group_name, cluster_rp, cluster_type, cluster_name, name, extension_instance)
def list_k8s_extension(cmd, client, resource_group_name, cluster_name, cluster_type):
cluster_rp = __get_cluster_type(cluster_type)
return client.list(resource_group_name, cluster_rp, cluster_type, cluster_name)
def update_k8s_extension(cmd, client, resource_group_name, cluster_type, cluster_name, name,
auto_upgrade_minor_version='', release_train='', version='', tags=None):
print("In update!")
# Ensure some values are provided for update
if auto_upgrade_minor_version is None and release_train is None and version is None:
message = "No values provided for update. Provide new value(s) for one or more of these properties:" \
" auto_upgrade_minor_version, release_train or version."
CLIError(message)
# Determine ClusterRP
cluster_rp = __get_cluster_type(cluster_type)
upd_extension = ExtensionInstanceUpdate(auto_upgrade_minor_version=auto_upgrade_minor_version,
release_train=release_train, version=version)
return client.update(resource_group_name, cluster_rp, cluster_type, cluster_name, name, upd_extension)
def delete_k8s_extension(cmd, client, resource_group_name, cluster_name, name, cluster_type, location=None, tags=None):
"""Delete an existing Kubernetes Extension.
"""
# Determine ClusterRP
cluster_rp = __get_cluster_type(cluster_type)
k8s_extension_instance_name = name
return client.delete(resource_group_name, cluster_rp, cluster_type, cluster_name, k8s_extension_instance_name)
def __get_cluster_type(cluster_type):
if cluster_type.lower() == 'connectedclusters':
return 'Microsoft.Kubernetes'
# Since cluster_type is an enum of only two values, if not connectedClusters, it will be managedClusters.
return 'Microsoft.ContainerService'
def __validate_scope_and_namespace(scope, release_namespace, target_namespace, name):
if scope == 'cluster':
if target_namespace is not None:
message = "When Scope is 'cluster', target_namespace must not be given."
raise CLIError(message)
if release_namespace is None:
release_namespace = name
else:
if release_namespace is not None:
message = "When Scope is 'namespace', release_namespace must not be given."
raise CLIError(message)
def __validate_version_and_release_train(version, release_train, auto_upgrade_minor_version):
if version is not None:
if release_train is not None:
message = "Both release_train and version cannot be given. To pin to specific version, give only version."
raise CLIError(message)
if auto_upgrade_minor_version is not False:
message = "To pin to specific version, auto-upgrade-minor-version must be set to 'false'."
raise CLIError(message)
def __get_config_settings_from_file(file_path):
try:
config_file = open(file_path,)
settings = json.load(config_file)
except ValueError:
raise Exception("File {} is not a valid JSON file".format(file_path))
files = len(settings)
if files == 0:
raise Exception("File {} is empty".format(file_path))
return settings
| 46.162037 | 119 | 0.684986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,640 | 0.264768 |
8c27dc6bc871f7889ddc494ac9e16b08fd6a4764 | 248 | py | Python | settings.py | arakilian0/calcupy | c0091c840cae982c9bf8816f1f8ec630dbb305ae | [
"MIT"
] | 1 | 2021-07-22T03:07:16.000Z | 2021-07-22T03:07:16.000Z | settings.py | arakilian0/calcupy | c0091c840cae982c9bf8816f1f8ec630dbb305ae | [
"MIT"
] | null | null | null | settings.py | arakilian0/calcupy | c0091c840cae982c9bf8816f1f8ec630dbb305ae | [
"MIT"
] | 1 | 2020-01-24T11:51:47.000Z | 2020-01-24T11:51:47.000Z | screen = {
"bg": "blue",
"rows": 0,
"columns": 0,
"columnspan": 4,
"padx": 5,
"pady": 5,
}
input = {
"bg": "blue",
"fg": "red",
"fs": "20px",
}
button = {
"bg": "blue",
"fg": "red",
"fs": "20px",
}
| 11.809524 | 20 | 0.366935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.431452 |
8c28212a9787a533451cf08af6ca4192b14942e2 | 7,203 | py | Python | pymcuprog/nvmpic.py | xthanhn/mcuprog | 84eb4baf82dcefe696d5d50344d7d5ca155cbefd | [
"MIT"
] | 1 | 2021-08-25T08:59:33.000Z | 2021-08-25T08:59:33.000Z | pymcuprog/nvmpic.py | xthanhn/mcuprog | 84eb4baf82dcefe696d5d50344d7d5ca155cbefd | [
"MIT"
] | null | null | null | pymcuprog/nvmpic.py | xthanhn/mcuprog | 84eb4baf82dcefe696d5d50344d7d5ca155cbefd | [
"MIT"
] | null | null | null | """
PIC NVM implementation
"""
import os
import sys
from pyedbglib.util import binary
from pymcuprog import utils
from pymcuprog.nvm import NvmAccessProviderCmsisDapTool
from pymcuprog.pymcuprog_errors import PymcuprogNotSupportedError
from pymcuprog.deviceinfo.memorynames import MemoryNames
from pymcuprog.deviceinfo.deviceinfokeys import DeviceMemoryInfoKeys, DeviceInfoKeys
class NvmAccessProviderCmsisDapPic(NvmAccessProviderCmsisDapTool):
"""
NVM access the PIC way
"""
def __init__(self, transport, device_info, packpath, options=""):
"""
:raises ImportError: if packpath is None
"""
self.pic = None
NvmAccessProviderCmsisDapTool.__init__(self, device_info)
self.options = {}
if packpath is None:
raise ImportError("No path to pack repo provided!")
# Each part pack ships its own version of the full script stack, including pyedbglib.
# pyedbglib, and other libraries, can be installed in the local python site-packages
# This path hack puts the part pack path at the front of the python import path
system_path = sys.path
sys.path = [os.path.normpath(packpath)] + sys.path
sys.path = [os.path.normpath(packpath + "//common")] + sys.path
# Create driver for scripted debuggers
self.options['skip_blank_pages'] = True
self.options['overlapped_usb_access'] = False
# This imports the debugger model from the provided packpath so the import must be late
from common.debugprovider import provide_debugger_model # pylint: disable=import-outside-toplevel, import-error
devicename = device_info[DeviceInfoKeys.NAME]
self.pic = provide_debugger_model(devicename)
# Start immediately
self.pic.setup_session(transport, self.options)
self.device_info = device_info
# Start the programming session
if 'pic24' in devicename.lower():
if 'no_pe' in options:
# Only PIC24 devices support Programming Executives
try:
# Force no Programming Executive usage by setting program_pe flag but not configure a
# PE (i.e. not calling set_program_exec)
self.pic.start_programming_operation(program_pe=options['no_pe'])
except TypeError:
# start_programming_operation does not have program_pe argument (i.e. old
# devicesupportscripts without PE support)
self.pic.start_programming_operation()
else:
self.pic.start_programming_operation(program_pe=False)
else:
self.pic.start_programming_operation()
# The stack has been built, revert path hacks
sys.path = system_path
def read(self, memory_info, offset, numbytes):
"""
Read the memory
:param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class
:param offset: relative offset in the memory type
:param numbytes: number of bytes to read
:return: array of bytes read
"""
mem_name = memory_info[DeviceInfoKeys.NAME]
offset += memory_info[DeviceMemoryInfoKeys.ADDRESS]
if mem_name in [MemoryNames.FLASH, MemoryNames.USER_ID, MemoryNames.ICD]:
mem = self.pic.read_flash_memory(offset, numbytes)
return mem
if mem_name == MemoryNames.CONFIG_WORD:
mem = self.pic.read_config_memory(offset, numbytes)
return mem
if mem_name == MemoryNames.EEPROM:
mem = self.pic.read_eeprom_memory(offset, numbytes)
return mem
self.logger.error("Unsupported memtype!")
return []
def write(self, memory_info, offset, data):
"""
Write the memory with data
:param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class
:param offset: relative offset within the memory type
:param data: the data to program
"""
# Make sure the data is aligned to a memory page
chunk, address = utils.pagealign(data,
offset,
memory_info[DeviceMemoryInfoKeys.PAGE_SIZE],
memory_info[DeviceMemoryInfoKeys.WRITE_SIZE])
mem_name = memory_info[DeviceInfoKeys.NAME]
address += memory_info[DeviceMemoryInfoKeys.ADDRESS]
if mem_name == MemoryNames.FLASH:
self.pic.write_flash_memory(address, chunk)
elif mem_name == MemoryNames.CONFIG_WORD:
self.pic.write_config_memory(address, chunk)
elif mem_name == MemoryNames.USER_ID:
self.pic.write_user_id_memory(address, chunk)
elif mem_name == MemoryNames.EEPROM:
self.pic.write_eeprom_memory(address, chunk)
elif mem_name == MemoryNames.ICD:
try:
self.pic.write_de_memory(address, chunk)
except AttributeError:
# Some PIC devices don't have the write_de_memory but instead a _write_de_block function
self.pic._write_de_block(address, chunk) # pylint: disable=protected-access
else:
raise PymcuprogNotSupportedError("Unsupported memtype: {}!".format(mem_name))
def erase(self, memory_info=None, address=None):
"""
Erase the device or parts of it.
:param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class
If memory_info is None the default bulk erase will be run
:param address: address info for erase (optional)
"""
if address is None:
if memory_info is None:
self.pic.erase()
else:
if memory_info[DeviceInfoKeys.NAME] == MemoryNames.ICD:
self.pic.erase_de_memory(memory_info[DeviceMemoryInfoKeys.ADDRESS],
memory_info[DeviceMemoryInfoKeys.SIZE])
else:
if DeviceMemoryInfoKeys.ERASE_ADDRESS in memory_info:
self.pic.erase(memory_info[DeviceMemoryInfoKeys.ERASE_ADDRESS])
else:
raise ValueError("Missing erase address for {}".format(memory_info[DeviceInfoKeys.NAME]))
else:
self.pic.erase(address)
def read_device_id(self):
"""
Get the device info from the device
:returns: Device ID raw bytes (little endian)
"""
pic_id = self.pic.read_id()
id_array = binary.pack_le16(pic_id)
self.logger.info("Device ID read out: '%04X'", pic_id)
return id_array
def hold_in_reset(self):
"""
Hold the device in reset
"""
self.pic.hold_in_reset()
def release_from_reset(self):
"""
Release the device from reset
"""
self.pic.release_from_reset()
def stop(self):
"""
Stop programming session
"""
if self.pic is not None:
self.pic.end_of_operations()
| 40.240223 | 119 | 0.627377 | 6,820 | 0.946828 | 0 | 0 | 0 | 0 | 0 | 0 | 2,402 | 0.333472 |
8c28407a573c7236354b86016987c0975cf4f3c6 | 502 | py | Python | 03-Simple-Neuron-Layer(Looping).py | KisanThapa/Neural-Networks-Scratch- | 4a78c2042e8575bbadaba709e50e7ad4070963f8 | [
"Apache-2.0"
] | null | null | null | 03-Simple-Neuron-Layer(Looping).py | KisanThapa/Neural-Networks-Scratch- | 4a78c2042e8575bbadaba709e50e7ad4070963f8 | [
"Apache-2.0"
] | null | null | null | 03-Simple-Neuron-Layer(Looping).py | KisanThapa/Neural-Networks-Scratch- | 4a78c2042e8575bbadaba709e50e7ad4070963f8 | [
"Apache-2.0"
] | null | null | null | # 3. Single layered 4 inputs and 3 outputs(Looping)
mInputs = [3, 4, 1, 2]
mWeights = [[0.2, -0.4, 0.6, 0.4],
[0.4, 0.3, -0.1, 0.8],
[0.7, 0.6, 0.3, -0.3]]
mBias1 = [3, 4, 2]
layer_output = []
for neuron_weights, neuron_bias in zip(mWeights, mBias1):
neuron_output = 0
for n_inputs, n_weights in zip(mInputs, neuron_weights):
neuron_output += n_inputs*n_weights
neuron_output += neuron_bias
layer_output.append(neuron_output)
print(layer_output)
| 21.826087 | 60 | 0.621514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.101594 |
8c29f01f7753be2f1ec266b967456193a850ecb8 | 2,733 | py | Python | doubly_stochastic_dgp/layer_initializations.py | ayush29/Doubly-Stochastic-DGP | 5bf96b57a7571fe367083c8fd95e61bd1d534f55 | [
"Apache-2.0"
] | 126 | 2017-05-26T07:10:58.000Z | 2021-12-24T07:56:36.000Z | doubly_stochastic_dgp/layer_initializations.py | SourangshuGhosh/Doubly-Stochastic-DGP | e22f5c94b1a60ee04e3ebca9eb140510eab54724 | [
"Apache-2.0"
] | 39 | 2017-08-04T11:29:13.000Z | 2021-02-16T09:52:29.000Z | doubly_stochastic_dgp/layer_initializations.py | SourangshuGhosh/Doubly-Stochastic-DGP | e22f5c94b1a60ee04e3ebca9eb140510eab54724 | [
"Apache-2.0"
] | 56 | 2017-05-29T11:48:47.000Z | 2021-09-09T15:19:46.000Z |
import tensorflow as tf
import numpy as np
from gpflow.params import DataHolder, Minibatch
from gpflow import autoflow, params_as_tensors, ParamList
from gpflow.models.model import Model
from gpflow.mean_functions import Identity, Linear
from gpflow.mean_functions import Zero
from gpflow.quadrature import mvhermgauss
from gpflow import settings
float_type = settings.float_type
from doubly_stochastic_dgp.layers import SVGP_Layer
def init_layers_linear(X, Y, Z, kernels,
num_outputs=None,
mean_function=Zero(),
Layer=SVGP_Layer,
white=False):
num_outputs = num_outputs or Y.shape[1]
layers = []
X_running, Z_running = X.copy(), Z.copy()
for kern_in, kern_out in zip(kernels[:-1], kernels[1:]):
dim_in = kern_in.input_dim
dim_out = kern_out.input_dim
print(dim_in, dim_out)
if dim_in == dim_out:
mf = Identity()
else:
if dim_in > dim_out: # stepping down, use the pca projection
_, _, V = np.linalg.svd(X_running, full_matrices=False)
W = V[:dim_out, :].T
else: # stepping up, use identity + padding
W = np.concatenate([np.eye(dim_in), np.zeros((dim_in, dim_out - dim_in))], 1)
mf = Linear(W)
mf.set_trainable(False)
layers.append(Layer(kern_in, Z_running, dim_out, mf, white=white))
if dim_in != dim_out:
Z_running = Z_running.dot(W)
X_running = X_running.dot(W)
# final layer
layers.append(Layer(kernels[-1], Z_running, num_outputs, mean_function, white=white))
return layers
def init_layers_input_prop(X, Y, Z, kernels,
num_outputs=None,
mean_function=Zero(),
Layer=SVGP_Layer,
white=False):
num_outputs = num_outputs or Y.shape[1]
D = X.shape[1]
M = Z.shape[0]
layers = []
for kern_in, kern_out in zip(kernels[:-1], kernels[1:]):
dim_in = kern_in.input_dim
dim_out = kern_out.input_dim - D
std_in = kern_in.variance.read_value()**0.5
pad = np.random.randn(M, dim_in - D) * 2. * std_in
Z_padded = np.concatenate([Z, pad], 1)
layers.append(Layer(kern_in, Z_padded, dim_out, Zero(), white=white, input_prop_dim=D))
dim_in = kernels[-1].input_dim
std_in = kernels[-2].variance.read_value()**0.5 if dim_in > D else 1.
pad = np.random.randn(M, dim_in - D) * 2. * std_in
Z_padded = np.concatenate([Z, pad], 1)
layers.append(Layer(kernels[-1], Z_padded, num_outputs, mean_function, white=white))
return layers
| 34.1625 | 95 | 0.608855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.032565 |
8c2a807f43d6eef105689c63e12efd6dbb33bdcb | 2,099 | py | Python | backend_django/login_api/serializers.py | oereo/cau-lion-server | 58ae08bba739387796a814f03c193a1eeea1b8a6 | [
"MIT"
] | 2 | 2020-04-17T07:22:55.000Z | 2020-04-20T16:45:38.000Z | backend_django/login_api/serializers.py | oereo/cau-lion-server | 58ae08bba739387796a814f03c193a1eeea1b8a6 | [
"MIT"
] | 17 | 2020-04-25T12:01:16.000Z | 2022-03-12T00:32:42.000Z | backend_django/login_api/serializers.py | minseungseon/cau-lion-server | 705d892df4746f658f903bc30e1622da35e81e69 | [
"MIT"
] | 3 | 2020-04-16T06:20:53.000Z | 2020-04-19T01:47:20.000Z | #2020-04-20 minseung seon created.
#serializer는 모두 ModelSerializer로 간단히 처리함
from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from .models import Profile
#Sign Up 회원가입
class UserSerializer(serializers.ModelSerializer):
class meta:
model = User
fields = ('id', 'username', 'email')
class CreateUserSerializer(serializers.ModelSerializer) :
# def create(self, validated_data):
# username = validated_data['username']
# email = validated_data['email']
# password = validated_data['password']
# user_obj = User(
# username = username,
# email = email
# )
# user_obj.set_password(password)
# user_obj.save()
# return validated_data
# class Meta:
# model = User
# fields = [
# 'username',
# 'password',
# 'email',
# 'is_superuser',
# ]
class Meta:
model = User
fields = ("id", "username", "password", "email")
extra_kwargs = {"password": {"write_only": True}}
def create(self, validated_data):
user = User.objects.create_user(
validated_data["username"], None, validated_data["password"]
)
return user
#Check Valid Access on Server 접속 유지중인지 확인
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("id", "username")
#Login 로그인
#연결되는 모델이 없기 때문에 serializer로 작성
class LoginUserSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError("아이디 혹은 비밀번호가 잘못 되었습니다.")
class ProfileSerializer(serializers.Serializer):
class Meta:
model = Profile
#exclude = ("user_pk", "likelion_number", "email")
fields = '__all__'
#read_only = True | 26.56962 | 72 | 0.613626 | 1,769 | 0.798646 | 0 | 0 | 0 | 0 | 0 | 0 | 960 | 0.433409 |
8c2cb46531652620ec234711640343b0c737574c | 10,157 | py | Python | menu_view.py | MCOxford/tile_miner | eefa0c8a31cf44e9b25ee3e779bc21b7ee79212e | [
"MIT"
] | null | null | null | menu_view.py | MCOxford/tile_miner | eefa0c8a31cf44e9b25ee3e779bc21b7ee79212e | [
"MIT"
] | null | null | null | menu_view.py | MCOxford/tile_miner | eefa0c8a31cf44e9b25ee3e779bc21b7ee79212e | [
"MIT"
] | null | null | null | import arcade
import arcade.gui
from arcade.gui import UIManager
from constants import *
import os
# Resolve asset paths relative to this file so the game works from any CWD.
dirname = os.path.dirname(__file__)

# Shared textures for the menu's red buttons (normal / hover / pressed states).
button_normal = arcade.load_texture(os.path.join(dirname, 'images/red_button_normal.png'))
hovered_texture = arcade.load_texture(os.path.join(dirname, 'images/red_button_hover.png'))
pressed_texture = arcade.load_texture(os.path.join(dirname, 'images/red_button_press.png'))
class BoundaryError(Exception):
    """Raised when a numeric menu setting falls outside its allowed range."""
class PlayButton(arcade.gui.UIImageButton):
    """
    Start-game button.

    The main menu polls ``start_game`` every frame and switches to the
    game view once a click has flipped it to True.
    """
    # Class-level default; a click shadows it with an instance attribute.
    start_game = False

    def on_click(self):
        """Record that the player released the mouse over this button."""
        self.start_game = True
class QuitButton(arcade.gui.UIImageButton):
    """
    Quit button.

    Clicking it closes the arcade window, ending the game immediately.
    """

    def on_click(self):
        """Shut down the game window when the button is released."""
        arcade.close_window()
class LeaderboardButton(arcade.gui.UIImageButton):
    """
    Leaderboard button.

    The main menu polls ``go_to_leaderboard`` every frame and switches
    to the leaderboard view once a click has flipped it to True.
    """
    # Class-level default; a click shadows it with an instance attribute.
    go_to_leaderboard = False

    def on_click(self):
        """Record that the player released the mouse over this button."""
        self.go_to_leaderboard = True
class MainMenu(arcade.View):
    """
    Main menu screen — the first view the player sees on boot.

    Lets the player choose the tile-board dimensions and the round timer,
    then start the game, open the leaderboard, or quit.
    """
    # Minimum/maximum tile-board dimension; applies to both rows and columns.
    MIN = 4
    MAX = 20
def __init__(self, row_count=5, column_count=5, minutes=1, seconds=0):
"""
MainMenu construct.
"""
super().__init__()
arcade.set_background_color(arcade.color.LIGHT_TAUPE)
self.ui_manager = UIManager()
self.row_count = row_count
self.column_count = column_count
self.minutes = minutes
self.seconds = seconds
# GUI elements which will get constructed in setup()
self.ui_row_input_box = None
self.ui_column_input_box = None
self.ui_minute_input_box = None
self.ui_second_input_box = None
self.play_button = None
self.leaderboard_button = None
@property
def timer(self):
return 60 * self._minutes + self._seconds
@property
def row_count(self):
return self._row_count
@row_count.setter
def row_count(self, value):
if not isinstance(value, int):
raise TypeError(f"row value not an integer: {value}")
if value < self.MIN or value > self.MAX:
raise BoundaryError
self._row_count = value
@property
def column_count(self):
return self._column_count
@column_count.setter
def column_count(self, value):
if not isinstance(value, int):
raise TypeError(f"column value not an integer: {value}")
if value < self.MIN or value > self.MAX:
raise BoundaryError
self._column_count = value
@property
def minutes(self):
return self._minutes
@minutes.setter
def minutes(self, value):
if not isinstance(value, int):
raise TypeError(f"value not an integer type: {value}")
if value < 0 or value > 99:
raise BoundaryError(f"value must be between 0 and 99: {value}")
self._minutes = value
@property
def seconds(self):
return self._seconds
@seconds.setter
def seconds(self, value):
if not isinstance(value, int):
raise TypeError(f"value not an integer type: {value}")
if value < 0 or value > 59:
raise BoundaryError(f"value must be between 0 and 59: {value}")
self._seconds = value
def setup(self):
"""
Sets up menu screen with GUI elements.
:return:
"""
self.ui_manager.purge_ui_elements()
# board row size input box
self.ui_row_input_box = arcade.gui.UIInputBox(center_x=WIDTH * 6.5 / 10, center_y=HEIGHT * 6 / 10,
width=350)
self.ui_row_input_box.text = str(self._row_count)
self.ui_row_input_box.cursor_index = len(self.ui_row_input_box.text)
self.ui_manager.add_ui_element(self.ui_row_input_box)
# board column size input box
self.ui_column_input_box = arcade.gui.UIInputBox(center_x=WIDTH * 6.5 / 10, center_y=HEIGHT * 4.5 / 10,
width=350)
self.ui_column_input_box.text = str(self._column_count)
self.ui_column_input_box.cursor_index = len(self.ui_column_input_box.text)
self.ui_manager.add_ui_element(self.ui_column_input_box)
# minute input box
self.ui_minute_input_box = arcade.gui.UIInputBox(center_x=WIDTH * 4.94 / 10, center_y=HEIGHT * 3 / 10,
width=100)
self.ui_minute_input_box.text = str(self._minutes)
self.ui_minute_input_box.cursor_index = len(self.ui_minute_input_box.text)
self.ui_manager.add_ui_element(self.ui_minute_input_box)
# second input box
self.ui_second_input_box = arcade.gui.UIInputBox(center_x=WIDTH * 6.9 / 10, center_y=HEIGHT * 3 / 10,
width=100)
self.ui_second_input_box.text = str(self._seconds)
self.ui_second_input_box.cursor_index = len(self.ui_second_input_box.text)
self.ui_manager.add_ui_element(self.ui_second_input_box)
# play button - press to play the game (creates a new view)
self.play_button = PlayButton(center_x=WIDTH / 2, center_y=HEIGHT * 1.5 / 10, normal_texture=button_normal,
hover_texture=hovered_texture, press_texture=pressed_texture, text='Play!')
self.ui_manager.add_ui_element(self.play_button)
# quit button - close the game
quit_button = QuitButton(center_x=WIDTH * 8 / 10, center_y=HEIGHT * 1 / 10, normal_texture=button_normal,
hover_texture=hovered_texture, press_texture=pressed_texture, text='Quit')
self.ui_manager.add_ui_element(quit_button)
# leaderboard button - press to go to the leaderboard view
self.leaderboard_button = LeaderboardButton(center_x=WIDTH * 2 / 10, center_y=HEIGHT * 1 / 10,
normal_texture=button_normal, hover_texture=hovered_texture,
press_texture=pressed_texture, text='Leaderboard')
self.ui_manager.add_ui_element(self.leaderboard_button)
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
arcade.draw_text("TILE MINER", WIDTH / 2, HEIGHT * 3/4,
arcade.color.BLACK, font_size=75, anchor_x="center")
arcade.draw_text("Row size: ", WIDTH * 3 / 10, HEIGHT * 6 / 10,
arcade.color.BLACK, font_size=30, anchor_x="center", anchor_y="center")
arcade.draw_text("(Whole number between 4 and 20)", WIDTH * 6.5 / 10, HEIGHT * 5.4 / 10,
arcade.color.BLACK, font_size=15, anchor_x="center", anchor_y="center")
arcade.draw_text("Column size: ", WIDTH * 3 / 10 - 24, HEIGHT * 4.5 / 10,
arcade.color.BLACK, font_size=30, anchor_x="center", anchor_y="center")
arcade.draw_text("(Whole number between 4 and 20)", WIDTH * 6.5 / 10, HEIGHT * 3.9 / 10,
arcade.color.BLACK, font_size=15, anchor_x="center", anchor_y="center")
arcade.draw_text("Timer: ", WIDTH * 3.29 / 10, HEIGHT * 3 / 10,
arcade.color.BLACK, font_size=30, anchor_x="center", anchor_y="center")
arcade.draw_text("min", WIDTH * 5.9 / 10, HEIGHT * 3 / 10,
arcade.color.BLACK, font_size=20, anchor_x="center", anchor_y="center")
arcade.draw_text("(0-99)", WIDTH * 4.93 / 10, HEIGHT * 2.4 / 10,
arcade.color.BLACK, font_size=15, anchor_x="center", anchor_y="center")
arcade.draw_text("sec", WIDTH * 7.85 / 10, HEIGHT * 3 / 10,
arcade.color.BLACK, font_size=20, anchor_x="center", anchor_y="center")
arcade.draw_text("(0-59)", WIDTH * 6.9 / 10, HEIGHT * 2.4 / 10,
arcade.color.BLACK, font_size=15, anchor_x="center", anchor_y="center")
def on_show_view(self):
"""
Show this view.
"""
self.setup()
def on_hide_view(self):
"""
What to do when hiding this view.
:return:
"""
self.ui_manager.unregister_handlers()
def update(self, delta_time: float):
"""
Called every frame.
:param delta_time: delta time for each frame.
:return:
"""
if self.play_button.start_game:
try:
self.row_count = int(self.ui_row_input_box.text)
self.column_count = int(self.ui_column_input_box.text)
self.minutes = int(self.ui_minute_input_box.text)
self.seconds = int(self.ui_second_input_box.text)
except (ValueError, BoundaryError):
self.ui_row_input_box.text = ""
self.ui_column_input_box.text = ""
self.ui_minute_input_box.text = ""
self.ui_second_input_box.text = ""
self.play_button.start_game = False
return
import tile_miner
game_view = tile_miner.TileMiner(row_count=self._row_count, column_count=self._column_count,
total_time=self.timer)
self.window.width = game_view.screen_width
self.window.height = game_view.screen_height
self.window.show_view(game_view)
if self.leaderboard_button.go_to_leaderboard:
import leaderboard_view
lb_view = leaderboard_view.LeaderboardView()
self.window.show_view(lb_view)
def main():
window = arcade.Window(WIDTH, HEIGHT, "Tile Miner")
menu_view = MainMenu(6, 6, 3, 0)
window.show_view(menu_view)
arcade.run()
if __name__ == "__main__":
main()
| 37.341912 | 115 | 0.608743 | 9,535 | 0.938761 | 0 | 0 | 1,470 | 0.144728 | 0 | 0 | 1,795 | 0.176725 |
8c2d9c7411c316a5fdbc00d27ddd1f859de4e57c | 4,645 | py | Python | Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/TypeInfo/FundamentalTypes/All.py | davidbrownell/v3-Common_Environment | 8f42f256e573cbd83cbf9813db9958025ddf12f2 | [
"BSL-1.0"
] | null | null | null | Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/TypeInfo/FundamentalTypes/All.py | davidbrownell/v3-Common_Environment | 8f42f256e573cbd83cbf9813db9958025ddf12f2 | [
"BSL-1.0"
] | 1 | 2018-06-08T06:45:16.000Z | 2018-06-08T06:45:16.000Z | Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/TypeInfo/FundamentalTypes/All.py | davidbrownell/v3-Common_Environment | 8f42f256e573cbd83cbf9813db9958025ddf12f2 | [
"BSL-1.0"
] | 1 | 2018-06-08T04:15:17.000Z | 2018-06-08T04:15:17.000Z | # ----------------------------------------------------------------------
# |
# | All.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2018-04-23 10:05:42
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""All items from this module."""
import os
import sys
import CommonEnvironment
from CommonEnvironment.TypeInfo.FundamentalTypes.BoolTypeInfo import BoolTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.DateTimeTypeInfo import DateTimeTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.DateTypeInfo import DateTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.DirectoryTypeInfo import DirectoryTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.DurationTypeInfo import DurationTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.EnumTypeInfo import EnumTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.FilenameTypeInfo import FilenameTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.FloatTypeInfo import FloatTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.GuidTypeInfo import GuidTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.IntTypeInfo import IntTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.StringTypeInfo import StringTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.TimeTypeInfo import TimeTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.UriTypeInfo import Uri, UriTypeInfo
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# |
# | Public Types
# |
# ----------------------------------------------------------------------
ALL_FUNDAMENTAL_TYPES = [ BoolTypeInfo,
DateTimeTypeInfo,
DateTypeInfo,
DirectoryTypeInfo,
DurationTypeInfo,
EnumTypeInfo,
FilenameTypeInfo,
FloatTypeInfo,
GuidTypeInfo,
IntTypeInfo,
StringTypeInfo,
TimeTypeInfo,
UriTypeInfo,
]
# ----------------------------------------------------------------------
# |
# | Public Methods
# |
# ----------------------------------------------------------------------
def CreateFromPythonType(typ, **kwargs):
"""
Creates a TypeInfo object based on the provided type.
Examples:
CreateFromPythonType(int)
CreateFromPythonType(string)
"""
if sys.version_info[0] == 2:
if typ in [ str, unicode, basestring, ]: # <Undefined variable> pylint: disable = E0602
return StringTypeInfo(**kwargs)
else:
if typ == str:
return StringTypeInfo(**kwargs)
for potential_type_info in [ BoolTypeInfo,
DateTimeTypeInfo,
DateTypeInfo,
# Ambiguous: DirectoryTypeInfo
DurationTypeInfo,
# Abmiguous: EnumTypeInfo
# Ambiguous: FilenameTypeInfo
FloatTypeInfo,
GuidTypeInfo,
IntTypeInfo,
# Defined above: StringTypeInfo
TimeTypeInfo,
UriTypeInfo,
]:
if potential_type_info.ExpectedType == typ:
return potential_type_info(**kwargs)
raise Exception("'{}' is not a recognized type".format(typ))
| 46.919192 | 103 | 0.475996 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,420 | 0.305705 |
8c2dc756bd57296e610b4f8de275d1147bb68d78 | 6,783 | py | Python | train.py | runhani/person-classification | 40d5199a770f40f8eea5d818645860baeb76bce7 | [
"MIT"
] | null | null | null | train.py | runhani/person-classification | 40d5199a770f40f8eea5d818645860baeb76bce7 | [
"MIT"
] | null | null | null | train.py | runhani/person-classification | 40d5199a770f40f8eea5d818645860baeb76bce7 | [
"MIT"
] | null | null | null |
import os
import matplotlib.pyplot as plt
from keras import applications
from keras.preprocessing.image import ImageDataGenerator, load_img
from keras import optimizers
from keras.models import Sequential, Model, load_model
from keras.layers import Dropout, Flatten, Dense, MaxPooling2D
from keras.regularizers import l2
from keras.callbacks import ModelCheckpoint
import sys
import argparse
import efficientnet
# Starter Code for Image Classification
def parse_arguments(argv):
parser = argparse.ArgumentParser(description='person classification training code')
# model name
parser.add_argument('--model_name', default='xception', type=str, help='', choices=['xception', 'efficientnet'])
# input
'''
db/
train/
positive/
skt_t_p00001.jpg
skt_t_p00002.jpg
...
negative/
skt_t_n00001.jpg
skt_t_n00001.jpg
...
validation/
positive/
skt_v_p00001.jpg
skt_v_p00002.jpg
...
negative/
skt_v_n00001.jpg
skt_v_n00002.jpg
...
'''
parser.add_argument('--train_data_dir', default='./db/train', type=str, help='root folder path for training (contaning at least two image folders)')
parser.add_argument('--val_data_dir', default='./db/validation', type=str, help='root folder path for validation (contaning at least two image folders)')
parser.add_argument('--number_of_classes', default=2, type=int, help='')
# hyper parameter
parser.add_argument('--init_lr', default=1e-4, type=float, help='')
parser.add_argument('--image_size', default=299, type=int, help='')
parser.add_argument('--train_epoch', default=20, type=int, help='')
parser.add_argument('--freeze_layer', default=-30, type=int, help='')
parser.add_argument('--dense_units', default=2048, type=int, help='')
parser.add_argument('--dropout_rate', default=0.2, type=float, help='')
# change batch_size according to your GPU memory for speed up
parser.add_argument('--train_batch_size', default=16, type=int, help='')
parser.add_argument('--val_batch_size', default=100, type=int, help='')
return parser.parse_args(argv)
def train(args):
if 'efficientnet' in args.model_name:
pretrained_model = efficientnet.EfficientNetB5(weights='imagenet', include_top=False, input_shape=(args.image_size, args.image_size, 3), pooling='avg')
else:
pretrained_model = applications.xception.Xception(weights='imagenet', include_top=False, input_shape=(args.image_size, args.image_size, 3), pooling='avg')
# Freeze the layers except the last N layers
for layer in pretrained_model.layers[:args.freeze_layer]:
layer.trainable = False
# Create the model
model = Sequential()
# Add the transper learning base model
model.add(pretrained_model)
# Add new layers
#model.add(Flatten())
model.add(Dense(args.dense_units, activation='relu', kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)))
model.add(Dropout(args.dropout_rate))
model.add(Dense(args.number_of_classes, activation='softmax'))
# Show a summary of the model. Check the number of trainable parameters
model.summary()
# Save the checkpoint with model name
model_file_path="%s_base.h5" % args.model_name
# Keep only a single checkpoint, the best over test accuracy.
checkpoint = ModelCheckpoint(model_file_path,
monitor='val_acc',
verbose=1,
save_best_only=True,
mode='max')
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=90,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.1,
zoom_range=0.1,
horizontal_flip=True,
fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255)
# Change the batch_size according to your system RAM
train_generator = train_datagen.flow_from_directory(
args.train_data_dir,
target_size=(args.image_size, args.image_size),
batch_size=args.train_batch_size,
class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
args.val_data_dir,
target_size=(args.image_size, args.image_size),
batch_size=args.val_batch_size,
class_mode='categorical',
shuffle=False)
# Compile the model
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.Adam(lr=args.init_lr),
metrics=['acc'])
# Train the model
history = model.fit_generator(
train_generator,
steps_per_epoch=train_generator.samples/train_generator.batch_size ,
epochs=args.train_epoch,
validation_data=validation_generator,
validation_steps=validation_generator.samples/validation_generator.batch_size,
verbose=1,
callbacks=[checkpoint])
return history
def view(history):
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Loss')
plt.legend()
plt.figure()
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Accuracy')
plt.legend()
plt.show()
def count_dirs(folder_path):
dirs = [o for o in os.listdir(folder_path) if os.path.isdir(os.path.join(folder_path,o))]
return len(dirs)
def check_input_params(args):
valid = True
number_of_train_folders = count_dirs(args.train_data_dir)
number_of_validation_folders = count_dirs(args.val_data_dir)
if args.number_of_classes != number_of_train_folders:
print('plz, check [%s] (# of classes:%d) != (# of folders:%d)' % (args.train_data_dir, args.number_of_classes, number_of_train_folders))
valid = False
if args.number_of_classes != number_of_validation_folders:
print('plz, check [%s] (# of classes:%d) != (# of folders:%d)' % (args.val_data_dir, args.number_of_classes, number_of_validation_folders))
valid = False
return valid
def main(args):
if check_input_params(args):
history = train(args)
view(history)
if __name__ == "__main__":
main(parse_arguments(sys.argv[1:]))
| 34.257576 | 162 | 0.65502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,866 | 0.2751 |
8c2f157870a65c3393becd00864f981eb90091df | 691 | py | Python | custom_config.py | prise-3d/Thesis-NoiseDetection-rfe-attributes | f5062a03876e8f270f79c3d811bf7ba1ec7d9ba1 | [
"MIT"
] | null | null | null | custom_config.py | prise-3d/Thesis-NoiseDetection-rfe-attributes | f5062a03876e8f270f79c3d811bf7ba1ec7d9ba1 | [
"MIT"
] | null | null | null | custom_config.py | prise-3d/Thesis-NoiseDetection-rfe-attributes | f5062a03876e8f270f79c3d811bf7ba1ec7d9ba1 | [
"MIT"
] | null | null | null | from modules.config.attributes_config import *
# store all variables from global config
context_vars = vars()
# folders
logs_folder = 'logs'
backup_folder = 'backups'
## min_max_custom_folder = 'custom_norm'
## correlation_indices_folder = 'corr_indices'
# variables
features_choices_labels = ['filters_statistics', 'svd', 'filters_statistics_sobel', 'svd_sobel']
models_names_list = ["rfe_svm_model"]
## models_names_list = ["svm_model","ensemble_model","ensemble_model_v2","deep_keras"]
## normalization_choices = ['svd', 'svdn', 'svdne']
# parameters | 32.904762 | 112 | 0.617945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 426 | 0.616498 |
8c2f47b6bc6e992abed62a2818205e164178fdb1 | 7,969 | py | Python | optable_submission/optable_package/optable/manipulations/target_encodings/hist_neighborhood_target_encoding.py | pfnet-research/KDD-Cup-AutoML-5 | 54202eb6aa414316a70faa8e07a68e1c8ca7bd1b | [
"MIT"
] | 18 | 2019-07-22T06:35:37.000Z | 2021-03-20T08:37:56.000Z | optable_submission/optable_package/optable/manipulations/target_encodings/hist_neighborhood_target_encoding.py | pfnet-research/KDD-Cup-AutoML-5 | 54202eb6aa414316a70faa8e07a68e1c8ca7bd1b | [
"MIT"
] | 1 | 2020-03-22T21:06:57.000Z | 2020-03-22T21:06:57.000Z | optable_submission/optable_package/optable/manipulations/target_encodings/hist_neighborhood_target_encoding.py | pfnet-research/KDD-Cup-AutoML-5 | 54202eb6aa414316a70faa8e07a68e1c8ca7bd1b | [
"MIT"
] | 11 | 2019-07-23T04:06:08.000Z | 2020-05-12T08:44:01.000Z | import numpy as np
from scipy import stats
from sklearn import metrics
from optable.synthesis import manipulation
from optable.synthesis import manipulation_candidate
from optable.dataset import feature_types
from optable import _core
# TODO: もうちょっとto-many, to-oneうまく扱う、このままだと抜けが出てくる
# TODO: dstのlenで判断しているのもよくない
class HistNeighborhoodTargetEncodingManipulation(manipulation.Manipulation):
def __init__(self, path, dataset, col):
self.__path = path
self.__dataset = dataset
self.__col = col
super(HistNeighborhoodTargetEncodingManipulation, self).__init__()
def __repr__(self):
return "HistNeighborhoodTargetEncodingManipulation {} {}".format(
self.__path, self.__col)
@property
def path(self):
return self.__path
@property
def dataset(self):
return self.__dataset
@property
def col(self):
return self.__col
def calculate_priority(self):
return 2.7 + 1.2 * self.path.substance_to_many_count(
self.dataset, self.col)
def calculate_size(self):
return 1
def meta_feature_size():
return 2
def meta_feature(self):
return [
1,
self.path.substance_to_many_count(self.dataset, self.col)
]
def meta_feature_name():
return [
"HistNeighborhoodTargetEncodingManipulation-Constant",
"HistNeighborhoodTargetEncodingManipulation-SubstanceToManyCount"
]
def synthesis(self):
to_one_path = None
to_many_path = None
for i in range(len(self.__path), -1, -1):
if self.__path[i:].is_substance_to_one_with_col(
self.__dataset, self.__col):
to_one_path = self.__path[i:]
to_many_path = self.__path[:i]
# to_one identify
if len(to_one_path) > 0:
dst_table = self.dataset.tables[self.path.dst]
dst_data = dst_table.df[self.col].values
# TODO: nan
dst_data = stats.rankdata(dst_data) // (len(dst_data) // 100)
time_for_each_table = {
table_idx: self.dataset.tables[table_name].hour_time_data
for table_idx, table_name in enumerate(to_one_path.table_names)
if self.dataset.tables[table_name].has_time}
sorted_index_for_each_table = {
table_idx: self.dataset.tables[table_name].sorted_time_index
for table_idx, table_name in enumerate(to_one_path.table_names)
if self.dataset.tables[table_name].has_time}
src_id_for_each_relation = [
self.dataset.tables[rel.src].df[rel.src_id].values
for rel in to_one_path.relations
]
dst_id_for_each_relation = [
self.dataset.tables[rel.dst].df[rel.dst_id].values
for rel in to_one_path.relations
]
src_is_unique_for_each_relation = [
rel.type.src_is_unique
for rel in to_one_path.relations
]
dst_is_unique_for_each_relation = [
rel.type.dst_is_unique
for rel in to_one_path.relations
]
ids = _core.Aggregator().aggregate(
dst_data, time_for_each_table, sorted_index_for_each_table,
src_id_for_each_relation, dst_id_for_each_relation,
src_is_unique_for_each_relation,
dst_is_unique_for_each_relation,
"last", "last")
ids = ids.astype(np.int32)
ids[ids < 0] = -1
else:
dst_table = self.dataset.tables[to_one_path.dst]
dst_table = self.dataset.tables[self.path.dst]
dst_data = dst_table.df[self.col].values
ids = stats.rankdata(dst_data) // (len(dst_data) // 100)
ids = ids.astype(np.int32)
ids[ids < 0] = -1
# target encoding
dst_table = self.dataset.tables[to_many_path.dst]
if not dst_table.has_pseudo_target:
return
targets = dst_table.pseudo_target
encoder = _core.TargetEncoder()
k = len(np.unique(ids))
if dst_table.has_hist_time_data:
sorted_index = dst_table.sorted_time_index
time_data = dst_table.hist_time_data
new_data = encoder.temporal_encode(
targets, ids, time_data, sorted_index, k)
elif dst_table.has_time:
sorted_index = dst_table.sorted_time_index
time_data = dst_table.time_data
new_data = encoder.temporal_encode(
targets, ids, time_data, sorted_index, k)
else:
new_data = encoder.encode(targets, ids, k)
if len(to_many_path) == 0:
new_data_name = \
"{}HistNeighborhoodTargetEncodingManipulation_{}_{}".format(
feature_types.aggregate_processed_numerical.prefix,
self.__path, self.__col)
self.__dataset.tables[to_many_path.src].set_new_data(
new_data, new_data_name)
else:
# to_many_aggregate
dst_data = new_data
time_for_each_table = {
table_idx: self.dataset.tables[table_name].hour_time_data
for table_idx, table_name
in enumerate(to_many_path.table_names)
if self.dataset.tables[table_name].has_time}
sorted_index_for_each_table = {
table_idx: self.dataset.tables[table_name].sorted_time_index
for table_idx, table_name
in enumerate(to_many_path.table_names)
if self.dataset.tables[table_name].has_time}
src_id_for_each_relation = [
self.dataset.tables[rel.src].df[rel.src_id].values
for rel in to_many_path.relations
]
dst_id_for_each_relation = [
self.dataset.tables[rel.dst].df[rel.dst_id].values
for rel in to_many_path.relations
]
src_is_unique_for_each_relation = [
rel.type.src_is_unique
for rel in to_many_path.relations
]
dst_is_unique_for_each_relation = [
rel.type.dst_is_unique
for rel in to_many_path.relations
]
new_data = _core.Aggregator().aggregate(
dst_data, time_for_each_table, sorted_index_for_each_table,
src_id_for_each_relation, dst_id_for_each_relation,
src_is_unique_for_each_relation,
dst_is_unique_for_each_relation,
"mean", "mean")
new_data_name = \
"{}HistNeighborhoodTargetEncodingManipulation_{}_{}".format(
feature_types.aggregate_processed_numerical.prefix,
self.__path, self.__col)
self.__dataset.tables[to_many_path.src].set_new_data(
new_data, new_data_name)
class HistNeighborhoodTargetEncodingCandidate(
manipulation_candidate.ManipulationCandidate
):
def search(self, path, dataset):
if path.not_deeper_count == 0:
dst_table = dataset.tables[path.dst]
if not dst_table.has_pseudo_target:
return []
ret = []
for col in dst_table.df.columns:
if path.is_substance_to_one_with_col(dataset, col):
continue
ftype = dst_table.ftypes[col]
if ftype == feature_types.numerical \
or ftype == feature_types.mc_processed_numerical \
or ftype == feature_types.t_processed_numerical:
ret.append(HistNeighborhoodTargetEncodingManipulation(
path, dataset, col))
return ret
else:
return []
| 37.413146 | 79 | 0.598695 | 7,647 | 0.950292 | 0 | 0 | 172 | 0.021374 | 0 | 0 | 514 | 0.063875 |
8c3007d2f608b3d77db169c4425877d373ee00f3 | 3,798 | py | Python | PullVectorsFromSQLandRunSimilarity.py | aktivkohle/youtube-curation | 1651eae7af29f60dc3e87bd696712b7e3848217e | [
"MIT"
] | 4 | 2017-11-14T13:30:07.000Z | 2019-06-12T16:01:15.000Z | PullVectorsFromSQLandRunSimilarity.py | aktivkohle/youtube-curation | 1651eae7af29f60dc3e87bd696712b7e3848217e | [
"MIT"
] | null | null | null | PullVectorsFromSQLandRunSimilarity.py | aktivkohle/youtube-curation | 1651eae7af29f60dc3e87bd696712b7e3848217e | [
"MIT"
] | 1 | 2019-12-03T08:57:05.000Z | 2019-12-03T08:57:05.000Z | import sys
sys.path.append('../')
import config
import pymysql.cursors
import pandas as pd
import numpy as np
from scipy import io as scipyio
from tempfile import SpooledTemporaryFile
from scipy.sparse import vstack as vstack_sparse_matrices
# Function to reassemble the p matrix from the vectors
def reconstitute_vector(bytesblob):
f = SpooledTemporaryFile(max_size=1000000000)
f.write(bytesblob)
f.seek(0)
return scipyio.mmread(f)
def youtubelink(vidid):
return ('https://www.youtube.com/watch?v=' + vidid)
connection = pymysql.connect(host='localhost',
user='root',
password=config.MYSQL_SERVER_PASSWORD,
db='youtubeProjectDB',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
with connection.cursor() as cursor:
# https://stackoverflow.com/questions/612231/how-can-i-select-rows-with-maxcolumn-value-distinct-by-another-column-in-sql?rq=1
# Note - this is a very interesting query! never seen it before..
sql = """SELECT * FROM
(SELECT DISTINCT(videoId) AS v, videoTitle FROM search_api) A
INNER JOIN
(SELECT * FROM captions c
INNER JOIN(SELECT videoId AS InnerVideoId,
MAX(wordCount) AS MaxWordCount,
MAX(id) AS MaxId
FROM captions
WHERE tfidfVector IS NOT NULL
GROUP BY videoId) grouped_c
ON c.videoId = grouped_c.InnerVideoId
AND c.wordCount = grouped_c.MaxWordCount
AND c.id = grouped_c.MaxId) B
ON A.v = B.videoId;"""
cursor.execute(sql)
manyCaptions = cursor.fetchall()
videos_df = pd.read_sql(sql, connection)
connection.close()
# note that the other program which put the vectors there only did it on captions WHERE language like '%en%'
# for that reason this query does not contain language. It has instead WHERE tfidfVector IS NOT NULL
videos_df = videos_df.drop('v', 1)
videos_df['tfidfVector_NP'] = videos_df['tfidfVector'].apply(reconstitute_vector)
listOfSparseVectors = list(videos_df['tfidfVector_NP'].values.flatten())
p = vstack_sparse_matrices(listOfSparseVectors)
video_titles = list(videos_df['videoTitle'].values.flatten())
video_ids = list(videos_df['videoId'].values.flatten())
# Apply the transformation to the term document matrix to compute similarity between all pairs
pairwise_similarity = (p * p.T).A # In Scipy, .A transforms a sparse matrix to a dense one
# df9 = pd.DataFrame(pairwise_similarity, columns=video_ids, index=video_ids)
# s = pd.Series(video_titles, index=df9.index)
# df9 = pd.concat((s.rename('videoTitles'), df9), axis=1)
def nth_similar_tuple(n, ps):
title = (np.array(video_titles))[((-ps).argsort()[n])]
vid_id = (np.array(video_ids))[((-ps).argsort()[n])]
return (title, vid_id)
d = []
for a,b,c in zip(video_titles, video_ids, pairwise_similarity):
d.append({'a':(a,b),
'b': nth_similar_tuple(1,c),
'c': nth_similar_tuple(2,c),
'd': nth_similar_tuple(3,c)})
# takes about a minute to run through the 7000 unique rows.
similarity_df = pd.DataFrame(d)
similarity_df.columns = ['original', 'first_similar', 'second_similar', 'third_similar']
# split the tuples into two-level columns.
similarity_df = pd.concat(
[pd.DataFrame(x, columns=['video_title','youtube_id']) for x in similarity_df.values.T.tolist()],
axis=1,
keys=similarity_df.columns)
print ("Finished running, the Pandas DataFrame variable similarity_df should now be in scope.") | 37.98 | 138 | 0.650079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,790 | 0.471301 |
8c321b723875c29f1ae6ed62908243de66570a8e | 3,430 | py | Python | core/views.py | lcs-amorim/OPE-EasyParty | b3439bf21523d7f3fb19b12283c24f364bf54388 | [
"Apache-2.0"
] | null | null | null | core/views.py | lcs-amorim/OPE-EasyParty | b3439bf21523d7f3fb19b12283c24f364bf54388 | [
"Apache-2.0"
] | 4 | 2020-06-05T18:01:15.000Z | 2021-09-07T23:51:04.000Z | core/views.py | lcs-amorim/OPE-EasyParty | b3439bf21523d7f3fb19b12283c24f364bf54388 | [
"Apache-2.0"
] | 1 | 2018-10-02T23:45:15.000Z | 2018-10-02T23:45:15.000Z | from django.shortcuts import render, redirect , HttpResponseRedirect, get_object_or_404
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.forms import UserCreationForm, PasswordChangeForm
from django.views.generic import View, TemplateView, CreateView, UpdateView
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
from core.forms import ClienteForm, EditaContaClienteForm
from core.models import Produto
from core.models import Categoria
def index(request):
contexto = {
"produtos":Produto.objects.all()
}
return render(request, "index.html", contexto)
def produto(request): #, slug):
#contexto = {
# 'produto': get_object_or_404(Produto, slug=slug) #verifica se a url existe, caso nao exista ele retorna erro 404
#}
template_name = 'produto.html'
return render(request, template_name)
def lista_produto(request):
pass
def categoria(request, slug):
categoria = Categoria.objects.get(slug=slug)
contexto = {
'categoria': categoria,
'produtos': Produto.objects.filter(categoria=categoria),
}
return render(request,'categoria.html', contexto)
def contato(request):
pass
def festa(request):
return render(request,"festa.html")
#Autenticação login
def login_cliente(request):
return render(request,"login.html")
def contato(request):
return render(request,"contato.html")
#Auntenticação Usuario
@login_required(login_url="entrar")
def page_user(request):
return render(request,'index.html')
# -----------------------------------------------//---------------------------------#
# pagina de cadastro
def registrar(request):
# Se dados forem passados via POST
if request.POST:
form = ClienteForm(request.POST)
if form.is_valid(): # se o formulario for valido
form.save() # cria um novo usuario a partir dos dados enviados
form.cleaner
else:
form = ClienteForm()
contexto = {
"form":form
}
return render(request, "registrar.html", contexto)
# -----------------------------------------------//---------------------------------#
#funcao para alterar conta
@login_required
def editarConta(request):
template_name = 'editarConta.html'
contexto = {}
if request.method == 'POST':
form = EditaContaClienteForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
form = EditaContaClienteForm(instance=request.user)
contexto['success'] = True
else:
form = EditaContaClienteForm(instance=request.user)
contexto['form'] = form
return render(request, template_name, contexto)
# -----------------------------------------------//---------------------------------#
#funcao para alterar senha
@login_required
def editarSenha(request):
template_name = 'editarSenha.html'
context = {}
if request.method == 'POST':
form = PasswordChangeForm(data=request.POST, user=request.user)
if form.is_valid():
form.save()
context['success'] = True
else:
form = PasswordChangeForm(user=request.user)
context['form'] = form
return render(request, template_name, context)
# -----------------------------------------------//---------------------------------# | 27.886179 | 121 | 0.625656 | 0 | 0 | 0 | 0 | 1,133 | 0.329936 | 0 | 0 | 941 | 0.274024 |
8c32b849026f787dad3dce205fac98d12cb8e86b | 478 | py | Python | adminlte_log/tests.py | beastbikes/django-only-admin | c89b782b92edbb1f75151e71163c0708afacd4f9 | [
"MIT"
] | 32 | 2016-11-24T08:33:10.000Z | 2017-12-18T00:25:00.000Z | adminlte_log/tests.py | beastbikes/django-only-admin | c89b782b92edbb1f75151e71163c0708afacd4f9 | [
"MIT"
] | 15 | 2016-11-30T08:28:56.000Z | 2017-09-20T15:54:18.000Z | adminlte_log/tests.py | beastbikes/django-only-admin | c89b782b92edbb1f75151e71163c0708afacd4f9 | [
"MIT"
] | 9 | 2016-11-25T02:14:24.000Z | 2017-12-06T13:22:51.000Z | from django.contrib.auth.models import User
from django.test import TestCase
from adminlte_log.models import AdminlteLogType, AdminlteLog
class AdminlteLogTest(TestCase):
def setUp(self):
AdminlteLogType.objects.create(name='test', code='test')
self.user = User.objects.create_user(username='bohan')
def test_log(self):
log = AdminlteLog.info('test', user=self.user, sort_desc='This is a log', foo='bar')
self.assertEqual(log.id, 1)
| 29.875 | 92 | 0.713389 | 336 | 0.702929 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.094142 |
8c3305c5267a989ec6a70f3ec4b12d3cf6b4ba05 | 9,607 | py | Python | pygmt/tests/test_text.py | tawandamoyo/pygmt | 4ef90382bfd84dfa47ce2422d2937a9b803de214 | [
"BSD-3-Clause"
] | null | null | null | pygmt/tests/test_text.py | tawandamoyo/pygmt | 4ef90382bfd84dfa47ce2422d2937a9b803de214 | [
"BSD-3-Clause"
] | 24 | 2021-02-12T08:12:30.000Z | 2022-02-08T13:04:23.000Z | pygmt/tests/test_text.py | tawandamoyo/pygmt | 4ef90382bfd84dfa47ce2422d2937a9b803de214 | [
"BSD-3-Clause"
] | 1 | 2022-01-12T20:52:52.000Z | 2022-01-12T20:52:52.000Z | # pylint: disable=redefined-outer-name
"""
Tests text.
"""
import os
import numpy as np
import pytest
from pygmt import Figure
from pygmt.exceptions import GMTCLibError, GMTInvalidInput
from pygmt.helpers import GMTTempFile
from pygmt.helpers.testing import check_figures_equal
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
POINTS_DATA = os.path.join(TEST_DATA_DIR, "points.txt")
CITIES_DATA = os.path.join(TEST_DATA_DIR, "cities.txt")
@pytest.fixture(scope="module")
def projection():
"""
The projection system.
"""
return "x4i"
@pytest.fixture(scope="module")
def region():
"""
The data region.
"""
return [0, 5, 0, 2.5]
@pytest.mark.mpl_image_compare
def test_text_single_line_of_text(region, projection):
    """
    Plot one text string at a single x/y coordinate.
    """
    fig = Figure()
    fig.text(
        x=1.2,
        y=2.4,
        text="This is a line of text",
        region=region,
        projection=projection,
    )
    return fig
@pytest.mark.mpl_image_compare
def test_text_multiple_lines_of_text(region, projection):
    """
    Plot two text strings, each at its own x/y coordinate.
    """
    fig = Figure()
    fig.text(
        x=[1.2, 1.6],
        y=[0.6, 0.3],
        text=["This is a line of text", "This is another line of text"],
        region=region,
        projection=projection,
    )
    return fig
def test_text_without_text_input(region, projection):
    """
    Check that calling text with x and y but no text raises an error.
    """
    fig = Figure()
    with pytest.raises(GMTInvalidInput):
        fig.text(x=1.2, y=2.4, region=region, projection=projection)
@pytest.mark.mpl_image_compare
def test_text_input_single_filename():
    """
    Plot text read from a single local file passed to textfiles.
    """
    fig = Figure()
    fig.text(textfiles=POINTS_DATA, region=[10, 70, -5, 10])
    return fig
@pytest.mark.mpl_image_compare
def test_text_input_remote_filename():
    """
    Plot text read from a GMT remote file (@-prefixed) passed to textfiles.
    """
    fig = Figure()
    fig.text(textfiles="@Table_5_11.txt", region=[0, 6.5, 0, 6.5])
    return fig
@pytest.mark.mpl_image_compare
def test_text_input_multiple_filenames():
    """
    Plot text read from several files given to textfiles.
    """
    fig = Figure()
    input_files = [POINTS_DATA, CITIES_DATA]
    fig.text(region=[10, 70, -30, 10], textfiles=input_files)
    return fig
def test_text_nonexistent_filename():
    """
    A GMTCLibError must be raised when one of the listed files is missing.
    """
    fig = Figure()
    with pytest.raises(GMTCLibError):
        fig.text(textfiles=[POINTS_DATA, "notexist.txt"], region=[10, 70, -5, 10])
@pytest.mark.mpl_image_compare
def test_text_position(region):
    """
    Plot text at the center middle (CM) anchor plus the eight surrounding
    anchors (Top/Middle/Bottom x Left/Centre/Right).
    """
    fig = Figure()
    fig.text(position="CM", text="C M", region=region, projection="x1c", frame="a")
    for anchor in ["TL", "TC", "TR", "ML", "MR", "BL", "BC", "BR"]:
        fig.text(text=anchor, position=anchor)
    return fig
def test_text_xy_with_position_fails(region):
    """
    Supplying both x/y coordinates and a position argument must fail.
    """
    fig = Figure()
    with pytest.raises(GMTInvalidInput):
        fig.text(
            x=1.2, y=2.4, position="MC", text="text", region=region, projection="x1c"
        )
@pytest.mark.mpl_image_compare
def test_text_position_offset_with_line(region):
    """
    Plot text at CM and the eight surrounding anchors, each offset by
    0.5 cm, with a line drawn from the original to the shifted point.
    """
    fig = Figure()
    fig.text(position="CM", text="C M", region=region, projection="x1c", frame="a")
    for anchor in ["TL", "TC", "TR", "ML", "MR", "BL", "BC", "BR"]:
        fig.text(text=anchor, position=anchor, offset="j0.5c+v")
    return fig
@pytest.mark.mpl_image_compare
def test_text_angle_30(region, projection):
    """
    Plot text rotated 30 degrees counter-clockwise from horizontal.
    """
    fig = Figure()
    frame_settings = dict(region=region, projection=projection)
    fig.text(x=1.2, y=2.4, text="text angle 30 degrees", angle=30, **frame_settings)
    return fig
@pytest.mark.mpl_image_compare
def test_text_font_bold(region, projection):
    """
    Plot text using a bold font.
    """
    fig = Figure()
    frame_settings = dict(region=region, projection=projection)
    fig.text(x=1.2, y=2.4, text="text in bold", font="Helvetica-Bold", **frame_settings)
    return fig
@pytest.mark.mpl_image_compare
def test_text_fill(region, projection):
    """
    Plot text with a blue fill behind it.
    """
    fig = Figure()
    frame_settings = dict(region=region, projection=projection)
    fig.text(x=1.2, y=1.2, text="blue fill around text", fill="blue", **frame_settings)
    return fig
@pytest.mark.mpl_image_compare
def test_text_pen(region, projection):
    """
    Plot text outlined with a thick, green, dashed pen.
    """
    fig = Figure()
    fig.text(
        x=1.2,
        y=1.2,
        text="green pen around text",
        pen="thick,green,dashed",
        region=region,
        projection=projection,
    )
    return fig
@pytest.mark.mpl_image_compare
def test_text_round_clearance(region, projection):
    """
    Plot text surrounded by a rounded-rectangle clearance box.
    """
    fig = Figure()
    fig.text(
        x=1.2,
        y=1.2,
        text="clearance around text",
        clearance="90%+tO",
        pen="default,black,dashed",
        region=region,
        projection=projection,
    )
    return fig
@pytest.mark.mpl_image_compare
def test_text_justify_bottom_right_and_top_left(region, projection):
    """
    Plot two labels anchored at the bottom right and the top left of the
    same x, y point.
    """
    fig = Figure()
    for anchor, label in [
        ("BR", "text justified bottom right"),
        ("TL", "text justified top left"),
    ]:
        fig.text(
            region=region,
            projection=projection,
            x=1.2,
            y=0.2,
            text=label,
            justify=anchor,
        )
    return fig
@pytest.mark.mpl_image_compare
def test_text_justify_parsed_from_textfile():
    """
    Plot text whose justification is read from a column of the text file
    (justify=True).

    Loosely based on the "All great-circle paths lead to Rome" gallery
    example at https://gmt.soest.hawaii.edu/doc/latest/gallery/ex23.html
    """
    fig = Figure()
    fig.text(
        textfiles=CITIES_DATA,
        justify=True,
        region="g",
        projection="H90/9i",
        D="j0.45/0+vred",  # draw red-line from xy point to text label (city name)
    )
    return fig
@pytest.mark.mpl_image_compare
def test_text_angle_font_justify_from_textfile():
    """
    Plot text whose x, y, angle, font, justification, and label are all
    parsed from the text file itself.
    """
    fig = Figure()
    with GMTTempFile(suffix=".txt") as tempfile:
        with open(tempfile.name, "w") as tmpfile:
            tmpfile.write("114 0.5 30 22p,Helvetica-Bold,black LM BORNEO")
        fig.text(
            textfiles=tempfile.name,
            angle=True,
            font=True,
            justify=True,
            region=[113, 117.5, -0.5, 3],
            projection="M5c",
            frame="a",
        )
    return fig
@check_figures_equal()
def test_text_transparency():
    """
    Add texts that all share one constant transparency value.
    """
    xvals = np.arange(1, 10)
    yvals = np.arange(11, 20)
    labels = [f"TEXT-{i}-{j}" for i, j in zip(xvals, yvals)]
    fig_ref, fig_test = Figure(), Figure()
    # The reference figure is built with single-character GMT arguments
    with GMTTempFile() as tmpfile:
        np.savetxt(tmpfile.name, np.c_[xvals, yvals, labels], fmt="%s")
        fig_ref.basemap(R="0/10/10/20", J="X10c", B="")
        fig_ref.text(textfiles=tmpfile.name, t=50)
    fig_test.basemap(region=[0, 10, 10, 20], projection="X10c", frame=True)
    fig_test.text(x=xvals, y=yvals, text=labels, transparency=50)
    return fig_ref, fig_test
@check_figures_equal()
def test_text_varying_transparency():
    """
    Add texts whose transparency varies per label.
    """
    xvals = np.arange(1, 10)
    yvals = np.arange(11, 20)
    labels = [f"TEXT-{i}-{j}" for i, j in zip(xvals, yvals)]
    alphas = np.arange(10, 100, 10)
    fig_ref, fig_test = Figure(), Figure()
    # The reference figure is built with single-character GMT arguments
    with GMTTempFile() as tmpfile:
        np.savetxt(tmpfile.name, np.c_[xvals, yvals, alphas, labels], fmt="%s")
        fig_ref.basemap(R="0/10/10/20", J="X10c", B="")
        fig_ref.text(textfiles=tmpfile.name, t="")
    fig_test.basemap(region=[0, 10, 10, 20], projection="X10c", frame=True)
    fig_test.text(x=xvals, y=yvals, text=labels, transparency=alphas)
    return fig_ref, fig_test
@check_figures_equal()
def test_text_nonstr_text():
    """
    Accept non-string text input such as ints and floats.
    """
    fig_ref, fig_test = Figure(), Figure()
    # The reference figure uses single-character arguments and an input file
    with GMTTempFile(suffix=".txt") as tempfile:
        with open(tempfile.name, "w") as tmpfile:
            tmpfile.write("1 1 1.0\n2 2 2.0\n3 3 3.0\n4 4 4.0\n")
        fig_ref.text(R="0/10/0/10", J="X10c", B="", textfiles=tempfile.name)
    fig_test.text(
        x=[1, 2, 3, 4],
        y=[1, 2, 3, 4],
        text=[1, 2, 3.0, 4.0],
        region=[0, 10, 0, 10],
        projection="X10c",
        frame=True,
    )
    return fig_ref, fig_test
| 25.618667 | 85 | 0.616634 | 0 | 0 | 0 | 0 | 8,265 | 0.86031 | 0 | 0 | 2,928 | 0.304778 |
8c3337efda806d1e98b40346af81d2a58586f5fc | 658 | py | Python | Python/count-primes.py | xtt129/LeetCode | 1afa893d38e2fce68e4677b34169c0f0262b6fac | [
"MIT"
] | 2 | 2020-04-08T17:57:43.000Z | 2021-11-07T09:11:51.000Z | Python/count-primes.py | xtt129/LeetCode | 1afa893d38e2fce68e4677b34169c0f0262b6fac | [
"MIT"
] | null | null | null | Python/count-primes.py | xtt129/LeetCode | 1afa893d38e2fce68e4677b34169c0f0262b6fac | [
"MIT"
] | 8 | 2018-03-13T18:20:26.000Z | 2022-03-09T19:48:11.000Z | # Time: O(n)
# Space: O(n)
# Description:
#
# Count the number of prime numbers less than a non-negative number, n
#
# Hint: The number n could be in the order of 100,000 to 5,000,000.
#
from math import sqrt
class Solution:
    # @param {integer} n
    # @return {integer}
    def countPrimes(self, n):
        """Return the number of primes strictly less than n.

        Uses the Sieve of Eratosthenes: O(n log log n) time, O(n) space.

        Fixes over the original:
        - removed the unused local ``sqr`` (sqrt was computed but never read);
        - ``xrange`` replaced by ``range`` so the code also runs on Python 3
          (``range`` exists in Python 2 with identical iteration semantics);
        - inner marking starts at i*i instead of i+i, since every smaller
          multiple of i was already marked by a smaller prime factor.
        """
        if n <= 2:
            return 0  # there are no primes below 2
        is_prime = [True] * n
        num = 0
        for i in range(2, n):
            if is_prime[i]:
                num += 1
                for j in range(i * i, n, i):
                    is_prime[j] = False
        return num
| 21.225806 | 70 | 0.468085 | 428 | 0.650456 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.332827 |
8c33eb4b835fe5ebadebb84d215778d0aae457a4 | 3,479 | py | Python | src/mds/api/signals.py | rryan/sana.mds | d62cd7e6a3d47ce933f6ee04aa7f10c8d642c944 | [
"BSD-3-Clause"
] | null | null | null | src/mds/api/signals.py | rryan/sana.mds | d62cd7e6a3d47ce933f6ee04aa7f10c8d642c944 | [
"BSD-3-Clause"
] | null | null | null | src/mds/api/signals.py | rryan/sana.mds | d62cd7e6a3d47ce933f6ee04aa7f10c8d642c944 | [
"BSD-3-Clause"
] | null | null | null | '''
Created on Aug 11, 2012
:author: Sana Development Team
:version: 2.0
'''
from django.dispatch import Signal
class ExternalDispatch(Signal):
    """ Simple dispatching signal.  Provides a 'dispatcher' key and a
        'data' dictionary to its receivers.
    """
    def __init__(self):
        super(ExternalDispatch, self).__init__(providing_args=["dispatcher", "data"])
class ExternalDispatcher(object):
    """ Callable handler that forwards upstream messages to external targets.
    """
    def __init__(self, registry=None):
        """ Creates a new instance; ``registry`` is an optional lookup table
            mapping dispatcher keys to callback objects.
        """
        self.registry = registry

    def __call__(self, sender, **kwargs):
        """ Callback signal processor for dispatching upstream messages.

            sender:
                An ExternalDispatch signal payload providing a "dispatcher"
                entry and a "data" dictionary.
        """
        payload = sender.get("data")
        self.sender = sender.get("dispatcher")
        # With a registry, the "dispatcher" entry is treated as a lookup key;
        # without one, it is assumed to be the callback object itself.
        target = self.registry.get(self.sender) if self.registry else self.sender
        return target.dispatch(**payload)
class ExternalWSDispatch(Signal):
    """ Dispatching signal for remote web service calls.

        Provided arguments:
            wsname: the remote web service name
            pargs:  path args for formatting the path String
            data:   POST data dictionary
            query:  GET query dict
    """
    def __init__(self):
        super(ExternalWSDispatch, self).__init__(
            providing_args=["wsname", "pargs", "data", "query"]
        )
class ExternalWSDispatcher(object):
    """ A handler for sending messages to external web service targets.
    """
    def __init__(self, wscallback):
        """ Creates a new instance.

            wscallback:
                An object providing a
                ``wsdispatch(wsname, query=..., pargs=..., data=...)`` method
                that performs the actual remote call.
        """
        self.wscallback = wscallback

    def __call__(self, sender, **kwargs):
        """ Callback signal processor for dispatching upstream messages.

            sender:
                An ExternalWSDispatch signal payload providing "wsname",
                "pargs", "data", and "query" entries (see ExternalWSDispatch).
        """
        data = sender.get("data")
        wsname = sender.get("wsname")
        pargs = sender.get("pargs")
        query = sender.get("query")
        # BUG FIX: __init__ stores the callback as ``self.wscallback``, but
        # this method previously read ``self.callback``, which raised
        # AttributeError on every invocation.
        return self.wscallback.wsdispatch(wsname, query=query, pargs=pargs, data=data)
class EventSignal(Signal):
    """ Generic signal passed to an EventSignalHandler; carries the content
        used to record the event.
    """
    def __init__(self):
        super(EventSignal, self).__init__(providing_args=['event'])
class EventSignalHandler(object):
    """ Class based callback implementation for marking events. Creates and
        saves a new instance of the model passed to the __init__ method.
    """
    def __init__(self, model):
        # model: a Django-model-like class accepting **kwargs and exposing save()
        self.model = model

    def __call__(self, sender, **kwargs):
        """ Instantiate and save ``self.model`` from the sender's 'event'
            dictionary.  Returns True on success, False on any failure.
        """
        try:
            data = sender.get('event')
            obj = self.model(**data)
            obj.save()
            return True
        except Exception:
            # BUG FIX: narrowed from a bare ``except:`` so that
            # system-exiting exceptions (KeyboardInterrupt, SystemExit)
            # are no longer swallowed; the best-effort False return for
            # ordinary failures is preserved.
            return False
class CacheSignal(Signal):
    """ Signal carrying a cacheable response: its uri, request, and content.
    """
    def __init__(self):
        super(CacheSignal, self).__init__(providing_args=['uri', 'request', 'content'])
class FileCacheSignal(Signal):
    """ Signal carrying a file-cacheable response: its uri, request, and content.
    """
    def __init__(self):
        super(FileCacheSignal, self).__init__(providing_args=['uri', 'request', 'content'])
| 32.514019 | 90 | 0.61052 | 3,326 | 0.956022 | 0 | 0 | 0 | 0 | 0 | 0 | 1,739 | 0.499856 |