| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
tallguiseindex/providers/coto.py
|
seppo0010/tall-guise-index
| 0
|
12774851
|
<reponame>seppo0010/tall-guise-index
import requests
from lxml import etree
from . import Provider
class Coto(Provider):
URLS = [
'https://www.cotodigital3.com.ar/sitios/cdigi/browse/catalogo-alimentos-frescos-frutas-y-verduras-hortalizas-pesadas/_/N-g7vcbk?Ntt=1004&Ntk=product.sDisp_091',
'https://www.cotodigital3.com.ar/sitios/cdigi/browse/catalogo-alimentos-frescos-frutas-y-verduras-hortalizas-pesadas/_/N-g7vcbk?Nf=product.startDate%7CLTEQ+1.4796864E12%7C%7Cproduct.endDate%7CGTEQ+1.4796864E12&No=12&Nr=AND%28product.language%3Aespa%C3%B1ol%2COR%28product.siteId%3ACotoDigital%29%29&Nrpp=12&Ntk=product.sDisp_091&Ntt=1004'
]
def __init__(self):
self.products = {}
for url in Coto.URLS:
self.scrape_url(url)
def scrape_url(self, url):
r = requests.get(url, headers={
'User-Agent': 'Mozilla/5.0 (X11; CrOS x86_64 8743.83.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.93 Safari/537.36'
})
tree = etree.HTML(r.text)
for product in tree.iterfind('.//li'):
description = product.find('.//div[@class="descrip_full"]')
if description is None:
continue
name = description.text
price = float(product.find('.//span[@class="atg_store_newPrice"]').text.strip().strip('$'))
self.products[name] = price
def price_for_item(self, item):
loweritem = item.lower()
        for (k, v) in self.products.items():  # .iteritems() is Python 2 only
if loweritem in k.lower():
return v
return None
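# Illustrative usage sketch (not part of the original module). Constructing
# Coto() scrapes every URL in Coto.URLS at init time, so this performs network
# requests and depends on the site's current markup; "banana" is just a
# hypothetical query.
#
#   coto = Coto()
#   print(coto.price_for_item("banana"))  # float price, or None if no match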
| 2.390625
| 2
|
lamp/__init__.py
|
leonardocunha2107/LaMP
| 78
|
12774852
|
<filename>lamp/__init__.py
import lamp.Constants
import lamp.Layers
import lamp.SubLayers
import lamp.Models
import lamp.Translator
import lamp.Beam
import lamp.Encoders
import lamp.Decoders
# __all__ must contain strings, not module objects, for `from lamp import *` to work.
__all__ = [
    'Constants',
    'Layers',
    'SubLayers',
    'Models',
    'Translator',
    'Beam',
    'Encoders',
    'Decoders',
]
| 1.195313
| 1
|
setup.py
|
NepsAcademy/course-introduction-to-apis
| 0
|
12774853
|
import sys
from xml.etree.ElementInclude import include
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need fine tuning.
# "packages": ["os"] is used as example only
# build_exe_options = {"packages": ["os"], "excludes": ["tkinter"]}
# base="Win32GUI" should be used only for Windows GUI app
base = None
if sys.platform == "win32":
base = "Win32GUI"
includes = ["jinja2.ext"] # add jinja2.ext here
packages = ["sqlalchemy"]
excludes = ["Tkinter"]
target = Executable(script="main.py", base=base)
build_exe_options = dict(
includes=includes,
packages=packages,
excludes=excludes,
include_files=["resources/", "templates/", "static/", "app.db"],
)  # folder, relative path. Use a (source, destination) tuple, as for a single file, to set an absolute path.
setup(
name="Flask App",
version="0.1",
description="Flask App",
copyDependentFiles=True,
options={"build_exe": build_exe_options},
executables=[target],
)
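# Standard cx_Freeze usage: `python setup.py build` produces the frozen app under
# build/exe.<platform>-<pyver>/ (e.g. exe.win-amd64-3.9, as referenced by the
# commented-out copy step below).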
# # Copy files
# import os
# import shutil
# import os, shutil
# def copytree(src, dst, symlinks=False, ignore=None):
# for item in os.listdir(src):
# s = os.path.join(src, item)
# d = os.path.join(dst, item)
# if os.path.isdir(s):
# shutil.copytree(s, d, symlinks, ignore)
# else:
# shutil.copy2(s, d)
# os.makedirs(os.path.join("build", "exe.win-amd64-3.9", "data"))
# copytree("data", os.path.join("build", "exe.win-amd64-3.9", "data"))
| 2.125
| 2
|
python/DynamicProgramming.py/LargestSquare1Matrix.py
|
sinderpl/CodingExamples
| 0
|
12774854
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 30 15:49:24 2021
@author: alann
"""
arr = [[1,1,0,1,0],
[0,1,1,1,0],
[1,1,1,1,0],
[0,1,1,1,1]]
def largestSquare(arr ) -> int:
if len(arr) < 1 or len(arr[0]) < 1:
return 0
largest = 0
cache = [[0 for i in range(len(arr[0]))] for j in range(len(arr))]
for i, iVal in enumerate(arr):
for j, jVal in enumerate(iVal):
if jVal:
if not i or not j:
cache[i][j] = jVal
else:
                    currentMin = min(cache[i-1][j], cache[i-1][j-1], cache[i][j-1])
cache[i][j] = currentMin + 1
largest = max(largest, cache[i][j])
return largest
largest = largestSquare(arr)
print(largest)
"""
if we are allowed to modify original array we can do it in constant space
"""
arr = [[1,1,0,1,0],
[0,1,1,1,0],
[1,1,1,1,0],
[0,1,1,1,1]]
def largestSquare(arr ) -> int:
if len(arr) < 1 or len(arr[0]) < 1:
return 0
largest = 0
for i, iVal in enumerate(arr):
for j, jVal in enumerate(iVal):
if jVal:
if not i or not j:
arr[i][j] = jVal
else:
arr[i][j] = 1 + min(arr[i-1][j], arr[i-1][j-1],arr[i][j-1])
largest = max(largest, arr[i][j])
return largest
largest = largestSquare(arr)
print(largest)
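# For the 4x5 matrix above both versions print 3: the largest all-ones square is
# the 3x3 block spanning rows 1-3 and columns 1-3 (0-indexed). The recurrence
# cell = 1 + min(top, top-left, left) only grows a square when all three
# neighbours already anchor squares of at least that size.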
| 3.359375
| 3
|
moonlight/score/reader_test.py
|
lithomas1/moonlight
| 288
|
12774855
|
<reponame>lithomas1/moonlight
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the OMR score reader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import librosa
from protobuf import music_pb2
from moonlight import conversions
from moonlight.protobuf import musicscore_pb2
from moonlight.score import reader
# pylint: disable=invalid-name
Glyph = musicscore_pb2.Glyph
Note = music_pb2.NoteSequence.Note
Point = musicscore_pb2.Point
class ReaderTest(absltest.TestCase):
def testTreble_simple(self):
staff = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=0),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(
staff=[staff])])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
Note(pitch=librosa.note_to_midi('B4'), start_time=0, end_time=1)
]))
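  # Note on the fixtures: y_position is measured in staff steps from the center
  # line, so a notehead at y_position=0 is B4 under a treble clef (asserted
  # above) and D3 under a bass clef (asserted in testBass_simple below).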
def testBass_simple(self):
staff = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_BASS,
x=1,
y_position=reader.BASS_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=0),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(
staff=[staff])])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
Note(pitch=librosa.note_to_midi('D3'), start_time=0, end_time=1)
]))
def testTreble_accidentals(self):
staff_1 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-6),
Glyph(type=Glyph.FLAT, x=16, y_position=-4),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=20, y_position=-4),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=30, y_position=-2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=40, y_position=-4),
])
staff_2 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=150), Point(x=100, y=150)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-6),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=20, y_position=-4),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=30, y_position=-2),
Glyph(type=Glyph.SHARP, x=35, y_position=-2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=40, y_position=-2),
Glyph(type=Glyph.NATURAL, x=45, y_position=-2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=-2),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[
musicscore_pb2.StaffSystem(staff=[staff_1]),
musicscore_pb2.StaffSystem(staff=[staff_2])
])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
# First staff.
Note(pitch=librosa.note_to_midi('C4'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('Eb4'), start_time=1, end_time=2),
Note(pitch=librosa.note_to_midi('G4'), start_time=2, end_time=3),
Note(pitch=librosa.note_to_midi('Eb4'), start_time=3, end_time=4),
# Second staff.
Note(pitch=librosa.note_to_midi('C4'), start_time=4, end_time=5),
Note(pitch=librosa.note_to_midi('E4'), start_time=5, end_time=6),
Note(pitch=librosa.note_to_midi('G4'), start_time=6, end_time=7),
Note(pitch=librosa.note_to_midi('G#4'), start_time=7, end_time=8),
Note(pitch=librosa.note_to_midi('G4'), start_time=8, end_time=9),
]))
def testChords(self):
stem_1 = musicscore_pb2.LineSegment(
start=Point(x=20, y=10), end=Point(x=20, y=70))
stem_2 = musicscore_pb2.LineSegment(
start=Point(x=50, y=10), end=Point(x=50, y=70))
staff = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
# Chord of 2 notes.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-4, stem=stem_1),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-1, stem=stem_1),
# Note not attached to a stem.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=30, y_position=3),
# Chord of 3 notes.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=40, y_position=0, stem=stem_2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=60, y_position=2, stem=stem_2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=60, y_position=4, stem=stem_2),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(
staff=[staff])])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
# First chord.
Note(pitch=librosa.note_to_midi('E4'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('A4'), start_time=0, end_time=1),
# Note without a stem.
Note(pitch=librosa.note_to_midi('E5'), start_time=1, end_time=2),
# Second chord.
Note(pitch=librosa.note_to_midi('B4'), start_time=2, end_time=3),
Note(pitch=librosa.note_to_midi('D5'), start_time=2, end_time=3),
Note(pitch=librosa.note_to_midi('F5'), start_time=2, end_time=3),
]))
def testBeams(self):
beam_1 = musicscore_pb2.LineSegment(
start=Point(x=10, y=20), end=Point(x=40, y=20))
beam_2 = musicscore_pb2.LineSegment(
start=Point(x=70, y=40), end=Point(x=90, y=40))
beam_3 = musicscore_pb2.LineSegment(
start=Point(x=70, y=60), end=Point(x=90, y=60))
staff = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
# 2 eighth notes.
Glyph(
type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-4, beam=[beam_1]),
Glyph(
type=Glyph.NOTEHEAD_FILLED, x=40, y_position=-1, beam=[beam_1]),
# 1 quarter note.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=0),
# 2 sixteenth notes.
Glyph(
type=Glyph.NOTEHEAD_FILLED,
x=60,
y_position=-2,
beam=[beam_2, beam_3]),
Glyph(
type=Glyph.NOTEHEAD_FILLED,
x=90,
y_position=2,
beam=[beam_2, beam_3]),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(
staff=[staff])])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
Note(pitch=librosa.note_to_midi('E4'), start_time=0, end_time=0.5),
Note(pitch=librosa.note_to_midi('A4'), start_time=0.5, end_time=1),
Note(pitch=librosa.note_to_midi('B4'), start_time=1, end_time=2),
Note(pitch=librosa.note_to_midi('G4'), start_time=2, end_time=2.25),
Note(
pitch=librosa.note_to_midi('D5'), start_time=2.25,
end_time=2.5),
]))
def testAllNoteheadTypes(self):
staff = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-6),
Glyph(type=Glyph.NOTEHEAD_EMPTY, x=10, y_position=-6),
Glyph(type=Glyph.NOTEHEAD_WHOLE, x=10, y_position=-6),
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[musicscore_pb2.StaffSystem(
staff=[staff])])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
Note(pitch=librosa.note_to_midi('C4'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('C4'), start_time=1, end_time=3),
Note(pitch=librosa.note_to_midi('C4'), start_time=3, end_time=7),
]))
def testStaffSystems(self):
# 2 staff systems on separate pages, each with 2 staves, and no bars.
system_1_staff_1 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=100, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=-6),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=-2),
])
system_1_staff_2 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=150), Point(x=100, y=150)],
glyph=[
Glyph(
type=Glyph.CLEF_BASS,
x=2,
y_position=reader.BASS_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=0),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=40, y_position=2),
# Played after the second note in the first staff, although it is to
# the left of it.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=45, y_position=4),
])
system_2_staff_1 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=250), Point(x=100, y=250)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
Glyph(type=Glyph.REST_QUARTER, x=20, y_position=0),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=-2),
])
system_2_staff_2 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=250), Point(x=100, y=250)],
glyph=[
Glyph(
type=Glyph.CLEF_BASS,
x=2,
y_position=reader.BASS_CLEF_EXPECTED_Y),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=10, y_position=0),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=40, y_position=2),
])
notes = conversions.score_to_notesequence(reader.ScoreReader()(
musicscore_pb2.Score(page=[
musicscore_pb2.Page(system=[
musicscore_pb2.StaffSystem(
staff=[system_1_staff_1, system_1_staff_2]),
]),
musicscore_pb2.Page(system=[
musicscore_pb2.StaffSystem(
staff=[system_2_staff_1, system_2_staff_2]),
]),
]),))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
# System 1, staff 1.
Note(pitch=librosa.note_to_midi('C4'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('G4'), start_time=1, end_time=2),
# System 1, staff 2.
Note(pitch=librosa.note_to_midi('D3'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('F3'), start_time=1, end_time=2),
Note(pitch=librosa.note_to_midi('A3'), start_time=2, end_time=3),
# System 2, staff 1.
# Quarter rest.
Note(pitch=librosa.note_to_midi('G4'), start_time=4, end_time=5),
# System 2, staff 2.
Note(pitch=librosa.note_to_midi('D3'), start_time=3, end_time=4),
Note(pitch=librosa.note_to_midi('F3'), start_time=4, end_time=5),
]))
def testMeasures(self):
# 2 staves in the same staff system with multiple bars.
staff_1 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=50), Point(x=300, y=50)],
glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=1,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
# Key signature.
Glyph(type=Glyph.SHARP, x=10, y_position=+4),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=20, y_position=-2),
# Accidental.
Glyph(type=Glyph.FLAT, x=40, y_position=-1),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=-1),
# Second bar.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=120, y_position=0),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=180, y_position=+4),
# Third bar.
# Accidental not propagated to this note.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=220, y_position=-1),
])
staff_2 = musicscore_pb2.Staff(
staffline_distance=10,
center_line=[Point(x=0, y=150), Point(x=300, y=150)],
glyph=[
Glyph(
type=Glyph.CLEF_BASS,
x=1,
y_position=reader.BASS_CLEF_EXPECTED_Y),
# Key signature.
Glyph(type=Glyph.FLAT, x=15, y_position=-2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=20, y_position=-2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=50, y_position=+2),
# Second bar.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=150, y_position=-2),
# Third bar.
Glyph(type=Glyph.REST_QUARTER, x=220, y_position=0),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=280, y_position=-2),
])
staff_system = musicscore_pb2.StaffSystem(
staff=[staff_1, staff_2],
bar=[_bar(0), _bar(100), _bar(200),
_bar(300)])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[staff_system])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
# Staff 1, bar 1.
Note(pitch=librosa.note_to_midi('G4'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('Ab4'), start_time=1, end_time=2),
# Staff 1, bar 2.
Note(pitch=librosa.note_to_midi('B4'), start_time=2, end_time=3),
Note(pitch=librosa.note_to_midi('F#5'), start_time=3, end_time=4),
# Staff 1, bar 3.
Note(pitch=librosa.note_to_midi('A4'), start_time=4, end_time=5),
# Staff 2, bar 1.
Note(pitch=librosa.note_to_midi('Bb2'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('F3'), start_time=1, end_time=2),
# Staff 2, bar 2.
Note(pitch=librosa.note_to_midi('Bb2'), start_time=2, end_time=3),
# Staff 2, bar 3.
Note(pitch=librosa.note_to_midi('Bb2'), start_time=5, end_time=6),
]))
def testKeySignatures(self):
# One staff per system, two systems.
staff_1 = musicscore_pb2.Staff(glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=5,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
# D major key signature.
Glyph(type=Glyph.SHARP, x=15, y_position=+4),
Glyph(type=Glyph.SHARP, x=25, y_position=+1),
# Accidental which cannot be interpreted as part of the key
# signature.
Glyph(type=Glyph.SHARP, x=35, y_position=+2),
Glyph(type=Glyph.NOTEHEAD_FILLED, x=45, y_position=+2), # D#5
Glyph(type=Glyph.NOTEHEAD_EMPTY, x=55, y_position=+1), # C#5
Glyph(type=Glyph.NOTEHEAD_FILLED, x=65, y_position=-3), # F#4
# New measure. The key signature should be retained.
Glyph(type=Glyph.NOTEHEAD_EMPTY, x=105, y_position=-3), # F#4
Glyph(type=Glyph.NOTEHEAD_FILLED, x=125, y_position=+1), # C#5
# Accidental is not retained.
Glyph(type=Glyph.NOTEHEAD_FILLED, x=145, y_position=+2), # D5
])
staff_2 = musicscore_pb2.Staff(glyph=[
Glyph(
type=Glyph.CLEF_TREBLE,
x=5,
y_position=reader.TREBLE_CLEF_EXPECTED_Y),
# No key signature on this line. No accidentals.
Glyph(type=Glyph.NOTEHEAD_EMPTY, x=25, y_position=-3), # F4
Glyph(type=Glyph.NOTEHEAD_EMPTY, x=45, y_position=+1), # C5
])
notes = conversions.page_to_notesequence(reader.ScoreReader().read_page(
musicscore_pb2.Page(system=[
musicscore_pb2.StaffSystem(
staff=[staff_1], bar=[_bar(0), _bar(100),
_bar(200)]),
musicscore_pb2.StaffSystem(staff=[staff_2]),
])))
self.assertEqual(
notes,
music_pb2.NoteSequence(notes=[
# First measure.
Note(pitch=librosa.note_to_midi('D#5'), start_time=0, end_time=1),
Note(pitch=librosa.note_to_midi('C#5'), start_time=1, end_time=3),
Note(pitch=librosa.note_to_midi('F#4'), start_time=3, end_time=4),
# Second measure.
Note(pitch=librosa.note_to_midi('F#4'), start_time=4, end_time=6),
Note(pitch=librosa.note_to_midi('C#5'), start_time=6, end_time=7),
Note(pitch=librosa.note_to_midi('D5'), start_time=7, end_time=8),
# Third measure on a new line, with no key signature.
Note(pitch=librosa.note_to_midi('F4'), start_time=8, end_time=10),
Note(pitch=librosa.note_to_midi('C5'), start_time=10, end_time=12),
]))
def _bar(x):
return musicscore_pb2.StaffSystem.Bar(
x=x, type=musicscore_pb2.StaffSystem.Bar.STANDARD_BAR)
if __name__ == '__main__':
absltest.main()
| 2.046875
| 2
|
vsmomi/command_line_parser.py
|
dahuebi/vsmomi
| 0
|
12774856
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from future.builtins.disabled import *
import sys
import fnmatch
import re
import os
import argparse
from argparse import ArgumentTypeError
import traceback
from . import commands
class CommandLineParser(object):
def __init__(self):
self.initParser()
def toNumber(self, value):
number = value
try:
number = int(value)
# need native int not newint
number = eval("{}".format(number))
except (TypeError, ValueError):
pass
return number
def unitToFactor(self, unit):
units = {
"k": 1024,
"m": 1024*1024,
"g": 1024*1024*1024,
"t": 1024*1024*1024*1024,
}
factor = 1
try:
if unit:
factor = units[unit[0].lower()]
except KeyError:
raise KeyError("Unsupported unit '{}'".format(unit))
return factor
def patternToRegexp(self, pattern):
        if isinstance(pattern, bytes):  # argparse yields native str on Python 3
            pattern = pattern.decode("UTF-8")
try:
regexp = None
if pattern.startswith("~"):
regexp = re.compile(pattern[1:])
else:
pattern = fnmatch.translate(pattern)
regexp = re.compile("^"+pattern)
except:
traceback.print_exc()
raise
return regexp
def diskModeType(self, s):
fmt = "all | none | <ctrlNr>-<slotNr> [<ctrlNr>-<slotNr>]"
s = s.lower()
if s == "all":
return s
elif s == "none":
return None
pattern = "^(\d+)-(\d+)$"
match = self.matchPattern(pattern, fmt, s)
n, m = (self.toNumber(x) for x in match.groups())
return (n, m)
def memoryType(self, s):
fmt = "<number>[m|g]"
pattern = "^(\d+)(?([m|g].?))$"
pattern = "^([\d.]+)(:?([m|M|g|G]).?)$"
match = self.matchPattern(pattern, fmt, s)
mem, unit = match.groups()
factor = self.unitToFactor(unit)
if factor == 1:
factor = 1024
mem = float(mem)
mem = factor * mem
return int(mem)
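    # Examples of the accepted format (sketch): memoryType("2g") -> 2 * 1024**3,
    # memoryType("512m") -> 512 * 1024**2, and a bare number such as "512" falls
    # back to the default factor of 1024.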
def extraConfigType(self, s):
fmt = "<key>=<value>"
pattern = "^([^=]+)=(.*)$"
match = self.matchPattern(pattern, fmt, s)
key, value = match.groups()
return (key, value)
def isoType(self, s):
fmt = "\[datastore\] <path>"
pattern = "^\[[^\]]+\]\s.*$"
match = self.matchPattern(pattern, fmt, s)
return s
def diskLinkedType(self, s):
fmt = "[<ctrlNr>-<slotNr>,]vm[:snapshot],<ctrlNr>-<slotNr>"
pattern = "^(?:(\d+)-(\d+),)?([^:,]+)(?::([^,]+))?(?:,(\d+)-(\d+))$"
match = self.matchPattern(pattern, fmt, s)
n, m, vm, snapshot, x, y = (self.toNumber(x) for x in match.groups())
return {"slot": (n, m), "vm": (vm, snapshot), "vmSlot": (x, y)}
def diskNewType(self, s):
fmt = "[<ctrlNr>-<slotNr>,]size=<capacity>[mb|gb|tb]"
pattern = "^(?:(\d+)-(\d+),)?size=(\d+)([m|M|g|G|t|T].?)?$"
match = self.matchPattern(pattern, fmt, s)
n, m, size, unit = (self.toNumber(x) for x in match.groups())
factor = self.unitToFactor(unit)
size = factor * size
return {"slot": (n, m), "capacity": size}
def diskDestroyType(self, s):
fmt = "<ctrlNr>-<slotNr>"
pattern = "^(\d+)-(\d+)$"
match = self.matchPattern(pattern, fmt, s)
n, m = (self.toNumber(x) for x in match.groups())
return (n, m)
def nicAddType(self, s):
fmt = "[mac=xx:xx:xx:xx:xx:xx,ip=a.b.c.d/8,gw=u,v,w,x]"
macPattern = "[.:]".join(["[0-9A-F]{2}"] * 6)
ipPattern = "\.".join(["\d+"] * 4)
pattern = "^(?:mac=({0}),?)?(?:ip=({1})(?:/(\d+))?,?)?(?:gw=({1}),?)?$".format(
macPattern, ipPattern)
match = self.matchPattern(pattern, fmt, s)
mac, ip, mask, gw = match.groups()
return {"mac": mac, "ip": ip, "mask": mask, "gw": gw}
def matchPattern(self, pattern, fmt, s):
reg = re.compile(pattern, re.I)
match = reg.search(s)
if not match:
raise argparse.ArgumentTypeError(
"'{}' does not match format'{}'".format(s, fmt))
return match
def getSubParser(self, function, subparsers, **kwargs):
parser = subparsers.add_parser(
function,
formatter_class=argparse.RawTextHelpFormatter,
**kwargs)
return parser
def initParser(self):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
self.parser = parser
parser.add_argument(
"--dryrun", action="store_true",
help=argparse.SUPPRESS)
parser.add_argument(
"--vcenter",
type=str,
metavar="host",
help="Hostname/IP of the VCenter")
parser.add_argument(
"--vc-user",
type=str,
metavar="user", dest="vcUser",
help="VCenter username")
parser.add_argument(
"--vc-pass",
type=str,
metavar="password", dest="vcPass",
help="VCenter password, may be base64 encoded")
parser.add_argument(
"--auth",
type=str,
default="auth.ini",
metavar="auth.ini",
help="Load credentials from auth file, user empty to save in user home")
parser.add_argument(
"--save-auth",
action="store_true", dest="saveAuth",
help="Save/update auth file")
parser.add_argument(
"--ask-cred", action="store_true", dest="askCred",
help="Force user to enter credentials")
subparsers = parser.add_subparsers(dest="which")
subparsers.required = True
for mod in commands.commands:
mod.addParser(self, subparsers)
def _currentParserArgs(self, args):
which = args.which
keys = getattr(args, "{}Args".format(which))
parserArgs = {}
for k, v in vars(args).items():
if k in keys:
parserArgs[k] = v
return parserArgs
def showFullHelp(self):
# http://stackoverflow.com/questions/20094215/argparse-subparser-monolithic-help-output
parser = self.parser
# print main help
print(parser.format_help())
# retrieve subparsers from parser
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
# there will probably only be one subparser_action,
        # but better safe than sorry
for subparsers_action in subparsers_actions:
# get all subparsers and print help
for choice, subparser in subparsers_action.choices.items():
print("--------------------------------------------------------------------------------")
print("Command '{}'".format(choice))
print(subparser.format_help())
def parse(self, argv=sys.argv[1:]):
args, nestedArgv = self.parser.parse_known_args(argv)
args.m2m = False
if args.which == "m2m":
args.m2m = True
if not nestedArgv or nestedArgv[0] == "-":
# read json args from stdin
raise NotImplementedError()
else:
self.parser.parse_args(nestedArgv, namespace=args)
else:
self.parser.parse_args(argv, namespace=args)
# camelCase, remove unwanted characters
which = args.which
which = which.title()
which = re.sub("-", "", which)
which = which[0].lower() + which[1:]
args.which = which
parserArgs = self._currentParserArgs(args)
return which, args, parserArgs
| 2.78125
| 3
|
deal_solver/_context/__init__.py
|
orsinium-labs/deal-solver
| 8
|
12774857
|
from ._context import Context
from ._layer import ExceptionInfo, ReturnInfo
from ._scope import Scope
__all__ = [
'Context',
'ExceptionInfo',
'ReturnInfo',
'Scope',
]
| 1.195313
| 1
|
hatspil/hatspil.py
|
dodomorandi/hatspil
| 2
|
12774858
|
<gh_stars>1-10
"""The execution module of HaTSPiL.
This module contains the basic elements to start the execution of the
software from command line.
"""
import argparse
import itertools
import logging
import os
import re
import shutil
import sys
import traceback
from email.mime.text import MIMEText
from enum import Enum
from multiprocessing import Manager, Pool
from typing import (Any, Callable, Dict, Iterable, List, MutableMapping,
Optional, Set, Tuple, Union, cast)
from .aligner import GenericAligner, RnaSeqAligner
from .config import Config
from .core import utils
from .core.barcoded_filename import Analyte, BarcodedFilename, Tissue
from .runner import Runner
from .xenograft import Xenograft, XenograftClassifier, Xenome
def get_parser() -> argparse.ArgumentParser:
"""Create the command line argument parser."""
parser = argparse.ArgumentParser(
description="Makes your life easier when performing some HTS analysis."
)
parser.add_argument(
"--aligner",
action="store",
dest="aligner",
choices=[aligner.name.lower() for aligner in GenericAligner] + ["auto"],
default="auto",
help="Select the aligner. When this option is set to "
"'auto' will be used the first aligner available",
)
parser.add_argument(
"--rnaseq-aligner",
action="store",
dest="rnaseq_aligner",
choices=[aligner.name.lower() for aligner in RnaSeqAligner] + ["auto"],
default="auto",
help="Select the aligner for RNA-Seq data. When this option is set to "
"'auto' will be used the first aligner available",
)
parser.add_argument(
"--xenograft-classifier",
action="store",
dest="xenograft_classifier",
choices=[classifier.name.lower() for classifier in XenograftClassifier]
+ ["auto"],
default="auto",
help="Select the xenograft classifier, the software used to "
"distinguish the reads belonging to different organisms. When this "
"option is set to 'auto' will be used the first classifier "
"available",
)
parser.add_argument(
"--configout",
action="store",
metavar="filename",
help="Dumps a default (almost empty configuration) in "
"a file.\nWhen this option is passed, any other "
"option will be ignored and the program will exit "
"after the file is being written.",
)
parser.add_argument(
"--config",
"-c",
action="store",
metavar="config.ini",
help="Select the configuration file. If it is not "
"specified, the program will try to search for a file "
"called 'config.ini' in the current working "
"directory. If it is not available, an error will be "
"raised.",
)
parser.add_argument(
"--no-report",
dest="generate_report",
action="store_false",
help="Do not create a report for the current run. The creation of reports requires a valid MongoDB configuration.",
)
parser.add_argument(
"--no-global-report",
dest="generate_global_report",
action="store_false",
help="Do not create a global report for all the data in the database. The creation of reports requires a valid MongoDB configuration.",
)
parser.add_argument(
"--no-mail", dest="mail", action="store_false", help="Do not send emails."
)
parser.add_argument(
"--no-cutadapt",
dest="use_cutadapt",
action="store_false",
help="Skips cutadapt.",
)
parser.add_argument(
"--no-tdf", dest="use_tdf", action="store_false", help="Skips tdf generation."
)
parser.add_argument(
"--no-R-checks",
dest="r_checks",
action="store_false",
help="Skips some R dependency checks. If omitted, "
"the program will check some R depencencies and, "
"if some packages are found missing, it will try to "
"install them.",
)
parser.add_argument(
"--dont-use-normals",
action="store_false",
dest="use_normals",
help="Normally, whenever a normal sample is found, it is used. "
"In this case many phases of the analysis are "
"performed using different parameters. If this option "
"is passed, these phases are skipped.",
)
parser.add_argument(
"--dont-mark-duplicates",
action="store_false",
dest="mark_duplicates",
help="Do not mark PCR duplicates during mapping phase "
"for xenograft tissues.",
)
parser.add_argument(
"--skip-xenograft-classifier",
action="store_false",
dest="use_xenograft_classifier",
help="Do not use xenograft classifiers even with xenograft tissues",
)
parser.add_argument(
"--post-recalibration",
action="store_true",
help="Perform a post-recalibration analysis after the basic recalibration.",
)
parser.add_argument(
"--compress-fastq",
action="store_true",
help="If set, the fastqs files are compressed at the "
"end of the mapping phase.",
)
parser.add_argument(
"--trim-5",
action="store",
type=int,
metavar="n_bases",
default=5,
help="Bases that will be trimmed at 5' (default=5)",
)
parser.add_argument(
"--trim-3",
action="store",
type=int,
metavar="n_bases",
default=None,
help="Bases that will be trimmed at 3' "
"(default=10 when --xenograft is passed, 0 otherwise)",
)
parser.add_argument(
"--gatk-threads",
metavar="n",
action="store",
type=int,
default=20,
help="Number of threads to be used for GATK. Default=20.",
)
parser.add_argument(
"--picard-max-records",
action="store",
help="Sets the MAX_RECORDS_IN_RAM for Picard. "
"If unspecified, the parameter is not passed.",
)
parser.add_argument(
"--use-date",
action="store",
default=None,
type=utils.parsed_date,
metavar="YYYY_MM_DD",
dest="use_date",
help="Use the specified date instead of the current one",
)
parser.add_argument("--skip-mapping", action="store_true", help=argparse.SUPPRESS)
parser.add_argument("--only-mapping", action="store_true", help=argparse.SUPPRESS)
list_file_group = parser.add_mutually_exclusive_group(required=False)
list_file_group.add_argument(
"--list-file",
action="store",
help="The name of the file containing the name of the samples, one by line.",
)
list_file_group.add_argument(
"--scan-samples",
action="store_true",
help="Scan for sample files instead of reading them from a file",
)
parser.add_argument(
"--root-dir", action="store", help="The root directory for the analysis"
)
parser.add_argument(
"--fastq-dir",
action="store",
help="The directory where the fastq files of the samples are located.",
)
parser.add_argument(
"--process-pool",
"-p",
action="store",
type=int,
metavar="n",
default=5,
help="The size of the process pool.",
)
return parser
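# Sketch of a minimal invocation (hypothetical paths): at most one of --list-file /
# --scan-samples can be given here, and main() below additionally requires one of
# them together with --root-dir and --fastq-dir.
#
#   args = get_parser().parse_args(
#       ["--scan-samples", "--root-dir", "/data/run1", "--fastq-dir", "/data/run1/fastq"])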
def set_aligner_param(
args: argparse.Namespace,
parameters: MutableMapping[str, Any],
param_name: str,
available_aligners: Iterable[Enum],
files_checkers: Iterable[Optional[Callable[[Config], bool]]],
config: Config,
) -> None:
"""Set the aligner parameters depending on the command line args.
It evaluates the command line parameters and the software available
in order to choose the best aligner. It can be used dynamically in
order to work for generic aligners and RNA-seq aligners.
It is worth noting that `available_aligners` and `files_checkers`
are iterated together using `zip`, therefore they must have the same
length to produce meaningful results.
    In case no valid option is found, the function prints an error and
    exits the process.
Args:
args: the command line arguments.
parameters: the parameters that will be updated to set the
desired aligner.
param_name: name of the argument that will be searched for and
name of the parameter that will be set.
available_aligners: an iterable collection of enumerations
containing the possible aligners.
files_checkers: an iterable collection of functions that take a
config object and return a bool. Useful to
perform fine-grained checks for each aligner.
config: the configuration object used for the analysis.
"""
type_aligner = getattr(args, param_name)
if type_aligner == "auto" or type_aligner is None:
for aligner, files_checker in zip(available_aligners, files_checkers):
aligner_exec = getattr(config, aligner.name.lower())
if aligner_exec is not None and shutil.which(aligner_exec) is not None:
if files_checker is not None and not files_checker(config):
continue
parameters[param_name] = aligner
break
if param_name not in parameters:
print(
"No valid aligner is available. "
"Please check your configuration file.\n"
"One of the following parameters must be valid: "
+ " ".join(aligner.name.lower() for aligner in available_aligners),
file=sys.stderr,
)
exit(-5)
else:
aligner_names = [aligner.name.lower() for aligner in available_aligners]
aligner_lower = type_aligner.lower()
if aligner_lower not in aligner_names:
print(
"The aligner %s is not present in the available ones" % type_aligner,
file=sys.stderr,
)
exit(-5)
aligner_index = aligner_names.index(aligner_lower)
files_checker = next(itertools.islice(files_checkers, aligner_index, None))
if files_checker is not None and not files_checker(config):
print(
"The aligner %s does not have all the needed config "
"parameters correctly set" % type_aligner
)
exit(-5)
aligner_exec = getattr(config, aligner_lower)
if aligner_exec is not None and shutil.which(aligner_exec) is not None:
if param_name == "aligner":
parameters[param_name] = GenericAligner[type_aligner.upper()]
else:
parameters[param_name] = RnaSeqAligner[type_aligner.upper()]
else:
print("The chosen aligner is not executable", file=sys.stderr)
exit(-5)
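# Illustrative call (mirrors how main() below uses it): pick between NOVOALIGN and
# BWA based on the config and on what is actually executable on PATH.
#
#   set_aligner_param(args, parameters, "aligner",
#                     [GenericAligner.NOVOALIGN, GenericAligner.BWA],
#                     [None, None], config)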
def check_all_kits_are_available(config: Config, samples: Iterable[str]) -> None:
"""Perform a check to see if all the needed kits are available.
This function checks whether all the samples have a combination of
analyte and kit that is available in the `config` object.
In case any kit is missing, an error is printed and the process
    exits.
"""
unavailable_kits: Set[Tuple[int, Analyte]] = set()
for sample in samples:
barcoded = BarcodedFilename.from_sample(sample)
if not barcoded or barcoded.analyte is None or barcoded.kit is None:
continue
kit_identifier = (barcoded.kit, barcoded.analyte)
if kit_identifier not in config.kits:
unavailable_kits.add(kit_identifier)
if unavailable_kits:
print(
"Some kits are not available in the configuration file:\n"
"{}\n"
"Fix configuration and try again.".format(
"\n".join(
[
"[KIT {} {}]".format(kit[0], kit[1].name)
for kit in sorted(list(unavailable_kits))
]
)
),
file=sys.stderr,
)
exit(-1)
def main() -> None:
"""Start HaTSPil.
This is the entry point for the execution of the tool. Preliminary
checks and the handling of the analysis is performed here.
"""
parser = get_parser()
args = parser.parse_args()
if args.configout is not None:
config = Config()
config.save(args.configout)
print("Sample config written to '%s'" % args.configout)
exit(0)
elif (
(not args.list_file and not args.scan_samples)
or not args.root_dir
or not args.fastq_dir
):
print(
"--list-file (or --scan-samples), --root-dir and --fastq-dir "
"are mandatory unless --configout is specified"
)
parser.print_usage()
exit(-1)
if args.config is not None:
if os.path.exists(args.config):
config = Config(args.config)
else:
print("ERROR: config file '%s' does not exist." % args.config)
exit(-1)
else:
if os.path.exists("config.ini"):
config = Config("config.ini")
else:
print(
"ERROR: config file 'config.ini' does not exist in the "
"current directory.\nPlease use '--configout' option to "
"create a config file, then specify it with the '--config' "
"option or just name it 'config.ini' and put in the current "
"directory."
)
exit(-1)
if not config.check_programs():
print(
"ERROR: not every program inside configuration is correctly "
"set. Please fix the problem and try again."
)
exit(-1)
if not config.check_files():
print(
"ERROR: not every file inside configuration is correctly "
"set. Please fix the problem and try again."
)
exit(-1)
if args.list_file is not None and not os.path.exists(args.list_file):
print("ERROR: list_file '%s' does not exist." % args.list_file)
exit(-2)
if not os.path.isdir(args.root_dir):
print("ERROR: root_dir '%s' is not a valid directory." % args.root_dir)
exit(-2)
if not os.path.isdir(args.fastq_dir):
print("ERROR: fastq_dir '%s' is not a valid directory." % args.fastq_dir)
exit(-3)
if config.use_mongodb:
try:
from pymongo import MongoClient
except ImportError:
print(
"ERROR: use_mongodb is set to true inside config file but "
"pymongo is not installed. Please install it using pip3."
)
exit(-4)
try:
client = MongoClient(config.mongodb_host, config.mongodb_port)
except Exception:
print(
"ERROR: cannot connect to MongoDB. Please check the config "
"file under the section MONGODB"
)
exit(-4)
db = client[config.mongodb_database]
try:
db.authenticate(config.mongodb_username, config.mongodb_password)
except Exception:
print(
"ERROR: MongoDB authentication failed. Please check the "
"config file under the section MONGODB"
)
exit(-4)
try:
import bson
except Exception:
print(
"ERROR: bson module cannot be found. \n"
"If you intend to use MongoDB, just install pymongo which includes a "
"bundled bson module. Otherwise, directly install the 'bson' package."
)
exit(-4)
args.fastq_dir = os.path.abspath(args.fastq_dir)
args.root_dir = os.path.abspath(args.root_dir)
if args.list_file:
re_pattern = re.compile(
r"^([^-]+)(?:-([^-]+)(?:-(\d[0-9A-Za-z]|\*)"
r"(?:-(\d|\*)(?:(\d|\*)(\d|\*)?)?"
r"(?:-(\d|\*)(\d|\*)(\d|\*)?)?)?)?)?$"
)
all_filenames = os.listdir(args.fastq_dir)
input_samples_set = set()
with open(args.list_file) as fd:
for line_index, line in enumerate(fd):
match = re_pattern.match(line.strip())
if not match:
print("Invalid file at line %d of file list" % (line_index + 1))
exit(-1)
current_pattern = r"^("
current_pattern += match.group(1).replace("*", r"[^-]+")
group = match.group(2)
if group:
current_pattern += "-"
current_pattern += group.replace("*", r"[^-]+")
group = match.group(3)
if group:
current_pattern += "-"
current_pattern += group.replace("*", r"\d[0-9A-Za-z]")
group = match.group(4)
if group:
current_pattern += "-"
current_pattern += match.group(4).replace("*", r"\d")
group = match.group(5)
if group:
current_pattern += group.replace("*", r"\d")
group = match.group(6)
if group:
current_pattern += group.replace("*", r"\d")
group = match.group(7)
if group:
current_pattern += "-"
current_pattern += group.replace("*", r"\d")
group = match.group(8)
if group:
current_pattern += group.replace("*", r"\d")
group = match.group(9)
if group:
current_pattern += group.replace(
"*", r"\d"
)
else:
current_pattern += r"\d"
else:
current_pattern += r"\d{2}"
else:
current_pattern += r"-\d{3}"
else:
current_pattern += r"\d-\d{3}"
else:
current_pattern += r"\d{2}-\d{3}"
else:
current_pattern += r"-\d{3}-\d{3}"
else:
current_pattern += r"-\d[0-9A-Za-z]-\d{3}-\d{3}"
else:
current_pattern += r"-[^-]+-\d[0-9A-Za-z]-\d{3}-\d{3}"
current_pattern += r")(?:\.(?:hg|mm)\d+)?(?:\.R[12])?\.fastq(\.gz)?$"
re_current_pattern = re.compile(current_pattern, re.I)
added_files = 0
for filename in all_filenames:
match = re_current_pattern.match(os.path.basename(filename))
if match:
barcoded_filename = BarcodedFilename(filename)
if (
barcoded_filename.tissue != Tissue.PRIMARY_XENOGRAFT_TISSUE
and barcoded_filename.tissue
!= Tissue.CELL_LINE_DERIVED_XENOGRAFT_TISSUE
) or barcoded_filename.organism is None:
input_samples_set.add(match.group(1))
added_files += 1
if added_files == 0:
print("ERROR: cannot find any file for sample %s" % line.strip())
exit(-1)
input_samples = list(input_samples_set)
elif args.scan_samples:
fastq_files = [
filename
for filename in os.listdir(args.fastq_dir)
if re.search(r"\.fastq$", filename, re.I)
]
input_samples = list(
set(
[
re.sub(r"(\.R[12])?\.fastq$", "", filename, flags=re.I)
for filename in fastq_files
if not re.search(r"trimmed|clipped", filename, re.I)
]
)
)
else:
raise RuntimeError("Unhandled condition")
check_all_kits_are_available(config, input_samples)
parameters = {
"use_xenograft_classifier": args.use_xenograft_classifier,
"use_cutadapt": args.use_cutadapt,
"mark_duplicates": args.mark_duplicates,
"run_post_recalibration": args.post_recalibration,
"compress_fastq": args.compress_fastq,
"gatk_threads": args.gatk_threads,
"picard_max_records": args.picard_max_records,
"use_normals": args.use_normals,
"trim_5": args.trim_5,
"trim_3": args.trim_3,
"use_date": args.use_date,
"skip_mapping": args.skip_mapping,
"only_mapping": args.only_mapping,
"use_tdf": args.use_tdf,
"generate_report": args.generate_report,
"generate_global_report": args.generate_global_report,
}
logging.basicConfig(format="%(asctime)-15s %(message)s")
aligner_is_needed = False
rnaseq_aligner_is_needed = False
xenograft_classifier_is_needed = False
for sample in input_samples:
barcoded_filename = BarcodedFilename.from_sample(sample)
if barcoded_filename.analyte == Analyte.RNASEQ:
rnaseq_aligner_is_needed = True
else:
aligner_is_needed = True
if barcoded_filename.is_xenograft():
xenograft_classifier_is_needed = True
if rnaseq_aligner_is_needed:
set_aligner_param(
args,
parameters,
"rnaseq_aligner",
[RnaSeqAligner.STAR],
[Config.check_star_files],
config,
)
if aligner_is_needed:
set_aligner_param(
args,
parameters,
"aligner",
[GenericAligner.NOVOALIGN, GenericAligner.BWA],
[None, None],
config,
)
if xenograft_classifier_is_needed:
xenograft_classifier = Xenograft.get_available_classifier(args, config)
if xenograft_classifier is None:
if args.xenograft_classifier == "auto":
print(
"Xenograft classifier is needed but none is available. "
"Check config.",
file=sys.stderr,
)
else:
print(
f"Xenograft classifier '{args.xenograft_classifier}' is "
"selected, but it cannot be used. Check config.",
file=sys.stderr,
)
exit(-1)
parameters["xenograft_classifier"] = xenograft_classifier
if parameters.get("xenograft_classifier") == XenograftClassifier.XENOME:
Xenome.check_correct_index_files(config)
if args.r_checks and args.post_recalibration:
try:
import rpy2.robjects.packages as rpackages
from rpy2.robjects.vectors import StrVector
except ImportError:
rpackages = None
print("cannot correctly import rpy2. R checks are skipped.")
if rpackages:
print("Checking R packages and, eventually, performing installations")
dependencies = ("ggplot2", "gplots", "reshape", "grid", "tools", "gsalib")
rutils = rpackages.importr("utils")
base = rpackages.importr("base")
rutils.chooseCRANmirror(ind=1)
installed_packages = rutils.installed_packages().rx(True, 1)
for package in dependencies:
if not base.is_element(package, installed_packages)[0]:
sys.stdout.write("Installing R package %s..." % package)
rutils.install_packages(StrVector(dependencies), quiet=True)
print(" done.")
installed_packages = rutils.installed_packages().rx(True, 1)
for package in dependencies:
if not base.is_element(package, installed_packages)[0]:
print(
"Package %s has not been correctly installed. "
"Try again or check this dependency manually." % package
)
exit(-1)
print("Done with R packages")
manager = Manager()
runner = Runner(
manager,
root=args.root_dir,
fastq_dir=args.fastq_dir,
parameters=parameters,
config=config,
)
error_raised = False
try:
with Pool(args.process_pool) as pool:
pool.map(runner, input_samples)
if args.mail:
msg = MIMEText(
"Pipeline for file list %s successfully completed." % args.list_file
)
msg["Subject"] = "Pipeline completed"
except Exception:
error_raised = True
traceback.print_exc(file=sys.stdout)
if args.mail:
msg = MIMEText(
"Error while executing pipeline for file list %s.\n"
"Raised exception:\n%s" % (args.list_file, traceback.format_exc())
)
msg["Subject"] = "Pipeline error"
if args.use_normals and not args.only_mapping:
samples: Dict[str, Dict[str, Union[str, List[Tuple[str, str]]]]] = {}
normals: Dict[str, BarcodedFilename] = {}
last_operations = {}
for sample, last_operation in runner.last_operations.items():
last_operations[sample] = last_operation
for filename in utils.get_sample_filenames(last_operation):
barcoded_filename = BarcodedFilename(filename)
assert barcoded_filename.biopsy is not None
assert barcoded_filename.sequencing is not None
fake_sample = "-".join(barcoded_filename.get_barcode().split("-")[0:4])
if barcoded_filename.tissue.is_normal():
sample_type = "control"
else:
sample_type = "sample"
if fake_sample not in samples:
samples[fake_sample] = {}
if sample_type == "control":
samples[fake_sample]["control"] = filename
normals[filename] = barcoded_filename
else:
if "sample" not in samples[fake_sample]:
samples[fake_sample]["sample"] = []
cast(List[Tuple[str, str]], samples[fake_sample]["sample"]).append(
(filename, sample)
)
samples_with_no_normal = {
sample: sample_filenames["sample"][0][0]
for sample, sample_filenames in samples.items()
if "control" not in sample_filenames and len(sample_filenames["sample"]) > 0
}
for sample, filename in samples_with_no_normal.items():
sample_barcode = BarcodedFilename(filename)
candidates = {
filename: barcode
for filename, barcode in normals.items()
if barcode.project == sample_barcode.project
and barcode.patient == sample_barcode.patient
and barcode.molecule == sample_barcode.molecule
and barcode.analyte == sample_barcode.analyte
and barcode.kit == sample_barcode.kit
and barcode.biopsy == sample_barcode.biopsy
and barcode.sample == sample_barcode.sample
}
if len(candidates) == 0:
candidates = {
filename: barcode
for filename, barcode in normals.items()
if barcode.project == sample_barcode.project
and barcode.patient == sample_barcode.patient
and barcode.molecule == sample_barcode.molecule
and barcode.analyte == sample_barcode.analyte
and barcode.kit == sample_barcode.kit
and barcode.biopsy == sample_barcode.biopsy
}
if len(candidates) == 0:
candidates = {
filename: barcode
for filename, barcode in normals.items()
if barcode.project == sample_barcode.project
and barcode.patient == sample_barcode.patient
and barcode.molecule == sample_barcode.molecule
and barcode.analyte == sample_barcode.analyte
and barcode.kit == sample_barcode.kit
}
if len(candidates) == 0:
candidates = {
filename: barcode
for filename, barcode in normals.items()
if barcode.project == sample_barcode.project
and barcode.patient == sample_barcode.patient
and barcode.molecule == sample_barcode.molecule
and barcode.analyte == sample_barcode.analyte
}
if len(candidates) == 0:
candidates = {
filename: barcode
for filename, barcode in normals.items()
if barcode.project == sample_barcode.project
and barcode.patient == sample_barcode.patient
and barcode.molecule == sample_barcode.molecule
}
if len(candidates) == 1:
samples[sample]["control"] = list(candidates.items())[0][0]
elif len(candidates) > 1:
candidates_list = list(candidates.items())
del candidates
candidates_list.sort(key=lambda x: os.stat(x[0]).st_size)
samples[sample]["control"] = candidates_list[-1][0]
triplets: List[Tuple[str, str, Optional[str]]] = []
for sample, values in samples.items():
if "sample" not in values:
continue
for tumor in values["sample"]:
if "control" in values:
triplets.append(
cast(
Tuple[str, str, str],
(tumor[1], tumor[0], values["control"]),
)
)
else:
triplets.append((tumor[1], tumor[0], None))
error_raised = False
try:
with Pool(args.process_pool) as pool:
pool.starmap(runner.with_normals, triplets)
if args.mail:
msg = MIMEText(
"Pipeline for file list %s successfully completed." % args.list_file
)
msg["Subject"] = "Pipeline completed"
except RuntimeError:
error_raised = True
traceback.print_exc(file=sys.stdout)
if args.mail:
msg = MIMEText(
"Error while executing pipeline for file list %s.\n"
"Raised exception:\n%s" % (args.list_file, traceback.format_exc())
)
msg["Subject"] = "Pipeline error"
if not args.only_mapping and config.use_mongodb:
runner.generate_reports(input_samples)
if args.mail and len(config.mails) > 0:
msg["From"] = "HaTSPiL <<EMAIL>>"
msg["To"] = config.mails
try:
import smtplib
smtp = smtplib.SMTP("localhost")
smtp.send_message(msg)
smtp.quit()
except Exception:
print("Cannot send the emails", file=sys.stderr)
if error_raised:
exit(-1)
| 2.59375
| 3
|
latools/helpers/helpers.py
|
douglascoenen/latools
| 0
|
12774859
|
"""
Helper functions used by multiple parts of LAtools.
(c) <NAME> : https://github.com/oscarbranson
"""
import os
import shutil
import re
import configparser
import datetime as dt
import numpy as np
import dateutil as du
import pkg_resources as pkgrs
import uncertainties.unumpy as un
import scipy.interpolate as interp
from .stat_fns import nominal_values
from .analyte_names import pretty_element
# Bunch modifies dict to allow item access using dot (.) operator
class Bunch(dict):
def __init__(self, *args, **kwds):
super(Bunch, self).__init__(*args, **kwds)
self.__dict__ = self
# warnings monkeypatch
# https://stackoverflow.com/questions/2187269/python-print-only-the-message-on-warnings
def _warning(message, category=UserWarning,
filename='', lineno=-1,
file=None, line=None):
print(message)
def get_date(datetime, time_format=None):
"""
    Return a datetime object from a string, with optional time format.
Parameters
----------
datetime : str
Date-time as string in any sensible format.
time_format : datetime str (optional)
        String describing the datetime format. If missing,
        dateutil.parser is used to guess the time format.
"""
if time_format is None:
t = du.parser.parse(datetime)
else:
t = dt.datetime.strptime(datetime, time_format)
return t
def get_total_n_points(d):
"""
Returns the total number of data points in values of dict.
    Parameters
    ----------
d : dict
"""
n = 0
for di in d.values():
n += len(di)
return n
def get_total_time_span(d):
"""
Returns total length of analysis.
"""
tmax = 0
for di in d.values():
if di.uTime.max() > tmax:
tmax = di.uTime.max()
return tmax
def unitpicker(a, llim=0.1, denominator=None, focus_stage=None):
"""
Determines the most appropriate plotting unit for data.
Parameters
----------
a : float or array-like
number to optimise. If array like, the 25% quantile is optimised.
llim : float
minimum allowable value in scaled data.
Returns
-------
(float, str)
(multiplier, unit)
"""
if not isinstance(a, (int, float)):
a = nominal_values(a)
a = np.percentile(a[~np.isnan(a)], 25)
if a == 0:
raise ValueError("Cannot calculate unit for zero.")
if denominator is not None:
pd = pretty_element(denominator)
else:
pd = ''
if focus_stage == 'calibrated':
udict = {0: 'mol/mol ' + pd,
1: 'mmol/mol ' + pd,
2: '$\mu$mol/mol ' + pd,
3: 'nmol/mol ' + pd,
4: 'pmol/mol ' + pd,
5: 'fmol/mol ' + pd}
elif focus_stage == 'ratios':
udict = {0: 'counts/count ' + pd,
1: '$10^{-3}$ counts/count ' + pd,
2: '$10^{-6}$ counts/count ' + pd,
3: '$10^{-9}$ counts/count ' + pd,
4: '$10^{-12}$ counts/count ' + pd,
5: '$10^{-15}$ counts/count ' + pd}
elif focus_stage in ('rawdata', 'despiked', 'bkgsub'):
        udict = {0: 'counts',
1: '$10^{-3}$ counts',
2: '$10^{-6}$ counts',
3: '$10^{-9}$ counts',
4: '$10^{-12}$ counts',
5: '$10^{-15}$ counts'}
else:
udict = {0: '', 1: '', 2: '', 3: '', 4: '', 5: ''}
a = abs(a)
n = 0
if a < llim:
while a < llim:
a *= 1000
n += 1
return float(1000**n), udict[n]
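# Example (sketch): unitpicker(0.0005, focus_stage='rawdata') scales once by 1000
# to clear llim=0.1 and returns (1000.0, '$10^{-3}$ counts').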
def collate_data(in_dir, extension='.csv', out_dir=None):
"""
    Copy all csvs in a nested directory to a single directory.
Function to copy all csvs from a directory, and place
them in a new directory.
Parameters
----------
in_dir : str
Input directory containing csv files in subfolders
extension : str
The extension that identifies your data files.
Defaults to '.csv'.
out_dir : str
Destination directory
Returns
-------
None
"""
if out_dir is None:
out_dir = './' + re.search('^\.(.*)', extension).groups(0)[0]
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
for p, d, fs in os.walk(in_dir):
for f in fs:
if extension in f:
shutil.copy(p + '/' + f, out_dir + '/' + f)
return
def bool_transitions(a):
"""
Return indices where a boolean array changes from True to False
"""
return np.where(a[:-1] != a[1:])[0]
def bool_2_indices(a):
"""
Convert boolean array into a 2D array of (start, stop) pairs.
"""
if any(a):
lims = []
lims.append(np.where(a[:-1] != a[1:])[0])
if a[0]:
lims.append([0])
if a[-1]:
lims.append([len(a) - 1])
lims = np.concatenate(lims)
lims.sort()
return np.reshape(lims, (lims.size // 2, 2))
else:
return None
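# Example (sketch): bool_2_indices(np.array([False, True, True, False, True]))
# returns array([[0, 2], [3, 4]]). Note the first index of each pair is the
# transition index (the element just before the block starts) unless the array
# begins with True, in which case it is 0.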
def enumerate_bool(bool_array, nstart=0):
"""
Consecutively numbers contiguous booleans in array.
i.e. a boolean sequence, and resulting numbering
T F T T T F T F F F T T F
0-1 1 1 - 2 ---3 3 -
    where ' - ' marks False values.
Parameters
----------
bool_array : array_like
Array of booleans.
nstart : int
The number of the first boolean group.
"""
ind = bool_2_indices(bool_array)
ns = np.full(bool_array.size, nstart, dtype=int)
for n, lims in enumerate(ind):
ns[lims[0]:lims[-1] + 1] = nstart + n + 1
return ns
def tuples_2_bool(tuples, x):
"""
Generate boolean array from list of limit tuples.
Parameters
----------
tuples : array_like
[2, n] array of (start, end) values
x : array_like
x scale the tuples are mapped to
Returns
-------
array_like
boolean array, True where x is between each pair of tuples.
"""
if np.ndim(tuples) == 1:
tuples = [tuples]
out = np.zeros(x.size, dtype=bool)
for l, u in tuples:
out[(x > l) & (x < u)] = True
return out
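# Example (sketch): tuples_2_bool([(1, 3)], np.arange(6)) gives
# array([False, False, True, False, False, False]) -- note the comparisons are
# strict, so the limits themselves are excluded.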
def get_example_data(destination_dir):
if os.path.isdir(destination_dir):
overwrite = input(destination_dir +
' already exists. Overwrite? [N/y]: ').lower() == 'y'
if overwrite:
shutil.rmtree(destination_dir)
else:
print(destination_dir + ' was not overwritten.')
shutil.copytree(pkgrs.resource_filename('latools', 'resources/test_data'),
destination_dir)
return
def rangecalc(xs, pad=0.05):
mn = np.nanmin(xs)
mx = np.nanmax(xs)
xr = mx - mn
return [mn - pad * xr, mx + pad * xr]
class un_interp1d(object):
"""
object for handling interpolation of values with uncertainties.
"""
def __init__(self, x, y, fill_value=np.nan, **kwargs):
if isinstance(fill_value, tuple):
nom_fill = tuple([un.nominal_values(v) for v in fill_value])
std_fill = tuple([un.std_devs(v) for v in fill_value])
else:
nom_fill = std_fill = fill_value
self.nom_interp = interp.interp1d(un.nominal_values(x),
un.nominal_values(y),
fill_value=nom_fill, **kwargs)
self.std_interp = interp.interp1d(un.nominal_values(x),
un.std_devs(y),
fill_value=std_fill, **kwargs)
def new(self, xn):
yn = self.nom_interp(xn)
yn_err = self.std_interp(xn)
return un.uarray(yn, yn_err)
def new_nom(self, xn):
return self.nom_interp(xn)
def new_std(self, xn):
return self.std_interp(xn)
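# Sketch of intended use: nominal values and standard deviations are interpolated
# independently and recombined into a uarray.
#
#   f = un_interp1d([0., 1., 2.], un.uarray([0., 10., 20.], [1., 1., 2.]))
#   f.new(0.5)  # ~5.0 +/- 1.0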
def rolling_window(a, window, pad=None):
"""
Returns (win, len(a)) rolling - window array of data.
Parameters
----------
a : array_like
Array to calculate the rolling window of
window : int
Description of `window`.
pad : same as dtype(a)
Description of `pad`.
Returns
-------
array_like
        An array of shape (n, window), where n is len(a) - window + 1
        if pad is None, or len(a) if pad is not None.
"""
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1], )
out = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
# pad shape
if window % 2 == 0:
npre = window // 2 - 1
npost = window // 2
else:
npre = npost = window // 2
if isinstance(pad, str):
if pad == 'ends':
prepad = np.full((npre, window), a[0])
postpad = np.full((npost, window), a[-1])
elif pad == 'mean_ends':
prepad = np.full((npre, window), np.mean(a[:(window // 2)]))
postpad = np.full((npost, window), np.mean(a[-(window // 2):]))
elif pad == 'repeat_ends':
prepad = np.full((npre, window), out[0])
            postpad = np.full((npost, window), out[-1])  # repeat the last window at the end
else:
raise ValueError("If pad is a string, it must be either 'ends', 'mean_ends' or 'repeat_ends'.")
return np.concatenate((prepad, out, postpad))
elif pad is not None:
pre_blankpad = np.empty(((npre, window)))
pre_blankpad[:] = pad
post_blankpad = np.empty(((npost, window)))
post_blankpad[:] = pad
return np.concatenate([pre_blankpad, out, post_blankpad])
else:
return out
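# Usage sketch (illustrative): padding keeps the output aligned with the input
# length, which is what fastsmooth and fastgrad below rely on, e.g.
#   a = np.arange(10, dtype=float)
#   rolling_window(a, 5, pad='ends').shape  # (10, 5); (6, 5) without padding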
def fastsmooth(a, win=11):
"""
    Returns a rolling-window smoothed version of a.
Function to efficiently calculate the rolling mean of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
    sub-sections of the original array, of dimensions [len(a) - win + 1, win].
Parameters
----------
a : array_like
        The 1D array to calculate the rolling mean of.
win : int
The width of the rolling window.
Returns
-------
array_like
        Smoothed version of a, the same length as the input.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
win += 1 # add 1 to window if it is even.
kernel = np.ones(win) / win
npad = int((win - 1) / 2)
spad = np.full(npad + 1, np.mean(a[:(npad + 1)]))
epad = np.full(npad - 1, np.mean(a[-(npad - 1):]))
return np.concatenate([spad, np.convolve(a, kernel, 'valid'), epad])
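# e.g. fastsmooth(np.random.normal(size=100), win=11) returns a 100-element
# smoothed array (a sketch; the exact end behaviour depends on the mean-based
# end padding above).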
def fastgrad(a, win=11):
"""
    Returns the rolling-window gradient of a.
Function to efficiently calculate the rolling gradient of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
    sub-sections of the original array, of dimensions [len(a) - win + 1, win].
Parameters
----------
a : array_like
The 1D array to calculate the rolling gradient of.
win : int
The width of the rolling window.
Returns
-------
array_like
        Gradient of a, assuming a constant integer x-scale.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
        win += 1  # add 1 to window if it is even.
# trick for efficient 'rolling' computation in numpy
# shape = a.shape[:-1] + (a.shape[-1] - win + 1, win)
# strides = a.strides + (a.strides[-1], )
# wins = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
wins = rolling_window(a, win, 'ends')
# apply rolling gradient to data
a = map(lambda x: np.polyfit(np.arange(win), x, 1)[0], wins)
return np.array(list(a))
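# e.g. fastgrad(np.arange(50, dtype=float), win=5) is ~1 away from the ends
# (illustrative sketch; the padded end windows are constant, so their fitted
# slopes drop towards 0).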
def calc_grads(x, dat, keys=None, win=5):
"""
Calculate gradients of values in dat.
Parameters
----------
x : array like
Independent variable for items in dat.
dat : dict
{key: dependent_variable} pairs
keys : str or array-like
Which keys in dict to calculate the gradient of.
win : int
        The width of the rolling window used for the gradient calculation.
Returns
-------
dict of gradients
"""
if keys is None:
keys = dat.keys()
def grad(xy):
if (~np.isnan(xy)).all():
try:
return np.polyfit(xy[0], xy[1], 1)[0]
except ValueError:
return np.nan
else:
return np.nan
xs = rolling_window(x, win, pad='repeat_ends')
grads = Bunch()
for k in keys:
d = nominal_values(rolling_window(dat[k], win, pad='repeat_ends'))
grads[k] = np.array(list(map(grad, zip(xs, d))))
return grads
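# Usage sketch (key names are illustrative): given a time axis and a dict of
# signals, e.g.
#   grads = calc_grads(t, {'Ca43': ca, 'Mg24': mg}, keys=['Ca43'], win=5)
# returns a Bunch mapping each requested key to its rolling gradient vs t.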
def findmins(x, y):
""" Function to find local minima.
Parameters
----------
x, y : array_like
1D arrays of the independent (x) and dependent (y) variables.
Returns
-------
array_like
Array of points in x where y has a local minimum.
"""
return x[np.r_[False, y[1:] < y[:-1]] & np.r_[y[:-1] < y[1:], False]]
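# e.g. sampling y = (x - 3)**2 on a grid, findmins(x, y) returns the x value(s)
# where the sampled y has a strict local minimum (an illustrative sketch).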
def stack_keys(ddict, keys, extra=None):
"""
Combine elements of ddict into an array of shape (len(ddict[key]), len(keys)).
Useful for preparing data for sklearn.
Parameters
----------
ddict : dict
A dict containing arrays or lists to be stacked.
Must be of equal length.
keys : list or str
The keys of dict to stack. Must be present in ddict.
extra : list (optional)
A list of additional arrays to stack. Elements of extra
must be the same length as arrays in ddict.
Extras are inserted as the first columns of output.
"""
if isinstance(keys, str):
d = [ddict[keys]]
else:
d = [ddict[k] for k in keys]
if extra is not None:
d = extra + d
return np.vstack(d).T
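# Usage sketch: stack two equal-length arrays into an (n, 2) feature matrix for
# sklearn, e.g.
#   X = stack_keys({'a': np.arange(5), 'b': np.ones(5)}, ['a', 'b'])  # shape (5, 2)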
| 2.578125
| 3
|
tasrif/test_scripts/test_pipeline_FillNAOperator.py
|
qcri/tasrif
| 20
|
12774860
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# %%
import pandas as pd
from tasrif.processing_pipeline.pandas import FillNAOperator
df = pd.DataFrame(
{
"name": ["Alfred", "juli", "Tom", "Ali"],
"height": [np.nan, 155, 159, 165],
"born": [pd.NaT, pd.Timestamp("2010-04-25"), pd.NaT, pd.NaT],
}
)
operator = FillNAOperator(axis=0, value="laptop")
df = operator.process(df)[0]
df
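# %%
# Expected outcome (a sketch, assuming FillNAOperator forwards its arguments to
# DataFrame.fillna): the NaN height and the NaT 'born' entries become "laptop",
# while non-missing cells are left untouched.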
| 2.75
| 3
|
Deeplabv3_Ensemble/get_feature_distribution.py
|
jackyjsy/CVPR21Chal-Agrivision
| 5
|
12774861
|
<gh_stars>1-10
import argparse
import os
import time
from tqdm import tqdm
import shutil
from datetime import datetime
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist
import torch.nn as nn
import apex
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
import segmentation_models_pytorch as smp
from config import cfg
from utils import *
from dataset import AgriValDataset
from model.deeplab import DeepLab
import model.backbone as B
torch.manual_seed(42)
def parse_args():
parser = argparse.ArgumentParser(
description="PyTorch Semantic Segmentation Training"
)
parser.add_argument(
"--local_rank",
default=0,
type=int
)
parser.add_argument(
"--cfg",
default="config/ade20k-resnet50dilated-ppm_deepsup.yaml",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"--ibn",
default='none',
type=str
)
parser.add_argument(
"--weight", "-w",
type=str
)
parser.add_argument(
"--channels", "-c",
type=str,
)
parser.add_argument(
"--out", "-o",
type=str
)
args = parser.parse_args()
return args
def main():
args = parse_args()
torch.backends.cudnn.benchmark = True
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.world_size = 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
# print(args.world_size, args.local_rank, args.distributed)
cfg.merge_from_file(args.cfg)
cfg.DIR = os.path.join(cfg.DIR,
args.cfg.split('/')[-1].split('.')[0] +
datetime.now().strftime('-%Y-%m-%d-%a-%H:%M:%S:%f'))
# Output directory
# if not os.path.isdir(cfg.DIR):
if args.local_rank == 0:
os.makedirs(cfg.DIR, exist_ok=True)
os.makedirs(os.path.join(cfg.DIR, 'weight'), exist_ok=True)
os.makedirs(os.path.join(cfg.DIR, 'history'), exist_ok=True)
shutil.copy(args.cfg, cfg.DIR)
if os.path.exists(os.path.join(cfg.DIR, 'log.txt')):
os.remove(os.path.join(cfg.DIR, 'log.txt'))
logger = setup_logger(distributed_rank=args.local_rank,
filename=os.path.join(cfg.DIR, 'log.txt'))
logger.info("Loaded configuration file {}".format(args.cfg))
logger.info("Running with config:\n{}".format(cfg))
logger.info("{}".format(args))
if cfg.MODEL.arch == 'deeplab':
model = DeepLab(num_classes=cfg.DATASET.num_class,
backbone=cfg.MODEL.backbone, # resnet101
output_stride=cfg.MODEL.os,
ibn_mode=args.ibn,
freeze_bn=False)
else:
raise NotImplementedError
if args.local_rank == 0:
logger.info("Loading weight from {}".format(
args.weight))
weight = torch.load(args.weight,
map_location=lambda storage, loc: storage.cuda(args.local_rank))
if not args.distributed:
weight = {k[7:]: v for k, v in weight.items()}
model.load_state_dict(weight)
model = model.backbone
B.resnet.TRACK_FEAT = True
model = apex.parallel.convert_syncbn_model(model)
model = model.cuda()
model = amp.initialize(model, opt_level="O1")
if args.distributed:
model = DDP(model, delay_allreduce=True)
dataset_val = AgriValDataset(
cfg.DATASET.root_dataset,
cfg.DATASET.list_val,
cfg.DATASET,
ret_rgb_img=False,
channels=args.channels)
val_sampler = None
val_sampler = torch.utils.data.distributed.DistributedSampler(
dataset_val,
num_replicas=args.world_size,
rank=args.local_rank
)
loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=cfg.VAL.batch_size_per_gpu,
shuffle=False, # we do not use this param
num_workers=cfg.VAL.batch_size_per_gpu,
drop_last=False,
pin_memory=True,
sampler=val_sampler
)
cfg.VAL.epoch_iters = len(loader_val)
cfg.VAL.log_fmt = 'Mean IoU: {:.4f}\n'
logger.info("World Size: {}".format(args.world_size))
logger.info("VAL.epoch_iters: {}".format(cfg.VAL.epoch_iters))
logger.info("VAL.sum_bs: {}".format(cfg.VAL.batch_size_per_gpu *
args.world_size))
means, vars = val(loader_val, model, args, logger)
# print(vars)
torch.save({'means': means, 'vars': vars}, args.out)
def val(loader_val, model, args, logger):
channels = [[] for i in range(34)]
model.eval()
# main loop
tic = time.time()
if args.local_rank == 0:
loader_val = tqdm(loader_val, total=cfg.VAL.epoch_iters)
with torch.no_grad():
for img, mask, _, _, in loader_val:
img = img.cuda()
mask = mask.cuda()
last, _ = model(img)
lst = [feat.data.float() for feat in B.resnet.SHARED_LIST]
# import ipdb; ipdb.set_trace()
for i, feat in enumerate(lst):
channels[i].append(feat.mean(dim=[0, 2, 3]))
# import ipdb; ipdb.set_trace()
for i in range(len(channels)):
channels[i] = torch.stack(channels[i], dim=0)
means = [feat.mean(dim=0).cpu() for feat in channels]
vars = [feat.var(dim=0).cpu() for feat in channels]
return means, vars
# def val(loader_val, model, args, logger):
# channel_meters = [AverageMeter() for i in range(34)]
# channel_square_meters = [AverageMeter() for i in range(34)]
# model.eval()
# # main loop
# tic = time.time()
# if args.local_rank == 0:
# loader_val = tqdm(loader_val, total=cfg.VAL.epoch_iters)
# all_feat = None
# with torch.no_grad():
# for img, mask, _, _, in loader_val:
# img = img.cuda()
# mask = mask.cuda()
# last, _ = model(img)
# lst = [feat.data.float() for feat in B.resnet.SHARED_LIST]
# # import ipdb; ipdb.set_trace()
# for i, feat in enumerate(lst):
# num = feat.flatten().shape[0] // feat.shape[1]
# channel_meters[i].update(feat.mean(dim=[0, 2, 3]), weight=num)
# channel_square_meters[i].update((feat ** 2).mean(dim=[0, 2, 3]), weight=num)
# # if all_feat is None:
# # all_feat = [[feat] for feat in lst]
# # else:
# # for i, feat in enumerate(lst):
# # all_feat[i].append(feat)
# # import ipdb; ipdb.set_trace()
# # for i in range(len(all_feat)):
# # all_feat[i] = torch.cat(all_feat[i], dim=0)
# # means = [feat.mean(dim=[0,2,3]) for feat in all_feat]
# # vars = [feat.var(dim=[0,2,3]) for feat in all_feat]
# means = []
# vars = []
# for c_meter, c_square_meter in zip(channel_meters,
# channel_square_meters):
# means.append(c_meter.average().cpu())
# count = c_meter.count
# var = (c_square_meter.sum - (c_meter.sum ** 2) / count) / (count - 1)
# var = var.cpu()
# vars.append(var)
# return means, vars
if __name__ == "__main__":
main()
| 2.03125
| 2
|
realestate/utils.py
|
jigartarpara/realestate
| 1
|
12774862
|
<gh_stars>1-10
import frappe
def sales_invoice_submit(doc, method = None):
return
assets = []
for item in doc.items:
asset = frappe.get_doc("RealEstate Assets",{"item": item.item_code})
if asset not in assets:
asset.save()
assets.append(asset)
def sales_invoice_cancel(doc, method = None):
return
assets = []
for item in doc.items:
asset = frappe.get_doc("RealEstate Assets",{"item": item.item_code})
if asset not in assets:
asset.save()
assets.append(asset)
def purchase_invoice_submit(doc, method = None):
return
assets = []
for item in doc.items:
asset = frappe.get_doc("RealEstate Assets",{"item": item.item_code})
if asset not in assets:
asset.save()
assets.append(asset)
def purchase_invoice_cancel(doc, method = None):
return
assets = []
for item in doc.items:
asset = frappe.get_doc("RealEstate Assets",{"item": item.item_code})
if asset not in assets:
asset.save()
assets.append(asset)
def payment_entry_submit(doc, method):
return
assets = []
for item in doc.references:
asset = frappe.get_doc("RealEstate Assets",{"item": item.item_code})
if asset not in assets:
asset.save()
assets.append(asset)
def payment_entry_cancel(doc, method):
return
assets = []
for item in doc.references:
asset = frappe.get_doc("RealEstate Assets",{"item": item.item_code})
if asset not in assets:
asset.save()
assets.append(asset)
# def sales_order_submit(doc, method):
# pass
# def sales_order_cancel(doc, method):
# pass
| 2.265625
| 2
|
utils.py
|
valmsmith39a/u-capstone-casting
| 0
|
12774863
|
<gh_stars>0
import json
def format(data):
return [item.format() for item in data]
| 2.375
| 2
|
froide/foirequest/migrations/0026_deliverystatus_retry_count.py
|
manonthemat/froide
| 0
|
12774864
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-19 10:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foirequest', '0025_foimessage_original'),
]
operations = [
migrations.AddField(
model_name='deliverystatus',
name='retry_count',
field=models.PositiveIntegerField(default=0),
),
]
| 1.492188
| 1
|
SmartDoctor/patient_new/views.py
|
alirezadaghigh99/Software-Project
| 0
|
12774865
|
<reponame>alirezadaghigh99/Software-Project<gh_stars>0
from django.contrib.auth import authenticate, logout, login
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
# Create your views here.
from patient.models import UserModel, Visit, VisitStatus
from .forms import PatientRegisterForm, PatientLoginForm
from . import views
from django.shortcuts import HttpResponse, render, redirect
from django.urls import reverse
from patient.models import UserRole
def index(request):
return render(request, 'patient_base.html', {})
def register(request):
if request.method == 'POST':
details = PatientRegisterForm(request.POST)
if details.is_valid():
print("salammmmm")
user = details.save(commit=False)
user.set_password(request.POST['<PASSWORD>'])
user.save()
return redirect(reverse('patient_new:login'))
else:
return render(request, "patient/signup.html", {'form': details} ,status=400)
else:
form = PatientRegisterForm(None)
return render(request, 'patient/signup.html', {'form': form})
def login_view(request):
if request.user.is_authenticated:
return redirect(reverse("patient_new:doctor-list"))
form = PatientLoginForm(request.POST or None)
if form.is_valid():
username = form.cleaned_data.get("username")
password = form.cleaned_data.get("password")
user = authenticate(username=username, password=password)
print(user)
if user:
login(request, user)
return HttpResponseRedirect(reverse('patient_new:doctor-list'))
return render(request, 'patient/login.html', {'form': form})
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse('patient_new:login'))
@login_required
def doctors_list(request):
doctors = UserModel.objects.filter(role=UserRole.DOCTOR)
user = request.user
visits = Visit.objects.filter(patient=user)
return render(request, "patient/doctors-list.html", {"doctors": doctors, "visits": visits})
@login_required
def make_appointment(request, doctor_id):
if request.method == "POST":
time = request.POST.get("time", "")
time = str(time)
doctor = UserModel.objects.get(id=doctor_id)
patient = request.user
visit = Visit(
patient=patient,
doctor=doctor,
time=time,
status=VisitStatus.PENDING
)
visit.save()
return redirect(reverse('patient_new:doctor-list'))
| 2.453125
| 2
|
Curso-em-video-Python/PycharmProjects/pythonExercicios/ex048 #.py
|
sartinicj/curso-em-video-python
| 0
|
12774866
|
<reponame>sartinicj/curso-em-video-python<filename>Curso-em-video-Python/PycharmProjects/pythonExercicios/ex048 #.py
s = 0
for i in range(1, 500+1, 2):
m = i + 3
s += m
print(s)
# prints 63250 on the screen
'''
soma = 0
for c in range(1, 501, 2):
if c%3 == 0:
soma = soma + c
print('A soma de todos os valores solicitados é {}'.format(soma))
'''
| 3.078125
| 3
|
bin/hexes/polyhexes-34-hexagram.py
|
tiwo/puzzler
| 0
|
12774867
|
<reponame>tiwo/puzzler
#!/usr/bin/env python
# $Id$
"""167 solutions"""
import puzzler
from puzzler.puzzles.polyhexes34 import Polyhexes34Hexagram
puzzler.run(Polyhexes34Hexagram)
| 1.070313
| 1
|
day2/shopping_list_miniprojectpy.py
|
dikshaa1702/ml
| 1
|
12774868
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Sun May 12 19:10:03 2019
@author: DiPu
"""
shopping_list=[]
print("enter items to add in list and type quit when you arew done")
while True:
ip=input("enter list")
if ip=="QUIT":
break
elif ip.upper()=="SHOW":
print(shopping_list)
elif ip.upper()=="HELP":
print("help1:if input is show then items in list will be displayed")
print("help2:if input is QUIT then total items in list will be printed and user will not be able to add more items")
else:
shopping_list.append(ip)
for count,item in enumerate(shopping_list,1):
print(count,item)
ip=input("enter add to ADDITION/REMOVE of item at some index:")
if ip=="ADDITION":
ip1=int(input("enter index:"))
item=input("enter item:")
shopping_list[ip1-1]=item
elif ip=="REMOVE":
ip1=int(input("enter index:"))
shopping_list.pop(ip1-1)
with open("shopping_list_miniprojectpy.py",'rt') as f1:
with open("shopping.txt",'wt') as f:
for lines in f1:
f.write(lines)
ip=open("shopping.txt")
content=ip.readlines()
print(content)
| 3.59375
| 4
|
delimg.py
|
Liang457/gk-imagebed-server
| 0
|
12774869
|
<filename>delimg.py
import os
def del_img():
try:
file_name = "./img"
for root, dirs, files in os.walk(file_name):
for name in files:
                if name.endswith(".png"):  # fill in the matching rule here
os.remove(os.path.join(root, name))
print("Delete File: " + os.path.join(root, name))
except:
pass
| 3.21875
| 3
|
global_metrics.py
|
yanb514/I24-trajectory-generation
| 1
|
12774870
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 25 13:43:23 2021
@author: wangy79
Produce metrics in the absence of ground truth
- Global metrics
ID counts (Y)
Space gap distribution
Valid/invalid (Y)
- Tracklet quality
Collision
Lane-change tracks
Outliers
Wlh mean/stdev
Lengths of tracks
Missing detection
# frames that one track has multiple meas
"""
import utils
import utils_evaluation as ev
import utils_vis as vis
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
class GlobalMetrics():
def __init__(self, params, raw_path, da_path=None, rec_path=None):
'''
'''
self.raw = utils.read_data(raw_path)
print("Select from Frame {} to {}".format(params["start"], params["end"] ))
self.raw = self.raw[(self.raw["Frame #"] >= params["start"]) & (self.raw["Frame #"] <= params["end"])]
if da_path:
self.da = utils.read_data(da_path)
self.da = self.da[(self.da["Frame #"] >= params["start"]) & (self.da["Frame #"] <= params["end"])]
else: self.da = None
if rec_path:
self.rec = utils.read_data(rec_path)
self.rec = self.rec[(self.rec["Frame #"] >= params["start"]) & (self.rec["Frame #"] <= params["end"])]
else: self.rec = None
self.params = params
self.metrics = {}
self.data = {} # storing evaluation metrics long data
def evaluate_by_frame(self):
# extract spacing distribution
return
def evaluate(self):
# evaluation raw data
self.metrics["Total tracklets"] = []
self.metrics["Valid tracklets"] = []
self.metrics["Collision with valid tracks"] = []
self.metrics["Possible lane change"] = []
self.metrics["Tracks with multiple detections"] = []
name = "Tracks > " + str(self.params["outlier_thresh"]*100) + "% outliers"
self.metrics[name] = []
self.data["outlier_ratio"] = []
self.data['xrange'] = []
self.data['Width variance'] = []
self.data['Length variance'] = []
self.data['y variance'] = []
self.data["correction_score"] = ev.get_correction_score(self.da, self.rec)
for df in [self.raw, self.da, self.rec]:
if df is None:
continue
groupList = list(df.groupby("ID").groups)
valid, collision = ev.get_invalid(df, ratio=self.params["outlier_thresh"])
invalid = set(groupList)-valid-collision
lane_change = ev.get_lane_change(df) - invalid
multiple_frames = ev.get_multiple_frame_track(df) # dict
df = df.groupby("ID").apply(ev.mark_outliers_car).reset_index(drop=True)
outlier_ratio = {carid: np.count_nonzero(car["Generation method"].values=="outlier")/car.bbr_x.count() for carid, car in df.groupby("ID")}
outlier_high = {key: value for key, value in outlier_ratio.items() if (value > self.params["outlier_thresh"]) and (key in valid)}
xranges = ev.get_x_covered(df, ratio=True)
w_var = ev.get_variation(df, "width")
l_var = ev.get_variation(df, "length")
y_var = ev.get_variation(df, "y")
# metrics are to be printed (in print_metrics)
self.metrics["Total tracklets"].append(df.groupby("ID").ngroups)
self.metrics["Valid tracklets"].append(valid)
self.metrics["Collision with valid tracks"].append(collision)
self.metrics["Possible lane change"].append(lane_change)
self.metrics["Tracks with multiple detections"].append(multiple_frames)
self.metrics[name].append(outlier_high)
# data is to be plotted (in visualize)
self.data["outlier_ratio"].append(outlier_ratio)
self.data['xrange'].append(xranges)
self.data['Width variance'].append(w_var)
self.data['Length variance'].append(l_var)
self.data['y variance'].append(y_var)
self.metrics["Score > " + str(self.params["score_thresh"])] = {carid:score for carid,score in self.data["correction_score"].items() if score>self.params["score_thresh"]}
return
def visualize_metrics(self):
for name in self.data:
data = self.data[name]
if isinstance(data, list):
data_list = [np.fromiter(data_item.values(), dtype=float) for data_item in data]
else:
data_list = np.fromiter(data.values(), dtype=float)
if name == "xrange":
xlabel = "FOV covered (%)"
ylabel = "Probability"
title = "X range (%) distribution"
elif "delta" in name:
xlabel = name
ylabel = "Probability"
title = "{} distribution".format(name)
elif name == "outlier_ratio":
xlabel = "Outlier ratio"
ylabel = "Probability"
title = "Outlier ratio distribution"
elif "variance" in name:
xlabel = name
ylabel = "Probability"
title = "{} variance distribution".format(name[0])
elif "correction_score" in name:
xlabel = "Correction score"
ylabel = "Probability"
title = "Correction score distribution"
vis.plot_histogram(data_list, bins=40,
labels="" if len(data_list)==1 else ["raw", "da", "rec"],
xlabel= xlabel,
ylabel= ylabel,
title= title)
# plot correction score vs. DA's outlier ratio
plt.figure()
for carid, score in self.data["correction_score"].items():
try:
plt.scatter(score,self.data["outlier_ratio"][1][carid], s=2, c='b')
except:
pass
plt.xlabel("correction score")
plt.ylabel("outlier ratio")
plt.title("Correction score vs. outlier ratio")
return
def print_metrics(self):
print("\n")
for name in self.metrics:
if "Valid tracklets" in name:
print("{:<30}: {}".format(name,[len(item) for item in self.metrics[name]]))
else:
if (not isinstance(self.metrics[name], int)) and (len(self.metrics[name])==0):
continue
print("{:<30}: {}".format(name,self.metrics[name]))
return
def evaluate_single_track(self, carid, plot=True, dashboard=True):
'''
identify a problematic track
'''
# raw = self.raw[self.raw["ID"]==carid]
da = self.da[self.da["ID"]==carid]
rec = self.rec[self.rec["ID"]==carid]
if plot:
vis.plot_track_compare(da,rec)
if dashboard:
vis.dashboard([da, rec],["da","rectified"])
return
if __name__ == "__main__":
data_path = r"E:\I24-postprocess\MC_tracking"
raw_path = data_path+r"\MC_reinterpolated.csv"
da_path = data_path+r"\DA\MC_tsmn.csv"
rec_path = data_path+r"\rectified\MC_tsmn.csv"
params = {
"start": 0,
"end": 1000,
"outlier_thresh": 0.25,
"score_thresh": 3
}
gm = GlobalMetrics(params, raw_path, da_path, rec_path)
gm.evaluate()
gm.print_metrics()
gm.visualize_metrics()
# gm.evaluate_single_track(197, plot=True, dashboard=True)
| 2.390625
| 2
|
0-notes/job-search/SamplesDSAlgos/data_structures/datastructures-linkedlist_singly.py
|
webdevhub42/Lambda
| 0
|
12774871
|
<reponame>webdevhub42/Lambda<gh_stars>0
"""
What is the difference between an array and a linked list?
Arrays and linked lists use memory differently.
Arrays store and index elements contiguously.
Each element of linked list is stored in a node.
Each node has reference or pointer to next node.
Linked lists describe lists of things in recursive fashion.
Arrays describe lists of things in iterative fashion.
Linked lists are easier to insert into and delete from the middle of than arrays.
LLs are not as cache friendly since caches are typically
optimized for contiguous memory accesses.
LLs don't need to be allocated with a static amount of memory up front.
You can keep adding elements to a linked list as much as you want;
you can't do that with a fixed-size array.
What is the difference between singly and doubly linked lists?
A singly linked list is a set of nodes where each node has two fields ‘data’
and ‘link’.
The ‘data’ field stores actual piece of information and ‘link’ field is used
to point to next node.
Basically ‘link’ field is nothing but address only.
A doubly linked list contains an extra pointer, typically called previous
pointer, together with next pointer and data which are there in singly
linked list.
"""
# SINGLY LINKED LIST
# made of bunch of nodes that point to next one in list
# every node has two properties:
# value of whatever is being stored
# pointer to next node in list
# adding and removing is easy; change next pointer on previous node (O(n)))
# similar to arrays
# commonly used for holding lists of data
# certain cases when better than array
# time complexity: Avg | Worst
# Access: O(n) | O(n)
# Search: O(n) | O(n)
# Insertion: O(1) | O(1)
# Deletion: O(1) | O(1)
# space complexity: O(n)
class Node:
def __init__(self, value=None, next_node=None):
# the value at this linked list node
self.value = value
# reference to the next node in the list
self.next_node = next_node
# return value of current node
def get_value(self):
return self.value
# return next node
def get_next(self):
return self.next_node
def set_next(self, new_next):
# set this node's next_node reference to the passed in node
self.next_node = new_next
class LinkedList:
def __init__(self):
# first node in the list
self.head = None
# last node in the linked list
self.tail = None
# O(1)
def add_to_head(self, value):
new_node = Node(value)
if not self.head and not self.tail:
self.head = new_node
self.tail = new_node
else:
new_node.set_next(self.head)
self.head = new_node
# we have access to the end of the list, so we can directly add new nodes to it
# O(1)
def add_to_end(self, value):
# regardless of if the list is empty or not, we need to wrap the value in a Node
new_node = Node(value)
# what if the list is empty?
if not self.head and not self.tail:
# set both head and tail to the new node
self.head = new_node
self.tail = new_node
# what if the list isn't empty?
else:
# set the current tail's next to the new node
self.tail.set_next(new_node)
# set self.tail to the new node
self.tail = new_node
# we already have access to the head of the linked list, so we can directly remove from it
# O(1)
def remove_from_head(self):
# what if the list is empty?
if not self.head:
return None
# what if it isn't empty?
else:
# we want to return the value at the current head
value = self.head.get_value()
# remove the value at the head
# update self.head
self.head = self.head.get_next()
return value
# iterate over our linked list and print out each value in it
def print_ll_elements(self):
current = self.head
while current is not None:
print(current.value)
current = current.get_next()
ll = LinkedList()
ll.add_to_head(3)
ll.add_to_head(5)
ll.add_to_head(9)
ll.add_to_head(11)
ll.print_ll_elements()
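# Further usage sketch (not in the original file): append to the tail and pop
# from the head.
ll.add_to_end(2)
print(ll.remove_from_head())  # prints 11, the value at the current head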
| 3.515625
| 4
|
examples/keras/Progressive growing of GANs/Progressive growing of GANs/main.py
|
DYG111/samples-for-ai
| 0
|
12774872
|
<filename>examples/keras/Progressive growing of GANs/Progressive growing of GANs/main.py
from __future__ import print_function
import numpy as np
import sys
import os
import argparse
###################################################################
# Variables #
# When launching project or scripts from Visual Studio, #
# input_dir and output_dir are passed as arguments. #
# Users could set them from the project setting page. #
###################################################################
input_dir = None
output_dir = None
log_dir = None
#################################################################################
# Keras configs. #
# Please refer to https://keras.io/backend . #
#################################################################################
import keras
from keras import backend as K
#K.set_floatx('float32')
#String: 'float16', 'float32', or 'float64'.
#K.set_epsilon(1e-05)
#float. Sets the value of the fuzz factor used in numeric expressions.
#K.set_image_data_format('channels_first')
#data_format: string. 'channels_first' or 'channels_last'.
#################################################################################
# Keras imports. #
#################################################################################
from keras.models import Model
from keras.models import Sequential
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import Layer
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Activation
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.optimizers import SGD
from keras.optimizers import RMSprop
from train import *
def main():
np.random.seed(config.random_seed)
func_params = config.train
func_name = func_params['func']
del func_params['func']
globals()[func_name](**func_params)
exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str,
default='datasets',
help="Input directory where where training dataset and meta data are saved",
required=True
)
parser.add_argument("--result_dir", type=str,
default='results',
help="Input directory where where logs and models are saved",
required=False
)
parser.add_argument("--resume_dir",type = str,
default = None,
help="whether resume model and where the model are saved",
required = False)
parser.add_argument("--resume_kimg",type = float,
default = 0.0,
help="previous trained images in thousands",
required = False)
args, unknown = parser.parse_known_args()
config.data_dir = args.data_dir
config.result_dir = args.result_dir
if hasattr(args,'resume_dir') and args.resume_dir != None:
config.train.update(resume_network=args.resume_dir)
if hasattr(args,'resume_kimg') and args.resume_kimg != None:
config.train.update(resume_kimg=args.resume_kimg)
main()
| 2.296875
| 2
|
grammars/job/job_normalization.py
|
JasperGuo/MeaningRepresentationBenchmark
| 9
|
12774873
|
# coding=utf8
import re
def tokenize_prolog(logical_form):
# Tokenize Prolog
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
toks = [t if "::" not in t else t.replace(
"::", " ") for t in normalized_lf.split()]
return toks
def normalize_prolog_variable_names(logical_form):
"""Standardize variable names in Prolog with De Brujin indices."""
toks = tokenize_prolog(logical_form)
# Replace Variable
cur_vars = []
new_toks = []
for w in toks:
if re.match('[A-Z]', w) or re.match('_\d+', w):
if w in cur_vars:
ind_from_end = len(cur_vars) - cur_vars.index(w) - 1
new_toks.append('V%d' % ind_from_end)
else:
cur_vars.append(w)
new_toks.append('NV')
else:
new_toks.append(w)
return ''.join(new_toks)
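# Illustrative sketch: variables are renamed by order of first appearance, so
#   normalize_prolog_variable_names('job(A),language(A,B)')
# yields 'job(NV),language(V0,NV)': the first sighting of a variable becomes
# 'NV' and later references use a De Bruijn-style index counted from the end.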
def preprocess_prolog(logical_form):
normalized_prolog = normalize_prolog_variable_names(logical_form)
normalized_prolog = re.sub(r"\s*\(\s*", "(", normalized_prolog)
normalized_prolog = re.sub(r"\s*\)\s*", ")", normalized_prolog)
normalized_prolog = re.sub(r"\s*,\s*", ",", normalized_prolog)
normalized_prolog = normalized_prolog.replace("\+ r", "\+r")
return normalized_prolog
def preprocess_funql(lf):
l = re.sub(r"\s*\(\s*", "(", lf)
l = re.sub(r"\s*\)\s*", ")", l)
l = re.sub(r"\s*,\s*", ",", l)
return l
def postprocess_prolog(logical_form):
normalized_prolog = logical_form.replace("windo nt", "windows nt")
normalized_prolog = normalized_prolog.replace("windo 95", "windows 95")
return normalized_prolog
def postprocess_sql(logical_form):
normalized_sql = logical_form.replace("windo nt", "windows nt")
normalized_sql = normalized_sql.replace("windo 95", "windows 95")
normalized_sql = normalized_sql.replace("\\'", "'")
return normalized_sql
def postprocess_lambda(logical_form):
normalized_lc = logical_form.replace("windo nt", "windows nt")
normalized_lc = normalized_lc.replace("windo 95", "windows 95")
normalized_lc = normalized_lc.replace("\\'", "'")
return normalized_lc
def normalize_sql(logical_form):
s = logical_form.replace("( ", "(").replace(" )", ")").replace(
";", "").replace('"', "'").replace(' . ', '.').strip().lower()
s = s.replace('max (', 'max(')
s = s.replace('min (', 'min(')
s = s.replace('avg (', 'avg(')
s = s.replace('count (', 'count(')
s = s.replace('sum (', 'sum(')
s = s.replace('count(1)', 'count(*)')
return s
def normalize_lambda_calculus(logical_form):
s = logical_form.replace(
'\s+', ' ').replace("( ", "(").replace(" )", ")").replace(') )', '))').replace(' :', ':').strip()
s = s.replace('"', "'").replace(') )', '))')
return s
if __name__ == '__main__':
sql = '(lambda $0:e (and (job $0) (language $0 perl) (company $0 "Lockheed Martin Aeronautics") (loc $0 colorado)))'
normalized_sql = normalize_lambda_calculus(sql).replace("'", "\\'")
sql_ = postprocess_lambda(normalized_sql)
print(sql)
print(normalized_sql)
print(sql_)
| 2.953125
| 3
|
soteria/executor.py
|
sreeja/soteria_tool
| 2
|
12774874
|
from datetime import datetime
from shutil import copy2, copytree
import os
import errno
import subprocess
import re
from soteria.exceptions import BoogieParseError, BoogieTypeError, BoogieVerificationError, BoogieUnknownError
from soteria.debug_support.debugger import Debugger
##TODO : refactor this class
class Executor:
#@classmethod
def execute_boogie(operations, specification, name = 'specification'):
model_file_path = 'results/' + name + '.model'
spec_file = Executor.create_spec_file(specification, name)
path_to_boogie = '/boogie/Binaries/Boogie.exe'
proc = subprocess.Popen(['mono', path_to_boogie, '-mv:' + model_file_path, spec_file], stdout=subprocess.PIPE)
out = proc.communicate()[0]
status = Executor.get_execution_status(name, out.decode("utf-8"), operations, spec_file, model_file_path)
return status
def create_spec_file(text, name):
with open('results/' + name + '.bpl', 'w') as f:
f.write(text)
return 'results/' + name + '.bpl'
def get_execution_status(name, result, operations, spec_file, model_file_path):
if 'parse errors detected' in result:
raise BoogieParseError(result + '\n')
if 'type checking errors detected' in result:
raise BoogieTypeError(result + '\n')
if 'Boogie program verifier finished with' in result:
errors = Executor.get_number_of_errors(result[result.index('Boogie program verifier finished with') + 38:])
if errors > 0:
specification = open(spec_file).readlines()
debugger = Debugger()
info = debugger.get_debug_info(operations, specification, result, model_file_path)
raise BoogieVerificationError(name + '::::::\n' + info + '\n')
if errors == 0:
return result
raise BoogieUnknownError(result)
def get_number_of_errors(text):
p = re.compile('\d error')
m = p.search(text)
if m:
e = re.compile('\d')
n = e.search(m.group(0))
return int(n.group(0))
return -1
| 2.1875
| 2
|
jabs/ilf/comp.py
|
hertogp/jabs
| 1
|
12774875
|
'''
ilf - compiler
'''
import os
import json
from .parse import parse
from .core import Ip4Filter, Ival
# -- GLOBALS
# (re)initialized by compile_file
GROUPS = {} # grp-name -> set([networks,.. , services, ..])
# -- AST = [(pos, [type, id, value]), ..]
def ast_iter(ast, types=None):
'iterate across statements of requested types'
types = [] if types is None else types
yield_all = len(types) == 0
for pos, stmt in ast:
if yield_all or stmt[0] in types:
yield (pos, stmt)
def ast_enum(ast, types=None):
'enumerate across statements of requested types'
types = [] if types is None else types
yield_all = len(types) == 0
for idx, (pos, stmt) in enumerate(ast):
if yield_all or stmt[0] in types:
yield (idx, pos, stmt)
def ast_errmsg(pos, err_type, stmt_type, msg):
'small helper to easily create ERROR/WARNING stmts'
return (pos, [err_type, stmt_type, msg])
def ast_includes(ast):
'expand include-statements in-place'
seen = {}
idx = -1
while idx+1 < len(ast): # while loop since ast is expanding
idx += 1
(fname, linenr, col), stmt = ast[idx]
if stmt[0] != 'INCLUDE':
continue
absname = os.path.realpath(os.path.normpath(
os.path.join(os.path.dirname(fname), stmt[1])))
if absname in seen:
ast[idx] = ast_errmsg(
(fname, linenr, 1),
'ERROR', stmt[0],
'{} already included at {}'.format(absname, seen[absname]))
continue
seen[absname] = '{}:{}:{}'.format(fname, linenr, col) # record include
try:
with open(absname, 'r') as fhdl:
include_ast = parse(fhdl) # possibly includes new includes(..)
except (IOError, OSError):
ast[idx] = ast_errmsg(
(fname, linenr, 1),
'ERROR', stmt[0],
'cannot find/read {}'.format(absname))
continue
ast[idx:idx+1] = include_ast # replace include(file) with its stmts
return ast
def _ivalify(lst, *types):
'turn a list of tokens (IP, PORTSTR, STR) into a list of Ivals'
global GROUPS
rv, errs = [], [] # in case of errors
for elm in lst:
try:
if elm[0] == 'IP':
rv.append(Ival.ip_pfx(elm[1]))
elif elm[0] == 'PORTSTR':
rv.append(Ival.port_str(elm[1]))
elif elm[0] == 'STR':
# rv.extend(GROUPS[elm[1]])
rv.extend(GROUPS.get(elm[1], []))
except (ValueError, KeyError):
errs.append(elm[1])
if len(errs):
msg = 'Invalid item(s): {}'.format(', '.join(errs))
raise ValueError(msg)
return [i for i in rv if i.type in types]
def ast_ivalify(ast):
'turn IP- and PORTSTR-values into Ival-s'
for idx, pos, stmt in ast_enum(ast, ['GROUP', 'RULE', 'RULEPLUS']):
try:
if stmt[0] == 'GROUP':
ivals = Ival.summary(_ivalify(stmt[2], Ival.IP, Ival.PORTSTR))
ast[idx] = (pos, (stmt[0], stmt[1], ivals))
elif stmt[0] == 'RULEPLUS':
scope = Ival.PORTSTR if stmt[1] == '@' else Ival.IP
ivals = Ival.summary(_ivalify(stmt[2]), scope)
ast[idx] = (pos, (stmt[0], stmt[1], ivals))
elif stmt[0] == 'RULE':
srcs = Ival.summary(_ivalify(stmt[2], Ival.IP))
dsts = Ival.summary(_ivalify(stmt[4], Ival.IP))
srvs = Ival.summary(_ivalify(stmt[5], Ival.PORTSTR))
ast[idx] = (pos, (stmt[0], stmt[1], srcs, stmt[3],
dsts, srvs, *stmt[6:]))
else:
raise ValueError('{} invalid stmt for ast_ivalify'.format(
stmt[0]))
except ValueError as e:
ast[idx] = ast_errmsg(pos, 'ERROR', stmt[0], '{}'.format((e)))
return ast
def ast_jsonify(ast):
'turn a rule\'s json string into a python dict'
# only RULE tuple's have json string (or None) as last element
for idx, pos, stmt in ast_enum(ast, ['RULE']):
try:
dta = None if stmt[-1] is None else json.loads(stmt[-1])
ast[idx] = (pos, (*stmt[0:-1], dta))
except (TypeError, json.decoder.JSONDecodeError) as e:
print('could not decode ', stmt[-1])
ast[idx] = ast_errmsg(pos, 'ERROR', stmt[0],
'json-error: {}'.format((e)))
return ast
def expand_refs(dct):
'return an expanded member list from a, possibly, recursive definition'
# dct is {name} -> set([name, ..]), which may refer to other names
for target, mbrs in dct.items():
heap = list(mbrs) # mbrs name ('STR', name)
seen, dct[target] = [target], set([])
while heap:
nxt = heap.pop()
if nxt in seen: # circular reference
continue
seen.append(nxt)
if nxt in dct:
heap.extend(list(dct[nxt]))
dct[target].add(nxt)
return dct
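# Illustrative sketch: nested group references are flattened per name, and
# circular definitions terminate rather than recurse forever, e.g.
#   expand_refs({'web': {'dmz'}, 'dmz': {'web'}})
# leaves each group referring to the other exactly once.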
def ast_symbol_table(ast):
'Build the symbol table for the ast'
# need 2 passes, since forward referencing is allowed
global GROUPS
# (re-)initialise symbol table
GROUPS = {'any': set([Ival.ip_pfx('any')]),
'any/any': set([Ival.port_str('any/any')])}
TODO = {} # GROUP-name -> [group-names to include]
# 1st pass, collect direct IP/PORTSTR's per groupname and
# defer group references till phase2
for idx, pos, stmt in ast_enum(ast, ['GROUP']):
_, grpname, mbrs = stmt
refs = [t[1] for t in mbrs if t[0] == 'STR'] # only the name
TODO.setdefault(grpname, set()).update(refs) # defer named ref's
grpdef = GROUPS.setdefault(grpname, set()) # always define symbol
try:
ivals = _ivalify([m for m in mbrs if m[0] != 'STR'],
Ival.IP, Ival.PORTSTR)
grpdef.update(ivals) # add straight IP/PORTSTR's to symbol def.
except ValueError as e:
ast[idx] = (pos, ('ERROR', 'GROUP', e.args[0]))
print('dir ValueError as e', e, dir(e), e.args)
# 2nd pass, expand delayed references
for name, mbrs in expand_refs(TODO).items():
for mbr in mbrs:
xtra = GROUPS.get(mbr, [])
if len(xtra) == 0:
print('empty ref', mbr, 'for group', name)
GROUPS.setdefault(name, set()).update(xtra)
return GROUPS
def ast_rules(ast):
'expand elements of the defined rules'
# ('RULE', <name>, [src], DIR, [dst], [srv], ('ACTION',act), <json-str>)
rules = []
for pos, stmt in ast_iter(ast, ['RULE', 'RULEPLUS']):
if stmt[0] == 'RULE':
rules.append(list(stmt[1:]))
elif stmt[0] == 'RULEPLUS':
if len(rules) == 0:
raise ValueError('dangling:{}'.format(str(stmt)))
if '@' == stmt[1]:
rules[-1][4].extend(stmt[2])
if '<' in stmt[1]:
rules[-1][1].extend(stmt[2])
if '>' in stmt[1]:
rules[-1][3].extend(stmt[2])
else:
raise ValueError('ast_rules cannot handle stmt {!r}'.format(stmt))
# proces direction of rules
# rule := [name, src, dst, srv, action, json-str]
rv = []
for rule in rules:
direction = rule[2] # capture direction and remove field
del rule[2]
rule[1] = Ival.summary(rule[1]) # summarize src
rule[2] = Ival.summary(rule[2]) # summarize dst
rule[3] = Ival.summary(rule[3]) # summarize srv
if direction == '>':
rv.append(rule)
elif direction == '<':
rule[1], rule[2] = rule[2], rule[1]
rv.append(rule)
else:
rv.append(rule.copy())
if rule[1] != rule[2]:
rule[1], rule[2] = rule[2], rule[1]
rv.append(rule)
return rv
# -- SEMANTICS
def ast_semantics(ast):
'run all chk_ast_funcs on ast'
# all chk_xyz(ast) -> must return an (un)modified, valid ast
for check in [x for x in globals() if x.startswith('chk_')]:
semantics = globals()[check]
# XXX: log on informational level to console
print('semantics:', semantics.__doc__)
ast = semantics(ast)
return ast
def chk_ast_dangling(ast):
'checking RULE(PLUS) scopes'
scope = None # determines current scope (if any)
for idx, pos, stmt in ast_enum(ast):
if stmt[0] == 'BLANK':
continue
if stmt[0] == 'RULEPLUS' and scope not in ['RULE', 'RULEPLUS']:
ast[idx] = (pos, ('ERROR', 'RULEPLUS',
'not in scope of a RULE'))
scope = stmt[1] if stmt[0] in ['ERROR', 'WARNING'] else stmt[0]
return ast
def chk_ast_refs(ast):
'check group references'
global GROUPS
def undefined_refs(lst):
return [x[1] for x in lst if x[0] == 'STR' and x[1] not in GROUPS]
def empty_refs(lst):
return [x[1] for x in lst if x[0] == 'STR' and x[1] in GROUPS and len(
GROUPS.get(x[1], [])) == 0]
for idx, pos, stmt in ast_enum(ast, ['GROUP', 'RULE', 'RULEPLUS']):
unrefs = undefined_refs(stmt[2]) # unknown group-references
emptyrefs = empty_refs(stmt[2]) # undefined group-references
if stmt[0] == 'RULE':
unrefs += undefined_refs(stmt[4]) # add unknown dsts
emptyrefs += empty_refs(stmt[4])
unrefs += undefined_refs(stmt[5]) # add unknown srvs
emptyrefs += empty_refs(stmt[5])
if len(unrefs) and len(emptyrefs):
msg = 'has empty ref: {} and undefined refs: {}'.format(
', '.join(emptyrefs), ', '.join(unrefs))
elif len(unrefs):
msg = 'has undefined references: {}'.format(unrefs)
elif len(emptyrefs):
msg = 'has empty references: {}'.format(emptyrefs)
else:
continue # all is ok
ast[idx] = (pos, ('ERROR', stmt[0], msg))
return ast
def chk_ast_args(ast):
'checking argument validity'
# RULEPLUS @ has STR's or PORTSTR's, else its an ERROR
# RULEPLUS <,>,<> has STR's or IP's, else its an ERROR
# RULE, same checks for src, dst and services
NETARGS = ('IP', 'STR')
SRVARGS = ('PORTSTR', 'STR')
ALLARGS = set([*NETARGS, *SRVARGS])
for idx, pos, stmt in ast_enum(ast, ['GROUP', 'RULE', 'RULEPLUS']):
illegal = []
if stmt[0] == 'GROUP':
illegal = [x[1] for x in stmt[2] if x[0] not in ALLARGS]
elif stmt[0] == 'RULE':
illegal = [x[1] for x in stmt[2] if x[0] not in NETARGS]
illegal.extend(x[1] for x in stmt[4] if x[0] not in NETARGS)
illegal.extend(x[1] for x in stmt[5] if x[0] not in SRVARGS)
elif stmt[0] == 'RULEPLUS':
if stmt[1] == '@':
illegal = [x[1] for x in stmt[2] if x[0] not in SRVARGS]
else:
illegal = [x[1] for x in stmt[2] if x[0] not in NETARGS]
else:
raise ValueError('stmt args check: unknown stmt type {}'.format(
stmt[1]))
if len(illegal):
msg = 'illegal args: {}'.format(', '.join(str(i) for i in illegal))
ast[idx] = (pos, ('ERROR', stmt[0], msg))
return ast
# -- Compile
def print_ast(ast):
'print out the abstract syntax tree'
for pos, stmt in ast:
print('{}:{}:{}'.format(os.path.relpath(pos[0]), pos[1], pos[2]),
*(elm for elm in stmt))
def compile(src):
'compile file or script text into IP4Filter object'
global GROUPS
try:
fhdl = open(src, "rt") # either a filename
except (IOError, OSError):
import io # or text
fhdl = io.StringIO(src)
ast = parse(fhdl)
ast = ast_includes(ast) # include & parse include(files)
GROUPS = ast_symbol_table(ast) # create new symbol table
ast = ast_semantics(ast) # check validity of ast
ast = ast_ivalify(ast) # turn IP, PORTSTR strings into Ival's
ast = ast_jsonify(ast) # turn json str into python object
errors = list(ast_iter(ast, 'ERROR'))
warnings = list(ast_iter(ast, 'WARNING'))
for pos, msg in errors:
print('Error:{}:{}'.format(pos, msg))
for pos, msg in warnings:
print('Warning:{}:{}'.format(pos, msg))
print('Score: E{}, W{}'.format(len(errors), len(warnings)))
if len(errors):
print_ast(ast)
raise SystemExit('Filter script contains errors')
# TODO:
# - maybe return (errors, warnings, ip4f)
rules = ast_rules(ast)
ip4f = Ip4Filter()
for rid, (name, srcs, dsts, ports, action, obj) in enumerate(rules):
ip4f._add(rid, srcs, dsts, ports, name, action, obj)
return ip4f
| 2.078125
| 2
|
checkov/terraform/module_loading/loaders/git_loader.py
|
ekmixon/checkov
| 0
|
12774876
|
<filename>checkov/terraform/module_loading/loaders/git_loader.py<gh_stars>0
import os
from checkov.common.goget.github.get_git import GitGetter
from checkov.terraform.module_loading.content import ModuleContent
from checkov.terraform.module_loading.loader import ModuleLoader
class GenericGitLoader(ModuleLoader):
def _is_matching_loader(self):
# https://www.terraform.io/docs/modules/sources.html#generic-git-repository
return self.module_source.startswith('git::')
def _load_module(self) -> ModuleContent:
try:
module_source = self.module_source.replace('git::', '')
if os.name == 'nt':
self.logger.info(f'Operating System: {os.name}')
self._create_valid_windows_dest_dir()
git_getter = GitGetter(module_source, create_clone_and_result_dirs=False)
git_getter.temp_dir = self.dest_dir
git_getter.do_get()
return_dir = self.dest_dir
if self.inner_module:
return_dir = os.path.join(self.dest_dir, self.inner_module)
return ModuleContent(dir=return_dir)
except Exception as e:
self.logger.error(f'failed to get {self.module_source} because of {e}')
return ModuleContent(dir=None, failed_url=self.module_source)
def _create_valid_windows_dest_dir(self):
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#naming-conventions
reserved_windows_chars = ['<', '>', ':', '"', '|', '?', '*']
self.logger.info(f'External module will be cloned to: {self.dest_dir}')
for char in reserved_windows_chars:
self.dest_dir = self.dest_dir.replace(char, '')
loader = GenericGitLoader()
| 2.15625
| 2
|
tests/parsers/c_parser/exprs/unary_ops/post_increment_op_tests.py
|
mehrdad-shokri/retdec-regression-tests-framework
| 21
|
12774877
|
<reponame>mehrdad-shokri/retdec-regression-tests-framework<gh_stars>10-100
"""
Tests for the
:module`regression_tests.parsers.c_parser.exprs.unary_ops.post_increment_op`
module.
"""
from tests.parsers.c_parser import WithModuleTests
class PostIncrementOpExprTests(WithModuleTests):
"""Tests for `PostIncrementOpExpr`."""
def test_post_increment_op_expr_is_post_increment_op(self):
post_increment_op_expr = self.get_expr('a++', 'int')
self.assertTrue(post_increment_op_expr.is_post_increment_op())
def test_post_increment_op_expr_is_no_other_expr(self):
post_increment_op_expr = self.get_expr('a++', 'int')
self.assertFalse(post_increment_op_expr.is_eq_op())
self.assertFalse(post_increment_op_expr.is_neq_op())
self.assertFalse(post_increment_op_expr.is_gt_op())
self.assertFalse(post_increment_op_expr.is_gt_eq_op())
self.assertFalse(post_increment_op_expr.is_lt_op())
self.assertFalse(post_increment_op_expr.is_lt_eq_op())
self.assertFalse(post_increment_op_expr.is_add_op())
self.assertFalse(post_increment_op_expr.is_sub_op())
self.assertFalse(post_increment_op_expr.is_mul_op())
self.assertFalse(post_increment_op_expr.is_mod_op())
self.assertFalse(post_increment_op_expr.is_div_op())
self.assertFalse(post_increment_op_expr.is_and_op())
self.assertFalse(post_increment_op_expr.is_or_op())
self.assertFalse(post_increment_op_expr.is_bit_and_op())
self.assertFalse(post_increment_op_expr.is_bit_or_op())
self.assertFalse(post_increment_op_expr.is_bit_xor_op())
self.assertFalse(post_increment_op_expr.is_bit_shl_op())
self.assertFalse(post_increment_op_expr.is_bit_shr_op())
self.assertFalse(post_increment_op_expr.is_not_op())
self.assertFalse(post_increment_op_expr.is_neg_op())
self.assertFalse(post_increment_op_expr.is_assign_op())
self.assertFalse(post_increment_op_expr.is_address_op())
self.assertFalse(post_increment_op_expr.is_deref_op())
self.assertFalse(post_increment_op_expr.is_array_index_op())
self.assertFalse(post_increment_op_expr.is_comma_op())
self.assertFalse(post_increment_op_expr.is_ternary_op())
self.assertFalse(post_increment_op_expr.is_call())
self.assertFalse(post_increment_op_expr.is_cast())
self.assertFalse(post_increment_op_expr.is_pre_increment_op())
self.assertFalse(post_increment_op_expr.is_pre_decrement_op())
self.assertFalse(post_increment_op_expr.is_post_decrement_op())
self.assertFalse(post_increment_op_expr.is_compound_assign_op())
self.assertFalse(post_increment_op_expr.is_struct_ref_op())
self.assertFalse(post_increment_op_expr.is_struct_deref_op())
def test_repr_returns_correct_repr(self):
add_op_expr = self.get_expr('a++', 'int')
self.assertEqual(repr(add_op_expr), '<PostIncrementOpExpr op=a>')
def test_str_returns_correct_str(self):
add_op_expr = self.get_expr('a++', 'int')
self.assertEqual(str(add_op_expr), 'a++')
| 2.28125
| 2
|
python/py-set-add.py
|
gajubadge11/HackerRank-1
| 340
|
12774878
|
#!/usr/bin/env python3
if __name__ == "__main__":
N = int(input().strip())
stamps = set()
for _ in range(N):
stamp = input().strip()
stamps.add(stamp)
print(len(stamps))
| 3.734375
| 4
|
src/mpi4py/futures/_core.py
|
renefritze/mpi4py
| 0
|
12774879
|
# Author: <NAME>
# Contact: <EMAIL>
# pylint: disable=unused-import
# pylint: disable=redefined-builtin
# pylint: disable=missing-module-docstring
try:
from concurrent.futures import (
FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed,
)
try: # Python 3.7
from concurrent.futures import BrokenExecutor
except ImportError: # pragma: no cover
class BrokenExecutor(RuntimeError):
"""The executor has become non-functional."""
try: # Python 3.8
from concurrent.futures import InvalidStateError
except ImportError: # pragma: no cover
# pylint: disable=too-few-public-methods
# pylint: disable=useless-object-inheritance
class InvalidStateError(CancelledError.__base__):
"""The operation is not allowed in this state."""
except ImportError: # pragma: no cover
from ._base import (
FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
InvalidStateError,
BrokenExecutor,
Future,
Executor,
wait,
as_completed,
)
| 1.914063
| 2
|
tests/run_hook_test.py
|
SeaOfOcean/EasyParallelLibrary
| 100
|
12774880
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test for hook of session run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from distutils.version import LooseVersion as Version
import six
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import test
from tensorflow.python.framework.versions import __version__
import epl
from epl.parallel.hooks import _append_replicated_fetches
# pylint: disable=missing-docstring,unused-argument,unused-variable
class RunHookTest(test.TestCase):
def test_for_append_replicated_fetches(self):
epl.init(config=epl.Config({"communication.gradients_reduce_method": "sum"}))
with epl.Cluster(worker_hosts="127.0.0.1:8001", worker_index=0):
with epl.replicate(device_count=1):
num_x = np.random.randint(0, 10, (500, 20)).astype(dtype=np.float32)
num_y = np.random.randint(0, 10, 500).astype(dtype=np.int64)
dataset = tf.data.Dataset.from_tensor_slices((num_x, num_y)) \
.batch(10).repeat(1)
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
iterator.initializer)
x, labels = iterator.get_next()
logits = tf.layers.dense(x, 2)
logits = tf.layers.dense(logits, 10)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels,
logits=logits)
epl.add_to_collection(loss, epl.GraphKeys.GLOBAL_MEAN_OBJECTS)
global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
momentum=0.9)
train_op = optimizer.minimize(loss, global_step=global_step)
tf.train.MonitoredTrainingSession()
# Test for a single operation/tensor.
fetches = loss
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches.name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0"
])
fetches = train_op
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
# Test for nvidia-tf(1.15.4) and deeprec(1.15.5).
if Version(__version__) >= Version("1.15.4") and Version(__version__) < Version("2.0"):
suffix = "/group_deps"
else:
suffix = ""
self.assertEqual(fetches.name, "Momentum" + suffix)
self.assertEqual(replicas, [
"EPL_REPLICA_1/Momentum" + suffix, "EPL_REPLICA_2/Momentum" +
suffix, "EPL_REPLICA_3/Momentum" + suffix
])
# Test for list fetches.
fetches = [loss, train_op]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
fetches = [fetch.name for fetch in fetches]
replicas = [rep.name for rep in replicas]
self.assertListEqual(
fetches, ["EPL_PARALLEL_STRATEGY/truediv:0", "Momentum" + suffix])
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of dict.
fetches = {"loss": loss, "train_op": train_op}
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches["loss"].name,
"EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches["train_op"].name, "Momentum" + suffix)
if six.PY2:
self.assertListEqual(replicas, [
"EPL_REPLICA_1/Momentum" + suffix, "EPL_REPLICA_2/Momentum" +
suffix, "EPL_REPLICA_3/Momentum" + suffix,
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0"
])
else:
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of OrderedDict
fetches = collections.OrderedDict()
fetches["loss"] = loss
fetches["train_op"] = train_op
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches["loss"].name,
"EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches["train_op"].name, "Momentum" + suffix)
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of tuple.
fetches = (loss, train_op)
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches[0].name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches[1].name, "Momentum" + suffix)
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for type of namedtuple.
fetch_type = collections.namedtuple("fetch_type", ["loss", "train_op"])
fetches = fetch_type(loss=loss, train_op=train_op)
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches.loss.name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches.train_op.name, "Momentum" + suffix)
self.assertListEqual(replicas, [
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix
])
# Test for nested list fetches.
def _flatten(li):
return sum(
([x] if not isinstance(x, list) else _flatten(x) for x in li), [])
fetches = [labels, [train_op, logits, [loss, global_step]]]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
fetches = _flatten(fetches)
fetches = [fetch.name for fetch in fetches]
replicas = [rep.name for rep in replicas]
self.assertListEqual(fetches, [
"IteratorGetNext:1", "Momentum" + suffix, "dense_1/BiasAdd:0",
"EPL_PARALLEL_STRATEGY/truediv:0", "global_step:0"
])
self.assertListEqual(replicas, [
"EPL_REPLICA_1/IteratorGetNext:1",
"EPL_REPLICA_2/IteratorGetNext:1",
"EPL_REPLICA_3/IteratorGetNext:1", "EPL_REPLICA_1/Momentum" +
suffix, "EPL_REPLICA_2/Momentum" + suffix,
"EPL_REPLICA_3/Momentum" + suffix,
"EPL_REPLICA_1/dense_1/BiasAdd:0",
"EPL_REPLICA_2/dense_1/BiasAdd:0",
"EPL_REPLICA_3/dense_1/BiasAdd:0",
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0",
"EPL_REPLICA_1/global_step:0",
"EPL_REPLICA_2/global_step:0",
"EPL_REPLICA_3/global_step:0"
])
# Test for nested list with dict.
fetches = [labels, {"loss": loss}]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches[0].name, "IteratorGetNext:1")
self.assertEqual(fetches[1]["loss"].name,
"EPL_PARALLEL_STRATEGY/truediv:0")
self.assertListEqual(replicas, [
"EPL_REPLICA_1/IteratorGetNext:1",
"EPL_REPLICA_2/IteratorGetNext:1",
"EPL_REPLICA_3/IteratorGetNext:1",
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0"
])
# Test for nested list with tuple.
fetches = [labels, (loss, global_step)]
replicas = []
fetches = _append_replicated_fetches(fetches, replicas)
replicas = [rep.name for rep in replicas]
self.assertEqual(fetches[0].name, "IteratorGetNext:1")
self.assertEqual(fetches[1][0].name, "EPL_PARALLEL_STRATEGY/truediv:0")
self.assertEqual(fetches[1][1].name, "global_step:0")
self.assertListEqual(replicas, [
"EPL_REPLICA_1/IteratorGetNext:1",
"EPL_REPLICA_2/IteratorGetNext:1",
"EPL_REPLICA_3/IteratorGetNext:1",
"EPL_PARALLEL_STRATEGY/truediv_1:0",
"EPL_PARALLEL_STRATEGY/truediv_2:0",
"EPL_PARALLEL_STRATEGY/truediv_3:0",
"EPL_REPLICA_1/global_step:0",
"EPL_REPLICA_2/global_step:0",
"EPL_REPLICA_3/global_step:0"
])
# pylint: enable=missing-docstring,unused-argument,unused-variable
if __name__ == "__main__":
test.main()
| 1.515625
| 2
|
veronica/interfaces/event.py
|
nirmalhk7/veronica-cli
| 0
|
12774881
|
<filename>veronica/interfaces/event.py<gh_stars>0
from datetime import datetime
class EventInterface():
link = None
title = "Untitled"
calendar = None
color = None
hangoutLink = None
start = None
end = None
def set_date(self, start, end):
if("date" in start):
self.start = datetime.strptime(start['date'], "%Y-%m-%d")
self.end = datetime.strptime(end['date'], "%Y-%m-%d")
else:
self.start = datetime.strptime(
start["dateTime"].upper().split("+")[0].split("Z")[0],
"%Y-%m-%dT%H:%M:%S")
self.end = datetime.strptime(
end["dateTime"].upper().split("+")[0].split("Z")[0],
"%Y-%m-%dT%H:%M:%S")
def __init__(self, link, title, calendar, color) -> None:
self.link = link
self.title = title
self.calendar = calendar
self.color = color
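
# Minimal usage sketch, assuming event payloads shaped like the ones set_date()
# parses: dicts carrying either a "date" key (all-day events) or a "dateTime"
# key with a timezone suffix (timed events). The values below are illustrative.
if __name__ == "__main__":
    ev = EventInterface(link="https://example.com/event", title="Stand-up",
                        calendar="work", color="blue")
    ev.set_date({"date": "2021-06-01"}, {"date": "2021-06-02"})        # all-day
    print(ev.title, ev.start, "->", ev.end)
    ev.set_date({"dateTime": "2021-06-01T09:00:00+02:00"},
                {"dateTime": "2021-06-01T09:15:00+02:00"})             # timed
    print(ev.title, ev.start, "->", ev.end)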
| 2.6875
| 3
|
messy_pypi/done/main_readmereader.py
|
Maxio-Arkanyota/Maxio-Arkanyota
| 2
|
12774882
|
from collections import deque  # stacks for rendering maths (currently unused)
# from main_terminalFunctions import
from os import get_terminal_size
from main_terminalGetKey import getKey
def readfile(file):
    # Bold, Italic, Strike, code, Mcode, Highlight
# 0** 1* 2__ 3_ 4~~ 5` 6``` 7==
etat = [False, False, False, False, False, False, False, False]
to_printfile = []
with open(file, "r") as f:
a = f.readlines()
for i in a:
            current_ligne = i.rstrip()  # rstrip() only, to keep leading \t
if current_ligne == "---" or current_ligne == "___" or current_ligne == "***":
current_ligne = get_terminal_size()[0] * "─"
elif current_ligne[0:6] == "######":
current_ligne = "\033[33mh6\u2588\u2588\u2588\u2588" + current_ligne[6:] + "\033[0m"
elif current_ligne[0:5] == "#####":
current_ligne = "\033[33mh5\u2588\u2588\u2588" + current_ligne[5:] + "\033[0m"
elif current_ligne[0:4] == "####":
current_ligne = "\033[33mH4\u2588\u2588" + current_ligne[4:] + "\033[0m"
elif current_ligne[0:3] == "###":
current_ligne = "\033[32m\033[1m" + (' ' + current_ligne[3:] + " ").center(get_terminal_size()[0],
".") + "\033[0m" # "\033[32m\033[3m3\u2588\u2588"+ current_ligne[3:] +"\033[0m"
elif current_ligne[0:2] == "##":
current_ligne = "\033[34m\033[1m" + (' ' + current_ligne[2:] + " ").center(get_terminal_size()[0],
"─") + "\033[0m"
elif current_ligne[0:1] == "#":
current_ligne = "\033[31m\033[1m\033[4m" + (' ' + current_ligne[1:] + " ").center(get_terminal_size()[0],
"\u2588") + "\033[0m"
            # While "**" or "~~" or "*" or "==" or "__" not in current line
if "**" in current_ligne and not etat[0]:
etat[0] = True
current_ligne = current_ligne.replace("**", "\033[1m\033[91m", 1)
if "**" in current_ligne and etat[0]:
etat[0] = False
current_ligne = current_ligne.replace("**", "\033[0m", 1)
if "__" in current_ligne and not etat[2]:
etat[2] = True
current_ligne = current_ligne.replace("__", "\033[1m", 1)
if "__" in current_ligne and etat[2]:
etat[2] = False
current_ligne = current_ligne.replace("__", "\033[0m", 1)
if "==" in current_ligne and not etat[7]:
etat[7] = True
current_ligne = current_ligne.replace("==", "\033[103m\033[30m", 1)
if "==" in current_ligne and etat[7]:
etat[7] = False
current_ligne = current_ligne.replace("==", "\033[0m", 1)
to_printfile.append(current_ligne)
return to_printfile
def printontermnal(to_printfile, boucle=True):
ShowLines = False
Firstline = 0
ChosedLink = 0
Reapet = True
while Reapet:
for i in to_printfile:
print(i)
if boucle:
key = getKey(debug=True)
if key == "l":
ShowLines = not ShowLines
if key == "j": # DOWN
Firstline = Firstline + 1 # min(Firstline+1, len(to_printfile))
if key == "k": # Up
Firstline = Firstline - 1 # max(Firstline-1, 0)
if key == "Tab":
ChosedLink = ChosedLink + 1 # min(ChosedLink+1, len(alllink))
if key == "ShiftTab":
ChosedLink = ChosedLink - 1 # min(ChosedLink-1, 0)
if key == "\r": # ENTER
pass # TODO: Open browser with current link
else:
Reapet = False
if __name__ == "__main__": # Si tu le lance avec python3.10 main_readmereader.py
import sys
    args = sys.argv  # get the command-line arguments
if "--help" in args or "-h" in args:
print("""
    -l, --lines: show line numbers
    -h, --help: show this help
    -c, --config: config file (planned feature)
    -i, --image : render images as ASCII with `https://dev.to/natamacm/terminal-image-with-python-44mh`
    -b, -blockcode : highlight code blocks
    -s, --size : set the output size
""")
if "--exec" in args:
printontermnal(readfile("resources/Readmereader/RM.md"), boucle=False)
| 3.0625
| 3
|
provenance/core.py
|
dmaljovec/provenance
| 0
|
12774883
|
<filename>provenance/core.py
import datetime
import os
import shutil
import time
from collections import namedtuple
from copy import copy
import toolz as t
from boltons import funcutils as bfu
from . import artifact_hasher as ah
from . import repos as repos
from . import serializers as s
from . import utils
from ._dependencies import dependencies
from .hashing import file_hash, hash
class ImpureFunctionError(Exception):
pass
class MutatedArtifactValueError(Exception):
pass
def get_metadata(f):
if hasattr(f, '_provenance_metadata'):
return f._provenance_metadata
if hasattr(f, 'func'):
return get_metadata(f.func)
else:
return {}
artifact_properties = ['id', 'value_id', 'inputs', 'fn_module', 'fn_name', 'value',
'name', 'version', 'composite', 'value_id_duration',
'serializer', 'load_kwargs', 'dump_kwargs',
'compute_duration', 'hash_duration', 'computed_at',
'custom_fields', 'input_artifact_ids', 'run_info']
ArtifactRecord = namedtuple('ArtifactRecord', artifact_properties)
def fn_info(f):
info = utils.fn_info(f)
metadata = get_metadata(f)
name = metadata['name'] or '.'.join([info['module'], info['name']])
info['identifiers'] = {'name': name,
'version': metadata['version'],
'input_hash_fn': metadata['input_hash_fn']}
info['input_process_fn'] = metadata['input_process_fn']
info['composite'] = metadata['returns_composite']
info['archive_file'] = metadata['archive_file']
info['custom_fields'] = metadata['custom_fields']
info['preserve_file_ext'] = metadata['preserve_file_ext']
info['use_cache'] = metadata['use_cache']
info['read_only'] = metadata['read_only']
if info['composite']:
if info['archive_file']:
raise NotImplementedError("Using 'composite' and 'archive_file' is not supported.")
info['serializer'] = metadata['serializer'] or {}
info['load_kwargs'] = metadata['load_kwargs'] or {}
info['dump_kwargs'] = metadata['dump_kwargs'] or {}
valid_serializer = isinstance(info['serializer'], dict)
for serializer in info['serializer'].values():
valid_serializer = valid_serializer and serializer in s.serializers
if not valid_serializer:
break
elif info['archive_file']:
serializer = metadata['serializer'] or 'file'
if serializer != 'file':
raise ValueError("With 'archive_file' set True the only valid 'serializer' is 'file'")
if metadata.get('dump_kwargs') is not None:
raise ValueError("With 'archive_file' set True you may not specify any dump_kwargs.")
if metadata.get('load_kwargs') is not None:
raise ValueError("With 'archive_file' set True you may not specify any load_kwargs.")
info['serializer'] = 'file'
info['load_kwargs'] = metadata['load_kwargs'] or {}
info['dump_kwargs'] = (metadata['dump_kwargs']
or {'delete_original': metadata['delete_original_file']})
info['delete_original_file'] = metadata['delete_original_file']
valid_serializer = True
else:
info['serializer'] = metadata.get('serializer', 'auto') or 'auto'
info['load_kwargs'] = metadata.get('load_kwargs', None)
info['dump_kwargs'] = metadata.get('dump_kwargs', None)
valid_serializer = (info['serializer'] == 'auto'
or info['serializer'] in s.serializers)
if not valid_serializer:
        msg = 'Invalid serializer option "{}" for artifact "{}", available serializers: {} '.\
format(info['serializer'], info['identifiers']['name'], tuple(s.serializers.keys()))
raise ValueError(msg)
return info
def hash_inputs(inputs, check_mutations=False, func_info=None):
kargs = {}
varargs = []
all_artifacts = {}
if func_info is None:
func_info = {}
for k, v in inputs['kargs'].items():
h, artifacts = hash(v, hasher=ah.artifact_hasher())
kargs[k] = h
for a in artifacts:
comp = all_artifacts.get(a.id, (a, []))
comp[1].append(k)
all_artifacts[a.id] = comp
for i, v in enumerate(inputs['varargs']):
h, artifacts = hash(v, hasher=ah.artifact_hasher())
varargs.append(h)
for a in artifacts:
comp = all_artifacts.get(a.id, (a, []))
comp[1].append("varargs[{}]".format(i))
all_artifacts[a.id] = comp
if check_mutations:
for comp in all_artifacts.values():
a, arg_names = comp
if a.value_id != hash(a.value):
msg = "Artifact {}, of type {} was mutated before being passed to {}.{} as arguments ({})"
msg = msg.format(a.id, type(a.value), func_info.get('module'),
func_info.get('name'), ",".join(arg_names))
raise MutatedArtifactValueError(msg)
input_hashes = {'kargs': kargs, 'varargs': tuple(varargs)}
return (input_hashes, frozenset(all_artifacts.keys()))
def create_id(input_hashes, input_hash_fn, name, version):
return t.thread_first(input_hashes,
input_hash_fn,
(t.merge, {'name': name, 'version': version}),
hash)
@t.curry
def composite_artifact(repo, _run_info, inputs, input_hashes, input_artifact_ids,
input_hash_fn, artifact_info, compute_duration,
computed_at, use_cache, read_only, key, value):
start_hash_time = time.time()
info = copy(artifact_info)
info['composite'] = False
info['name'] = '{}_{}'.format(info['name'], key)
info['serializer'] = info['serializer'].get(key, 'auto')
info['load_kwargs'] = info['load_kwargs'].get(key, None)
info['dump_kwargs'] = info['dump_kwargs'].get(key, None)
if info['serializer'] == 'auto':
info['serializer'] = s.object_serializer(value)
id = create_id(input_hashes, input_hash_fn, info['name'], info['version'])
hash_duration = time.time() - start_hash_time
start_value_id_time = time.time()
value_id = hash(value)
    value_id_duration = time.time() - start_value_id_time
if not use_cache:
id = hash(id + value_id)
try:
artifact = repo.get_by_id(id)
except KeyError:
record = ArtifactRecord(id=id, value_id=value_id, value=value,
input_artifact_ids=input_artifact_ids,
value_id_duration=value_id_duration,
compute_duration=compute_duration,
hash_duration=hash_duration, computed_at=computed_at,
inputs=inputs, run_info=_run_info, **info)
if read_only:
artifact = repos._artifact_from_record(repo, record)
else:
artifact = repo.put(record)
return artifact
def _base_fn(f):
if utils.is_curry_func(f):
return utils.inner_function(f)
else:
return f
_EXT_MAPPINGS = {'mpeg': 'mpg',
'jpeg': 'jpg'}
def _extract_extension(filename):
ext = os.path.splitext(filename)[1]
if len(ext) > 0:
ext = ext.lower().strip()[1:]
return '.' + _EXT_MAPPINGS.get(ext, ext)
else:
return ext
def _archive_file_hash(filename, preserve_file_ext):
if hasattr(filename, '__fspath__'):
filename = filename.__fspath__()
else:
filename = str(filename)
if not os.path.exists(filename):
raise FileNotFoundError("Unable to archive file, {}, because it doesn't exist!".format(filename))
# TODO: figure out best place to put the hash_name config and use in both cases
value_id = file_hash(filename)
if preserve_file_ext:
extension = _extract_extension(filename)
value_id += extension
return value_id
def run_info():
return repos.Config.current().run_info()
@t.curry
def provenance_wrapper(repo, f):
base_fn = _base_fn(f)
extract_args = utils.args_extractor(base_fn, merge_defaults=True)
func_info = fn_info(f)
input_process_fn = func_info['input_process_fn']
artifact_info = {'name': func_info['identifiers']['name'],
'version': func_info['identifiers']['version'],
'fn_name': func_info['name'], 'fn_module': func_info['module'],
'custom_fields': func_info['custom_fields'],
'serializer': func_info['serializer'],
'load_kwargs': func_info['load_kwargs'],
'dump_kwargs': func_info['dump_kwargs'],
'composite': func_info['composite']}
@bfu.wraps(f)
def _provenance_wrapper(*args, **kargs):
artifact_info_ = copy(artifact_info)
r = repo
if repo is None:
r = repos.get_default_repo()
elif isinstance(repo, str):
r = repos.get_repo_by_name(repo)
_run_info = run_info()
archive_file = func_info['archive_file']
if func_info['use_cache'] is None:
use_cache = repos.get_use_cache()
else:
use_cache = func_info['use_cache']
if func_info['read_only'] is None:
read_only = repos.get_read_only()
else:
read_only = func_info['read_only']
start_hash_time = time.time()
varargs, argsd = extract_args(args, kargs)
raw_inputs = {'varargs': varargs + func_info['varargs'],
'kargs': t.merge(argsd, func_info['kargs'])}
inputs = input_process_fn(raw_inputs)
value_id = None
filename = None
archive_file_helper = archive_file and '_archive_file_filename' in raw_inputs['kargs']
if archive_file_helper:
filename = raw_inputs['kargs']['_archive_file_filename']
value_id = _archive_file_hash(filename, func_info['preserve_file_ext'])
inputs['filehash'] = value_id
input_hashes, input_artifact_ids = hash_inputs(inputs, repos.get_check_mutations(), func_info)
id = create_id(input_hashes, **func_info['identifiers'])
hash_duration = time.time() - start_hash_time
if use_cache:
try:
artifact = r.get_by_id(id)
except KeyError:
artifact = None
except AttributeError as e:
msg = 'The default repo is not set. '
msg += 'You may want to add the `default_repo` key to your call to `provenance.load_config.` '
                msg += 'e.g., provenance.load_config({\'default_repo\': <default repo name>, ...})'
raise AttributeError(msg).with_traceback(e.__traceback__)
else:
artifact = None
if artifact is None:
start_compute_time = time.time()
computed_at = datetime.datetime.utcnow()
value = f(*varargs, **argsd)
compute_duration = time.time() - start_compute_time
post_input_hashes, _ = hash_inputs(inputs)
if id != create_id(post_input_hashes, **func_info['identifiers']):
modified_inputs = []
kargs = input_hashes['kargs']
varargs = input_hashes['varargs']
for name, _hash in post_input_hashes['kargs'].items():
if _hash != kargs[name]:
modified_inputs.append(name)
for i, _hash in enumerate(post_input_hashes['varargs']):
if _hash != varargs[i]:
modified_inputs.append("varargs[{}]".format(i))
msg = "The {}.{} function modified arguments: ({})".format(
func_info['module'], func_info['name'], ",".join(modified_inputs))
raise ImpureFunctionError(msg)
if artifact_info_['composite']:
input_hash_fn = func_info['identifiers']['input_hash_fn']
ca = composite_artifact(r, _run_info, inputs, input_hashes,
input_artifact_ids, input_hash_fn,
artifact_info, compute_duration,
computed_at, use_cache, read_only)
value = {k: ca(k, v) for k, v in value.items()}
artifact_info_['serializer'] = 'auto'
artifact_info_['load_kwargs'] = None
artifact_info_['dump_kwargs'] = None
if artifact_info_['serializer'] == 'auto':
artifact_info_['serializer'] = s.object_serializer(value)
start_value_id_time = time.time()
if archive_file:
if not archive_file_helper:
filename = value
value_id = _archive_file_hash(filename, func_info['preserve_file_ext'])
value = ArchivedFile(value_id, filename, in_repo=False)
else:
value_id = hash(value)
value_id_duration = time.time() - start_value_id_time
if not use_cache:
id = hash(id + value_id)
try:
artifact = r.get_by_id(id)
except KeyError:
artifact = None
if artifact is None:
record = ArtifactRecord(id=id, value_id=value_id, value=value,
input_artifact_ids=input_artifact_ids,
value_id_duration=value_id_duration,
compute_duration=compute_duration,
hash_duration=hash_duration,
computed_at=computed_at, run_info=_run_info,
inputs=inputs, **artifact_info_)
if read_only:
artifact = repos._artifact_from_record(r, record)
else:
artifact = r.put(record)
if archive_file:
# mark the file as in the repo (yucky, I know)
artifact.value.in_repo = True
elif archive_file_helper and func_info.get('delete_original_file', False):
# if we hit an artifact with archive_file we may still need to clean up the
            # referenced file. This is normally taken care of when the file is 'serialized'
# (see file_dump), but in the case of an artifact hit this would never happen.
# One potential downside of this approach is that this local file will be
# deleted and if the artifact value (i.e. the existing file) is not local
# yet it will download the file that we just deleted. Another approach would
            # be to do a put_overwrite which would potentially upload files multiple times.
# So for now, the cleanest way is to accept the potential re-downloading of data.
os.remove(filename)
return artifact.proxy()
if utils.is_curry_func(f):
fb = bfu.FunctionBuilder.from_func(utils.inner_function(f))
for arg in f.args + tuple(f.keywords.keys()):
fb.remove_arg(arg)
param_info = utils.param_info(f)
args = []
defaults = []
for arg, value in param_info.items():
args.append(arg)
if value != utils.UNSPECIFIED_ARG:
defaults.append(value)
arg_inv = ['{}={}'.format(arg,arg) for arg in args]
fb.body = 'return _provenance_wrapper(%s)' % ", ".join(arg_inv)
fb.args = args
fb.defaults = tuple(defaults)
execdict = {'_provenance_wrapper': _provenance_wrapper}
ret = fb.get_func(execdict, with_dict=True)
return ret
return _provenance_wrapper
def remove_inputs_fn(to_remove):
def remove_inputs(inputs):
kargs = t.keyfilter(lambda k: k not in to_remove, inputs['kargs'])
return {'kargs': kargs, 'varargs': inputs['varargs']}
return remove_inputs
def ensure_proxies(*parameters):
"""Decorator that ensures that the provided parameters are always arguments of type ArtifactProxy.
When no parameters are passed then all arguments will be checked.
This is useful to use on functions where you want to make sure artifacts
are being passed in so lineage can be tracked.
"""
def decorator(func):
base_fn = _base_fn(func)
extract_args = utils.args_extractor(base_fn, merge_defaults=True)
@bfu.wraps(func)
def check_args(*args, **kargs):
_varargs, argsd = extract_args(args, kargs)
not_valid = None
if len(parameters) == 0:
not_valid = [p for p, a in argsd.items() if not repos.is_proxy(a)]
else:
not_valid = [p for p in parameters if not repos.is_proxy(argsd[p])]
if len(not_valid) > 0:
msg = "Arguments must be `ArtifactProxy`s but were not: [{}]"\
.format(", ".join(not_valid))
raise ValueError(msg)
return func(*args, **kargs)
return check_args
return decorator
def provenance(version=0, repo=None, name=None, merge_defaults=None,
ignore=None, input_hash_fn=None, remove=None, input_process_fn=None,
archive_file=False, delete_original_file=False, preserve_file_ext=False,
returns_composite=False, custom_fields=None,
serializer=None, load_kwargs=None, dump_kwargs=None, use_cache=None,
read_only=None, tags=None, _provenance_wrapper=provenance_wrapper):
"""
Decorates a function so that all inputs and outputs are cached. Wraps the return
value in a proxy that has an artifact attached to it allowing for the provenance
to be tracked.
Parameters
----------
version : int
Version of the code that is computing the value. You should increment this
number when anything that has changed to make a previous version of an artifact
outdated. This could be the function itself changing, other functions or libraries
that it calls has changed, or an underlying data source that is being queried has
updated data.
repo : Repository or str
Which repo this artifact should be saved in. The default repo is used when
none is provided and this is the recommended approach. When you pass in a string
it should be the name of a repo in the currently registered config.
name : str
The name of the artifact of the function being wrapped. If not provided it
defaults to the function name (without the module).
returns_composite : bool
When set to True the function should return a dictionary. Each value of the
returned dict will be serialized as an independent artifact. When the composite
artifact is returned as a cached value it will be a dict-like object that will
lazily pull back the artifacts as requested. You should use this when you need
multiple artifacts created atomically but you do not want to fetch all the them
simultaneously. That way you can lazily load only the artifacts you need.
serializer : str
The name of the serializer you want to use for this artifact. The built-in
ones are 'joblib' (the default) and 'cloudpickle'. 'joblib' is optimized for
numpy while 'cloudpickle' can serialize functions and other objects the standard
python (and joblib) pickler cannot. You can also register your own serializer
via the provenance.register_serializer function.
dump_kwargs : dict
A dict of kwargs to be passed to the serializer when dumping artifacts
associated with this function. This is rarely used.
load_kwargs : dict
A dict of kwargs to be passed to the serializer when loading artifacts
associated with this function. This is rarely used.
ignore : list, tuple, or set
A list of parameters that should be ignored when computing the input hash.
This way you can mark certain parameters as invariant to the computed result.
An example of this would be a parameter indicating how many cores should be
        used to compute a result. If the result is invariant to the number of cores,
        you would want to ignore it so the value isn't recomputed when a different number
of cores is used.
remove : list, tuple, or set
A list of parameters that should be removed prior to hashing and saving
        of the inputs. The distinction between this and the ignore parameter is
        that ignored parameters are still recorded. The motivation to not record,
        i.e. remove, certain parameters is usually driven by performance or
        storage considerations.
input_hash_fn : function
        A function that takes a dict of all of the arguments' hashes with the
structure of {'kargs': {'param_a': '1234hash'}, 'varargs': ('deadbeef',..)}.
It should return a dict of the same shape but is able to change this dict
as needed. The main use case for this function is overshadowed by the
ignore parameter and so this parameter is hardly ever used.
input_process_fn : function
A function that pre-processes the function's inputs before they are hashed
        or saved. The function takes a dict of all of the function's arguments with the
structure of {'kargs': {'param_a': 42}, 'varargs': (100,..)}.
It should return a dict of the same shape but is able to change this dict
as needed. The main use case for this function is overshadowed by the
remove parameter and the value_repr function.
merge_defaults : bool or list of parameters to be merged
When True then the wrapper introspects the argspec of the function being
decorated to see what keyword arguments have default dictionary values. When
a list of strings the list is taken to be the list of parameters you want to
merge on.
When a decorated function is called then the dictionary passed in as an
argument is merged with the default dictionary. That way people only need
to specify the keys they are overriding and don't have to specify all the
default values in the default dictionary.
use_cache : bool or None (default None)
use_cache False turns off the caching effects of the provenance decorator,
while still tracking the provenance of artifacts. This should only be used during
quick local iterations of a function to avoid having to bump the version with
each change. When set to None (the default) it defers to the global provenance
use_cache setting.
read_only: bool or None (default None)
read_only True will prevent any artifacts from being persisted to the repo.
This should be used when you want to load existing artifacts from provenance
but you do not want to add artifacts if the one you're looking for does not
exist. This is useful when consuming artifacts created elsewhere, or when
you are doing quick iterations (as with use_cache False) but you still want
to use the cache for existing artifacts. When set to None (the default) it
defers to the global provenance read_only setting.
custom_fields : dict
A dict with types that serialize to json. These are saved for searching in
the repository.
tags : list, tuple or set
Will be added to custom_fields as the value for the 'tags' key.
archive_file : bool, defaults False
When True then the return value of the wrapped function will be assumed to
be a str or pathlike that represents a file that should be archived into
the blobstore. This is a good option to use when the computation of a function
can't easily be returned as an in-memory pickle-able python value.
delete_original_file : bool, defaults False
To be used in conjunction with archive_file=True, when delete_original_file
is True then the returned file will be deleted after it has been archived.
preserve_file_ext : bool, default False
To be used in conjunction with archive_file=True, when preserve_file_ext is
        True then the id of the archived artifact will be the hash of the file contents
        plus the file extension of the original file. The motivation for setting this to
        True is to be able to look at the contents of a blobstore on disk and preview
        the contents of an artifact with your regular OS tools (e.g. viewing images
        or videos).
Returns
-------
ArtifactProxy
Returns the value of the decorated function as a proxy. The proxy
will act exactly like the original object/value but will have an
artifact method that returns the Artifact associated with the value.
This wrapped value should be used with all other functions that are wrapped
with the provenance decorator as it will help track the provenance and also
reduce redundant storage of a given value.
"""
if ignore and input_hash_fn:
raise ValueError("You cannot provide both ignore and input_hash_fn")
if ignore:
ignore = frozenset(ignore)
input_hash_fn = remove_inputs_fn(ignore)
if not input_hash_fn:
input_hash_fn = lambda inputs: inputs
if remove and input_process_fn:
raise ValueError("You cannot provide both remove and input_process_fn")
if remove:
remove = frozenset(remove)
input_process_fn = remove_inputs_fn(remove)
if not input_process_fn:
input_process_fn = lambda inputs: inputs
def wrapped(f):
_custom_fields = custom_fields or {}
if tags:
_custom_fields['tags'] = tags
        f._provenance_metadata = {'version': version,
                                  'name': name,
                                  'archive_file': archive_file,
                                  'delete_original_file': delete_original_file,
                                  'preserve_file_ext': preserve_file_ext,
                                  'input_hash_fn': input_hash_fn,
                                  'input_process_fn': input_process_fn,
                                  'returns_composite': returns_composite,
                                  'custom_fields': _custom_fields,
                                  'serializer': serializer,
                                  'load_kwargs': load_kwargs,
                                  'dump_kwargs': dump_kwargs,
                                  'use_cache': use_cache,
                                  'read_only': read_only}
f.__merge_defaults__ = merge_defaults
return _provenance_wrapper(repo, f)
return wrapped
class ArchivedFile(object):
def __init__(self, id, original_filename=None, in_repo=True):
self.blob_id = id
self.original_filename = original_filename
self.in_repo = in_repo
def abspath(self):
repo = repos.get_default_repo()
path = repo._filename(self.blob_id)
return os.path.abspath(path)
def __fspath__(self):
return self.abspath() if self.in_repo else self.original_filename
def __str__(self):
return self.__fspath__()
def __repr__(self):
if self.original_filename:
return "<ArchivedFile {}, {} >".format(self.blob_id, self.original_filename)
else:
return "<ArchivedFile {} >".format(self.blob_id)
def file_dump(archived_file, dest_filename, delete_original=False):
op = shutil.move if delete_original else shutil.copy
op(archived_file.original_filename, dest_filename)
def file_load(id):
return ArchivedFile(id, in_repo=True)
s.register_serializer('file', file_dump, file_load)
def archive_file(filename, name=None, delete_original=False, custom_fields=None, preserve_ext=False):
"""(beta) Copies or moves the provided filename into the Artifact Repository so it can
be used as an ``ArtifactProxy`` to inputs of other functions.
Parameters
----------
    filename : str or pathlike
        The file that should be archived into the blobstore. This is a good option
        to use when the result of a computation can't easily be kept as an
        in-memory pickle-able python value.
    delete_original : bool, defaults False
        When True the original file will be deleted after it has been archived.
    preserve_ext : bool, default False
        When True then the id of the archived artifact will be the hash of the file
        contents plus the file extension of the original file. The motivation for
        setting this to True is to be able to look at the contents of a blobstore
        on disk and preview the contents of an artifact with your regular OS tools
        (e.g. viewing images or videos).
"""
# we want artifacts created by archive_file to be invariant to the
# filename (see remove) but not the custom_fields.
# filename is still passed in so the hash of the file on disk can be
# computed as part of the id of the artifact.
@provenance(archive_file=True, name=name or 'archive_file', preserve_file_ext=preserve_ext,
delete_original_file=delete_original, remove=['_archive_file_filename'],
custom_fields=custom_fields)
def _archive_file(_archive_file_filename, custom_fields):
return filename
return _archive_file(filename, custom_fields)
def provenance_set(set_labels=None, initial_set=None, set_labels_fn=None):
if set_labels and set_labels_fn:
raise ValueError("You cannot provide both set_labels and set_labels_fn.")
def make_wrapper(f):
if set_labels_fn:
base_fn = _base_fn(f)
extract_args = utils.args_extractor(base_fn, merge_defaults=True)
func_info = utils.fn_info(f)
@bfu.wraps(f)
def wrapper(*fargs, **fkargs):
if set_labels_fn:
varargs, argsd = extract_args(fargs, fkargs)
varargs += func_info['varargs']
argsd.update(func_info['kargs'])
labels = set_labels_fn(*varargs, **argsd)
else:
labels = set_labels
with repos.capture_set(labels=labels, initial_set=initial_set) as result:
f(*fargs, **fkargs)
return result[0]
return wrapper
return make_wrapper
def promote(artifact_or_id, to_repo, from_repo=None):
from_repo = from_repo if from_repo else repos.get_default_repo()
artifact = repos.coerce_to_artifact(artifact_or_id, repo=from_repo)
for a in dependencies(artifact):
if a not in to_repo:
to_repo.put(a)
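
# Illustrative usage sketch of the `provenance` decorator defined above, kept as
# comments because calling a decorated function assumes a default artifact
# repository has already been registered (see the `default_repo` hint raised in
# `_provenance_wrapper`); the function and values below are purely hypothetical.
#
#     @provenance(version=0, name='expensive_sum')
#     def expensive_sum(a, b):
#         return a + b
#
#     proxy = expensive_sum(1, 2)   # first call computes and stores an artifact
#     proxy = expensive_sum(1, 2)   # an identical call is served from the cache
#
# The returned ArtifactProxy behaves like the plain value but also carries the
# Artifact metadata described in the decorator's docstring.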
| 1.921875
| 2
|
tests/active_learning/experiments_test.py
|
MetaExp/backend
| 1
|
12774884
|
from active_learning.oracles import UserOracle, FunctionalOracle
from active_learning.evaluation import Evaluator
from active_learning.active_learner import RandomSelectionAlgorithm, GPSelect_Algorithm, UncertaintySamplingAlgorithm
from active_learning.rating import length_based
import unittest
class ActiveLearningExperimentsTest(unittest.TestCase):
def test_evaluator(self):
algorithm = UncertaintySamplingAlgorithm
algo_params_length = {'hypothesis': 'Gaussian Process', 'hypothesis_params': {'transformation': 'length'}}
algo_params_tfidf = {'hypothesis': 'Gaussian Process', 'hypothesis_params': {'transformation': 'tfidf'}}
logs_path = '/home/freya/BP/32de-python/notebooks/active_learning/logs/testrun/f'
def rating_func_constant(c):
return lambda x: c
rating_func = rating_func_constant(1)
oracle = FunctionalOracle(**{'rating_func': rating_func})
res = Evaluator(algorithm=algorithm, algo_params=algo_params_tfidf,
oracle=oracle,
batch_size=1, dataset_name='Rotten Tomato', logs_path=logs_path).compute()
if __name__ == '__main__':
unittest.main()
| 2.25
| 2
|
nutszebra_download_cifar10.py
|
nutszebra/trainer
| 5
|
12774885
|
import six
import numpy as np
import nutszebra_utility as nz
import sys
import pickle
def unpickle(file_name):
fp = open(file_name, 'rb')
if sys.version_info.major == 2:
data = pickle.load(fp)
elif sys.version_info.major == 3:
data = pickle.load(fp, encoding='latin-1')
fp.close()
return data
class Cifar10(object):
def __init__(self):
self.utility = nz.Utility()
self.output_name = 'cifar10.pkl'
self.url = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
self.downloaded_file = 'cifar-10-python.tar.gz'
self.untared_file = 'cifar-10-batches-py'
self.batch_train_file = ['data_batch_' + str(num) for num in six.moves.range(1, 6)]
self.batch_test_file = 'test_batch'
self.meta_file = 'batches.meta'
self.converted_name = 'cifar10.pkl'
def download_cifar_10(self):
# if already downloaded and processed, then return True
if self.converted_name in self.utility.find_files(self.utility.nutszebra_path, affix_flag=True):
print('Already downloaded')
return True
# download file
print('Downloading: ' + self.downloaded_file)
self.utility.download_file(self.url, self.utility.nutszebra_path, self.downloaded_file)
print('Done')
print('Uncompressing')
# untar
self.utility.untar_gz(self.utility.nutszebra_path + '/' + self.downloaded_file)
print('Done')
# delete tar.gz file
self.utility.remove_file(self.downloaded_file)
# load train file
print('Loading train data')
train_x = np.zeros((50000, 3, 32, 32), dtype=np.float32)
train_y = np.zeros((50000), dtype=np.int32)
for i, batch_file in enumerate(self.batch_train_file):
data = unpickle(self.untared_file + '/' + batch_file)
start = i * 10000
end = start + 10000
train_x[start:end] = data['data'].reshape(10000, 3, 32, 32)
train_y[start:end] = np.array(data['labels'], dtype=np.int32)
print('Done')
# load test file
print('Loading test data')
test_x = np.zeros((10000, 3, 32, 32), dtype=np.float32)
test_y = np.zeros((10000), dtype=np.int32)
data = unpickle(self.untared_file + '/' + self.batch_test_file)
test_x[:] = data['data'].reshape(10000, 3, 32, 32)
test_y[:] = np.array(data['labels'], dtype=np.int32)
print('Done')
# load meta file
data = unpickle(self.untared_file + '/' + self.meta_file)
meta = data['label_names']
# save loaded data
print('Saving')
data = {}
data['train_x'] = train_x
data['train_y'] = train_y
data['test_x'] = test_x
data['test_y'] = test_y
data['meta'] = meta
self.utility.save_pickle(data, self.utility.nutszebra_path + '/' + self.converted_name)
def check_overlap(self):
data = self.load_cifar10_data()
length = len(data['test_x'])
result = [0] * length
for i in six.moves.range(length):
            result[i] = np.all(data['test_x'][i] == data['train_x'], axis=(1, 2, 3)).any()  # per-image comparison
return (np.any(result), result)
def load_cifar10_data(self):
self.download_cifar_10()
return unpickle(self.utility.nutszebra_path + '/' + self.converted_name)
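
# Minimal usage sketch, assuming network access on the first run: the archive is
# downloaded from the URL above and converted to cifar10.pkl; later runs just
# load the converted pickle.
if __name__ == '__main__':
    cifar = Cifar10()
    data = cifar.load_cifar10_data()
    # train_x: (50000, 3, 32, 32) float32, train_y: (50000,) int32, meta: label names
    print(data['train_x'].shape, data['test_x'].shape, data['meta'])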
| 2.609375
| 3
|
tests/bugs/core_5676_test.py
|
FirebirdSQL/firebird-qa
| 1
|
12774886
|
#coding:utf-8
#
# id: bugs.core_5676
# title: Consider equivalence classes for index navigation
# decription:
#                  Confirmed inefficiency on:
# 3.0.3.32837
# 4.0.0.800
# Checked on:
# 3.0.3.32852: OK, 1.250s.
# 4.0.0.830: OK, 1.375s.
#
# tracker_id: CORE-5676
# min_versions: ['3.0.3']
# versions: 3.0.3
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0.3
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
recreate table document(id int primary key using index pk_document);
recreate table doc_ip_doc(id int primary key using index pk_doc_ip_doc, name varchar(100));
insert into document (id) select row_number() over() from rdb$types,(select 1 i from rdb$types rows 10);
insert into doc_ip_doc (id) select row_number() over() from rdb$types;
commit;
set planonly;
select document.id, doc_ip_doc.name
from doc_ip_doc
join document on document.id=doc_ip_doc.id
order by doc_ip_doc.id;
--PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
select document.id, doc_ip_doc.name
from doc_ip_doc
join document on document.id=doc_ip_doc.id
order by document.id;
-- OLD: PLAN SORT (JOIN (DOC_IP_DOC NATURAL, DOCUMENT INDEX (PK_DOCUMENT)))
select doc_ip_doc.id, doc_ip_doc.name
from doc_ip_doc
join document on document.id=doc_ip_doc.id
order by id;
--PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
select document.id, doc_ip_doc.name
from doc_ip_doc
join document on document.id=doc_ip_doc.id
order by id;
-- OLD: PLAN SORT (JOIN (DOC_IP_DOC NATURAL, DOCUMENT INDEX (PK_DOCUMENT)))
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
PLAN JOIN (DOC_IP_DOC ORDER PK_DOC_IP_DOC, DOCUMENT INDEX (PK_DOCUMENT))
"""
@pytest.mark.version('>=3.0.3')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| 1.59375
| 2
|
statistics.py
|
gmurro/MCTS
| 0
|
12774887
|
from tqdm import tqdm
from MCTS import MCTS
from BinaryTree import BinaryTree
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(15)
def run_experiment(max_iterations, dynamic_c=False):
"""
Run a single experiment of a sequence of MCTS searches to find the optimal path.
:param max_iterations: Number of iterations to run the MCTS.
:param dynamic_c: Boolean indicating whether to use a dynamic c or not.
    :return: distance (number of differing branch choices) between the path found by the search and the best leaf
"""
tree = BinaryTree(depth=12, b=20, tau=3)
best_leaf = max(tree.leaves)
mcts = MCTS(max_iterations=max_iterations, c=2)
optimal_path = ""
while tree.depth > 0:
# search the best direction
direction = mcts.search(tree, dynamic_c=dynamic_c, verbose=False)
optimal_path += direction
# update the tree
tree.update_root(direction)
# return the distance of the optimal path found from the search wrt the best leaf
return sum(1 for a, b in zip(optimal_path, best_leaf.address) if a != b)
def main():
# compute statistics for static c and dynamic c
n_iterations = np.logspace(0.7, 3, num=18, base=10, dtype=int)
values_static_c = [run_experiment(max_iterations=n, dynamic_c=False) for n in tqdm(n_iterations, desc='Execute MCTS with c=2', unit=' experiment')]
values_dynamic_c = [run_experiment(max_iterations=n, dynamic_c=True) for n in tqdm(n_iterations, desc='Execute MCTS with dynamic c', unit=' experiment')]
# plot the results
plt.figure(figsize=(8, 4))
plt.plot(n_iterations, values_dynamic_c, '-o', label="MCTS with dynamic c")
plt.plot(n_iterations, values_static_c, '-o', label="MCTS with c=2")
plt.xlabel("Number of iterations")
plt.ylabel("Distance of the optimal path from the best leaf")
plt.title("Compare the value of the optimal path found by MCTS with and without dynamic c")
plt.grid(linestyle='--', linewidth=1)
plt.xscale("log")
plt.xticks(n_iterations, n_iterations)
plt.legend()
plt.show()
if __name__ == "__main__":
main()
| 3.09375
| 3
|
src/pywriter/model/chapter.py
|
peter88213/PyWriter
| 1
|
12774888
|
"""Provide a class for yWriter chapter representation.
Copyright (c) 2021 <NAME>
For further information see https://github.com/peter88213/PyWriter
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
class Chapter():
"""yWriter chapter representation.
# xml: <CHAPTERS><CHAPTER>
"""
chapterTitlePrefix = "Chapter "
# str
# Can be changed at runtime for non-English projects.
def __init__(self):
self.title = None
# str
# xml: <Title>
self.desc = None
# str
# xml: <Desc>
self.chLevel = None
# int
# xml: <SectionStart>
# 0 = chapter level
# 1 = section level ("this chapter begins a section")
self.oldType = None
# int
# xml: <Type>
# 0 = chapter type (marked "Chapter")
# 1 = other type (marked "Other")
self.chType = None
# int
# xml: <ChapterType>
# 0 = Normal
# 1 = Notes
# 2 = Todo
self.isUnused = None
# bool
# xml: <Unused> -1
self.suppressChapterTitle = None
# bool
# xml: <Fields><Field_SuppressChapterTitle> 1
# True: Chapter heading not to be displayed in written document.
# False: Chapter heading to be displayed in written document.
self.isTrash = None
# bool
# xml: <Fields><Field_IsTrash> 1
# True: This chapter is the yw7 project's "trash bin".
# False: This chapter is not a "trash bin".
self.suppressChapterBreak = None
# bool
# xml: <Fields><Field_SuppressChapterBreak> 0
self.srtScenes = []
# list of str
# xml: <Scenes><ScID>
# The chapter's scene IDs. The order of its elements
# corresponds to the chapter's order of the scenes.
def get_title(self):
"""Fix auto-chapter titles if necessary
"""
text = self.title
if text:
text = text.replace('Chapter ', self.chapterTitlePrefix)
return text
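
# Minimal usage sketch: get_title() swaps the auto-generated "Chapter " prefix
# for the class-level chapterTitlePrefix, which can be changed at runtime for
# non-English projects (the German prefix below is just an example).
if __name__ == '__main__':
    Chapter.chapterTitlePrefix = 'Kapitel '
    chapter = Chapter()
    chapter.title = 'Chapter 1'
    print(chapter.get_title())  # -> 'Kapitel 1'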
| 3.046875
| 3
|
Python/main.py
|
ltzheng/OFDClean
| 1
|
12774889
|
import argparse
from utils.data_loader import DataLoader
from algorithms.OFDClean import OFDClean
if __name__ == '__main__':
threshold = 20
sense_dir = ['sense2/', 'sense4/', 'sense6/', 'sense8/', 'sense10/']
sense_path = 'clinical' # sense_dir[1]
err_data_path = ['data_err3', 'data_err6', 'data_err9', 'data_err12', 'data_err15']
size_data_path = ['data_size20', 'data_size40', 'data_size60', 'data_size80', 'data_size100']
data_path = 'clinical'
# data_path = err_data_path[0]
# data_path = size_data_path[4]
config = {
'data': 'datasets/data/' + data_path + '.csv',
'ofds': 'datasets/ofds/' + 'clinical.csv',
'senses': 'datasets/senses/' + sense_path + '/', # sense name should be the same as column name
}
Loader = DataLoader(config)
data = Loader.read_data()
# print('data:\n', data)
ofds, right_attrs = Loader.read_ofds()
print('ofds:\n', ofds)
# print('right_attrs:\n', right_attrs)
senses, ssets = Loader.read_senses(right_attrs)
print('senses:\n', senses)
# print('ssets:\n', ssets)
Cleaner = OFDClean(data, ofds, senses, right_attrs, ssets, threshold)
Cleaner.run()
| 2.125
| 2
|
geosoft/gxapi/GXTEST.py
|
fearaschiarrai/gxpy
| 25
|
12774890
|
<reponame>fearaschiarrai/gxpy
### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXTEST(gxapi_cy.WrapTEST):
"""
GXTEST class.
Used to place special testing methods
"""
def __init__(self, handle=0):
super(GXTEST, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXTEST <geosoft.gxapi.GXTEST>`
:returns: A null `GXTEST <geosoft.gxapi.GXTEST>`
:rtype: GXTEST
"""
return GXTEST()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
@classmethod
def enable_disable_arc_engine_license(cls, enable):
"""
Forcefully disable ArEngine license availability for testing purposes
:param enable: Enable/disable?
:type enable: bool
.. versionadded:: 6.4.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
gxapi_cy.WrapTEST._enable_disable_arc_engine_license(GXContext._get_tls_geo(), enable)
@classmethod
def arc_engine_license(cls):
"""
Test availability of an ArEngine license on this system
:returns: 0 - Not available, 1 - Available
:rtype: int
.. versionadded:: 6.4
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapTEST._arc_engine_license(GXContext._get_tls_geo())
return ret_val
@classmethod
def test_mode(cls):
"""
Checks to see if we are running inside testing system
:rtype: bool
.. versionadded:: 6.4.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapTEST._test_mode(GXContext._get_tls_geo())
return ret_val
@classmethod
def wrapper_test(cls, funcs, log):
"""
Test to make sure all wrappers are valid linking
:param funcs: List of functions to test
:param log: Output log file
:type funcs: str
:type log: str
.. versionadded:: 6.1
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
gxapi_cy.WrapTEST._wrapper_test(GXContext._get_tls_geo(), funcs.encode(), log.encode())
@classmethod
def core_class(cls, cl, log):
"""
Generic Class Test Wrapper
:param cl: Name of class to test
:param log: Output log file
:type cl: str
:type log: str
.. versionadded:: 9.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
gxapi_cy.WrapTEST._core_class(GXContext._get_tls_geo(), cl.encode(), log.encode())
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer
| 1.976563
| 2
|
DSP Lab 1/make_sin02.py
|
bubbledoodle/EL-GY-6183-Digital-Signal-Processing-LAB
| 0
|
12774891
|
<filename>DSP Lab 1/make_sin02.py
# Make a wave file (.wav) consisting of a sine wave
# Adapted from http://www.swharden.com
from struct import pack
from math import sin, pi
import wave
Fs = 8000
## CREATE MONO FILE ##
wf = wave.open('sin02_mono.wav', 'w') # wf : wave file
wf.setnchannels(1) # one channel (mono)
wf.setsampwidth(4) # four bytes per sample
wf.setframerate(Fs) # samples per second
maxAmp = 2**31 - 1.0 # maximum amplitude
f = 261.625565 # Hz (middle C)
for n in range(0, 2*Fs): # 2 seconds duration
    wvData = pack('i', int(maxAmp * sin(n*2*pi*f/Fs)))  # cast to int: struct 'i' rejects floats in Python 3
# i indicate 'integer' ('<i' or '>i' for different Endians)
wf.writeframesraw(wvData)
wf.close()
## CREATE STEREO FILE ##
wf = wave.open('sin02_stereo.wav', 'w')
wf.setnchannels(2) # two channels (stereo)
wf.setsampwidth(4) # four bytes per sample
wf.setframerate(Fs) # samples per second
maxAmp = 2**31-1.0 # maximum amplitude
f1 = 261.625565 # 261.625565 Hz (middle C)
f2 = 440.0 # note A4
for n in range(0, 2*Fs): # 2 seconds duration
    wvData = pack('i', int(maxAmp * sin(n*2*pi*f1/Fs)))  # left
    wvData += pack('i', int(maxAmp * sin(n*2*pi*f2/Fs)))  # right
wf.writeframesraw(wvData)
wf.close()
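
# Quick sanity check, a small sketch reopening the two files generated above and
# printing the header parameters that were just written.
for name in ('sin02_mono.wav', 'sin02_stereo.wav'):
    with wave.open(name, 'r') as check:
        print(name, '-', check.getnchannels(), 'channel(s),',
              check.getsampwidth(), 'bytes/sample,',
              check.getframerate(), 'Hz,', check.getnframes(), 'frames')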
| 3.21875
| 3
|
mytreelstm/Tree.py
|
luosichengx/treelstm.pytorch
| 0
|
12774892
|
<filename>mytreelstm/Tree.py
import os
op = ["forall","exists","and","or","not","distinct","implies","iff","symbol","function","real_constant",
"bool_constant","int_constant","str_constant","plus","minus","times","le","lt","equals",
"ite","toreal","bv_constant","bvnot","bvand","bvor","bvxor","concat","extract","rotation",
"extend","zero_extend","sign_extend","bvult","bvule","bvuge","bvugt","bvneg","bvadd","bvsub","bvmul","bvudiv",
"bvurem","bvlshl","bvlshr","bvrol","bvror","bvzext","bvsext","bvslt","bvsle","bvcomp","bvsdiv",
"bvsrem","bvashr","str_length","str_concat","str_contains","str_indexof","str_replace","str_substr",
"str_prefixof","str_suffixof","str_to_int","int_to_str","str_charat","select","store","value",
"div","pow","algebraic_constant","bvtonatural","_to_fp","=","unknown"]
# op = ["not","bvadd","bvule","extract","ite","and","or","distinct","bvmul","concat","bvashr",
# "bvuge","bvugt","bvnot","bvor","bvsle","bvsub","bvsgt","zero_extend","bvshl","bvsge","bvlshr","sign_extend",
# "bvurem","bvudiv","bvxor","bvand"]
si_op = ["extract","zero_extend","sign_extend","_to_fp"]
tri_op = ["ite"]
bv_constant = "constant"
bool_constant = "constant"
class Tree:
def __init__(self, val, left= None, mid= None, right= None):
if val in op:
pass
elif val != "constant" and not val.startswith("var") and val != None:
raise ValueError
self.val = val
for child in [left, mid, right]:
if child and not isinstance(child,Tree):
raise ValueError
self.left = left
self.mid = mid
self.right = right
self.name = None
if val == "constant":
self.name = "constant"
elif val.startswith("var"):
self.name = "var"
else:
self.name = "mid_val"
def set_name(self, name):
self.name = name
def __str__(self):
left_val = ""
if self.left and self.left.name:
left_val = self.left.name
mid_val = ""
if self.mid and self.mid.name:
mid_val = self.mid.name
right_val = ""
if self.right and self.right.name:
right_val = self.right.name
name = ""
if self.name:
name = self.name
if self.val == "concat":
mid_val = "mid_val"
return (' '.join([name,"(",self.val, left_val, mid_val, right_val, ")"]))
class varTree(Tree):
def __init__(self, val, left= None, mid= None, right= None):
super(varTree,self).__init__(val, left, mid, right)
self.var = set()
self.depth = 0
self.compress_depth = 0
# self.compress_depth2 = 0
if val.startswith("var"):
self.var.add(val)
for child in [left, mid, right]:
if child:
self.update(child)
if self.val == "concat":
self.reduce_concat()
self.compress_depth -= 1
# self.compress_depth2 -= 1
# if self.val == "ite":
# self.reduce_ite()
# self.compress_depth2 -= 1
def update(self, child):
self.depth = max(self.depth, child.depth + 1)
self.compress_depth = max(self.compress_depth, child.compress_depth + 1)
# self.compress_depth2 = max(self.compress_depth2, child.compress_depth2 + 1)
self.var.update(child.var)
def reduce_concat(self):
if not self.left:
raise ValueError
if not self.mid:
raise ValueError
if self.right:
raise ValueError
var = set()
var.update(self.left.var)
        var.update(self.mid.var)  # the right child is always None for concat nodes (checked above)
if self.left.var == var and self.left.depth >= self.mid.depth:
self.replace_children(self.mid)
elif self.mid.var == var and self.left.depth <= self.mid.depth:
self.replace_children(self.left)
elif self.left.depth > self.mid.depth:
self.replace_children(self.left)
else:
self.replace_children(self.mid)
def reduce_ite(self):
if not self.left:
return
if not self.mid:
return
if not self.right:
return
var = set()
depth = 0
for children in [self.left, self.mid, self.right]:
var.update(children.var)
depth = max(depth, children.depth)
for children in [self.left, self.mid, self.right]:
if var == children.var and depth == children.depth:
self.replace_children(children)
return
for children in [self.left, self.mid, self.right]:
if depth == children.depth:
self.replace_children(children)
def replace_children(self, tree):
left, mid, right = tree.left, tree.mid, tree.right
self.left, self.mid, self.right = left, mid, right
def __str__(self):
n = super(varTree, self).__str__()
return " ".join([n, "depth:", str(self.depth), "compress_depth:" , str(self.compress_depth)])
| 2.34375
| 2
|
utill.py
|
geekSiddharth/decompiler
| 4
|
12774893
|
"""
"""
# TODO: make this list complete [exclude stuffs ending with 's']
conditionals = [
"cmp",
"cmn",
"tst",
"teq"
]
class CMP(object):
def __init__(self, line_no, text):
self.line_no = line_no
self.text = text
class Branch(object):
def __init__(self, line_no, text, label=None):
text = text.strip()
self.line_no = line_no
self.text = text
# dealing with space
self.label_text = getArgs(text)[0]
self.label = label
class Label(object):
def __init__(self, line_no, text):
self.line_no = line_no
self.text = text.replace(":", "")
class Loop(object):
def __init__(self, label, branch, cmp):
self.label = label
self.branch = branch
self.cmp = cmp
self.enterNode = label.line_no
self.exitNode = branch.line_no
def getStart(self):
return self.enterNode
def getEnd(self):
return self.exitNode
def contains(self, i, j):
if i > self.enterNode and j < self.exitNode:
return True
return False
class If(object):
def __init__(self, cmp, branch_to_end, end_label):
self.cmp = cmp
self.cmp_branch = branch_to_end
self.branch_to_end = branch_to_end
self.end_label = end_label
self.block1_start_line = branch_to_end.line_no + 1
self.block1_end_line = end_label.line_no - 1
def contains(self, i, j):
if i > self.block1_start_line and j <= self.block1_end_line:
return True
return False
def getStart(self):
return self.block1_start_line
def getEnd(self):
return self.block1_end_line
class IfElse(object):
def __init__(self, cmp, branch_to_2nd_block, branch_to_end, block2_label, end_label):
self.cmp = cmp
self.cmp_branch = branch_to_2nd_block
self.branch_to_2nd_Block = branch_to_2nd_block
self.block1_start_line = branch_to_2nd_block.line_no + 1
assert branch_to_end.line_no - 1 == block2_label.line_no - 2
self.block1_end_line = branch_to_end.line_no - 1
self.block2_start_line = block2_label.line_no + 1
self.block2_end_line = end_label.line_no - 1
self.block2_start_label = block2_label
self.block2_label = block2_label
self.block2_end_label = end_label
self.end_label = end_label
def isLabel(text):
    if text.strip().endswith(":"):
        return True
    return False
def isConditional(text):
text = getOpcode(text)
# if text.endswith("s"):
# return True
# print(text)
if text in conditionals:
return True
return False
# TODO: make this more robust
def isBranching(text):
if text.startswith("b"):
return True
return False
def removeSpaces(text):
text = text.replace("\t"," ")
text = text.strip(" ")
text = text.strip("\n")
i = 0
while(i != len(text)-1):
if text[i] == " " and text[i+1] == " ":
text = text[:i+1]+text[i+2:]
continue
i += 1
return text
def getOpcode(text):
text = removeSpaces(text)
op = text.split(" ")[0]
return op
def getArgs(text):
text = removeSpaces(text)
op = ''.join(text.split(" ")[1:])
args = op.split(",")
for i in range(len(args)):
args[i].strip(" ")
if args[i][0] == "#":
args[i] = args[i][1:]
return args
def getComparison(cmp, branch):
vars = getArgs(cmp)
var1 = vars[0]
var2 = vars[1]
cond = getOpcode(branch)[1:]
ans = ""
if cond == "eq":
ans = (str(var1) + " == " + str(var2))
elif cond == "lt":
ans = (str(var1) + " < " + str(var2))
elif cond == "gt":
ans = (str(var1) + " > " + str(var2))
elif cond == "ge":
ans = (str(var1) + " >= " + str(var2))
elif cond == "le":
ans = (str(var1) + " <= " + str(var2))
elif cond == "ne":
ans = (str(var1) + " != " + str(var2))
return ans
'''
def getOpDesc(text):
text = text.upper()
args = getArgs(text)
opcode = getOpcode(text)
if opcode == "add" or opcode == "vadd.f32" or opcode == "vadd.f64":
print(str(args[0]) + " = " + str(args[1]) + " + " + str(args[2]))
elif opcode == "sub":
print(str(args[0]) + " = " + str(args[1]) + " - " + str(args[2]))
elif opcode == "rsb":
print(str(args[0]) + " = " + str(args[2]) + " - " + str(args[1]))
elif opcode == "and":
print(str(args[0]) + " = " + str(args[1]) + " && " + str(args[2]))
elif opcode == "orr":
print(str(args[0]) + " = " + str(args[1]) + " || " + str(args[2]))
elif opcode == "mov" or opcode == "vmov.f32":
print(str(args[0]) + " = " + str(args[1]))
elif opcode == "str":
print(str(args[1]) + " = " + str(args[0]))
elif opcode == "ldr":
print("int " + str(args[0]) + " = " + str(args[1]))
elif opcode == "vldr.32":
print("float " + str(args[0]) + " = " + str(args[1]))
elif opcode == "vldr.64":
print("double " + str(args[0]) + " = " + str(args[1]))
elif opcode == "ldrb":
print("char " + str(args[0]) + " = " + str(args[1]))
'''
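
# --- Hedged usage sketch (not from the original repo; the instruction strings
# below are made-up examples). Shows how the helpers above turn an ARM
# compare/branch pair into a C-style condition string.
if __name__ == "__main__":
    example_cmp = "\tcmp r0, #10"
    example_branch = "\tbge .L2"
    assert getOpcode(example_cmp) == "cmp"
    assert getArgs(example_cmp) == ["r0", "10"]
    assert getComparison(example_cmp, example_branch) == "r0 >= 10"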
| 3.234375
| 3
|
examples/geometry/09_projection_matrix_full_CT.py
|
BAMresearch/ctsimu-toolbox
| 0
|
12774894
|
from ctsimu.geometry import *
# Set up a quick CT geometry:
myCT = Geometry()
myCT.stage.center.x = 250 # SOD
myCT.detector.center.x = 800 # SDD
# Set the detector size:
myCT.detector.setSize(
pixelsU = 2000,
pixelsV = 1000,
pitchU = 0.2,
pitchV = 0.2)
myCT.update() # signals that we made manual changes
myCT.store() # backups the initial configuration
# Scan configuration:
projections = 3000 # number of projections or angular steps
scan_range = 360.0 # degrees. One full CT rotation.
# We assume that the projections are stored in single TIFF image files,
# sequentially numbered with four digits, starting at "img_0000.tif".
projectionFilename = "img_{:04d}.tif" # for openCT
projectionFilePattern = "img_%04d.tif" # for CERA
# The following two lists will store the projection matrices
# for openCT and for CERA:
matrices_openCT = []
matrices_CERA = []
# For openCT, we also need to create a list of projection file names:
projectionFilenames = []
# Loop over each frame:
for p in range(projections):
# Restore the initial configuration from the backup,
# i.e. the situation before the stage was rotated:
myCT.restore()
# Rotate the stage to its current angle:
current_angle = float(p) * float(scan_range) / float(projections)
myCT.stage.rotateAroundW(angle = deg2rad(current_angle))
myCT.update()
# Calculate a projection matrix for this frame:
P_openCT = myCT.projectionMatrix(mode="openCT")
P_CERA = myCT.projectionMatrix(mode="CERA")
# Add to list of projection matrices:
matrices_openCT.append(P_openCT)
matrices_CERA.append(P_CERA)
# Store the current projection filename for openCT:
projectionFilenames.append(projectionFilename.format(p))
# openCT configuration:
# ----------------------
# We need the bounding box dimensions of the reconstruction volume
# in mm:
voxelSize = 0.0625
bounding_box_x = voxelSize * myCT.detector.pixelsU
bounding_box_y = voxelSize * myCT.detector.pixelsU
bounding_box_z = voxelSize * myCT.detector.pixelsV
# Write the openCT configuration file, including the projection matrices:
writeOpenCTFile(
geo=myCT,
totalAngle=scan_range,
boundingBoxX=bounding_box_x,
boundingBoxY=bounding_box_y,
boundingBoxZ=bounding_box_z,
matrices=matrices_openCT,
volumename="recon_openCT",
filename="recon_openCT.json",
projectionFilenames=projectionFilenames
)
# CERA configuration:
# -------------------
writeCERAconfig(
geo=myCT,
totalAngle=scan_range,
projectionFilePattern=projectionFilePattern,
matrices=matrices_CERA,
basename="recon_CERA",
voxelsX=myCT.detector.pixelsU,
voxelsY=myCT.detector.pixelsU,
voxelsZ=myCT.detector.pixelsV,
i0max=44000 # the average free-beam intensity
)
| 2.40625
| 2
|
inselect/lib/templates/__init__.py
|
NaturalHistoryMuseum/inselect
| 128
|
12774895
|
"""Metadata templates
"""
| 0.9375
| 1
|
data_structures/sets/quick_find_union_find.py
|
vinta/fuck-coding-interviews
| 590
|
12774896
|
# coding: utf-8
"""
Union-Find (Disjoint Set)
https://en.wikipedia.org/wiki/Disjoint-set_data_structure
"""
class QuickFindUnionFind:
def __init__(self, union_pairs=()):
self.num_groups = 0
self.auto_increment_id = 1
self.element_groups = {
# element: group_id,
}
for p, q in union_pairs:
self.union(p, q)
def __len__(self):
return self.num_groups
# O(1)
def make_group(self, element):
# Initially, every element is in its own group which contains only itself.
group_id = self.element_groups.get(element)
if group_id is None:
# Group id could be arbitrary as long as each group has an unique one.
group_id = self.auto_increment_id
self.element_groups[element] = group_id
self.num_groups += 1
self.auto_increment_id += 1
return group_id
# O(1)
def find(self, p):
try:
return self.element_groups[p]
except KeyError:
# We implicitly create a new group for the new element `p`.
return self.make_group(p)
# O(n)
def union(self, p, q):
p_group_id = self.find(p)
q_group_id = self.find(q)
if p_group_id != q_group_id:
for element, group_id in self.element_groups.items():
# Merge p into q.
if group_id == p_group_id:
self.element_groups[element] = q_group_id
self.num_groups -= 1
# O(1)
def is_connected(self, p, q):
return self.find(p) == self.find(q)
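
# --- Hedged usage sketch (illustrative only; not part of the original file) --
if __name__ == '__main__':
    uf = QuickFindUnionFind(union_pairs=[(1, 2), (2, 3)])
    assert uf.is_connected(1, 3)       # 1-2 and 2-3 imply 1-3
    assert not uf.is_connected(1, 4)   # find() quietly gives 4 its own group
    uf.union(3, 4)
    assert uf.is_connected(1, 4)
    assert len(uf) == 1                # everything merged into one group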
| 3.625
| 4
|
db_api/apps.py
|
constantine7cd/database-coursework
| 6
|
12774897
|
<gh_stars>1-10
from django.apps import AppConfig
class DbApiConfig(AppConfig):
name = 'db_api'
| 1.203125
| 1
|
ogl/shader.py
|
flintforge/Aris
| 0
|
12774898
|
<filename>ogl/shader.py
'''
ARIS
Author: 𝓟𝓱𝓲𝓵.𝓔𝓼𝓽𝓲𝓿𝓪𝓵 @ 𝓕𝓻𝓮𝓮.𝓯𝓻
Date:<2018-05-18 15:52:43>
Released under the MIT License
'''
from OpenGL.GL import *
from shadercompiler import ShaderCompiler
from ctypes import sizeof, c_float, c_void_p, c_uint
import debuglog
log = debuglog.init(__name__)
# goes in the c_size package
uint_size = sizeof(c_uint)
float_size = sizeof(c_float)
""" Important :
the order of attributes in the shader source matters.
The attributes MUST be declared in the same
order as the VBO packing occured :
index are automatically bound to location
example :
vertex_offset = c_void_p(0 * float_size)
tex_coord_offset = c_void_p(3 * float_size)
normal_offset = c_void_p(6 * float_size)
color_offset = c_void_p(9 * float_size)
"""
record_len = 12 * float_size
# used for attributes locations
var_size = {
'vec2': (2, float_size),
'vec3': (3, float_size),
'mat3': (9, float_size),
'mat4': (16, float_size)
}
def typesize(type):
var = var_size[type]
return var[0] * var[1]
class Shader():
""" Holds attribute, autobound at initialization and activation
the order of declaration of the variables in the shader *matters*
They should stick to the same order as the buffer data
the general rule of thumb is V,T,N,C
"""
Func = {
'bool': glUniform1i,
'int': glUniform1i,
'float': glUniform1f,
'sampler2D': glUniform1i,
'sampler3D': glUniform1i,
'samplerCube': glUniform1i,
'vec2': glUniform2f,
'ivec2': glUniform2i,
'vec3': glUniform3f,
'mat4': glUniformMatrix4fv
}
def __init__(self, vsfile, fsfile):
self.vsfile = vsfile
self.fsfile = fsfile
self.compile()
def compile(self):
try:
self.program, variables = ShaderCompiler.compile(
self.vsfile, self.fsfile)
except Exception as e:
# need a default red shader
log.error(e)
return None
''' bind uniforms and attributes '''
self.uniforms = dict((k,
(Shader.Func[v[1]],
glGetUniformLocation(self.program, k))
)
for (k, v) in variables.items() if v[0] == 'uniform')
self.attribs = [
(k, v)
for (k, v) in variables.items() if v[0] == 'attribute']
self.loc_attrib = {}
self.stride = 0
offset = 0
log.info('compiling ' + self.vsfile + self.fsfile)
for (i, (k, var)) in enumerate(self.attribs):
# var hold (uniform|attribute,type,name,valueset (-1))
log.debug('%i %s %s' % (i, k, var))
size = var_size[var[1]]
glBindAttribLocation(self.program, i, k)
# var => ( i=location, 2|3, offset)
self.loc_attrib[k] = (i, size[1], c_void_p(offset)) # loc[ ]
offset += size[0] * size[1] # 3*float_size
self.stride = offset
self.enableAttributes()
''' update file and recompile '''
def compileVS(self, vsfile):
self.vsfile = vsfile
self.compile()
def compileFS(self, fsfile):
self.fsfile = fsfile
self.compile()
def use(self):
glUseProgram(self.program)
def __enter__(self):
'''Use the shader'''
glUseProgram(self.program)
""" to reconsider when we just switch program. test it now"""
def __exit__(self, exc_type, exc_value, traceback):
'''Stop using the shader'''
glUseProgram(0)
def __setitem__(self, var, value):
""" the called function might accept multiple arguments
thus they need to be passed as arrays
program['afloat'] = [f]
program['avec2'] = [x,y]
program['amat4'] = [1,Cmajor,mat]
"""
item = self.uniforms[var]
item[0](item[1], *value)
def enableAttributes(self):
""" enable vertex arrays
this can comes at a cost to flexibilty,
but we could always give the list of attributes
to exclusively activate
"""
for attrib, var in self.attribs:
glEnableVertexAttribArray(self.loc_attrib[attrib][0])
def activateAttributes(self):
# assume only 32bits float for now...
for (attrib, loc) in self.loc_attrib.items():
glVertexAttribPointer(
loc[0], loc[1], GL_FLOAT, False, self.stride, loc[2])
| 2.140625
| 2
|
a03_rakhimovb.py
|
2020-Spring-CSC-226/a03-master
| 0
|
12774899
|
######################################################################
# Author: <NAME>
# Username: rakhimovb
# Assignment: A03: A Pair of Fully Functional Gitty Psychedelic Robotic Turtles
######################################################################
import turtle
def draw_rectangle(t, h, c):
"""
This function draws a rectangle
:param t: turtle name
:param h: height of the rectangle
:param c: turtle color
:return:
"""
for i in range(2):
t.color(c)
t.begin_fill()
t.forward(h)
t.left(90)
t.forward(480)
t.left(90)
t.end_fill()
def draw_flag(t):
"""
This function draws two third of the flag
:param t: turtle name
:return:
"""
for i in ["#6fff01", "white"]:
draw_rectangle(t, 80, i)
t.fd(-15)
draw_rectangle(t, 15, "red")
t.fd(-80)
def moon(t):
"""
This function draws a moon on the top left corner of the flag
:param t: turtle name
:return:
"""
t.begin_fill()
t.circle(20, 180)
t.circle(20, -130)
t.end_fill()
def change_pos(t, x, y):
"""
This function changes the position of the turtle
:param t: turtle name
:param x: x coordinate
:param y: y coordinate
:return:
"""
t.penup()
t.setpos(x, y)
t.pendown()
def star_line(t, n, y):
"""
This function draws one line of stars in front of the moon
:param t: turtle name
:param n: number of stars on the line
:param y: y coordinate to move the stars
:return:
"""
x = -115
for b in range(n):
t.begin_fill()
for i in range(5):
t.fd(10)
t.right(144)
change_pos(t, x, y)
x = x - 15
t.end_fill()
def draw_stars(t):
"""
This function draws three lines of stars with different number of stars in each line
:param t: turtle name
:return:
"""
y = 110
n = 3
for i in [95, 80, 65]:
star_line(t, n, y)
change_pos(t, -100, i)
y = y - 15
n = n + 1
def main():
wn = turtle.Screen()
wn.bgpic("samarkand-196923_1920.png")
ttl1 = turtle.Turtle()
change_pos(ttl1, -250, -50) # Change position of the turtle to start drawing
ttl1.setheading(-90)
draw_flag(ttl1) # Draw the whole flag
draw_rectangle(ttl1, 80, "#0abeff")
ttl1.color("white") # Draw a moon in white on the top left corner of the flag
ttl1.pensize(3)
change_pos(ttl1, -200, 115)
ttl1.right(80)
moon(ttl1)
change_pos(ttl1, -100, 110) # Draw stars in front of the moon
ttl1.pensize(1)
ttl1.right(170)
draw_stars(ttl1)
change_pos(ttl1, -250, -210) # Write "UZBEKISTAN" under the flag
ttl1.color("#ff1100")
ttl1.write("UZBEKISTAN", font=("Blackadder ITC", 45, "normal"))
wn.exitonclick()
main()
| 3.953125
| 4
|
fabfile.py
|
khamidou/kite
| 136
|
12774900
|
<filename>fabfile.py
# fabfile for update and deploy
# it's necessary to specify an host
from fabric.api import *
from fabric.contrib.project import rsync_project
from fabric.contrib.files import upload_template
from setup_config import *
PACKAGES = ('rsync', 'puppet')
def update_sources():
rsync_project("~", "../kite", exclude=[".git/", "*.swp", "*.pyc"])
def provision():
cmd = """FACTER_server_name="%s" && export FACTER_server_name && FACTER_user_home_dir=$HOME && export FACTER_user_home_dir && puppet apply $HOME/kite/manifests/server.pp --modulepath=$HOME/kite/puppet_modules""" % env.hosts[0]
sudo(cmd)
def update():
update_sources()
provision()
def setup():
sudo("apt-get update")
for package in PACKAGES:
sudo('apt-get -y install %s' % package)
update()
def tighten():
local("ssh-copy-id %s@%s" % (env.user, env.hosts[0]))
sudo("puppet apply $HOME/kite/manifests/sshd.pp --modulepath=$HOME/kite/puppet_modules")
| 2.078125
| 2
|
apps/tp/mdtp_strategy.py
|
yt7589/iching
| 32
|
12774901
|
<reponame>yt7589/iching
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class MdtpStrategy(object):
def __init__(self):
self.name = 'apps.tp.MdtpStrategy'
self.stock_pool = [
'600000','600010','600015','600016','600018',
'600028','600030','600036','600048','600050',
'600104','600109','600111','600150','600518',
'600519','600585','600637','600795','600837',
'600887','600893','600999','601006',
'601088','601166','601169','601186',
'601318','601328','601390',
'601398','601601','601628','601668',
'601766','601857',
'601988','601989','601998']
self.tpc = {}
def startup(self):
#self.evaluate_tp()
self.tp_strategy()
def tp_strategy(self):
sh = pd.read_csv('./data/sh50p.csv', index_col='Trddt')
sh.index = pd.to_datetime(sh.index)
form_start = '2014-01-01'
form_end = '2015-01-01'
sh_form = sh[form_start : form_end]
stock_x = '601988'
stock_y = '600000'
p_x = sh_form[stock_x]
p_y = sh_form[stock_y]
log_p_x = np.log(p_x)
log_p_y = np.log(p_y)
r_x = log_p_x.diff()[1:]
r_y = log_p_y.diff()[1:]
hat_p_x = (1 + r_x).cumprod()
hat_p_y = (1 + r_y).cumprod()
tp_ssd = hat_p_y - hat_p_x
tp_ssd_mean = np.mean(tp_ssd)
tp_ssd_std = np.std(tp_ssd)
threshold_val = 1.2
threshold_up = tp_ssd_mean + threshold_val * tp_ssd_std
threshold_down = tp_ssd_mean - threshold_val * tp_ssd_std
#
plt.title('trading pair')
tp_ssd.plot()
plt.axhline(y=tp_ssd_mean, color='red')
plt.axhline(y=threshold_up, color='blue')
plt.axhline(y=threshold_down, color='blue')
plt.show()
#
trade_start = '2015-01-01'
trade_end = '2015-06-30'
p_x_t = sh.loc[trade_start:trade_end, '601988']
p_y_t = sh.loc[trade_start:trade_end, '600000']
trade_spread = self.calculate_spread(p_y_t, p_x_t)
print(trade_spread.describe())
trade_spread.plot()
plt.title('real trade data')
plt.axhline(y=tp_ssd_mean, color='red')
plt.axhline(y=threshold_up, color='blue')
plt.axhline(y=threshold_down, color='blue')
plt.show()
def calculate_spread(self, x, y):
r_x = (x - x.shift(1)) / x.shift(1)[1:]
r_y = (y - y.shift(1)) / y.shift(1)[1:]
hat_p_x = (1 + r_x).cumprod()
hat_p_y = (1 + r_y).cumprod()
return hat_p_x - hat_p_y
def evaluate_tp(self):
sh = pd.read_csv('./data/sh50p.csv', index_col='Trddt')
sh.index = pd.to_datetime(sh.index)
form_start = '2014-01-01'
form_end = '2015-01-01'
sh_form = sh[form_start : form_end]
tpc = {}
sp_len = len(self.stock_pool)
for i in range(sp_len):
for j in range(i+1, sp_len):
tpc['{0}-{1}'.format(self.stock_pool[i],
self.stock_pool[j])] = self.trading_pair(
sh_form, self.stock_pool[i],
self.stock_pool[j]
)
self.tpc = sorted(tpc.items(), key=lambda x: x[1])
for itr in self.tpc:
print('{0}: {1}'.format(itr[0], itr[1]))
def trading_pair(self, sh_form, stock_x, stock_y):
        # Bank of China share prices
PAf = sh_form[stock_x]
        # Shanghai Pudong Development Bank share prices
PBf = sh_form[stock_y]
        # length of the formation period
pairf = pd.concat([PAf, PBf], axis=1)
form_len = len(pairf)
return self.calculate_SSD(PAf, PBf)
def calculate_SSD(self, price_x, price_y):
if price_x is None or price_y is None:
            print('Missing price series')
return
r_x = (price_x - price_x.shift(1)) / price_x.shift(1) [1:]
r_y = (price_y - price_y.shift(1)) / price_y.shift(1) [1:]
#hat_p_x = (r_x + 1).cumsum()
hat_p_x = (r_x + 1).cumprod()
#hat_p_y = (r_y + 1).cumsum()
hat_p_y = (r_y + 1).cumprod()
SSD = np.sum( (hat_p_x - hat_p_y)**2 )
return SSD
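
# --- Hedged usage sketch (toy price series; illustrative only) ---------------
if __name__ == '__main__':
    strategy = MdtpStrategy()
    p_x = pd.Series([10.0, 10.5, 11.0, 10.8])
    p_y = pd.Series([20.0, 20.8, 21.9, 21.5])
    # SSD between the two cumulative-return paths; a smaller value means the
    # two series track each other more closely, i.e. a better trading pair.
    print(strategy.calculate_SSD(p_x, p_y))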
| 2.140625
| 2
|
LeetCode/next_permutation.py
|
milkrong/Basic-Python-DS-Algs
| 0
|
12774902
|
<filename>LeetCode/next_permutation.py
class Solution(object):
def nextPermutation(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
if not nums: return None
i = len(nums)-1
j = -1 # j is set to -1 for case `4321`, so need to reverse all in following step
while i > 0:
if nums[i-1] < nums[i]: # first one violates the trend
j = i-1
break
i-=1
for i in xrange(len(nums)-1, -1, -1):
if nums[i] > nums[j]: #
nums[i], nums[j] = nums[j], nums[i] # swap position
nums[j+1:] = sorted(nums[j+1:]) # sort rest
return
| 3.4375
| 3
|
randompy/__init__.py
|
brennerm/randompy
| 7
|
12774903
|
<gh_stars>1-10
import random
import string as st
import datetime as dt
def string(length, chars='', uppercase=True, lowercase=True, digits=True):
if chars == '':
chars += st.ascii_uppercase if uppercase else ''
chars += st.ascii_lowercase if lowercase else ''
chars += st.digits if digits else ''
return ''.join(random.choice(chars) for _ in range(length))
def integer(minimum, maximum, even=None):
if minimum > maximum:
raise ValueError('Minimum must not be bigger than maximum')
def check_value(val):
if even is True:
if (val % 2) != 0:
return False
if even is False:
if not (val & 0x1):
return False
return True
while True:
value = random.randint(minimum, maximum)
if check_value(value):
return value
def array(source, selection_size=1, duplicates=True):
if not duplicates and len(source) < selection_size:
raise ValueError('unable to select ' + str(selection_size) + ' elements from a list of size ' + str(len(source)))
selected_elements = []
for i in range(selection_size):
selected_element = random.choice(source)
selected_elements.append(selected_element)
if not duplicates:
source.remove(selected_element)
return selected_elements
def datetime(start=dt.datetime(year=1970, month=1, day=1), end=dt.datetime(year=2050, month=1, day=1)):
delta = end - start
delta_microseconds = (delta.days * 86400000000) + (delta.seconds * 1000000) + delta.microseconds
microseconds = integer(0, delta_microseconds)
return start + dt.timedelta(microseconds=microseconds)
def mail(length_local=7, length_domain=5, domain_ending='com'):
if length_local > 64:
raise ValueError('local part must not be longer than 64 characters')
if (length_local + length_domain + len(domain_ending)) > 254:
raise ValueError('mail address must not be longer than 254 characters')
return string(length_local) + '@' + string(length_domain) + '.' + domain_ending
def mac_address(prefix=None):
mac = prefix.split(':') if prefix else list()
while len(mac) < 6:
mac.append('{:02x}'.format(integer(0, 255)))
return ':'.join(mac)
def ipv4address():
return '.'.join([str(integer(0, 255)) for _ in range(4)])
def ipv6address():
return ':'.join('{:04x}'.format(integer(0, 65535)) for _ in range(8))
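
# --- Hedged usage sketch (illustrative only; not part of the original module) --
if __name__ == '__main__':
    print(string(8, digits=False))                      # 8 random letters
    print(integer(0, 100, even=True))                   # random even int in [0, 100]
    print(array([1, 2, 3, 4], selection_size=2, duplicates=False))
    print(mail(length_local=5, length_domain=4, domain_ending='org'))
    print(mac_address(prefix='00:1a'))
    print(datetime())                                   # random datetime in [1970, 2050)
    print(ipv4address(), ipv6address())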
| 2.890625
| 3
|
character_functions.py
|
Aearsears/mapleai
| 0
|
12774904
|
import numpy as np
import time
import keyboard
import math
import threading
def attack_mob(boxes,classes):
"""
    receives the player box and the mob box, moves the player towards the mob, and then attacks it
"""
#midpoints X1 and X2
player, closestmob = calculate_distance(boxes,classes)
#vertical movement y axis
if player[0]<closestmob[0]:
keyboard.teledown()
else:
keyboard.teleup()
# horizontal movement, i messed up the coordinates while creating the tuple index 1 is x, index 0 is y
if player[1]<closestmob[1]:
#moveleft and attack
print("player coord:"+str(player[0])+" "+str(player[1]))
print("\n mob coord:"+str(closestmob[0])+" "+str(closestmob[1]))
keyboard.moveRight()
keyboard.moveRight()
# keyboard.moveRight()
keyboard.attackFiveTimes()
keyboard.loot()
else:
# mob is to the right and attack
print("player coord:"+str(player[0])+" "+str(player[1]))
print("\n mob coord:"+str(closestmob[0])+" "+str(closestmob[1]))
keyboard.moveLeft()
keyboard.moveLeft()
# keyboard.moveLeft()
keyboard.attackFiveTimes()
keyboard.loot()
def filter(detections):
"""
takes first five detections returns boxes,scores and classes as numpy arrays
"""
#get first five predictions
boxes = detections['detection_boxes'][0].numpy()[:5]
scores = detections['detection_scores'][0].numpy()[:5]
classes = (detections['detection_classes'][0].numpy() + 1).astype(int)[:5]
isTherePlayer = False
if 2 in classes[:]:
isTherePlayer = True
return boxes, scores, classes, isTherePlayer
def calculate_distance(boxes,classes):
"""
calculates the distance between the player and the three mobs, and returns the mob with the shortest distance
"""
#get the index of the player, returns a numpy array containing the index
itemindex = np.where(classes==2)
#get the midpoints, list of tuples
midpoints =[]
for i in range(np.shape(boxes)[0]):
midpoints.append(getBoxesMidpoint(boxes[i]))
#calculate the distance between the player and the mobs
distance=np.zeros(5,dtype=np.float32)
for i in range(np.shape(boxes)[0]):
if i == itemindex[0][0]:
distance[i]= 99999.0
else:
distance[i]=distance_2points(midpoints[i],midpoints[itemindex[0][0]])
#get the min index, and return the player coord and mob coord.
minindex = np.argmin(distance)
return midpoints[itemindex[0][0]],midpoints[minindex]
def getBoxesMidpoint(box):
"""
takes in normalized coordinates of the 800x600 screen. coordinates are xmin,ymin,xmax,ymax
returns a tuple of the midpoint
"""
#denormalize them
normalized_coord = np.array([box[0]*806,box[1]*629,box[2]*806,box[3]*629],dtype=np.float32)
#offset from the origin
return (((normalized_coord[2]-normalized_coord[0])/2)+normalized_coord[0],((((normalized_coord[3]-normalized_coord[1])/2))+normalized_coord[1]))
def distance_2points(pt1,pt2):
"""
returns distance between two points pt1(x1,y1),pt2(x2,y2). points as tuples.
"""
return math.hypot(pt2[0]-pt1[0], pt2[1]-pt1[1])
def autobuff(stop_event):
starttime = time.time()
while not stop_event.wait(1):
print("Buffing!")
keyboard.buff()
keyboard.buff()
keyboard.buff()
time.sleep(65.0 - ((time.time() - starttime) % 65.0))
def autocc(stop_event):
starttime = time.time()
while not stop_event.wait(1):
print("CC'ing!")
keyboard.cc()
time.sleep(90.0 - ((time.time() - starttime) % 90.0))
if __name__ == "__main__":
pass
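    # --- Hedged usage sketch (made-up normalized boxes; illustrative only).
    # Only the pure geometry helpers are exercised here, since attack_mob()
    # needs the live game window and the project's custom `keyboard` module.
    box_a = np.array([0.10, 0.10, 0.20, 0.20], dtype=np.float32)
    box_b = np.array([0.40, 0.40, 0.60, 0.60], dtype=np.float32)
    mid_a = getBoxesMidpoint(box_a)
    mid_b = getBoxesMidpoint(box_b)
    print("distance between box centres:", distance_2points(mid_a, mid_b))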
| 2.921875
| 3
|
cryptex/test/api_mock.py
|
coink/cryptex
| 1
|
12774905
|
import os
import io
import httpretty
class APIMock():
"""
Responses should be a {method: filename} map
"""
def __init__(self, mock_url, mock_dir, responses):
self.mock_url = mock_url
self.responses = responses
self.mock_dir = mock_dir
def request_callback(self, request, uri, headers):
method = request.parsed_body[u'method'][0]
filename = self.responses[method]
with io.open(os.path.join(self.mock_dir, filename), 'r') as f:
contents = f.read()
return (200, headers, contents)
def __enter__(self):
httpretty.enable()
httpretty.register_uri(httpretty.POST, self.mock_url,
body=self.request_callback)
def __exit__(self, type, value, traceback):
httpretty.disable()
httpretty.reset()
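
# --- Hedged usage sketch (illustrative test; the URL and method name are
# made up, not part of the original file) -------------------------------------
if __name__ == '__main__':
    import tempfile
    import requests

    with tempfile.TemporaryDirectory() as mock_dir:
        with io.open(os.path.join(mock_dir, 'getinfo.json'), 'w') as f:
            f.write(u'{"success": 1}')
        responses = {'getinfo': 'getinfo.json'}
        with APIMock('https://exchange.example/api', mock_dir, responses):
            r = requests.post('https://exchange.example/api',
                              data={'method': 'getinfo'})
            assert r.json() == {'success': 1}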
| 2.765625
| 3
|
py/liquid/vpn/client.py
|
hoover/liquid-setup
| 3
|
12774906
|
<filename>py/liquid/vpn/client.py
import sys
import re
import json
import subprocess
from . import ca
with open('/var/lib/liquid/conf/options.json', encoding='utf8') as f:
OPTIONS = json.load(f)
CLIENT_OVPN_TEMPLATE = """\
client
dev tun
proto udp
remote {address} {port}
resolv-retry infinite
nobind
user nobody
group nogroup
persist-key
persist-tun
remote-cert-tls server
cipher AES-256-CBC
auth SHA256
comp-lzo
verb 3
key-direction 1
<ca>
{ca_cert}
</ca>
<cert>
{client_cert}
</cert>
<key>
{client_key}
</key>
<tls-auth>
{ta_key}
</tls-auth>
"""
def read_key(name):
with (ca.CA_KEYS / name).open(encoding='utf8') as f:
return f.read().strip()
def generate_config(name):
vpn_server_address = OPTIONS['vpn']['server']['address']
return CLIENT_OVPN_TEMPLATE.format(
address=vpn_server_address['address'],
port=vpn_server_address['port'],
ca_cert=read_key('ca.crt'),
client_cert=read_key('client-{}.crt'.format(name)),
client_key=read_key('client-{}.key'.format(name)),
ta_key=read_key('ta.key'),
)
def run(cmd, encoding='latin1', **kwargs):
print('+', ' '.join(cmd))
output = subprocess.check_output(cmd, **kwargs)
return output.decode(encoding).strip()
def create(id):
env = ca.easyrsa_env()
run(['./pkitool', 'client-{}'.format(id)], cwd=str(ca.CA), env=env)
def get_key_map():
rv = {}
with (ca.CA_KEYS / 'index.txt').open(encoding='utf8') as f:
for line in f:
cols = line.split('\t')
serial = cols[3]
match = re.search(r'/CN=client-(?P<id>\d+)/', cols[5])
if match:
id = match.group('id')
rv[serial] = id
return rv
def get_revoked_serials():
crl_pem = str(ca.CA_KEYS / 'crl.pem')
txt = run(['openssl', 'crl', '-in', crl_pem, '-text'])
skip = True
for line in txt.splitlines():
if line == 'Revoked Certificates:':
skip = False
continue
if skip:
continue
if not line.startswith(' '):
break
match = re.match(r'^\s+Serial Number:\s+(?P<serial>\S+)$', line)
if match:
yield match.group('serial')
def get_keys():
rv = set()
for item in ca.CA_KEYS.iterdir():
match = re.match(r'^client-(?P<id>.+)\.crt$', item.name)
if match:
rv.add(match.group('id'))
return rv
def get_revoked():
key_map = get_key_map()
rv = set()
for serial in get_revoked_serials():
rv.add(key_map[serial])
return rv
def revoke(id):
env = ca.easyrsa_env()
try:
run(['./revoke-full', 'client-{}'.format(id)], cwd=str(ca.CA), env=env)
except subprocess.CalledProcessError:
# yes, `revoke-full` returns code 2 when it does its job successfully.
# https://is.gd/openvpn_revoke
pass
if id not in get_revoked():
raise RuntimeError('The key {} was not revoked'.format(id))
ca.copy_openvpn_keys()
def sync_keys(vars):
ca_keys = get_keys()
ca_revoked = get_revoked()
liquidcore_keys = (
vars
.get('liquid_vpn', {})
.get('server', {})
.get('client_keys', [])
)
for key in liquidcore_keys:
(id, revoked) = (key['id'], key['revoked'])
if id not in ca_keys:
print('creating vpn client key', id)
create(id)
if revoked and id not in ca_revoked:
print('revoking vpn client key', id)
revoke(id)
| 2.203125
| 2
|
Src/query1_svr.py
|
Mohib-hub/CSO-SBoM
| 2
|
12774907
|
## Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
import sys
from load_db import load_graph
from load_db import intermediate
from load_db import svr_pkgs
from load_db import svr_cve_pkgs
from load_db import pkg_cve_supr
from load_db import pkg_cve_cvss_threshold
from load_db import pkgs_with_no_cve
from sbom_helpers import get_gdbpath
from sbom_helpers import mypprint
from sbom_helpers import validate_file_access
if( len(sys.argv) != 3 ):
print("There should be two arguments")
print("first arg is date eg 2019.03.16")
print("2nd arg is server eg 84a421cd887f11e887244dfe08192208")
exit()
else:
d = sys.argv[1]
svr = sys.argv[2]
gfile = get_gdbpath() + d + '.gdb'
#validate gdb file exists
validate_file_access([gfile])
graphdata = load_graph(gfile)
print("+++ file {0} ".format(gfile))
print("++++ svr {0}".format(svr))
svr_grp = intermediate(graphdata, 'type_group', svr)
print("++++ grp {0}".format(svr_grp))
hostname = intermediate(graphdata, 'type_hostname', svr)
print("++++ hostname {0}".format(hostname))
(num_pkg_vers,
num_pkgs,
pkg_ver_dict,
pkg_multiver_dict) = svr_pkgs(graphdata, svr)
print("+++++ {0} package/versions".format(num_pkg_vers))
print("+++++ {0} packages".format(num_pkgs))
mypprint(pkg_multiver_dict)
## print suppressed cves
print("suppressed cve's:")
scp = svr_cve_pkgs(graphdata, svr)
sup_cves = pkg_cve_supr(scp)
mypprint(sup_cves)
## print bins of cvss
no_cves = len(pkgs_with_no_cve(scp))
print("{0} packages with no cve's:".format(no_cves))
ten_cves = pkg_cve_cvss_threshold(scp, 10, 100)
l_ten_cves = len(ten_cves)
print("{0} packages with worst cve of cvss=10:".format(l_ten_cves))
mypprint(ten_cves)
seven_cves = pkg_cve_cvss_threshold(scp, 7, 10)
l_seven_cves = len(seven_cves)
print("{0} packages with cvss <10 and >=7".format(l_seven_cves))
mypprint(seven_cves)
five_cves = pkg_cve_cvss_threshold(scp, 5, 7)
l_five_cves = len(five_cves)
print("{0} packages with cvss <7 and >=5".format(l_five_cves))
mypprint(five_cves)
low_cves = pkg_cve_cvss_threshold(scp, 0, 5)
l_low_cves = len(low_cves)
print("{0} packages with cvss <5 and >=0".format(l_low_cves))
mypprint(low_cves)
| 2.265625
| 2
|
camos/plugins/burstclean/burstclean.py
|
danilexn/camos
| 1
|
12774908
|
# -*- coding: utf-8 -*-
# Created on Sat Jun 05 2021
# Last modified on Mon Jun 07 2021
# Copyright (c) CaMOS Development Team. All Rights Reserved.
# Distributed under a MIT License. See LICENSE for more info.
import numpy as np
from camos.tasks.analysis import Analysis
from camos.utils.generategui import NumericInput, DatasetInput
from camos.utils.units import get_time
class BurstClean(Analysis):
analysis_name = "Clean Events"
required = ["dataset"]
def __init__(self, *args, **kwargs):
super(BurstClean, self).__init__(*args, **kwargs)
def _run(
self,
duration: NumericInput("Total Duration ({})".format(get_time()), 100),
_filter_min: NumericInput("Minimum Events/{}".format(get_time()), 1),
_filter_max: NumericInput("Maximum Events/{}".format(get_time()), 50),
_i_data: DatasetInput("Source dataset", 0),
):
output_type = [("CellID", "int"), ("Active", "float")]
# data should be provided in format summary (active events)
data = self.signal.data[_i_data]
self.dataname = self.signal.names[_i_data]
if not ("Active" in data.dtype.names):
raise ValueError("The dataset does not have the expected shape")
# Calculates the MFR, could be given as an input?
unique, counts = np.unique(data[:]["CellID"], return_counts=True)
active = data[:]["Active"]
IDs = data[:]["CellID"]
IDs_include = unique[
np.where(
(counts >= _filter_min * duration) & (counts <= _filter_max * duration)
)
]
idx = np.isin(IDs, IDs_include)
active_filter = active[idx]
IDs_filter = IDs[idx]
# Calculate mean firing rate per cell
self.output = np.zeros(shape=(len(active_filter), 1), dtype=output_type)
self.output[:]["CellID"] = IDs_filter.reshape(-1, 1)
self.output[:]["Active"] = active_filter.reshape(-1, 1)
self.output = self.output[1:]
self.foutput = self.output
# self.notify(
# "{}: Events Before = {}; Events After = {}".format(
# self.analysis_name, len(data), len(self.output)
# ),
# "INFO",
# )
def connectComponents(self, fields):
# Changing the input data to update the duration
fields["_i_data"].connect(
lambda x: fields["duration"].widget.setText(
str(int(self.signal.properties[x]["duration"]))
)
)
| 2.28125
| 2
|
tests/python/unittest/test_gluon_model_zoo.py
|
zt706/-mxnet_for_ssd
| 0
|
12774909
|
from __future__ import print_function
import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.model_zoo.custom_layers import HybridConcurrent, Identity
from mxnet.gluon.model_zoo.vision import get_model
def test_concurrent():
model = HybridConcurrent(concat_dim=1)
model.add(nn.Dense(128, activation='tanh', in_units=10))
model.add(nn.Dense(64, activation='tanh', in_units=10))
model.add(nn.Dense(32, in_units=10))
# symbol
x = mx.sym.var('data')
y = model(x)
assert len(y.list_arguments()) == 7
# ndarray
model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
x = model(mx.nd.zeros((32, 10)))
assert x.shape == (32, 224)
x.wait_to_read()
def test_identity():
model = Identity()
x = mx.nd.random_uniform(shape=(128, 33, 64))
mx.test_utils.assert_almost_equal(model(x).asnumpy(),
x.asnumpy())
def test_models():
all_models = ['resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
'vgg11', 'vgg13', 'vgg16', 'vgg19',
'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn',
'alexnet', 'inceptionv3',
'densenet121', 'densenet161', 'densenet169', 'densenet201',
'squeezenet1.0', 'squeezenet1.1']
pretrained_to_test = set(['squeezenet1.1'])
for model_name in all_models:
test_pretrain = model_name in pretrained_to_test
model = get_model(model_name, pretrained=test_pretrain)
data_shape = (7, 3, 224, 224) if 'inception' not in model_name else (7, 3, 299, 299)
print(model)
if not test_pretrain:
model.collect_params().initialize()
model(mx.nd.random_uniform(shape=data_shape))
if __name__ == '__main__':
import nose
nose.runmodule()
| 2.078125
| 2
|
complex_auto/dataloader.py
|
entn-at/cae-invar
| 31
|
12774910
|
"""
Created on April 13, 2018
Edited on July 05, 2019
@author: <NAME> & <NAME>
Sony CSL Paris, France
Institute for Computational Perception, Johannes Kepler University, Linz
Austrian Research Institute for Artificial Intelligence, Vienna
"""
import numpy as np
import librosa
import torch.utils.data as data
import torch
import logging
import PIL
from scipy.signal import get_window
from torchvision.transforms import Resize, ToPILImage, ToTensor, Compose, \
CenterCrop
from complex_auto.util import to_numpy, cached
LOGGER = logging.getLogger(__name__)
def standardize_(ngram):
ngram = ngram - ngram.mean()
std = ngram.std()
if std > 1e-8:
ngram = .1 * ngram / std
return ngram
class Data(object):
def __init__(self, data_x, data_y, standardize=False):
self.data_x = data_x
self.data_y = data_y
def __getitem__(self, index):
return [standardize_(torch.FloatTensor(self.data_x[index])),
standardize_(torch.FloatTensor(self.data_y[index])),
-1, -1, -1]
def __len__(self):
return len(self.data_x)
class DataSampler(object):
def __init__(self, data_x, length_ngram, samples_epoch, standard=True,
shifts=[24, 24], scales=[1., 0], shuffle=True,
transform=(0, 1, 2), emph_onset=0, random_pairs=False):
"""
Returns random ngrams from data, can shift and scale data in two
dimensions
:param data_x: data (2d)
:param length_ngram: length of sampled ngrams
:param samples_epoch: number of samples per epoch
:param standard: if instances should be standardized
:param shifts: 2-tuple, maximal random shifts in two dimensions
:param scales: 2-tuple, maximal random scaling in two dimensions
:param shuffle: instances are returned in random order
:param transform: iterable; which transforms should be applied.
pitch_shift (0), time shift (1), tempo-change (2)
:param emph_onset: onsets are emphasized
:param random_pairs: a pair is sampled using two random (unrelated)
instances
"""
self.data_x = data_x
self.length_ngram = length_ngram
self.samples_epoch = samples_epoch
self.standard = standard
self.max_x = shifts[0]
self.max_y = shifts[1]
self.scale_x = scales[0]
self.scale_y = scales[1]
self.shuffle = shuffle
self.transform = transform
self.emph_onset = emph_onset
self.random_pairs = random_pairs
self.check_lengths()
def check_lengths(self):
delete = []
for i, song in enumerate(self.data_x):
max_ = song.shape[1] - self.length_ngram - self.max_x
if not self.max_x < max_:
print(f"Warning: Song number {i} is too short to be used "
f"with ngram length {self.length_ngram} and maximal "
f"time shift of {self.max_x} (will be ignored)!")
delete.append(i)
self.data_x = [i for j, i in enumerate(self.data_x) if j not in
delete]
def __len__(self):
if not self.shuffle:
return self.get_ngram_count()
return self.samples_epoch
def __getitem__(self, index):
# Transform: pitch_shift (0), time shift (1), tempo-change (2)
if self.transform is None:
# random transform
transform = np.random.randint(0, 3)
else:
transform = np.random.choice(self.transform)
if self.random_pairs:
# song_id, start, end = self.get_random_ngram()
# ngram = self.data_x[song_id][:, start:end].copy()
# song_id, start, end = self.get_random_ngram()
# ngram_trans = self.data_x[song_id][:, start:end].copy()
if np.random.randint(2) == 0:
[ngram, ngram_trans], song_id = self.get_pairs_same_song()
label = -1
transform = -1 # skips transformation codes
else:
song_id, start, end = self.get_ngram_by_idx(index)
ngram = self.data_x[song_id][:, start:end].copy()
elif self.shuffle:
song_id, start, end = self.get_random_ngram()
ngram = self.data_x[song_id][:, start:end].copy()
else:
song_id, start, end = self.get_ngram_by_idx(index)
ngram = self.data_x[song_id][:, start:end].copy()
# Normalization needed for PIL image processing (scale)
ngram -= ngram.min()
if ngram.max() > 1e-6:
ngram /= ngram.max()
assert ngram.shape[1] != 0, f"{start}, {end}," \
f"{self.data_x[song_id].shape[1]}, " \
f"{self.max_x}"
if transform == 1:
if self.max_x == 0:
shiftx = 0
else:
shiftx = np.random.randint(-self.max_x, self.max_x)
ngram_trans = self.trans_time_shift(end, song_id, start,
shiftx)
label = "shiftx" + str(shiftx)
if transform == 0:
if self.max_y == 0:
shifty = 0
else:
shifty = np.random.randint(-self.max_y, self.max_y)
ngram_trans = self.trans_pitch_shift(ngram, shifty)
label = "shifty" + str(shifty)
if transform == 2:
scale_x = 1 + self.scale_x * np.random.rand()
ngram, ngram_trans, minus = self.trans_speed_change(ngram, scale_x)
label = scale_x if not minus else -scale_x
label = "scale" + str(label)
ngram = to_numpy(ngram)
ngram_trans = to_numpy(ngram_trans)
ngram_onset = np.diff(np.concatenate((ngram[:, 0:1], ngram), axis=1),
axis=1)
ngram_trans_onset = np.diff(np.concatenate((ngram_trans[:, 0:1],
ngram_trans), axis=1), axis=1)
ngram_onset[ngram_onset < 0] = 0
ngram_trans_onset[ngram_trans_onset < 0] = 0
ngram = ngram + ngram_onset * self.emph_onset
ngram_trans = ngram_trans + ngram_trans_onset * self.emph_onset
if self.standard:
ngram = self.standardize(ngram)
ngram_trans = self.standardize(ngram_trans)
ngram = torch.FloatTensor(ngram).view(-1)
ngram_trans = torch.FloatTensor(ngram_trans).view(-1)
return ngram+1e-8, ngram_trans+1e-8, transform, song_id, label
def get_ngram_count(self):
count = 0
count_data = len(self.data_x)
for i in range(count_data):
len_data = self.data_x[i].shape[1]
startmin = 2 * self.max_x
startmax = len_data - self.length_ngram - 2 * self.max_x
count += startmax - startmin
return count
def get_ngram_by_idx(self, index):
count = 0
count_data = len(self.data_x)
for i in range(count_data):
len_data = self.data_x[i].shape[1]
startmin = 2 * self.max_x
startmax = len_data - self.length_ngram - 2 * self.max_x
if index >= count and index + startmin < count + startmax:
song_id = i
start = index - count + startmin
break
count += startmax - startmin
end = start + self.length_ngram
return song_id, start, end
def get_random_ngram(self):
count_data = len(self.data_x)
song_id = np.random.randint(0, count_data)
len_data = self.data_x[song_id].shape[1]
start = np.random.randint(self.max_x,
len_data - self.length_ngram - self.max_x)
end = start + self.length_ngram
return song_id, start, end
def get_pairs_same_song(self):
count_data = len(self.data_x)
song_id = np.random.randint(0, count_data)
len_data = self.data_x[song_id].shape[1]
pairs = []
for i in range(2):
start = np.random.randint(2 * self.max_x,
len_data - self.length_ngram - 2 * self.max_x)
end = start + self.length_ngram
ngram = self.data_x[song_id][:, start:end].copy()
pairs.append(ngram)
return pairs, song_id
def trans_speed_change(self, ngram, scale_x):
size1 = ngram.shape[1]
size0 = ngram.shape[0]
new_size_t_x = int(scale_x * size1)
new_size_t_y = ngram.shape[0]
transform_out = Compose([
ToPILImage(),
Resize((new_size_t_y, new_size_t_x),
interpolation=PIL.Image.NEAREST),
CenterCrop((size0, size1)),
ToTensor()
])
ngram_trans = transform_out(torch.FloatTensor(ngram).unsqueeze(0))
minus = False
if np.random.randint(0, 2) == 1:
ngram_ = ngram
ngram = ngram_trans
ngram_trans = ngram_
minus = True
return ngram, ngram_trans, minus
def trans_pitch_shift(self, ngram, shifty):
return to_numpy(self.transp0(torch.FloatTensor(ngram), shifty))
def trans_time_shift(self, end, song_id, start, shiftx):
return self.data_x[song_id][:, start + shiftx:end + shiftx]
def standardize(self, ngram):
ngram = ngram - ngram.mean()
std = ngram.std()
ngram = .1 * ngram / (std + 1e-8)
return ngram
def transp0(self, x, shift):
"""
Transposes axis 0 (zero-based) of x by [shift] steps.
Missing information is padded with zeros.
:param x: the array to transpose
:param shift: the transposition distance
:return: x transposed
"""
if shift == 0:
return x
pad = torch.zeros(abs(shift), x.size(1))
if shift < 0:
return torch.cat([pad, x[:-abs(shift), :]], dim=0)
return torch.cat([x[abs(shift):, :], pad], dim=0)
def transp1(self, x, shift):
"""
Transposes axis 1 (zero-based) of x by [shift] steps.
Missing information is padded with zeros.
:param x: the array to transpose
:param shift: the transposition distance
:return: x transposed
"""
if shift == 0:
return x
pad = torch.zeros(x.size(1), abs(shift))
if shift < 0:
return torch.cat([pad, x[:, :-abs(shift)]], dim=1)
return torch.cat([x[:, abs(shift):], pad], dim=1)
class Signal(data.Dataset):
def __init__(self, filelist, sr="22050", trg_shift=0, block_size=1024,
refresh_cache=False, cache_fn="signal_cache.pyc.bz",
allow_diff_shapes=False, padded=False, random_shift=0,
samples_epoch=1000, window='hann'):
"""
Constructor for 1D signal dataset
:param filelist: list of audio file names (str)
:param sr: desired sample rate
:param trg_shift: target == input shifted by [-trg_shift] steps,
blocks are shortened accordingly
:param block_size: length of one instance in a batch
:param refresh_cache: when True recalculate and save to cache file
when False loads from cache file when available
:param cache_fn: filename of cache file
"""
self.trg_shift = trg_shift
self.block_size = block_size
self.sr = sr
self.allow_diff_shapes = allow_diff_shapes
self.padded = padded
self.random_shift = random_shift
self.window = window
self.samples_epoch = samples_epoch
self.signals = cached(cache_fn, self.load_files, (filelist,),
refresh_cache=refresh_cache)
def __getitem__(self, index):
rand_inst = np.random.randint(len(self.signals))
if self.random_shift > 0:
shift = np.random.randint(-self.random_shift, self.random_shift)
else:
shift = self.trg_shift
rand_pos = np.random.randint(abs(shift),
len(self.signals[rand_inst]) -
abs(shift) - self.block_size)
w = get_window(self.window, self.block_size)
x = self.signals[rand_inst][rand_pos:rand_pos+self.block_size]
y = self.signals[rand_inst][rand_pos+shift:
rand_pos+shift+self.block_size, :]
x = torch.FloatTensor(x.squeeze() * w)
y = torch.FloatTensor(y.squeeze() * w)
x = self.standardize(x)
y = self.standardize(y)
return x, y, -1, -1, -1
def standardize(self, signal):
ngram = signal - signal.mean()
std = ngram.std()
if std > 1e-6:
ngram = ngram / std
else: # prevent empty input
ngram = ngram + 1e-8
return ngram
def __len__(self):
return self.samples_epoch
def load_files(self, filelist):
data_all = []
for file in filelist:
file = file.strip('\n')
print(f"loading file {file}")
signal = librosa.load(file)[0][:, None]
data_all.append(signal)
if len(data_all) == 0:
LOGGER.warning("No data added to Signal Dataset!")
return data_all
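
# --- Hedged usage sketch (random toy data; illustrative only) ----------------
if __name__ == "__main__":
    toy_song = np.random.rand(12, 200).astype(np.float32)
    sampler = DataSampler([toy_song], length_ngram=16, samples_epoch=4,
                          shifts=[4, 4], scales=[0.3, 0])
    ngram, ngram_trans, transform, song_id, label = sampler[0]
    print(ngram.shape, ngram_trans.shape, transform, label)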
| 2.265625
| 2
|
utils/sort_data_by_cumulus.py
|
CONABIO/Sipecam-Kobo-a-Zendro
| 0
|
12774911
|
def sort_data_by_cumulus(data):
"""
Sort data by submitted_by field, which holds
    the cumulus number (or id).
Parameters:
data (list): A list containing the report
data.
Returns:
        (dict): A dict containing the data sorted by the
cumulus id.
"""
sorted_data = {}
    for d in data:
        # Group each record under its cumulus id, held in the "user" field.
        sorted_data.setdefault(d["user"], []).append(d)
return sorted_data
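
# --- Hedged usage sketch (made-up records; illustrative only) ----------------
if __name__ == "__main__":
    reports = [
        {"user": 32, "status": "ok"},
        {"user": 32, "status": "pending"},
        {"user": 95, "status": "ok"},
    ]
    grouped = sort_data_by_cumulus(reports)
    assert sorted(grouped.keys()) == [32, 95]
    assert len(grouped[32]) == 2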
| 3.4375
| 3
|
parseTestSet.py
|
franneck94/Variable-Neighborhood-Search-FLP
| 1
|
12774912
|
import os
import errno
import itertools
directory = 'C:/Users/Jan/Dropbox/Bachelorarbeit/Programm/Testdaten/Raw DataSet/'
# listdir = [file for file in os.listdir(directory) if file not in ['capa.txt', 'capb.txt', 'capc.txt']]
# for d in listdir:
# print('Opening dir: ', directory+'/'+d)
# with open(directory+'/'+d) as f:
# firstLine = True
# numLocs = 0
# numCusts = 0
# text = ''
# f_i = []
# d_j = []
# b_i = []
# first_line = f.readline()
# numLocs = int(first_line.split()[0])
# numCusts = int(first_line.split()[1])
# c_ij = [[(0, 0, 0) for cus in range(numCusts)] for loc in range(numLocs) ]
# for i, line in enumerate(f):
# if i < numLocs:
# b_i.append((i, float(line.split()[0])))
# f_i.append((i, float(line.split()[1])))
# else:
# for number in line.split():
# text += ' ' + number
# text = text[1:]
# for index, item in enumerate(text.split(' ')):
# if index % (numLocs+1) == 0:
# d_j.append((index, float(item)))
# text = [val for index, val in enumerate(text.split(' ')) if index % (numLocs+1) != 0]
# for customer in range(numCusts):
# firstLine = True
# actual_allocating_costs = []
# for index, val in enumerate(text[:numLocs]):
# c_ij[index][customer] = (index, customer, float(val))
# if len(text) > numLocs:
# text = text[numLocs:]
# directory = "C:/Users/Jan/Dropbox/Bachelorarbeit/Programm/Testdaten"
# for d in listdir:
# files_to_save = ['cij.txt', 'dj.txt', 'bi.txt', 'fi.txt']
# for file, data in zip(files_to_save, [c_ij, d_j, b_i, f_i]):
# print(directory+'/'+d.split('.')[0]+'/'+file)
# os.makedirs(os.path.dirname(directory+'/'+d.split('.')[0]+'/'+file), exist_ok=True)
# with open(directory+'/'+d.split('.')[0]+'/'+file, "w") as f:
# if len(data[0]) == 2:
# for val in data:
# f.write(' '.join(map(str, val))+'\n')
# else:
# for val_i in data:
# for val_j in val_i:
# f.write(' '.join(map(str, val_j)) + '\n')
#
correct = [56, 94, 89]
listdir = [file for file in os.listdir(directory) if file in ['capa.txt', 'capb.txt', 'capc.txt']]
capacity_amount = 8000.0
for c, d in enumerate(listdir):
print('Opening dir: ', directory+d)
with open(directory+'/'+d) as f:
#Init Vars
firstLine = True
numLocs = 0
numCusts = 0
text = ''
f_i = []
d_j = []
b_i = []
first_line = f.readline()
numLocs = int(first_line.split()[0])
numCusts = int(first_line.split()[1])
c_ij = []
# Start Parsing
for i, line in enumerate(f):
if i <= numLocs and i > 0:
b_i.append(capacity_amount)
f_i.append(line.split(" ")[2])
else:
for number in line.split(" "):
text += " " + number
text_list = [item for index, item in enumerate(text.split()) if item != " " and item != "\n" and item != "capacity"]
text_list[c] = correct[c]
for item,counter in zip(text_list, range(len(text_list))):
if counter % (numLocs+1) == 0:
d_j.append(item)
else:
if " " in item:
c_ij.append(item.split(" ")[-1])
else:
c_ij.append(item)
directory = "C:/Users/Jan/Dropbox/Bachelorarbeit/Programm/Testdaten"
for d in listdir:
files_to_save = ['cij.txt', 'dj.txt', 'bi.txt', 'fi.txt']
for file, data in zip(files_to_save, [c_ij, d_j, b_i, f_i]):
print(directory+'/'+d.split('.')[0]+'/'+file)
os.makedirs(os.path.dirname(directory+'/'+d.split('.')[0]+'/'+file), exist_ok=True)
with open(directory+'/'+d.split('.')[0]+'/'+file, "w") as f:
for val in data:
f.write(str(val)+'\n')
| 2.453125
| 2
|
git_commits/schedule.py
|
akaprasanga/SentimentAnalysis_MajorProject
| 1
|
12774913
|
import schedule
import time
from gql import Main
import configparser
import json
from jsondiff import diff
from writedb import writedb
import pandas as pd
from pandas import DataFrame
config = configparser.RawConfigParser()
config.read('refresh_time.cfg')
interval = config.getint('Main','time')
t = int(interval)
response = Main.git_activities()
new = response['data']['repository']
namewithowner = new['nameWithOwner']
watchers = new['watchers']['totalCount']
fork = new['forkCount']
stars = new['stargazers']['totalCount']
commit_count = new['object']['history']['totalCount']
commit = new['object']['history']['edges']
writedb.write_repo(namewithowner,fork,stars,watchers,commit_count)
for each in commit:
date = each['node']['committedDate']
committer = each['node']['committer']['name']
message = each['node']['messageHeadline']
writedb.write_commit(committer,date,message,namewithowner)
def job():
response1 = Main.git_activities()
time.sleep(300)
response2 =Main.git_activities()
r = diff(response1,response2)
df = DataFrame(response1)
df2 = DataFrame(response2)
ne = (df != df2).any(1)
print(ne)
if r=={}:
print('no difference in git activites from last response')
else:
print('difference')
new = response2['data']['repository']
namewithowner = new['nameWithOwner']
watchers = new['watchers']['totalCount']
fork = new['forkCount']
stars = new['stargazers']['totalCount']
commit_count = new['object']['history']['totalCount']
commit = new['object']['history']['edges']
writedb.update_repo(namewithowner,fork,stars,watchers,commit_count)
for each in commit:
date = each['node']['committedDate']
committer = each['node']['committer']['name']
message = each['node']['messageHeadline']
writedb.insert_new(committer,date,message,namewithowner)
while True:
job()
| 2.4375
| 2
|
app-sdk/python/iagent_sdk/iagent/model/group.py
|
iconnect-iot/intel-device-resource-mgt-lib
| 2
|
12774914
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Group(object):
def __init__(self, name):
self.name = name
self.__attributes = {}
def equals(self, obj):
return self.name == obj.get_name()
def get_attribute(self, key):
        return self.__attributes[key]
def get_device_members(self):
devices = []
all_devices = IAgentManager.getInstance().get_all_devices()
for device in all_devices:
if self.name in device.get_groups():
devices.append(device)
return devices
def get_name(self):
return self.name
def get_resource_members(self):
resources = []
all_devices = IAgentManager.getInstance().get_all_devices()
for device in all_devices:
resources_device = device.get_resources()
            for resource in resources_device:
                if self.name in resource.get_groups():
resources.append(resource)
return resources
def hash_code(self):
return hash(self.name)
| 2.296875
| 2
|
lib/python/pySitools2_idoc.py
|
HELIO-HFC/SPoCA
| 2
|
12774915
|
<filename>lib/python/pySitools2_idoc.py
# -*- coding: utf-8 -*-
"""
This is a generic python Sitools2 tool
The code defines several classes SitoolsInstance, Field, Query, Dataset and Project
@author: <NAME> for IAS 28-08-2012
"""
__version__ = "0.9"
__license__ = "GPL"
__author__ ="<NAME>"
__credit__=["<NAME>", "<NAME>"]
__maintainer__="<NAME>"
__email__="<EMAIL>"
import sys
from datetime import *
import os,time
try :
import urllib
except:
sys.exit ("Import failed in module pySitools2_idoc :\n\turllib module is required")
try :
import simplejson
except:
sys.exit ("Import failed in module pySitools2_idoc :\n\tsimplejson module is required")
try :
from xml.dom.minidom import parse, parseString
except:
sys.exit ("Import failed in module pySitools2_idoc :\n\txml.dom.minidom module is required")
class Sitools2Instance() :
""""Define an install of Sitools2.
An instance of Sitools2Instance is defined using its url so this is the only attribute.
The method available : list_project().
It will return a list of the projects available for the instance.
"""
#Initialize class Sitools2Instance
def __init__(self,url):
self.instanceUrl=url
try :
simplejson.load(urllib.urlopen(url+"/sitools/portal"))
except:
err_mess="Error in Sitools2Instance.__init__() :\nSitools2 instance %s not available please contact admin : <EMAIL> for more info" % url
sys.exit(err_mess)
#List all projects available for that SitoolsInstance
def list_project(self, **kwargs):
sitools_url=self.instanceUrl
data=[]
kwargs.update({
'media' : 'json'
})
url=sitools_url+'/sitools/portal/projects'+'?'+urllib.urlencode(kwargs)
result =simplejson.load(urllib.urlopen(url))
print "%s projects detected" % result['total']
projects=result['data']
for i,project in enumerate(projects) :
p_url=sitools_url+project['sitoolsAttachementForUsers']
try :
data.append(Project(p_url))
except :
print "Error in Sitools2Instance.list_project() :\nCannot create object project %s, %s protected \nContact admin : <EMAIL> for more info\n" % (project['name'],p_url)
return data
class Field():
"""Definition of a Field class.
A field is a item from a dataset.
It has several attributes : name, type, filter(boolean), sort(boolean), behavior.
"""
#Initialize class Field
def __init__(self,dictionary):
self.name=""
self.type=""
self.filter=False
self.sort=False
self.behavior=""
self.compute_attributes(dictionary)
#Compute attribute from web service dataset description
def compute_attributes(self, dictionary):
if dictionary.has_key('columnAlias'):
self.name=dictionary['columnAlias']
if dictionary.has_key('sqlColumnType'):
self.type=dictionary['sqlColumnType']
if dictionary.has_key('filter'):
self.filter=dictionary['filter']
if dictionary.has_key('sortable'):
self.sort=dictionary['sortable']
if dictionary.has_key('columnRenderer'):
self.behavior=dictionary['columnRenderer']['behavior']
    #Output attributes of Field
def display(self):
print "\n\nField object display() :\n\t%s\n\t\ttype : %s\n\t\tfilter : %s\n\t\tsort : %s\n\t\tbehavior : %s" %(self.name,self.type,self.filter,self.sort, self.behavior)
class Query():
"""Definition of a Query class.
A Query defines the request passed to the server.
It can have the following attributes : fields_list, name_list, operation.
    The operation parameter can take the values: ge, le, gte, lte, lt, eq, gt, like, in, numeric_between, date_between, cadence.
"""
#Initialize class Query
def __init__(self,param_list):
self.fields_list=[]
self.name_list=[]
self.value_list=[]
self.operation=""
self.compute_attributes(param_list)
#Compute attribute from client request
def compute_attributes(self,param_list) :
if type(param_list[0]).__name__ !='list':
mess_err="Error in Query.compute_attributes() :\nQuery first argument type is : %s\nQuery first argument type should be : list" % type(param_list[0]).__name__
sys.exit(mess_err)
if type(param_list[1]).__name__ !='list':
mess_err="Error in Query.compute_attributes() :\nQuery second argument type is : %s\nQuery second argument type should be : list" % type(param_list[1]).__name__
sys.exit(mess_err)
for field in param_list[0]:
self.name_list.append(field.name)
self.fields_list=param_list[0]
self.value_list=param_list[1]
self.operation=param_list[2]
    #Output attributes of Query
def display(self):
name=[]
values=[]
for field in self.name_list :
name.append(field)
print "Query object display() :\n\t"+", ".join(name)
for value in self.value_list:
values.append(value)
print "\t\tvalue : "+", ".join(values)
print "\t\toperation :",self.operation
class Dataset():
"""Definition of a Dataset class.
    It is related to a Sitools2 dataset, which is a set of instances of the class Field with specific properties.
    It can have the following attributes: name, description, url, fields_list, filter_list, resources_target, noClientAccess_list, primary_key, resources_list.
    Dataset provides the generic, powerful search() method that allows a python client to make a request on a Sitools2 installation.
"""
#Initialize class Dataset
def __init__(self, url):
try :
simplejson.load(urllib.urlopen(url))
except:
err_mess="Error in Dataset.__init__() :\nDataset %s not available, please contact admin : <EMAIL> for more info" % url
sys.exit(err_mess)
self.name = ""
self.description = ""
self.uri="/"+url.split("/")[-1]
self.url = url
self.fields_list=[]
self.fields_dict={}
self.filter_list=[]
self.allowed_filter_list=[]
self.sort_list=[]
self.allowed_sort_list=[]
self.resources_target=[]
self.noClientAccess_list=[]
self.primary_key=""
self.compute_attributes()
self.resources_list()
#Compute attribute from web service answer dataset description
def compute_attributes(self, **kwargs) :
kwargs.update({
'media' : 'json'
})
url=self.url+'?'+urllib.urlencode(kwargs)
try:
result =simplejson.load(urllib.urlopen(url))
self.name=result['dataset']['name']
self.description=result['dataset']['description']
columns=result['dataset']['columnModel']
for column in columns :
self.fields_list.append(Field(column))
self.fields_dict.update({
column['columnAlias'] : Field(column)
})
if (column.has_key('filter') and column['filter']):
self.filter_list.append(Field(column))
if (column.has_key('sortable') and column['sortable']):
self.sort_list.append(Field(column))
if (column.has_key('primaryKey') and column['primaryKey']):
self.primary_key=(Field(column))
if (column.has_key('columnRenderer')and column['columnRenderer']['behavior']=="noClientAccess"):
self.noClientAccess_list.append(column['columnAlias'])
except :
sys.exit( "Error in Dataset.compute_attributes(), please contact admin : <EMAIL> for more info")
for field in self.filter_list:
self.allowed_filter_list.append(field.name)
for field in self.sort_list:
self.allowed_sort_list.append(field.name)
#Explore and list dataset resources (method=options has to be allowed )
def resources_list(self):
try :
url = urllib.urlopen(self.url+'?method=OPTIONS')
wadl = url.read()
domWadl = parseString(wadl)
resources = domWadl.getElementsByTagName('resource')
for i in range(len(resources)):
self.resources_target.append(self.url+"/"+resources[i].getAttribute('path'))
except:
            print "\t\t\tError in Dataset.resources_list() not allowed, please contact admin : <EMAIL> for more info"
    #Send a search request to the Sitools2 server, within a limit of 350000 records, i.e. more than 1 month at full cadence for the SDO project
def search(self,query_list,output_list,sort_list,limit_request=350000, limit_to_nb_res_max=-1, **kwargs) :
"""This is the generic search() method of a Sitools2 instance.
The parameters available are : query_list, output_list, sort_list, limit_request & limit_to_nb_res_max.
Example of use :
result=ds1.search([Q1,Q2,Q3,Q4],O1,S1,limit_to_nb_res_max=10)
Where Q1, Q2, Q3 & Q4 can be :
Q1=Query(param_query1)
Q2=Query(param_query2)
Q3=Query(param_query3)
Q4=Query(param_query4)
        Where param_query1, param_query2, param_query3 and param_query4 can be :
param_query1=[[ds1.fields_list[4]],['2012-08-10T00:00','2012-08-10T01:00'],'DATE_BETWEEN']
param_query2=[[ds1.fields_list[5]],['335'],'IN']
param_query3=[[ds1.fields_list[10]],['1 min'],'CADENCE']
param_query4=[[ds1.fields_list[8]],['2.900849'],'LTE']
"""
kwargs.update({
'media' : 'json',
'limit' : 300,
'start' : 0
})
#Initialize counter
j=0#filter counter
i=0#p counter
for num_query,query in enumerate(query_list) :#create url options p[$i] and filter[$j]
            operation=query.operation.upper()#normalize the operation to upper case
if operation =='GE' :
operation='GTE'
elif operation == 'LE' :
operation='LTE'
if operation in ['LT', 'EQ', 'GT', 'LTE', 'GTE'] :
for field in query.fields_list :
if field.name not in self.allowed_filter_list :
err_mess="Error in Dataset.search() :\nfilter on %s is not allowed" % field.name
sys.exit(err_mess)
kwargs.update({
'filter['+str(j)+'][columnAlias]' : "|".join(query.name_list),
'filter['+str(j)+'][data][type]' : 'numeric',
'filter['+str(j)+'][data][value]' : "|".join(query.value_list),
'filter['+str(j)+'][data][comparison]' : operation
})
j+=1 #increment filter counter
elif operation in ['LIKE'] :
operation='TEXT'
i+=1#increment p counter
elif operation in ['IN'] :
operation='LISTBOXMULTIPLE'
kwargs.update({
'p['+str(i)+']' : operation+"|"+"|".join(query.name_list)+"|"+"|".join(query.value_list)
})
i+=1#increment p counter
elif operation in ['DATE_BETWEEN','NUMERIC_BETWEEN', 'CADENCE'] :
kwargs.update({
'p['+str(i)+']' : operation+"|"+"|".join(query.name_list)+"|"+"|".join(query.value_list)
})
i+=1#increment p counter
else :
                allowed_operations="ge, le, gte, lte, lt, eq, gt, like, in, numeric_between, date_between, cadence"
                sys.exit("Operation not available : %s \nAllowed operations are : %s " % (operation,allowed_operations))#exit the program nicely with a clear error message
output_name_list=[]
output_name_dict={}
for i, field in enumerate(output_list):#build output object list and output object dict with name as a key
output_name_list.append(field.name)
output_name_dict.update({
field.name : field
}
)
kwargs.update({#build colModel url options
'colModel' : '"'+", ".join(output_name_list)+'"'
})
sort_dic_list=[]
for field in sort_list :#build sort output options
if field[0].name not in self.allowed_sort_list :
                err_mess="Error in Dataset.search():\nsort on %s is not allowed" % field[0].name
sys.exit(err_mess)
sort_dictionary={}
sort_dictionary.update({
"field" : field[0].name ,
"direction" : field[1]
})
sort_dic_list.append(sort_dictionary)
temp_kwargs={}
temp_kwargs.update({
'sort' : {"ordersList" : sort_dic_list}
})
temp_url=urllib.urlencode(temp_kwargs).replace('+','').replace('%27','%22')
url_count=self.url+"/count"+'?'+urllib.urlencode(kwargs)+"&"+temp_url#Build url just for count
url=self.url+"/records"+'?'+urllib.urlencode(kwargs)+"&"+temp_url#Build url for the request
result_count =simplejson.load(urllib.urlopen(url_count))
nbr_results=result_count['total']
result=[]
if nbr_results < limit_request :#Check if the request does not exceed 350 000 items
if limit_to_nb_res_max>0 and limit_to_nb_res_max < kwargs['limit']: #if nbr to display is specified and < 300
kwargs['limit']=limit_to_nb_res_max
kwargs['nocount']='true'
nbr_results=limit_to_nb_res_max
url=self.url+"/records"+'?'+urllib.urlencode(kwargs)+"&"+temp_url
elif limit_to_nb_res_max>0 and limit_to_nb_res_max >= kwargs['limit']:#if nbr to display is specified and >= 300
nbr_results=limit_to_nb_res_max
kwargs['nocount']='true'
url=self.url+"/records"+'?'+urllib.urlencode(kwargs)+"&"+temp_url
while (nbr_results-kwargs['start'])>0 :#Do the job per 300 items till nbr_result is reached
#Check that request is done each 300 items
result_temp =simplejson.load(urllib.urlopen(url))
for data in result_temp['data'] :
result_dict={}
for k,v in data.items() :
if (k not in self.noClientAccess_list and k != 'uri' and k in output_name_list) or k in output_name_list :
if output_name_dict[k].type.startswith('int'):
result_dict.update({
k : int(v)
})
elif output_name_dict[k].type.startswith('float'):
result_dict.update({
k : float(v)
})
elif output_name_dict[k].type.startswith('timestamp'):
(dt, mSecs)= v.split(".")
dt = datetime.strptime(dt,"%Y-%m-%dT%H:%M:%S")
mSeconds = timedelta(microseconds = int(mSecs))
result_dict.update({
k : dt+mSeconds
})
else :
result_dict.update({
k : v
})
result.append(result_dict)
kwargs['start'] += kwargs['limit']#increment the job by the kwargs limit given (by design)
url=self.url+"/records"+'?'+urllib.urlencode(kwargs)+"&"+temp_url#encode new kwargs and build new url for request
return result
else :
print "Not allowed\nNbr results (%d) exceeds limit_request param: %d " % (result_count['total'],limit_request)
return result
    #Output attributes of Dataset
def display(self) :
print "\n\nDataset object display() :\n\t%s\n\t\tdescription : %s\n\t\turi : %s\n\t\turl : %s\n\t\tprimary_key : %s" % (self.name,self.description,self.uri,self.url,self.primary_key.name)
print "\t\tresources_list :"
for i, res in enumerate(self.resources_target) :
print "\t\t\t%d) %s" % (i,res)
print "\t\tfields list :"
for i, field in enumerate(self.fields_list) :
print "\t\t\t%d) %s" % (i,field.name)
print "\t\tfilter list :"
for i, field in enumerate(self.filter_list) :
print "\t\t\t%d) %s" % (i,field.name)
print "\t\tsort list :"
for i, field in enumerate(self.sort_list) :
print "\t\t\t%d) %s" % (i,field.name)
class Project():
"""Define a Project class.
A Project instance gives details about a project of Sitools2.
It has the following attributes : name, description, uri, url, resources_target.
The method dataset_list() will return information about the number of datasets available, their name and uri.
"""
#Initialize Project
def __init__(self, url):
self.name = ""
self.description = ""
self.uri = "/"+url.split("/")[-1]
self.url = url
self.resources_target = []
self.compute_attributes()
        self.resources_list()
    #Compute attribute values for the Project instance
def compute_attributes(self,**kwargs) :
kwargs.update({
'media' : 'json'
})
url=self.url+'?'+urllib.urlencode(kwargs)
result =simplejson.load(urllib.urlopen(url))
self.name=result['project']['name']
self.description=result['project']['description']
#Explore Project resources (method=options should be allowed)
def resources_list(self):
url = urllib.urlopen(self.url+'?method=OPTIONS')
wadl = url.read()
try :
domWadl = parseString(wadl)
except :
print "Project : project.resources_list() not allowed, please contact admin for more info"
else :
resources = domWadl.getElementsByTagName('resource')
for i in range(len(resources)):
self.resources_target.append(self.url+"/"+resources[i].getAttribute('path'))
    #Output Project attributes
def display(self):
print "\n\nProject object display() :\n\t%s\n\t\tdescription : %s\n\t\turi : %s\n\t\turl : %s" % (self.name,self.description,self.uri,self.url)
print "\t\tresources list :"
if len(self.resources_target)!=0 :
for i, res in enumerate(self.resources_target) :
print "\t\t\t%d) %s" % (i,res)
#List all datasets in the Project and create the dataset objects
def dataset_list(self, **kwargs):
"""Return relevant information concerning the datasets of your project
"""
sitools_url=self.url.split("/")[0]+"//"+self.url.split("//")[1].split("/")[0]
kwargs.update({
'media' : 'json'
})
url=self.url+'/datasets'+'?'+urllib.urlencode(kwargs)
data=[]
try:
result =simplejson.load(urllib.urlopen(url))
if len (result['data'])!=0 :
for i,dataset in enumerate(result['data']) :
ds_url=sitools_url+dataset['url']
data.append(Dataset(ds_url))
except :
            print "Error in Project.dataset_list() :\nCannot access dataset, %s is protected\nContact <EMAIL> for more info" % url
return data
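# A hedged usage sketch (editorial addition, not part of the original client):
# it needs a live Sitools2 server, so the project URL below is a placeholder
# and the field indexes must be adapted to the real dataset layout.
#
#     project = Project("http://your-sitools2-server/your_project_uri")
#     datasets = project.dataset_list()
#     if len(datasets) != 0:
#         ds1 = datasets[0]
#         ds1.display()
#         # Date-interval query on field index 4 (assumed to be a date column)
#         Q1 = Query([[ds1.fields_list[4]], ['2012-08-10T00:00', '2012-08-10T01:00'], 'DATE_BETWEEN'])
#         O1 = [ds1.fields_list[0], ds1.fields_list[4]]   # output columns
#         S1 = [[ds1.fields_list[4], 'ASC']]              # sort field and direction (direction value assumed)
#         results = ds1.search([Q1], O1, S1, limit_to_nb_res_max=10)
#         print "%d record(s) returned" % len(results)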
| 2.640625
| 3
|
embeddings/embedding/static/config.py
|
CLARIN-PL/embeddings
| 33
|
12774916
|
from dataclasses import dataclass
from typing import Any, Dict
from urllib.error import HTTPError
from urllib.request import urlopen
import requests
import srsly
from huggingface_hub import cached_download, hf_hub_url
from embeddings.utils.loggers import get_logger
_logger = get_logger(__name__)
@dataclass
class StaticModelHubConfig:
repo_id: str
@property
def model_type_reference(self) -> str:
reference = self._load_hub_json("module.json")["type"]
if isinstance(reference, str):
return reference
else:
raise ValueError(f"Wrong format of import reference {reference}.")
@property
def default_config(self) -> Dict[str, Any]:
config = self._load_hub_json("default_config.json")
if isinstance(config, dict):
return config
else:
raise ValueError(f"Wrong format of default config {config}.")
def _load_hub_json(self, filename: str) -> Any:
url = self._get_file_hf_hub_url(filename)
try:
path = cached_download(url)
except requests.HTTPError:
raise EnvironmentError(
"Repository not found or wrong format of a given model (module.json not found)."
)
return srsly.read_json(path)
def _get_file_hf_hub_url(self, filename: str) -> str:
url: str = hf_hub_url(self.repo_id, filename=filename)
return url
def file_accessible(self, filename: str) -> bool:
try:
result: bool = urlopen(self._get_file_hf_hub_url(filename)).getcode() == 200
return result
except HTTPError:
return False
@dataclass
class SingleFileConfig(StaticModelHubConfig):
model_name: str
@property
def cached_model(self) -> str:
url: str = self._get_file_hf_hub_url(self.model_name)
path: str = cached_download(url)
return path
@dataclass
class GensimFileConfig(SingleFileConfig):
model_name: str
@property
def cached_model(self) -> str:
url: str = self._get_file_hf_hub_url(self.model_name)
path: str = cached_download(url)
npy_vectors_url: str = self._get_file_hf_hub_url(f"{self.model_name}.vectors.npy")
try:
cached_download(npy_vectors_url, force_filename=f"{path}.vectors.npy")
except requests.HTTPError:
_logger.info(f"{self.model_name}.vectors.npy not found, skipping it.")
return path
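# A minimal usage sketch (editorial addition, not part of the original module):
# the repo_id and model_name below are placeholders for a Hugging Face Hub
# repository containing module.json, default_config.json and the model file.
#
#     config = SingleFileConfig(repo_id="your-org/your-static-embedding", model_name="model.bin")
#     print(config.model_type_reference)   # import path of the embedding class
#     print(config.default_config)         # keyword arguments for that class
#     local_path = config.cached_model     # downloads the file and returns the local cache path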
| 2.40625
| 2
|
alipay/aop/api/domain/AlipayBossOrderDiagnosisGetModel.py
|
snowxmas/alipay-sdk-python-all
| 213
|
12774917
|
<reponame>snowxmas/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayBossOrderDiagnosisGetModel(object):
def __init__(self):
self._code = None
self._end_time = None
self._find_operator = None
self._order_no = None
self._out_order_no = None
self._source = None
self._start_time = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
@property
def find_operator(self):
return self._find_operator
@find_operator.setter
def find_operator(self, value):
self._find_operator = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.end_time:
if hasattr(self.end_time, 'to_alipay_dict'):
params['end_time'] = self.end_time.to_alipay_dict()
else:
params['end_time'] = self.end_time
if self.find_operator:
if hasattr(self.find_operator, 'to_alipay_dict'):
params['find_operator'] = self.find_operator.to_alipay_dict()
else:
params['find_operator'] = self.find_operator
if self.order_no:
if hasattr(self.order_no, 'to_alipay_dict'):
params['order_no'] = self.order_no.to_alipay_dict()
else:
params['order_no'] = self.order_no
if self.out_order_no:
if hasattr(self.out_order_no, 'to_alipay_dict'):
params['out_order_no'] = self.out_order_no.to_alipay_dict()
else:
params['out_order_no'] = self.out_order_no
if self.source:
if hasattr(self.source, 'to_alipay_dict'):
params['source'] = self.source.to_alipay_dict()
else:
params['source'] = self.source
if self.start_time:
if hasattr(self.start_time, 'to_alipay_dict'):
params['start_time'] = self.start_time.to_alipay_dict()
else:
params['start_time'] = self.start_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayBossOrderDiagnosisGetModel()
if 'code' in d:
o.code = d['code']
if 'end_time' in d:
o.end_time = d['end_time']
if 'find_operator' in d:
o.find_operator = d['find_operator']
if 'order_no' in d:
o.order_no = d['order_no']
if 'out_order_no' in d:
o.out_order_no = d['out_order_no']
if 'source' in d:
o.source = d['source']
if 'start_time' in d:
o.start_time = d['start_time']
return o
| 1.8125
| 2
|
biasimpacter/app/app.py
|
sammous/biasimpact
| 1
|
12774918
|
from dataprovider import Date, Validator, RSSReader, StoryRSS
from models import ModelRSS
from threading import Thread
import logging
import schedule
import time
import json
import os
logging.basicConfig(filename=os.getenv("BIASIMPACTER_OUTPUT"),
level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
logging.getLogger().addHandler(logging.StreamHandler())
def set_up_mongo():
try:
mongo_host = os.getenv("BIASIMPACTER_DC_MONGO_HOST")
mongo_port = os.getenv("MONGO_PORT")
mongo_db = os.getenv("APP_MONGO_DB")
mongo_user = os.getenv("APP_MONGO_USER")
mongo_pw = os.getenv("APP_MONGO_PASS")
mongo_uri = "mongodb://{}:{}@{}:{}/{}".format(
mongo_user, mongo_pw, mongo_host, mongo_port, mongo_db)
logging.info(mongo_uri)
return mongo_uri
    except Exception as e:
logging.error(e)
def read_source(datapath=os.path.join(os.path.dirname(os.path.dirname(__file__)), "source.txt")):
with open(datapath, 'r') as f:
return [line.rstrip().split(", ") for line in f]
def main():
uri = set_up_mongo()
mongo_rss = ModelRSS(uri)
urls = read_source()
for name, url in urls:
try:
logging.info("Reading story: {}".format(name))
story = StoryRSS(name, url, mongo_rss)
story.save_story()
except Exception as e:
logging.error(e)
if __name__ == "__main__":
main()
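# Editorial note on the expected source.txt layout (inferred from read_source()
# above, so treat it as an assumption): one feed per line, with the outlet name
# and its RSS URL separated by ", ", e.g.
#
#     Example Outlet, https://example.com/rss.xml
#     Another Outlet, https://example.org/feed.xml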
| 2.328125
| 2
|
gui/snd.py
|
celephicus/tadtas-joystick
| 0
|
12774919
|
<filename>gui/snd.py
#!/usr/bin/env python3
import sys
import numpy as np
import sounddevice as sd
start_idx = 0
f1, f2 = 261.5, 261.5*2
finc = 20  # frequency step in Hz (assumed value; not referenced below, the key handlers use a literal 20)
amplitude = 0.4
device = None # Seems to use speaker as a default.
samplerate = sd.query_devices(device, 'output')['default_samplerate']
def mk_samples(t, f):
a = amplitude if f != 500 else 0.0
return a * np.sin(2 * np.pi * f * t)
def callback(outdata, frames, time, status):
if status:
print(status, file=sys.stderr)
global start_idx
t = (start_idx + np.arange(frames)) / samplerate
t = t.reshape(-1, 1)
outdata[:] = list(zip(mk_samples(t, f1), mk_samples(t, f2)))
start_idx += frames
stros = sd.OutputStream(device=device, channels=2, callback=callback, samplerate=samplerate)
stros.start()
import msvcrt
while True:
if msvcrt.kbhit():
key_stroke = msvcrt.getch()
x = key_stroke.decode()
if x == '1': f1 += 20
if x == '2': f1 -= 20
if x == '3': f2 += 20
if x == '4': f2 -= 20
if x == 'x': sys.exit(0)
| 2.375
| 2
|
amieclient/client.py
|
ericblau/amieclient
| 0
|
12774920
|
import json
from math import ceil, floor
import requests
from .packet import PacketList
from .packet.base import Packet
from .transaction import Transaction
from .usage import (UsageMessage, UsageRecord, UsageResponse, UsageResponseError,
FailedUsageResponse, UsageStatus)
"""AMIE client and Usage Client classes"""
class AMIERequestError(requests.RequestException):
pass
class AMIEClient(object):
"""
AMIE Client.
Args:
site_name (str): Name of the client site.
api_key (str): API key secret
amie_url (str): Base URL for the XSEDE AMIE api
Examples:
>>> psc_client = amieclient.AMIEClient(site_name='PSC', api_key=some_secrets_store['amie_api_key'])
You can also override the amie_url and usage_url parameters, if you're
doing local development or testing out a new version.
        >>> psc_alt_base_client = amieclient.AMIEClient(site_name='PSC', api_key='test_api_key', amie_url='https://amieclient.xsede.org/v0.20_beta/')
"""
def __init__(self, site_name, api_key,
amie_url='https://amieclient.xsede.org/v0.10/'):
if not amie_url.endswith('/'):
self.amie_url = amie_url + '/'
else:
self.amie_url = amie_url
self.site_name = site_name
amie_headers = {
'XA-API-KEY': api_key,
'XA-SITE': site_name
}
s = requests.Session()
s.headers.update(amie_headers)
self._session = s
def __enter__(self):
return self
def __exit__(self, *args):
self._session.close()
@staticmethod
def _join_list(things):
if things is not None and things != []:
# If we're given a list, join it with commas
return ','.join(things)
elif things == []:
# if we're given an empty list, return None
return None
else:
# If we're given anything else, i.e. None or some other single
# thing, give it back
return things
@staticmethod
def _dt_range(start, end):
if start is None and end is None:
time_str = None
else:
start_str = start.isoformat() if start else ""
end_str = end.isoformat() if end else ""
time_str = "{},{}".format(start_str, end_str)
return time_str
def get_transaction(self, *, transaction_or_id):
"""
Given a single transaction record id, fetches the related transaction.
See the :swagger:`Swagger documentation <AMIE_Client/get_transactions__site_name___amie_transaction_id__packets/>` for more details.
Args:
transaction_or_id: The transaction or transaction record ID.
Returns:
amieclient.Transaction
"""
if isinstance(transaction_or_id, Transaction):
tx_id = transaction_or_id.trans_rec_id
else:
tx_id = transaction_or_id
url = self.amie_url + 'transactions/{}/{}/packets'.format(self.site_name, tx_id)
r = self._session.get(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return Transaction.from_dict(response)
def set_transaction_failed(self, *, transaction_or_id):
"""
        Given a single transaction or transaction record id, marks it as failed.
See the :swagger:`Swagger documentation <AMIE_Client/put_transactions__site_name___amie_transaction_id__state_failed>` for more details.
Args:
transaction_or_id: The transaction or transaction record ID.
"""
if isinstance(transaction_or_id, Transaction):
tx_id = transaction_or_id.trans_rec_id
else:
tx_id = transaction_or_id
url = self.amie_url + 'transactions/{}/{}/state/failed'.format(self.site_name, tx_id)
r = self._session.put(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
def get_packet(self, *, packet_rec_id):
"""
Given a single packet record id, fetches the packet.
See the :swagger:`Swagger documentation <AMIE_Client/get_packets__site_name_>` for more details.
Args:
packet_rec_id: The transaction record ID.
Returns:
amieclient.Packet
"""
url = self.amie_url + 'packets/{}/{}'.format(self.site_name, packet_rec_id)
r = self._session.get(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return Packet.from_dict(response['result'])
def list_packets(self, *, trans_rec_ids=None, outgoing=None,
update_time_start=None, update_time_until=None,
states=None, client_states=None, transaction_states=None,
incoming=None):
"""
Fetches a list of packets based on the provided search parameters
See the :swagger:`Swagger documentation <AMIE_Client/get_packets__site_name_>` for more details.
Args:
trans_rec_ids (list): Searches for packets with these transaction record IDs.
            outgoing (bool): If true, search is limited to outgoing packets.
update_time_start (datetime.Datetime): Searches for packets updated since this time.
update_time_until (datetime.Datetime): Searches for packets updated before this time.
states (list): Searches for packets in the provided states.
client_states (list): Searches for packets in the provided client states.
            transaction_states (list): Searches for packets in the provided transaction states.
incoming (bool): If true, search is limited to incoming packets.
Returns:
amieclient.PacketList: a list of packets matching the provided parameters.
"""
trans_rec_ids_str = self._join_list(trans_rec_ids)
states_str = self._join_list(states)
client_states_str = self._join_list(client_states)
transaction_states_str = self._join_list(transaction_states)
time_str = self._dt_range(update_time_start, update_time_until)
# Build a dict of parameters. Requests skips any with a None value,
# so no need to weed them out
params = {
'trans_rec_id': trans_rec_ids_str,
'outgoing': outgoing,
'update_time': time_str,
'states': states_str,
'client_state': client_states_str,
'transaction_state': transaction_states_str,
'incoming': incoming
}
# Get the list of packets
url = self.amie_url + 'packets/{}'.format(self.site_name)
r = self._session.get(url, params=params)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return PacketList.from_dict(response)
def send_packet(self, packet, skip_validation=False):
"""
Send a packet
See the :swagger:`Swagger documentation <AMIE_Client/post_packets__site_name_>` for more details.
Args:
packet (amieclient.Packet): The packet to send.
Returns:
requests.Response: The response from the AMIE API.
"""
if not skip_validation:
packet.validate_data(raise_on_invalid=True)
url = self.amie_url + 'packets/{}'.format(self.site_name)
r = self._session.post(url, json=packet.as_dict())
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
def set_packet_client_state(self, packet_or_id, state):
"""
Set the client state on the server of the packet corresponding to the given
packet_or_id.
See the :swagger:`Swagger documentation <AMIE_Client/put_packets__site_name___packet_rec_id__client_state__client_state_>` for more details.
Args:
packet_or_id (Packet, int): The packet or packet_rec_id to set state on.
state (str): The state to set
"""
if isinstance(packet_or_id, Packet):
pkt_id = packet_or_id.packet_rec_id
else:
pkt_id = packet_or_id
url = self.amie_url + 'packets/{}/{}/client_state/{}'.format(self.site_name,
pkt_id, state)
r = self._session.put(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
def clear_packet_client_state(self, packet_or_id):
"""
Clears the client state on the server of the packet corresponding to the given
packet_or_id.
See the :swagger:`Swagger documentation <AMIE_Client/delete_packets__site_name___packet_rec_id__client_state_>` for more details.
Args:
packet_or_id (Packet, int): The packet or packet_rec_id to clear client_state on.
"""
if isinstance(packet_or_id, Packet):
pkt_id = packet_or_id.packet_rec_id
else:
pkt_id = packet_or_id
url = self.amie_url + 'packets/{}/{}/client_state'.format(self.site_name, pkt_id)
r = self._session.delete(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
def set_packet_client_json(self, packet_or_id, client_json):
"""
Set the client JSON on the server of the packet corresponding to the given
packet_or_id.
See the :swagger:`Swagger documentation <AMIE_Client/put_packets__site_name___packet_rec_id__client_json>` for more details.
Args:
packet_or_id (Packet, int): The packet or packet_rec_id to set client_json on.
client_json: The json to set. Can be any serializable object or a string of
JSON.
"""
if isinstance(packet_or_id, Packet):
pkt_id = packet_or_id.packet_rec_id
else:
pkt_id = packet_or_id
url = self.amie_url + 'packets/{}/{}/client_json'.format(self.site_name, pkt_id)
if isinstance(client_json, str):
# Best to parse the json here. Ensures it's valid and that everything
# serializes back properly when we do the PUT
client_json = json.loads(client_json)
r = self._session.put(url, json=client_json)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
def clear_packet_client_json(self, packet_or_id):
"""
Clears the client JSON on the server of the packet corresponding to the given
packet_or_id.
See the :swagger:`Swagger documentation <AMIE_Client/delete_packets__site_name___packet_rec_id__client_json>` for more details.
Args:
packet_or_id (Packet, int): The packet or packet_rec_id to clear client_json on.
"""
if isinstance(packet_or_id, Packet):
pkt_id = packet_or_id.packet_rec_id
else:
pkt_id = packet_or_id
url = self.amie_url + 'packets/{}/{}/client_json'.format(self.site_name, pkt_id)
r = self._session.delete(url)
response = r.json()
if r.status_code > 200:
message = response.get('message', 'Server did not provide an error message')
raise AMIERequestError(message, response=r)
return r
class UsageClient:
"""
AMIE Usage Client.
Args:
site_name (str): Name of the client site.
api_key (str): API key secret
usage_url (str): Base URL for the XSEDE Usage api
Examples:
>>> psc_client = amieclient.UsageClient(site_name='PSC', api_key=some_secrets_store['amie_api_key'])
You can also override the amie_url and usage_url parameters, if you're
doing local development or testing out a new version.
        >>> psc_alt_base_client = amieclient.UsageClient(site_name='PSC', api_key='test_api_key', usage_url='https://amieclient.xsede.org/v0.20_beta/')
"""
def __init__(self, site_name, api_key,
usage_url='https://usage.xsede.org/api/v1'):
if not usage_url.endswith('/'):
self.usage_url = usage_url + '/'
else:
self.usage_url = usage_url
self.site_name = site_name
amie_headers = {
'XA-API-KEY': api_key,
'XA-SITE': site_name
}
s = requests.Session()
s.headers.update(amie_headers)
self._session = s
def __enter__(self):
return self
def __exit__(self, *args):
self._session.close()
def send(self, usage_packets):
"""
Sends a usage update to the Usage API host. This function accepts
individual UsageMessages, lists of UsageRecords, or even a single
UsageRecord. Returns a list of UsageResponses
The API currently has a request size limit of 1024KiB. We get
ample room for overhead that may be added by intermediate layers
(reverse proxies, etc) by capping the size of the request we send
to 768KiB. This happens automatically, no need to chunk your usage
packets yourself. But this potential chunking means that we may get
more than one response, so for the sake of consistency this method
will return a list of responses.
Args:
usage_packets (UsageMessage, [UsageRecord], UsageRecord):
A UsageMessage object, list of UsageRecords, or a single
UsageRecord to send.
Returns:
list of responses
"""
if isinstance(usage_packets, UsageRecord):
pkt_list = UsageMessage([usage_packets])
elif isinstance(usage_packets, list):
pkt_list = UsageMessage(usage_packets)
elif isinstance(usage_packets, UsageMessage):
pkt_list = usage_packets
url = self.usage_url + 'usage/'
# prepare the request
req = requests.Request('POST', url, json=pkt_list.as_dict())
prepped_req = self._session.prepare_request(req)
# Get the size of the content
content_length = int(prepped_req.headers.get('Content-Length'))
# Cap content_length at 786432 bytes
if content_length >= 786432:
# Get the safe number of safe chunks:
number_of_chunks = ceil(content_length / 786432)
# Get the size of those chunks
chunk_size = floor(len(pkt_list) / number_of_chunks)
results = list()
for chunk in pkt_list._chunked(chunk_size=chunk_size):
# Send each chunk
                r = self.send(chunk)
results.extend(r)
return results
r = self._session.send(prepped_req)
if r.status_code == 200:
resp = UsageResponse.from_dict(r.json())
elif r.status_code == 400:
            # Get the message if we're given one; otherwise use a placeholder
msg = r.json().get('error', 'Bad Request, but error not specified by server')
raise UsageResponseError(msg)
else:
r.raise_for_status()
return [resp]
def summary(self):
"""
Gets a usage summary
Not implemented yet
"""
raise NotImplementedError("Usage summaries are not yet implemented in the AMIE Usage api")
def get_failed_records(self):
"""
Gets all failed records
Takes no arguments
"""
url = self.usage_url + 'usage/failed'
r = self._session.get(url)
if r.status_code > 200:
# Get the message if we're given one; otherwise placeholder
msg = r.json().get('error', 'Bad Request, but error not specified by server')
raise UsageResponseError(msg)
return FailedUsageResponse.from_dict(r.json())
def clear_failed_records(self, failed_records_or_ids):
"""
Tells the server to clear the failed records given
Args:
failed_records_or_ids ([FailedUsageRecord], [int]):
A list of FailedUsageRecords, or plain FailedRecordIds, to unmark as
failed
"""
def _get_id(fr):
if hasattr(fr, 'failed_record_id'):
return str(fr.failed_record_id)
else:
return str(fr)
if isinstance(failed_records_or_ids, list):
failed_ids = map(_get_id, failed_records_or_ids)
else:
failed_ids = [_get_id(failed_records_or_ids)]
fids = ','.join(failed_ids)
        url = self.usage_url + 'usage/failed/{fids}'.format(fids=fids)
r = self._session.delete(url)
r.raise_for_status()
return True
def status(self, from_time=None, to_time=None):
"""
Gets the status of records processed from the queue in the provided interval.
Args:
            from_time (Datetime): Start date and time
            to_time (Datetime): End date and time
"""
from_iso = from_time.isoformat() if from_time is not None else None
to_iso = to_time.isoformat() if to_time is not None else None
p = {'FromTime': from_iso, 'ToTime': to_iso}
url = self.usage_url + 'usage/status'
r = self._session.get(url, params=p)
if r.status_code > 200:
            # Get the message if we're given one; otherwise use a placeholder
msg = r.json().get('error', 'Bad Request, but error not specified by server')
raise UsageResponseError(msg)
return UsageStatus.from_list(r.json())
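if __name__ == "__main__":
    # Editorial sketch of the chunking arithmetic described in UsageClient.send():
    # a hypothetical 2000 KiB request carrying 5000 usage records is split into
    # ceil(2048000 / 786432) = 3 chunks of floor(5000 / 3) = 1666 records each,
    # which keeps every individual request under the 768 KiB cap.
    content_length = 2000 * 1024
    number_of_records = 5000
    number_of_chunks = ceil(content_length / 786432)
    chunk_size = floor(number_of_records / number_of_chunks)
    print(number_of_chunks, chunk_size)  # -> 3 1666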
| 2.421875
| 2
|
app/config.py
|
pawan0410/aig-docs
| 0
|
12774921
|
"""
Configuration file
"""
class Config:
"""
Base Configuration
"""
DEBUG = True
SECRET_KEY = r'<KEY>'
SQLALCHEMY_POOL_SIZE = 5
SQLALCHEMY_POOL_TIMEOUT = 120
SQLALCHEMY_POOL_RECYCLE = 280
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USERNAME = r'<EMAIL>'
MAIL_PASSWORD = r'<PASSWORD>'
MAIL_USE_TLS = False
MAIL_USE_SSL = True
FTP_SERVER = 'aigbusiness.in'
FTP_USER = '<EMAIL>'
FTP_PASSWORD = '<PASSWORD>'
class DevelopmentConfig(Config):
"""
Local Development
"""
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'mysql://root@127.0.0.1/aig_docs'
class ProductionConfig(Config):
"""
Production configurations
"""
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'mysql://root:maria@aig2016@192.168.8.37/aig_docs'
app_config = {
'development': DevelopmentConfig,
'production': ProductionConfig
}
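# A hedged usage sketch (editorial addition, not part of the original module):
# in the Flask application factory one would typically select a configuration
# by name, e.g.
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object(app_config['development'])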
| 2.171875
| 2
|
contre/weights.py
|
b2-hive/CONTRE
| 2
|
12774922
|
<filename>contre/weights.py
from numpy import mean
def get_weights(expert_df, normalize_to):
"""Return dataframe with additional weight column.
The weights are calculated with w = q / (1 - q).
This is only valid if the output of the classifier is in the range [0,1).
The weights should be normalized to match the ratio of `data / mc`
of the samples used for training.
Columns are named: q (classifier output), EventType and weight.
Parameters:
        expert_df (pd.DataFrame): dataframe with the classifier output
for MC.
normalize_to (float): normalize the weights,
if 0: no normalization.
"""
key_q = expert_df.keys()[0] # classifier output
key_EventType = expert_df.keys()[1]
assert key_EventType.endswith('EventType')
# rename columns
expert_df = expert_df.rename(
columns={
key_q: "q",
key_EventType: "EventType"}
)
expert_df['weight'] = ((expert_df["q"]) / (1 - expert_df["q"]))
if normalize_to != 0:
weight_mean = mean(expert_df['weight'])
resize = normalize_to / weight_mean
expert_df['weight'] *= resize
return expert_df
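if __name__ == "__main__":
    # Self-contained sketch (editorial addition): a toy expert dataframe whose
    # first column is the classifier output q and whose second column name ends
    # with 'EventType', as get_weights() expects; the column names are made up.
    import pandas as pd
    toy_expert = pd.DataFrame({
        "myTree_q": [0.2, 0.5, 0.8],
        "myTree_EventType": [0, 0, 1],
    })
    weighted = get_weights(toy_expert, normalize_to=1.0)
    # Raw weights are q / (1 - q) = 0.25, 1.0, 4.0; the normalization then
    # rescales them so that their mean equals 1.0.
    print(weighted)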
| 3.515625
| 4
|
xetra/transformers/xetra_transformer.py
|
Kenebehi/xetra-production-etl-pipeline
| 0
|
12774923
|
"""Xetra ETL Component"""
import logging
from datetime import datetime
from typing import NamedTuple
import pandas as pd
from xetra.common.s3 import S3BucketConnector
from xetra.common.meta_process import MetaProcess
class XetraSourceConfig(NamedTuple):
"""
Class for source configuration data
src_first_extract_date: determines the date for extracting the source
src_columns: source column names
src_col_date: column name for date in source
src_col_isin: column name for isin in source
src_col_time: column name for time in source
src_col_start_price: column name for starting price in source
src_col_min_price: column name for minimum price in source
src_col_max_price: column name for maximum price in source
    src_col_traded_vol: column name for traded volume in source
"""
src_first_extract_date: str
src_columns: list
src_col_date: str
src_col_isin: str
src_col_time: str
src_col_start_price: str
src_col_min_price: str
src_col_max_price: str
src_col_traded_vol: str
class XetraTargetConfig(NamedTuple):
"""
Class for target configuration data
trg_col_isin: column name for isin in target
trg_col_date: column name for date in target
trg_col_op_price: column name for opening price in target
trg_col_clos_price: column name for closing price in target
trg_col_min_price: column name for minimum price in target
trg_col_max_price: column name for maximum price in target
trg_col_dail_trad_vol: column name for daily traded volume in target
trg_col_ch_prev_clos: column name for change to previous day's closing price in target
trg_key: basic key of target file
trg_key_date_format: date format of target file key
trg_format: file format of the target file
"""
trg_col_isin: str
trg_col_date: str
trg_col_op_price: str
trg_col_clos_price: str
trg_col_min_price: str
trg_col_max_price: str
trg_col_dail_trad_vol: str
trg_col_ch_prev_clos: str
trg_key: str
trg_key_date_format: str
trg_format: str
class XetraETL():
"""
Reads the Xetra data, transforms and writes the transformed to target
"""
def __init__(self, s3_bucket_src: S3BucketConnector,
s3_bucket_trg: S3BucketConnector, meta_key: str,
src_args: XetraSourceConfig, trg_args: XetraTargetConfig):
"""
        Constructor for XetraETL
:param s3_bucket_src: connection to source S3 bucket
:param s3_bucket_trg: connection to target S3 bucket
:param meta_key: used as self.meta_key -> key of meta file
        :param src_args: NamedTuple class with source configuration data
        :param trg_args: NamedTuple class with target configuration data
"""
self._logger = logging.getLogger(__name__)
self.s3_bucket_src = s3_bucket_src
self.s3_bucket_trg = s3_bucket_trg
self.meta_key = meta_key
self.src_args = src_args
self.trg_args = trg_args
self.extract_date, self.extract_date_list = MetaProcess.return_date_list(
self.src_args.src_first_extract_date, self.meta_key, self.s3_bucket_trg)
self.meta_update_list = [date for date in self.extract_date_list\
if date >= self.extract_date]
def extract(self):
"""
Read the source data and concatenates them to one Pandas DataFrame
:returns:
data_frame: Pandas DataFrame with the extracted data
"""
self._logger.info('Extracting Xetra source files started...')
files = [key for date in self.extract_date_list\
for key in self.s3_bucket_src.list_files_in_prefix(date)]
if not files:
data_frame = pd.DataFrame()
else:
data_frame = pd.concat([self.s3_bucket_src.read_csv_to_df(file)\
for file in files], ignore_index=True)
self._logger.info('Extracting Xetra source files finished.')
return data_frame
def transform_report1(self, data_frame: pd.DataFrame):
"""
Applies the necessary transformation to create report 1
:param data_frame: Pandas DataFrame as Input
:returns:
data_frame: Transformed Pandas DataFrame as Output
"""
if data_frame.empty:
self._logger.info('The dataframe is empty. No transformations will be applied.')
return data_frame
self._logger.info('Applying transformations to Xetra source data for report 1 started...')
# Filtering necessary source columns
data_frame = data_frame.loc[:, self.src_args.src_columns]
# Removing rows with missing values
data_frame.dropna(inplace=True)
# Calculating opening price per ISIN and day
data_frame[self.trg_args.trg_col_op_price] = data_frame\
.sort_values(by=[self.src_args.src_col_time])\
.groupby([
self.src_args.src_col_isin,
self.src_args.src_col_date
])[self.src_args.src_col_start_price]\
.transform('first')
# Calculating closing price per ISIN and day
data_frame[self.trg_args.trg_col_clos_price] = data_frame\
.sort_values(by=[self.src_args.src_col_time])\
.groupby([
self.src_args.src_col_isin,
self.src_args.src_col_date
])[self.src_args.src_col_start_price]\
.transform('last')
# Renaming columns
data_frame.rename(columns={
self.src_args.src_col_min_price: self.trg_args.trg_col_min_price,
self.src_args.src_col_max_price: self.trg_args.trg_col_max_price,
self.src_args.src_col_traded_vol: self.trg_args.trg_col_dail_trad_vol
}, inplace=True)
# Aggregating per ISIN and day -> opening price, closing price,
# minimum price, maximum price, traded volume
data_frame = data_frame.groupby([
self.src_args.src_col_isin,
self.src_args.src_col_date], as_index=False)\
.agg({
self.trg_args.trg_col_op_price: 'min',
self.trg_args.trg_col_clos_price: 'min',
self.trg_args.trg_col_min_price: 'min',
self.trg_args.trg_col_max_price: 'max',
self.trg_args.trg_col_dail_trad_vol: 'sum'})
# Change of current day's closing price compared to the
# previous trading day's closing price in %
data_frame[self.trg_args.trg_col_ch_prev_clos] = data_frame\
.sort_values(by=[self.src_args.src_col_date])\
.groupby([self.src_args.src_col_isin])[self.trg_args.trg_col_op_price]\
.shift(1)
data_frame[self.trg_args.trg_col_ch_prev_clos] = (
data_frame[self.trg_args.trg_col_op_price] \
- data_frame[self.trg_args.trg_col_ch_prev_clos]
) / data_frame[self.trg_args.trg_col_ch_prev_clos ] * 100
# Rounding to 2 decimals
data_frame = data_frame.round(decimals=2)
# Removing the day before extract_date
data_frame = data_frame[data_frame.Date >= self.extract_date].reset_index(drop=True)
self._logger.info('Applying transformations to Xetra source data finished...')
return data_frame
def load(self, data_frame: pd.DataFrame):
"""
Saves a Pandas DataFrame to the target
:param data_frame: Pandas DataFrame as Input
"""
# Creating target key
target_key = (
f'{self.trg_args.trg_key}'
f'{datetime.today().strftime(self.trg_args.trg_key_date_format)}.'
f'{self.trg_args.trg_format}'
)
# Writing to target
self.s3_bucket_trg.write_df_to_s3(data_frame, target_key, self.trg_args.trg_format)
self._logger.info('Xetra target data successfully written.')
# Updating meta file
MetaProcess.update_meta_file(self.meta_update_list, self.meta_key, self.s3_bucket_trg)
self._logger.info('Xetra meta file successfully updated.')
return True
def etl_report1(self):
"""
Extract, transform and load to create report 1
"""
# Extraction
data_frame = self.extract()
# Transformation
data_frame = self.transform_report1(data_frame)
# Load
self.load(data_frame)
return True
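# A hedged wiring sketch (editorial addition, not part of the original module):
# the bucket connectors, meta key, column names and key formats below are
# placeholders and must match the actual S3 layout and report definition.
#
#     source_config = XetraSourceConfig(
#         src_first_extract_date='2022-01-01',
#         src_columns=['ISIN', 'Date', 'Time', 'StartPrice', 'MinPrice', 'MaxPrice', 'TradedVolume'],
#         src_col_date='Date', src_col_isin='ISIN', src_col_time='Time',
#         src_col_start_price='StartPrice', src_col_min_price='MinPrice',
#         src_col_max_price='MaxPrice', src_col_traded_vol='TradedVolume')
#     target_config = XetraTargetConfig(
#         trg_col_isin='isin', trg_col_date='date', trg_col_op_price='opening_price_eur',
#         trg_col_clos_price='closing_price_eur', trg_col_min_price='minimum_price_eur',
#         trg_col_max_price='maximum_price_eur', trg_col_dail_trad_vol='daily_traded_volume',
#         trg_col_ch_prev_clos='change_prev_closing_%', trg_key='xetra_daily_report_',
#         trg_key_date_format='%Y%m%d', trg_format='parquet')
#     etl = XetraETL(s3_bucket_src, s3_bucket_trg, 'meta_file.csv', source_config, target_config)
#     etl.etl_report1()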
| 2.171875
| 2
|
tests/test_stereo.py
|
zkbt/two-eyes
| 0
|
12774924
|
<filename>tests/test_stereo.py<gh_stars>0
from twoeyes import Stereo
from twoeyes.imports import data_directory, os
example_directory = 'two-eyes-examples'
try:
os.mkdir(example_directory)
except:
pass
def test_stereo():
s = Stereo(os.path.join(data_directory, 'left.jpg'),
os.path.join(data_directory, 'right.jpg'))
s.to_sidebyside(directory=example_directory)
s.to_anaglyph(directory=example_directory)
s.to_gif(directory=example_directory)
| 2.46875
| 2
|
60-69/60_Permutation Sequence.py
|
yanchdh/LeetCode
| 2
|
12774925
|
<filename>60-69/60_Permutation Sequence.py
# -*- coding:utf-8 -*-
# https://leetcode.com/problems/permutation-sequence/description/
class Solution(object):
def getPermutation(self, n, k):
"""
:type n: int
:type k: int
:rtype: str
"""
factorial = [1]
for i in range(1, n):
factorial.append(i * factorial[-1])
num = [i for i in range(1, n + 1)]
ret = []
for i in range(n - 1, -1, -1):
m = factorial[i]
            ret.append(num.pop((k - 1) // m))
k = ((k - 1) % m) + 1
return ''.join(map(str, ret))
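if __name__ == '__main__':
    # Worked example (editorial addition): for n = 3 the permutations in order
    # are "123", "132", "213", "231", "312", "321", so the 3rd one is "213".
    # The loop above peels off one digit per position: with k = 3 and 2! = 2
    # permutations per leading digit, index (3 - 1) // 2 = 1 selects "2" first.
    solution = Solution()
    print(solution.getPermutation(3, 3))  # -> 213
    print(solution.getPermutation(4, 9))  # -> 2314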
| 3.515625
| 4
|
vuln_check/wapitiCore/net/web.py
|
erick-maina/was
| 2
|
12774926
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of the Wapiti project (http://wapiti.sourceforge.io)
# Copyright (C) 2008-2020 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from urllib.parse import urlparse, quote_plus, unquote, quote
import posixpath
from copy import deepcopy
import sys
def urlencode(query, safe='', encoding=None, errors=None, quote_via=quote_plus):
"""Encode a dict or sequence of two-element tuples into a URL query string.
If the query arg is a sequence of two-element tuples, the order of the
parameters in the output will match the order of parameters in the
input.
The components of a query arg may each be either a string or a bytes type.
The safe, encoding, and errors parameters are passed down to the function
specified by quote_via (encoding and errors only if a component is a str).
"""
if hasattr(query, "items"):
query = query.items()
else:
# It's a bother at times that strings and string-like objects are
# sequences.
try:
# non-sequence items should not work with len()
# non-empty strings will fail this
if len(query) and not isinstance(query[0], tuple):
raise TypeError
# Zero-length sequences of all types will get here and succeed,
# but that's a minor nit. Since the original implementation
# allowed empty dicts that type of behavior probably should be
# preserved for consistency
except TypeError:
ty, va, tb = sys.exc_info()
raise TypeError("not a valid non-string sequence "
"or mapping object").with_traceback(tb)
key_value_pair = []
for k, v in query:
if isinstance(k, bytes):
k = quote_via(k, safe)
else:
k = quote_via(str(k), safe, encoding, errors)
if v is None:
key_value_pair.append(k)
elif isinstance(v, bytes):
v = quote_via(v, safe)
key_value_pair.append(k + '=' + v)
elif isinstance(v, str):
v = quote_via(v, safe, encoding, errors)
key_value_pair.append(k + '=' + v)
else:
try:
# Is this a sufficient test for sequence-ness?
x = len(v)
except TypeError:
# not a sequence
v = quote_via(str(v), safe, encoding, errors)
key_value_pair.append(k + '=' + v)
else:
# loop over the sequence
for elt in v:
if isinstance(elt, bytes):
elt = quote_via(elt, safe)
else:
elt = quote_via(str(elt), safe, encoding, errors)
key_value_pair.append(k + '=' + elt)
return '&'.join(key_value_pair)
def parse_qsl(qs, strict_parsing=False, encoding='utf-8', errors='replace', max_num_fields=None):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
max_num_fields: int. If set, then throws a ValueError
if there are more than n fields read by parse_qsl().
Returns a list, as G-d intended.
"""
# If max_num_fields is defined then check that the number of fields
# is less than max_num_fields. This prevents a memory exhaustion DOS
# attack via post bodies with many fields.
if max_num_fields is not None:
num_fields = 1 + qs.count('&') + qs.count(';')
if max_num_fields < num_fields:
raise ValueError('Max number of fields exceeded')
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
nv.append(None)
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
if nv[1]:
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
else:
value = nv[1]
r.append((name, value))
return r
def shell_escape(s: str):
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
s = s.replace('$', '\\$')
s = s.replace('!', '\\!')
s = s.replace('`', '\\`')
return s
class Request:
def __init__(
self, path: str, method: str = "",
get_params: list = None, post_params: list = None, file_params: list = None,
encoding: str = "UTF-8", enctype: str = "",
referer: str = "", link_depth: int = 0):
"""Create a new Request object.
Takes the following arguments:
path : The path of the HTTP resource on the server. It can contain a query string.
get_params : A list of key/value parameters (each one is a list of two string).
Each string should already be urlencoded in the good encoding format.
post_params : Same structure as above but specify the parameters sent in the HTTP body.
        file_params : Same as above except the values are a tuple (filename, file_content).
encoding : A string specifying the encoding used to send data to this URL.
            Don't confuse it with the encoding of the webpage pointed to by the Request.
referer : The URL from which the current Request was found.
"""
self._resource_path = path.split("#")[0]
# Most of the members of a Request object are immutable so we compute
# the data only one time (when asked for) and we keep it in memory for less
# calculations in those "cached" vars.
self._cached_url = ""
self._cached_get_keys = None
self._cached_post_keys = None
self._cached_file_keys = None
self._cached_encoded_params = None
self._cached_encoded_data = None
self._cached_encoded_files = None
self._cached_hash = None
self._cached_hash_params = None
self._status = None
if not method:
# For lazy
if post_params or file_params:
self._method = "POST"
else:
self._method = "GET"
else:
self._method = method
self._enctype = ""
if self._method == "POST":
if enctype:
self._enctype = enctype.lower().strip()
else:
if file_params:
self._enctype = "multipart/form-data"
else:
self._enctype = "application/x-www-form-urlencoded"
# same structure as _get_params, see below
if not post_params:
# None or empty string or empty list
self._post_params = []
else:
if isinstance(post_params, list):
# Non empty list
self._post_params = deepcopy(post_params)
elif isinstance(post_params, str):
if "urlencoded" in self.enctype or self.is_multipart:
# special case of multipart is dealt when sending request
self._post_params = []
if len(post_params):
for kv in post_params.split("&"):
if kv.find("=") > 0:
self._post_params.append(kv.split("=", 1))
else:
# ?param without value
self._post_params.append([kv, None])
else:
# must be something like application/json or text/xml
self._post_params = post_params
# eg: files = [['file_field', ('file_name', 'file_content')]]
if not file_params:
self._file_params = []
else:
if isinstance(file_params, list):
self._file_params = deepcopy(file_params)
else:
self._file_params = file_params
# eg: get = [['id', '25'], ['color', 'green']]
if not get_params:
self._get_params = []
if "?" in self._resource_path:
query_string = urlparse(self._resource_path).query
self._get_params = [[k, v] for k, v in parse_qsl(query_string)]
self._resource_path = self._resource_path.split("?")[0]
else:
if isinstance(get_params, list):
self._resource_path = self._resource_path.split("?")[0]
self._get_params = deepcopy(get_params)
else:
self._get_params = get_params
self._encoding = encoding
self._referer = referer
self._link_depth = link_depth
parsed = urlparse(self._resource_path)
self._file_path = parsed.path
self._hostname = parsed.netloc
self._port = 80
if parsed.port is not None:
self._port = parsed.port
elif parsed.scheme == "https":
self._port = 443
self._headers = None
self._start_time = None
self._duration = -1
self._size = 0
self._path_id = None
# TODO: hashable objects should be read-only. Currently the Mutator get a deepcopy of params to play with but
# having read-only params in Request class would be more Pythonic. More work on the Mutator in a future version ?
def __hash__(self):
if self._cached_hash is None:
get_kv = tuple([tuple(param) for param in self._get_params])
if isinstance(self._post_params, list):
post_kv = tuple([tuple(param) for param in self._post_params])
else:
post_kv = self._enctype + str(len(self._post_params))
file_kv = tuple([tuple([param[0], param[1][0]]) for param in self._file_params])
self._cached_hash = hash((self._method, self._resource_path, get_kv, post_kv, file_kv))
return self._cached_hash
def __eq__(self, other):
if not isinstance(other, Request):
return NotImplemented
if self._method != other.method:
return False
if self._resource_path != other.path:
return False
return hash(self) == hash(other)
def __lt__(self, other):
if not isinstance(other, Request):
return NotImplemented
if self.url < other.url:
return True
else:
if self.url == other.url:
return self.encoded_data < other.encoded_data
return False
def __le__(self, other):
if not isinstance(other, Request):
return NotImplemented
if self.url < other.url:
return True
elif self.url == other.url:
return self.encoded_data <= other.encoded_data
return False
def __ne__(self, other):
if not isinstance(other, Request):
return NotImplemented
if self.method != other.method:
return True
if self._resource_path != other.path:
return True
return hash(self) != hash(other)
def __gt__(self, other):
if not isinstance(other, Request):
return NotImplemented
if self.url > other.url:
return True
elif self.url == other.url:
return self.encoded_data > other.encoded_data
return False
def __ge__(self, other):
if not isinstance(other, Request):
return NotImplemented
if self.url > other.url:
return True
elif self.url == other.url:
return self.encoded_data >= other.encoded_data
return False
def __len__(self):
if isinstance(self._post_params, list):
return len(self.get_params) + len(self._post_params) + len(self._file_params)
else:
return len(self.get_params) + len(self._file_params)
@staticmethod
def _encoded_keys(params):
return "&".join([quote(key, safe='%') for key in sorted(kv[0] for kv in params)])
def __repr__(self):
if self._get_params:
buff = "{0} {1} ({2})".format(self._method, self.url, self._link_depth)
else:
buff = "{0} {1} ({2})".format(self._method, self._resource_path, self._link_depth)
if self._post_params:
buff += "\n\tdata: {}".format(self.encoded_data.replace("\n", "\n\t"))
if self._file_params:
buff += "\n\tfiles: {}".format(self.encoded_files)
return buff
def http_repr(self, left_margin=" "):
rel_url = self.url.split('/', 3)[3]
http_string = "{3}{0} /{1} HTTP/1.1\n{3}Host: {2}\n".format(
self._method,
rel_url,
self._hostname,
left_margin
)
if self._referer:
http_string += "{}Referer: {}\n".format(left_margin, self._referer)
if self._file_params:
boundary = "------------------------boundarystring"
http_string += "{}Content-Type: multipart/form-data; boundary={}\n\n".format(left_margin, boundary)
for field_name, field_value in self._post_params:
http_string += (
"{3}{0}\n{3}Content-Disposition: form-data; "
"name=\"{1}\"\n\n{3}{2}\n"
).format(boundary, field_name, field_value, left_margin)
for field_name, field_value in self._file_params:
http_string += (
"{3}{0}\n{3}Content-Disposition: form-data; name=\"{1}\"; filename=\"{2}\"\n\n"
"{3}{4}\n"
).format(
boundary,
field_name,
field_value[0],
left_margin,
field_value[1].replace("\n", "\n" + left_margin).strip()
)
http_string += "{0}{1}--\n".format(left_margin, boundary)
elif self._post_params:
if "urlencoded" in self.enctype:
http_string += "{}Content-Type: application/x-www-form-urlencoded\n".format(left_margin)
http_string += "\n{}{}".format(left_margin, self.encoded_data)
else:
http_string += "{}Content-Type: {}\n".format(left_margin, self.enctype)
http_string += "\n{}{}".format(
left_margin,
self.encoded_data.replace("\n", "\n" + left_margin).strip()
)
return http_string.rstrip()
@property
def curl_repr(self):
curl_string = "curl \"{0}\"".format(shell_escape(self.url))
if self._referer:
curl_string += " -e \"{0}\"".format(shell_escape(self._referer))
if self._file_params:
# POST with multipart
for field_name, field_value in self._post_params:
curl_string += " -F \"{0}\"".format(shell_escape("{0}={1}".format(field_name, field_value)))
for field_name, field_value in self._file_params:
curl_upload_kv = "{0}=@your_local_file;filename={1}".format(field_name, field_value[0])
curl_string += " -F \"{0}\"".format(shell_escape(curl_upload_kv))
elif self._post_params:
# POST either urlencoded
if "urlencoded" in self._enctype:
curl_string += " -d \"{0}\"".format(shell_escape(self.encoded_data))
else:
# Or raw blob
curl_string += " -H \"Content-Type: {}\" -d @payload_file".format(self._enctype)
return curl_string
def set_headers(self, response_headers):
"""Set the HTTP headers received while requesting the resource"""
self._headers = response_headers
@property
def size(self):
return self._size
@size.setter
def size(self, value: int):
self._size = value
@property
def duration(self):
return self._duration
@duration.setter
def duration(self, value: float):
self._duration = value
@property
def status(self) -> int:
return self._status
@status.setter
def status(self, value: int):
self._status = value
@property
def url(self) -> str:
if not self._cached_url:
if self._get_params:
self._cached_url = "{0}?{1}".format(
self._resource_path,
self._encode_params(self._get_params)
)
else:
self._cached_url = self._resource_path
return self._cached_url
@property
def hostname(self) -> str:
return self._hostname
@property
def port(self):
return self._port
@property
def path(self):
return self._resource_path
@property
def file_path(self):
return self._file_path
@property
def is_root(self) -> bool:
        return self._file_path == "/"
@property
def file_ext(self) -> str:
return posixpath.splitext(self._file_path)[1].lower()
@property
def file_name(self) -> str:
return posixpath.basename(self._file_path)
@property
def dir_name(self):
if self.file_name:
return posixpath.dirname(self._resource_path) + "/"
return self._resource_path
@property
def parent_dir(self):
if self.file_name:
return posixpath.dirname(self._resource_path) + "/"
elif self.is_root:
return self._resource_path
else:
return posixpath.dirname(posixpath.dirname(self._resource_path)) + "/"
@property
def method(self) -> str:
return self._method
@property
def encoding(self) -> str:
return self._encoding
@property
def enctype(self) -> str:
return self._enctype
@property
def is_multipart(self) -> bool:
return "multipart" in self._enctype
@property
def headers(self):
return self._headers
@property
def referer(self) -> str:
return self._referer
@property
def link_depth(self) -> int:
return self._link_depth
@link_depth.setter
def link_depth(self, value: int):
self._link_depth = value
# To prevent errors, always return a deepcopy of the internal lists
@property
def get_params(self):
# Return a list of lists containing two elements (parameter name and parameter value)
return deepcopy(self._get_params)
@property
def post_params(self):
if isinstance(self._post_params, list):
return deepcopy(self._post_params)
return self._post_params
@property
def file_params(self):
return deepcopy(self._file_params)
@property
def get_keys(self):
if len(self._get_params):
return list(zip(*self._get_params))[0]
return ()
@property
def post_keys(self):
if isinstance(self._post_params, list) and len(self._post_params):
return list(zip(*self._post_params))[0]
return ()
@property
def file_keys(self):
if len(self._file_params):
return list(zip(*self._file_params))[0]
return ()
@staticmethod
def _encode_params(params):
if not params:
return ""
if not isinstance(params, list):
return params
key_values = []
for k, v in params:
if isinstance(v, tuple) or isinstance(v, list):
key_values.append((k, v[0]))
else:
# May be empty string or None but will be processed differently by our own urlencode()
key_values.append((k, v))
return urlencode(key_values)
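    # Illustrative example of _encode_params (hypothetical values):
    #   _encode_params([["q", "search"], ["page", "2"]])  ->  "q=search&page=2"
    # Tuple/list values are reduced to their first element before encoding, and
    # the handling of empty or None values depends on the custom urlencode()
    # imported elsewhere in this module, so it may differ from the stdlib version.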
@property
def encoded_params(self):
return self._encode_params(self._get_params)
@property
def encoded_data(self):
"""Return a raw string of key/value parameters for POST requests"""
return self._encode_params(self._post_params)
@property
def encoded_files(self):
return self._encode_params(self._file_params)
@property
def encoded_get_keys(self):
if self._cached_get_keys is None:
self._cached_get_keys = self._encoded_keys(self._get_params)
return self._cached_get_keys
@property
def encoded_post_keys(self):
if self._cached_post_keys is None and "urlencoded" in self.enctype:
self._cached_post_keys = self._encoded_keys(self._post_params)
return self._cached_post_keys
@property
def encoded_file_keys(self):
if self._cached_file_keys is None:
self._cached_file_keys = self._encoded_keys(self._file_params)
return self._cached_file_keys
@property
def encoded_keys(self):
return "{}|{}|{}".format(self.encoded_get_keys, self.encoded_post_keys, self.encoded_file_keys)
@property
def pattern(self):
return "{}?{}".format(self.path, self.encoded_keys)
@property
def hash_params(self):
if self._cached_hash_params is None:
self._cached_hash_params = hash(self.pattern)
return self._cached_hash_params
@property
def path_id(self):
return self._path_id
@path_id.setter
def path_id(self, value: int):
self._path_id = value
| 2.390625
| 2
|
spatialstats/polyspectra/cuda_powerspectrum.py
|
mjo22/mobstats
| 10
|
12774927
|
"""
Implementation using CuPy acceleration.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
from time import time
import cupy as cp
from cupyx.scipy import fft as cufft
def powerspectrum(*u, average=True, diagnostics=False,
kmin=None, kmax=None, npts=None,
compute_fft=True, compute_sqr=True,
double=True, bench=False, **kwargs):
"""
See the documentation for the :ref:`CPU version<powerspectrum>`.
Parameters
----------
u : `np.ndarray`
Scalar or vector field.
If vector data, pass arguments as ``u1, u2, ..., un``
where ``ui`` is the ith vector component.
Each ``ui`` can be 1D, 2D, or 3D, and all must have the
same ``ui.shape`` and ``ui.dtype``.
average : `bool`, optional
If ``True``, average over values in a given
bin and multiply by the bin volume.
If ``False``, compute the sum.
diagnostics : `bool`, optional
Return the standard deviation and number of points
in a particular radial bin.
kmin : `int` or `float`, optional
Minimum wavenumber in power spectrum bins.
If ``None``, ``kmin = 1``.
kmax : `int` or `float`, optional
Maximum wavenumber in power spectrum bins.
If ``None``, ``kmax = max(u.shape)//2``.
npts : `int`, optional
Number of modes between ``kmin`` and ``kmax``,
inclusive.
If ``None``, ``npts = kmax-kmin+1``.
compute_fft : `bool`, optional
If ``False``, do not take the FFT of the input data.
FFTs should not be passed with the zero-frequency
component in the center.
compute_sqr : `bool`, optional
If ``False``, sum the real part of the FFT. This can be
useful for purely real FFTs, where the sign of the
FFT is useful information. If ``True``, take the square
as usual.
double : `bool`, optional
If ``False``, calculate FFTs in single precision.
Useful for saving memory.
bench : `bool`, optional
Print message for time of calculation.
kwargs
Additional keyword arguments passed to
``cupyx.scipy.fft.fftn`` or ``cupyx.scipy.fft.rfftn``.
Returns
-------
spectrum : `np.ndarray`, shape `(npts,)`
Radially averaged power spectrum :math:`P(k)`.
kn : `np.ndarray`, shape `(npts,)`
Left edges of radial bins :math:`k`.
counts : `np.ndarray`, shape `(npts,)`, optional
Number of points :math:`N_k` in each bin.
vol : `np.ndarray`, shape `(npts,)`, optional
Volume :math:`V_k` of each bin.
stdev : `np.ndarray`, shape `(npts,)`, optional
Standard deviation multiplied with :math:`V_k`
in each bin.
"""
if bench:
t0 = time()
shape = u[0].shape
ndim = u[0].ndim
ncomp = len(u)
N = max(u[0].shape)
if np.issubdtype(u[0].dtype, np.floating):
real = True
dtype = cp.float64 if double else cp.float32
else:
real = False
dtype = cp.complex128 if double else cp.complex64
if ndim not in [1, 2, 3]:
raise ValueError("Dimension of image must be 1, 2, or 3.")
# Get memory pools
mempool = cp.get_default_memory_pool()
pinned_mempool = cp.get_default_pinned_memory_pool()
    # Compute power spectral density with memory efficiency
density = None
comp = cp.empty(shape, dtype=dtype)
for i in range(ncomp):
temp = cp.asarray(u[i], dtype=dtype)
comp[...] = temp
del temp
if compute_fft:
fft = _cufftn(comp, **kwargs)
else:
fft = comp
if density is None:
fftshape = fft.shape
density = cp.zeros(fft.shape)
if compute_sqr:
density[...] += _mod_squared(fft)
else:
density[...] += cp.real(fft)
del fft
mempool.free_all_blocks()
pinned_mempool.free_all_blocks()
# Need to double count if using rfftn
if real and compute_fft:
density[...] *= 2
# Get radial coordinates
kr = cp.asarray(_kmag_sampling(fftshape, real=real).astype(np.float32))
# Flatten arrays
kr = kr.ravel()
density = density.ravel()
# Get minimum and maximum k for binning if not given
if kmin is None:
kmin = 1
if kmax is None:
kmax = int(N/2)
if npts is None:
npts = kmax-kmin+1
# Generate bins
kn = cp.linspace(kmin, kmax, npts, endpoint=True) # Left edges of bins
dk = kn[1] - kn[0]
# Radially average power spectral density
if ndim == 1:
fac = 2*np.pi
elif ndim == 2:
fac = 4*np.pi
elif ndim == 3:
fac = 4./3.*np.pi
spectrum = cp.zeros_like(kn)
stdev = cp.zeros_like(kn)
vol = cp.zeros_like(kn)
counts = cp.zeros(kn.shape, dtype=np.int64)
for i, ki in enumerate(kn):
ii = cp.where(cp.logical_and(kr >= ki, kr < ki+dk))
samples = density[ii]
vk = fac*cp.pi*((ki+dk)**ndim-(ki)**ndim)
if average:
spectrum[i] = vk*cp.mean(samples)
else:
spectrum[i] = cp.sum(samples)
if diagnostics:
Nk = samples.size
stdev[i] = vk * cp.std(samples, ddof=1)
vol[i] = vk
counts[i] = Nk
del density, kr
mempool.free_all_blocks()
pinned_mempool.free_all_blocks()
if bench:
print(f"Time: {time() - t0:.04f} s")
result = [spectrum.get(), kn.get()]
if diagnostics:
result.extend([counts.get(), vol.get(), stdev.get()])
return tuple(result)
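# Illustrative usage sketch (comments only; running it requires CuPy and a GPU,
# and `field` is a hypothetical NumPy array such as np.random.rand(64, 64, 64)):
#   spectrum, kn = powerspectrum(field)
#   spectrum, kn, counts, vol, stdev = powerspectrum(field, diagnostics=True)
# Note the diagnostics ordering matches the `result` list built above:
# (spectrum, kn, counts, vol, stdev).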
def _cufftn(data, overwrite_input=False, **kwargs):
"""
Calculate the N-dimensional fft of an image
with memory efficiency
"""
# Get memory pools
mempool = cp.get_default_memory_pool()
pinned_mempool = cp.get_default_pinned_memory_pool()
# Real vs. Complex data
if data.dtype in [cp.float32, cp.float64]:
value_type = 'R2C'
fftn = cufft.rfftn
elif data.dtype in [cp.complex64, cp.complex128]:
value_type = 'C2C'
fftn = cufft.fftn
else:
raise ValueError(f"{data.dtype} is unrecognized data type.")
# Get plan for computing fft
plan = cufft.get_fft_plan(data, value_type=value_type)
# Compute fft
with plan:
fft = fftn(data, overwrite_x=overwrite_input, **kwargs)
# Release memory
del plan
mempool.free_all_blocks()
pinned_mempool.free_all_blocks()
return fft
@cp.fuse(kernel_name='mod_squared')
def _mod_squared(a):
return cp.real(a*cp.conj(a))
def _kmag_sampling(shape, real=True):
"""
Generates the |k| coordinate system.
"""
if real:
freq = np.fft.rfftfreq
s = list(shape)
s[-1] = (s[-1]-1)*2
shape = s
else:
freq = np.fft.fftfreq
ndim = len(shape)
kmag = np.zeros(shape)
ksqr = []
for i in range(ndim):
ni = shape[i]
sample = freq(ni) if i == ndim - 1 else np.fft.fftfreq(ni)
if real:
sample = np.abs(sample)
k1d = sample * ni
ksqr.append(k1d * k1d)
if ndim == 1:
ksqr = ksqr[0]
elif ndim == 2:
ksqr = np.add.outer(ksqr[0], ksqr[1])
elif ndim == 3:
ksqr = np.add.outer(np.add.outer(ksqr[0], ksqr[1]), ksqr[2])
kmag = np.sqrt(ksqr)
return kmag
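# Worked example of the sampling logic above: for a real 2-D field of shape
# (4, 4) the rfftn output has shape (4, 3), so _kmag_sampling((4, 3), real=True)
# reconstructs ni = 4 on the last axis, uses rfftfreq(4)*4 = [0, 1, 2] there and
# |fftfreq(4)|*4 = [0, 1, 2, 1] on the first axis, yielding a (4, 3) |k| grid.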
if __name__ == '__main__':
import pyFC
from matplotlib import pyplot as plt
dim = 100
fc = pyFC.LogNormalFractalCube(
ni=dim, nj=dim, nk=dim, kmin=10, mean=1, beta=-5/3)
fc.gen_cube()
data = fc.cube
    psd, kn, N, vol, stdev = powerspectrum(data, diagnostics=True)
print(psd.mean())
def zero_log10(s):
"""
Takes logarithm of an array while retaining the zeros
"""
sp = np.where(s > 0., s, 1)
return np.log10(sp)
log_psd = zero_log10(psd)
log_kn = zero_log10(kn)
idxs = np.where(log_kn >= np.log10(fc.kmin))
m, b = np.polyfit(log_kn[idxs], log_psd[idxs], 1)
    plt.errorbar(log_kn, log_psd,
                 label=rf'PSD, $\beta = {fc.beta}$', color='g')
plt.plot(log_kn[idxs], m*log_kn[idxs]+b,
label=rf'Fit, $\beta = {m}$', color='k')
plt.ylabel(r"$\log{P(k)}$")
plt.xlabel(r"$\log{k}$")
plt.legend(loc='upper right')
plt.show()
| 2.453125
| 2
|
process.py
|
fgassert/grace-processing
| 0
|
12774928
|
#!/usr/bin/env python
import numpy as np
import netCDF4 as nc
import scipy.stats as stats
import rasterio as rio
from rasterio import Affine as A
NETCDFS=['jpl.nc','csr.nc','gfz.nc']
SCALER='scaler.nc'
SLOPE='slope.csv'
R2='r2.csv'
P='p.csv'
ERR='err.csv'
OUT='grace.tif'
def main():
# load and average netcdfs
arr = None
for f in NETCDFS:
ds = nc.Dataset(f,'r')
if arr is None:
            print(ds.variables.keys())
arr = np.asarray(ds.variables['lwe_thickness']) / len(NETCDFS)
else:
arr += np.asarray(ds.variables['lwe_thickness']) / len(NETCDFS)
# multiply by scale factor
ds = nc.Dataset(SCALER,'r')
    print(ds.variables.keys())
scaler = np.asarray(ds.variables['SCALE_FACTOR'])
    print(scaler.shape)
arr = arr*scaler
# extract error grids
m_err = np.asarray(ds.variables['MEASUREMENT_ERROR'])
l_err = np.asarray(ds.variables['LEAKAGE_ERROR'])
t_err = np.sqrt(m_err*m_err + l_err*l_err)
# compute slopes, coefficients
    print(arr.shape)
slope_arr = np.zeros(arr.shape[1:])
r2_arr = np.zeros(arr.shape[1:])
p_arr = np.zeros(arr.shape[1:])
    print(slope_arr.shape)
time = np.arange(arr.shape[0])
    print(time.shape)
for i in range(arr.shape[1]):
for j in range(arr.shape[2]):
            b1, b0, r, p, sd = stats.linregress(time, arr[:, i, j])
            slope_arr[i, j] = b1
            r2_arr[i, j] = r ** 2  # linregress returns the correlation coefficient r, not r^2
            p_arr[i, j] = p
# dump to csv
np.savetxt(SLOPE,slope_arr,delimiter=',')
np.savetxt(R2,r2_arr,delimiter=',')
np.savetxt(P,p_arr,delimiter=',')
np.savetxt(ERR,t_err,delimiter=',')
# rescale to WGS84 and dump to tif bands
rows = arr.shape[1]
cols = arr.shape[2]
d = 1
transform = A.translation(-cols*d/2,-rows*d/2) * A.scale(d,d)
    print(transform)
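    # Sketch of what this affine does (for the default d = 1): pixel (col, row)
    # maps to (-cols/2 + col, -rows/2 + row), i.e. roughly (-180, -90) at the
    # origin for a 360x180 grid. Note, as an aside, that a conventional north-up
    # GeoTIFF would normally use a negative y scale (A.scale(d, -d)) anchored at
    # the top-left corner instead.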
    # roll along the longitude (column) axis to recentre the grid
    slope_arr = np.roll(slope_arr.astype(rio.float64), 180, axis=1)
    r2_arr = np.roll(r2_arr.astype(rio.float64), 180, axis=1)
    p_arr = np.roll(p_arr.astype(rio.float64), 180, axis=1)
    t_err = np.roll(t_err.astype(rio.float64), 180, axis=1)
with rio.open(OUT, 'w',
'GTiff',
width=cols,
height=rows,
dtype=rio.float64,
crs={'init': 'EPSG:4326'},
transform=transform,
count=4) as out:
out.write_band(1, slope_arr)
out.write_band(2, r2_arr)
out.write_band(3, p_arr)
out.write_band(4, t_err)
if __name__ == "__main__":
main()
| 2.109375
| 2
|
HW5_LeNet/src/config.py
|
Citing/CV-Course
| 4
|
12774929
|
<filename>HW5_LeNet/src/config.py<gh_stars>1-10
datasetDir = '../dataset/'
model = '../model/lenet'
modelDir = '../model/'
epochs = 20       # number of training epochs
batchSize = 128   # mini-batch size
rate = 0.001      # learning rate
mu = 0            # mean of the initial weight distribution (assumed from the usual LeNet setup)
sigma = 0.1       # standard deviation of the initial weight distribution (assumed)
| 1.140625
| 1
|
algorithms/refinement/parameterisation/scan_varying_model_parameters.py
|
jbeilstenedmands/dials
| 0
|
12774930
|
from __future__ import absolute_import, division, print_function
from dials.algorithms.refinement.parameterisation.model_parameters import (
Parameter,
ModelParameterisation,
)
import abc
from scitbx.array_family import flex
from dials_refinement_helpers_ext import GaussianSmoother as GS
# reusable PHIL string for options affecting scan-varying parameterisation
phil_str = """
smoother
.help = "Options that affect scan-varying parameterisation"
.expert_level = 1
{
interval_width_degrees = 36.0
.help = "Width of scan between checkpoints in degrees. Can be set to Auto."
.type = float(value_min=0.)
absolute_num_intervals = None
.help = "Number of intervals between checkpoints if scan_varying"
"refinement is requested. If set, this overrides"
"interval_width_degrees"
.type = int(value_min=1)
}
"""
class ScanVaryingParameterSet(Parameter):
"""Testing a class for a scan-varying parameter, in which values at rotation
angle phi may be derived using smoothed interpolation between checkpoint
values stored here. Externally, this is presented as a set of parameters.
num_samples is the number of checkpoints. Other arguments are as Parameter.
"""
def __init__(
self,
value,
num_samples=5,
axis=None,
ptype=None,
name="ScanVaryingParameterSet",
):
assert num_samples >= 2 # otherwise use scan-independent parameterisation
value = [value] * num_samples
self._name_stem = name
name = [
e + "_sample%d" % i for i, e in enumerate([self._name_stem] * num_samples)
]
Parameter.__init__(self, value, axis, ptype, name)
self._esd = [None] * num_samples
self._num_samples = num_samples
return
def __len__(self):
return self._num_samples
@property
def value(self):
return self._value
@value.setter
def value(self, val):
assert len(val) == len(self)
self._value = val
self._esd = [None] * len(self)
@property
def name_stem(self):
return self._name_stem
def __str__(self):
msg = "ScanVaryingParameterSet " + self.name_stem + ":\n"
try:
msg += " Type: " + self.param_type + "\n"
except TypeError:
msg += " Type: " + str(self.param_type) + "\n"
try:
msg += " Axis: (%5.3f, %5.3f, %5.3f)" % tuple(self.axis) + "\n"
except TypeError:
msg += " Axis: " + str(self.axis) + "\n"
vals = ", ".join(["%5.3f"] * len(self)) % tuple(self.value)
msg += " Values: " + vals + "\n"
try:
sigs = ", ".join(["%5.3f"] * len(self)) % tuple(self.esd)
except TypeError:
sigs = ", ".join([str(e) for e in self.esd])
msg += " Sigmas: (" + sigs + ") \n"
return msg
# wrap the C++ GaussianSmoother, modifying return values to emulate the
# old Python version.
class GaussianSmoother(GS):
"""A Gaussian smoother for ScanVaryingModelParameterisations"""
def value_weight(self, x, param):
result = super(GaussianSmoother, self).value_weight(x, flex.double(param.value))
return (result.get_value(), result.get_weight(), result.get_sumweight())
def multi_value_weight(self, x, param):
result = super(GaussianSmoother, self).multi_value_weight(
flex.double(x), flex.double(param.value)
)
return (result.get_value(), result.get_weight(), result.get_sumweight())
def positions(self):
return list(super(GaussianSmoother, self).positions())
class ScanVaryingModelParameterisation(ModelParameterisation):
"""Extending ModelParameterisation to deal with ScanVaryingParameterSets.
For simplicity at this stage it is decreed that a
ScanVaryingModelParameterisation consists only of ScanVaryingParameterSets.
There is no combination with normal Parameters. This could be changed later,
but there may be no reason to do so, hence starting with this simpler
design"""
# The initial state is here equivalent to the initial state of the
# time static version of the parameterisation, as it is assumed that we
# start with a flat model wrt rotation angle.
__metaclass__ = abc.ABCMeta
def __init__(
self,
model,
initial_state,
param_sets,
smoother,
experiment_ids,
is_multi_state=False,
):
ModelParameterisation.__init__(
self, model, initial_state, param_sets, experiment_ids, is_multi_state
)
self._num_sets = len(self._param)
self._num_samples = len(param_sets[0])
self._total_len = self._num_samples * self._num_sets
# ensure all internal parameter sets have the same number of parameters
for param in self._param[1:]:
assert len(param) == self._num_samples
# Link up with an object that will perform the smoothing.
self._smoother = smoother
assert self._smoother.num_values() == self._num_samples
# define an attribute for caching the variance-covariance matrix of
# parameters
self._var_cov = None
return
def num_samples(self):
"""the number of samples of each parameter"""
return self._num_samples
def num_free(self):
"""the number of free parameters"""
if self._num_free is None:
self._num_free = (
sum(not x.get_fixed() for x in self._param) * self._num_samples
)
return self._num_free
# def num_total(self): inherited unchanged from ModelParameterisation
def num_sets(self):
"""the number of parameter sets"""
return self._num_sets
@abc.abstractmethod
def compose(self, t):
"""compose the model state at image number t from its initial state and
its parameter list. Also calculate the derivatives of the state wrt
each parameter in the list.
Unlike ModelParameterisation, does not automatically update the actual
model class. This should be done once refinement is complete."""
pass
def get_param_vals(self, only_free=True):
"""export the values of the internal list of parameters as a
sequence of floats.
If only_free, the values of fixed parameters are filtered from the
returned list. Otherwise all parameter values are returned"""
if only_free:
return [x for e in self._param if not e.get_fixed() for x in e.value]
else:
return [x for e in self._param for x in e.value]
def get_param_names(self, only_free=True):
"""export the names of the internal list of parameters
If only_free, the names of fixed parameters are filtered from the
returned list. Otherwise all parameter names are returned"""
# FIXME combine functionality with get_param_vals by returning a named,
# ordered list?
if only_free:
return [x for e in self._param if not e.get_fixed() for x in e.name]
else:
return [x for e in self._param for x in e.name]
def set_param_vals(self, vals):
"""set the values of the internal list of parameters from a
sequence of floats.
First break the sequence into sub sequences of the same length
as the _num_samples.
Only free parameter sets can have values assigned, therefore the
length of vals must equal the value of num_free"""
assert len(vals) == self.num_free()
i = 0
for p in self._param:
if not p.get_fixed(): # only set the free parameter sets
new_vals = vals[i : i + self._num_samples]
p.value = new_vals
i += self._num_samples
# compose with the new parameter values
# self.compose()
return
def set_param_esds(self, esds):
"""set the estimated standard deviations of the internal list of parameters
from a sequence of floats.
First break the sequence into sub sequences of the same length
as the _num_samples.
Only free parameters can be set, therefore the length of esds must equal
the value of num_free"""
assert len(esds) == self.num_free()
i = 0
for p in self._param:
if not p.get_fixed(): # only set the free parameter sets
new_esds = esds[i : i + self._num_samples]
p.esd = new_esds
i += self._num_samples
return
# def get_fixed(self): inherited unchanged from ModelParameterisation
# def set_fixed(self, fix): inherited unchanged from ModelParameterisation
# def get_state(self): inherited unchanged from ModelParameterisation
def get_ds_dp(self, only_free=True, use_none_as_null=False):
"""get a list of derivatives of the state wrt each parameter, as
a list in the same order as the internal list of parameters. Requires
compose to be called first at scan coordinate 't' so that each
scan-dependent parameter is evaluated at coordinate t, corresponding to
the original, unnormalised coordinates used to set up the smoother
(t will most likely be along the dimension of image number).
If only_free, the derivatives with respect to fixed parameters are
omitted from the returned list. Otherwise a list for all parameters is
returned, with null values for the fixed parameters.
The internal list of derivatives self._dstate_dp may use None for null
elements. By default these are converted to the null state, but
optionally these may remain None to detect them easier and avoid
doing calculations on null elements
"""
if use_none_as_null:
null = None
else:
null = self._null_state
if only_free:
return [
null if ds_dp is None else ds_dp
for row, p in zip(self._dstate_dp, self._param)
if not p.get_fixed()
for ds_dp in row
]
else:
return [
null if p.get_fixed() or ds_dp is None else ds_dp
for row, p in zip(self._dstate_dp, self._param)
for ds_dp in row
]
def get_smoothed_parameter_value(self, t, pset):
"""export the smoothed value of a parameter set at image number 't'
using the smoother."""
return self._smoother.value_weight(t, pset)[0]
def calculate_state_uncertainties(self, var_cov=None):
"""Given a variance-covariance array for the parameters of this model,
propagate those estimated errors into the uncertainties of the model state
at every scan point"""
if var_cov is not None:
# first call, just cache the variance-covariance matrix
self._var_cov = var_cov
return None
# later calls, make sure it has been cached! Otherwise ESDs cannot be
# calculated, so return None
if self._var_cov is None:
return None
# later calls, assumes compose has been called at image number t, so that
# get_ds_dp will be specific for that image. Now call the base class method
# and return the result
return super(
ScanVaryingModelParameterisation, self
).calculate_state_uncertainties(self._var_cov)
def set_state_uncertainties(self, var_cov_list):
"""Send the calculated variance-covariance matrices for model state elements
for all scan points back to the model for storage alongside the model state
"""
pass
| 2.109375
| 2
|
Week6/a3.py
|
stuart22/coursera-p1-002
| 0
|
12774931
|
<gh_stars>0
"""A board is a list of list of str. For example, the board
ANTT
XSOB
is represented as the list
[['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']]
A word list is a list of str. For example, the list of words
ANT
BOX
SOB
TO
is represented as the list
['ANT', 'BOX', 'SOB', 'TO']
"""
def is_valid_word(wordlist, word):
""" (list of str, str) -> bool
Return True if and only if word is an element of wordlist.
>>> is_valid_word(['ANT', 'BOX', 'SOB', 'TO'], 'TO')
True
"""
return word in wordlist
def make_str_from_row(board, row_index):
""" (list of list of str, int) -> str
Return the characters from the row of the board with index row_index
as a single string.
>>> make_str_from_row([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 0)
'ANTT'
"""
    str_row = ''
    for i in range(len(board[row_index])):
        str_row = str_row + board[row_index][i]
    return str_row
def make_str_from_column(board, column_index):
""" (list of list of str, int) -> str
Return the characters from the column of the board with index column_index
as a single string.
>>> make_str_from_column([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 1)
'NS'
"""
str_col=''
for strList in board:
str_col += strList[column_index]
return str_col
def board_contains_word_in_row(board, word):
""" (list of list of str, str) -> bool
Return True if and only if one or more of the rows of the board contains
word.
Precondition: board has at least one row and one column, and word is a
valid word.
>>> board_contains_word_in_row([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 'SOB')
True
"""
for row_index in range(len(board)):
if word in make_str_from_row(board, row_index):
return True
return False
def board_contains_word_in_column(board, word):
""" (list of list of str, str) -> bool
Return True if and only if one or more of the columns of the board
contains word.
Precondition: board has at least one row and one column, and word is a
valid word.
>>> board_contains_word_in_column([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 'NO')
False
"""
    for index in range(len(board[0])):
        myStr = make_str_from_column(board, index)
        if word in myStr:
            return True
    return False
def board_contains_word(board, word):
""" (list of list of str, str) -> bool
Return True if and only if word appears in board.
Precondition: board has at least one row and one column.
>>> board_contains_word([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 'ANT')
True
"""
return (board_contains_word_in_row(board, word) or board_contains_word_in_column(board, word))
def word_score(word):
""" (str) -> int
Return the point value the word earns.
Word length: < 3: 0 points
3-6: 1 point per character for all characters in word
7-9: 2 points per character for all characters in word
10+: 3 points per character for all characters in word
>>> word_score('DRUDGERY')
16
"""
    if len(word) < 3:
        return 0
    elif len(word) < 7:
        return len(word)
    elif len(word) < 10:
        return len(word) * 2
    else:
        return len(word) * 3
def update_score(player_info, word):
""" ([str, int] list, str) -> NoneType
player_info is a list with the player's name and score. Update player_info
by adding the point value word earns to the player's score.
>>> update_score(['Jonathan', 4], 'ANT')
"""
player_info[1] += word_score(word)
def num_words_on_board(board, words):
""" (list of list of str, list of str) -> int
Return how many words appear on board.
>>> num_words_on_board([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], ['ANT', 'BOX', 'SOB', 'TO'])
3
"""
count=0
for word in words:
if board_contains_word(board, word):
count +=1
return count
def read_words(words_file):
""" (file open for reading) -> list of str
Return a list of all words (with newlines removed) from open file
words_file.
Precondition: Each line of the file contains a word in uppercase characters
from the standard English alphabet.
"""
f = open(words_file)
myList = []
for line in f.readlines():
        myList.append(line.rstrip('\n'))
f.close()
return myList
def read_board(board_file):
""" (file open for reading) -> list of list of str
Return a board read from open file board_file. The board file will contain
one row of the board per line. Newlines are not included in the board.
"""
f = open(board_file)
myList = []
for word in f.readlines():
        word = word.rstrip('\n')
tempList = []
for letter in word:
tempList.append(letter)
myList.append(tempList)
f.close()
return myList
| 4.25
| 4
|
torchero/models/model.py
|
juancruzsosa/torchero
| 10
|
12774932
|
import json
import zipfile
import importlib
from functools import partial
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
import torchero
from torchero.utils.mixins import DeviceMixin
from torchero import meters
from torchero import SupervisedTrainer
class InputDataset(Dataset):
""" Simple Dataset wrapper
to transform input before giving it
to the dataloader
"""
def __init__(self, ds, transform):
self.ds = ds
self.transform = transform
def __getitem__(self, idx):
return self.transform(self.ds[idx])
def __len__(self):
return len(self.ds)
class ModelImportException(Exception):
pass
class ModelNotCompiled(Exception):
pass
class PredictionItem(object):
def __init__(self, preds):
self._preds = preds
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
repr(self._preds))
@property
def tensor(self):
return self._preds
def numpy(self):
return self._preds.cpu().numpy()
class PredictionsResult(object):
def __init__(self, preds, pred_class=PredictionItem):
self._preds = [
pred_class(pred) for pred in preds
]
@property
def tensor(self):
return torch.stack([pred.tensor for pred in self._preds])
def numpy(self):
return np.stack([pred.numpy() for pred in self._preds])
def __iter__(self):
return iter(self._preds)
def __len__(self):
return len(self._preds)
def __getitem__(self, idx):
return self._preds[idx]
def __repr__(self):
list_format = []
for pred in self._preds[:10]:
list_format.append(repr(pred))
if len(self._preds) > 10:
list_format.append('...')
format_string = '{}([{}])'.format(self.__class__.__name__,
'\n,'.join(list_format))
return format_string
class Model(DeviceMixin):
""" Model Class for Binary Classification (single or multilabel) tasks
"""
""" Model class that wrap nn.Module models to add
training, prediction, saving & loading capabilities
"""
@classmethod
def load_from_file(_, path_or_fp, net=None):
""" Load a saved model from disk an convert it to the desired type (ImageModel, TextModel, etc)
Arguments:
net (nn.Module): Neural network initialized in the same way as the saved one.
path_or_fp (file-like or str): Path to saved model
"""
with zipfile.ZipFile(path_or_fp, mode='r') as zip_fp:
with zip_fp.open('config.json', 'r') as fp:
config = json.loads(fp.read().decode('utf-8'))
model_type = config['torchero_model_type']
module = importlib.import_module(model_type['module'])
model_type = getattr(module, model_type['type'])
if net is None:
if 'net' not in config:
raise ModelImportException("Invalid network configuration json (Expected 'net' key)")
net_type = config['net']['type']
net_module = importlib.import_module(net_type['module'])
net_type = getattr(net_module, net_type['type'])
if 'config' not in config['net']:
raise ModelImportException("Network configuration not found in config.json ('net.config'). Create function passing an already initialized network")
if hasattr(net_type, 'from_config') and 'config' in config['net']:
net = net_type.from_config(config['net']['config'])
model = model_type(net)
model.load(path_or_fp)
return model
def __init__(self, model):
""" Constructor
Arguments:
model (nn.Module): Model to be wrapped
"""
super(Model, self).__init__()
self.model = model
self._trainer = None
def pred_class(self, preds):
return PredictionsResult(preds)
@property
def trainer(self):
if self._trainer is None:
raise ModelNotCompiled("Model hasn't been compiled with any trainer. Use model.compile first")
return self._trainer
def compile(self, optimizer, loss, metrics, hparams={}, callbacks=[], val_metrics=None):
""" Compile this model with a optimizer a loss and set of given metrics
Arguments:
optimizer (str or instance of torch.optim.Optimizer): Optimizer to train the model
loss (str or instance of torch.nn.Module): Loss (criterion) to be minimized
metrics (list or dict of `torchero.meters.BaseMeter`, optional): A list of metrics
or dictionary of metrics names and meters to record for training set
hparams (list or dict of `torchero.meters.BaseMeter`, optional): A list of meters
or dictionary of metrics names and hyperparameters to record
val_metrics (list or dict of `torchero.meters.BaseMeter`, optional): Same as metrics argument
                but only used for the validation set. If None it uses the same metrics as the `metrics` argument.
callbacks (list of `torchero.callbacks.Callback`): List of callbacks to use in trainings
"""
self._trainer = SupervisedTrainer(model=self.model,
criterion=loss,
optimizer=optimizer,
callbacks=callbacks,
acc_meters=metrics,
val_acc_meters=val_metrics,
hparams=hparams)
self._trainer.to(self.device)
return self
def input_to_tensor(self, *X):
""" Converts inputs to tensors
"""
return X
def _predict_batch(self, *X):
""" Generate output predictions for the input tensors
This method can be called with a single input or multiple (If the model has multiple inputs)
This method is not intended to be used directly. Use predict instead
"""
self.model.train(False)
with torch.no_grad():
# Converts each input tensor to the given device
X = list(map(self._convert_tensor, X))
return self.model(*X)
@property
def callbacks(self):
return self.trainer.callbacks
@property
def optimizer(self):
return self.trainer.optimizer
@optimizer.setter
    def optimizer(self, optimizer):
self.trainer.optimizer = optimizer
@property
def hparams(self):
return dict(self.trainer.hparams)
@property
def history(self):
return self.trainer.history
@property
def loss(self):
return self.trainer.criterion
@loss.setter
def loss(self, loss):
self.trainer.criterion = loss
def total_parameters(self):
""" Returns the total number of parameters
"""
parameters = self.model.parameters()
parameters = map(lambda p: p.numel(), parameters)
return sum(parameters)
def total_trainable_parameters(self):
""" Returns the total number of trainable parameters
"""
parameters = self.model.parameters()
parameters = filter(lambda p: p.requires_grad, parameters)
parameters = map(lambda p: p.numel(), parameters)
return sum(parameters)
def to(self, device):
""" Moves the model to the given device
Arguments:
device (str or torch.device)
"""
super(Model, self).to(device)
try:
self.trainer.to(device)
except ModelNotCompiled:
pass
def _combine_preds(self, preds):
""" Combines the list of predictions in a single tensor
"""
preds = torch.stack(preds)
return self.pred_class(preds)
def predict_on_dataloader(self, dl, has_targets=True):
""" Generate output predictions on an dataloader
Arguments:
dl (`torch.utils.data.DataLoader`): input DataLoader
            has_targets (bool): Set to True if the dataloader yields (input, target) pairs; the targets are discarded
Notes:
* The dataloader batches should yield `torch.Tensor`'s
"""
preds = []
for X in dl:
if has_targets:
X, _ = X
if isinstance(X, tuple):
y = self._predict_batch(*X)
else:
y = self._predict_batch(X)
preds.extend(y)
preds = self._combine_preds(preds)
return preds
def predict(self,
ds,
batch_size=None,
to_tensor=True,
has_targets=False,
num_workers=0,
pin_memory=False,
prefetch_factor=2):
""" Generate output predictions
Arguments:
ds (* `torch.utils.data.Dataset`
* `torch.utils.data.DataLoader`
* `list`
* `np.array`): Input samples
batch_size (int or None): Number of samples per batch. If None is
passed it will default to 32.
to_tensor (bool): Set this to True to convert inputs to tensors first (default behaviour)
            has_targets (bool): Set to True if the samples already contain targets (which are discarded before prediction)
num_workers (int, optional): Number of subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
pin_memory (bool): If True, the data loader will copy Tensors into
CUDA pinned memory before returning them. If your data elements are
a custom type, or your collate_fn returns a batch that is a custom
type, see the example below.
prefetch_factor (int, optional):
Number of samples loaded in advance by each worker. 2 means
there will be a total of 2 * num_workers samples prefetched
across all workers.
"""
dl = self._get_dataloader(ds,
shuffle=False,
batch_size=batch_size,
shallow_dl=to_tensor,
num_workers=num_workers,
pin_memory=pin_memory,
prefetch_factor=prefetch_factor)
return self.predict_on_dataloader(dl, has_targets=has_targets)
def train_on_dataloader(self, train_dl, val_dl=None, epochs=1):
""" Trains the model for a fixed number of epochs
Arguments:
train_ds (`torch.utils.data.DataLoader`): Train dataloader
val_ds (`torch.utils.data.Dataset`): Test dataloader
epochs (int): Number of epochs to train the model
"""
self.trainer.train(dataloader=train_dl,
valid_dataloader=val_dl,
epochs=epochs)
return self.trainer.history
def load_checkpoint(self, checkpoint=None):
        self.trainer.load_checkpoint(checkpoint=checkpoint)
def evaluate_on_dataloader(self,
dataloader,
metrics=None):
""" Evaluate metrics on a given dataloader
Arguments:
dataloader (`torch.utils.data.DataLoader`): Input Dataloader
metrics (list of mapping, optional): Metrics to evaluate. If None is passed
it will used the same defined at compile step
"""
return self.trainer.evaluate(dataloader=dataloader,
metrics=metrics)
def _create_dataloader(self, *args, **kwargs):
return DataLoader(*args, **kwargs)
def _get_dataloader(self,
ds,
batch_size=None,
shallow_dl=False,
**dl_kwargs):
if isinstance(ds, (Dataset, list)):
dl = self._create_dataloader(InputDataset(ds, self.input_to_tensor) if shallow_dl else ds,
batch_size=batch_size or 32,
**dl_kwargs)
elif isinstance(ds, DataLoader):
dl = ds
else:
raise TypeError("ds type not supported. Use Dataloader or Dataset instances")
return dl
def evaluate(self,
ds,
metrics=None,
batch_size=None,
collate_fn=None,
sampler=None,
num_workers=0,
pin_memory=False,
prefetch_factor=2):
""" Evaluate metrics
Arguments:
ds (* `torch.utils.data.Dataset`
* `torch.utils.data.DataLoader`
* `list`
* `np.array`): Input data
metrics (list of mapping, optional): Metrics to evaluate. If None is passed
it will used the same defined at compile step
batch_size (int or None): Number of samples per batch. If None is
passed it will default to 32. Only relevant for non dataloader data
collate_fn (callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset. See `torch.utils.data.DataLoader`
sampler (Sampler or Iterable, optional): Defines the strategy to draw
samples from the dataset. Can be any ``Iterable`` with ``__len__``
implemented. If specified, :attr:`shuffle` must not be specified.
                See ``torch.utils.data.DataLoader``
num_workers (int, optional): Number of subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
pin_memory (bool): If True, the data loader will copy Tensors into
CUDA pinned memory before returning them. If your data elements are
a custom type, or your collate_fn returns a batch that is a custom
type, see the example below.
prefetch_factor (int, optional):
Number of samples loaded in advance by each worker. 2 means
there will be a total of 2 * num_workers samples prefetched
across all workers.
"""
dl = self._get_dataloader(ds,
batch_size=batch_size,
shuffle=False,
collate_fn=collate_fn,
sampler=sampler,
num_workers=num_workers,
pin_memory=pin_memory,
prefetch_factor=prefetch_factor)
return self.evaluate_on_dataloader(dl, metrics=metrics)
def fit(self,
train_ds,
val_ds=None,
epochs=1,
batch_size=None,
shuffle=True,
collate_fn=None,
sampler=None,
num_workers=0,
val_num_workers=None,
pin_memory=False,
val_pin_memory=False,
prefetch_factor=2,
val_prefetch_factor=None):
""" Trains the model for a fixed number of epochs
Arguments:
train_ds (* `torch.utils.data.Dataset`
* `torch.utils.data.DataLoader`
* `list`
* `np.array`): Train data
val_ds (* `torch.utils.data.Dataset`
* `torch.utils.data.DataLoader`
* `list`
* `np.array`): Validation data
batch_size (int or None): Number of samples per batch. If None is
passed it will default to 32. Only relevant for non dataloader data
epochs (int): Number of epochs to train the model
            shuffle (bool): Set to ``True`` to shuffle train dataset before every epoch. Only for
non dataloader train data.
collate_fn (callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset. See `torch.utils.data.DataLoader`
sampler (Sampler or Iterable, optional): Defines the strategy to draw
samples from the dataset. Can be any ``Iterable`` with ``__len__``
implemented. If specified, :attr:`shuffle` must not be specified.
                See ``torch.utils.data.DataLoader``
num_workers (int, optional): Number of subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
val_num_workers (int, optional): Same as num_workers but for the validation dataset.
If not passed num_workers argument will be used
pin_memory (bool): If True, the data loader will copy Tensors into
CUDA pinned memory before returning them. If your data elements are
a custom type, or your collate_fn returns a batch that is a custom
type, see the example below.
val_pin_memory (bool): Same as pin_memory but for the validation dataset.
If not passed pin_memory argument will be used
prefetch_factor (int, optional):
Number of samples loaded in advance by each worker. 2 means
there will be a total of 2 * num_workers samples prefetched
across all workers.
val_prefetch_factor (int, optional): Same as prefetch_factor but for the validation dataset.
If not passed prefetch_factor argument will be used
"""
train_dl = self._get_dataloader(train_ds,
batch_size=batch_size,
shuffle=shuffle,
collate_fn=collate_fn,
sampler=sampler,
num_workers=num_workers,
pin_memory=pin_memory,
prefetch_factor=prefetch_factor)
if val_ds is None:
val_dl = None
else:
val_dl = self._get_dataloader(val_ds,
batch_size=batch_size,
shuffle=False,
collate_fn=collate_fn,
sampler=sampler,
num_workers=val_num_workers or num_workers,
pin_memory=val_pin_memory or pin_memory,
prefetch_factor=val_prefetch_factor or prefetch_factor)
return self.train_on_dataloader(train_dl,
val_dl,
epochs)
@property
def config(self):
config = {
'torchero_version': torchero.__version__,
'torchero_model_type': {'module': self.__class__.__module__,
'type': self.__class__.__name__},
'compiled': self._trainer is not None,
}
if hasattr(self.model, 'config'):
config.update({'net': {
'type': {'module': self.model.__class__.__module__,
'type': self.model.__class__.__name__},
'config': self.model.config
}})
return config
def init_from_config(self, config):
pass
def save(self, path_or_fp):
self.model.eval()
with zipfile.ZipFile(path_or_fp, mode='w') as zip_fp:
self._save_to_zip(zip_fp)
def _save_to_zip(self, zip_fp):
with zip_fp.open('model.pth', 'w') as fp:
torch.save(self.model.state_dict(), fp)
with zip_fp.open('config.json', 'w') as fp:
fp.write(json.dumps(self.config, indent=4).encode())
try:
self.trainer._save_to_zip(zip_fp, prefix='trainer/')
except ModelNotCompiled:
pass
def load(self, path_or_fp):
with zipfile.ZipFile(path_or_fp, mode='r') as zip_fp:
self._load_from_zip(zip_fp)
def _load_from_zip(self, zip_fp):
with zip_fp.open('model.pth', 'r') as fp:
self.model.load_state_dict(torch.load(fp))
with zip_fp.open('config.json', 'r') as config_fp:
config = json.loads(config_fp.read().decode())
if config['compiled'] is True:
self._trainer = SupervisedTrainer(model=self.model,
criterion=None,
optimizer=None)
self._trainer._load_from_zip(zip_fp, prefix='trainer/')
self.init_from_config(config)
class UnamedClassificationPredictionItem(PredictionItem):
""" Model Prediction with classes names
"""
def __init__(self, preds):
super(UnamedClassificationPredictionItem, self).__init__(preds)
if self._preds.ndim == 0:
self._preds = self._preds.unsqueeze(-1)
def as_dict(self):
return dict(enumerate(self._preds.tolist()))
def max(self):
return self._preds.max().item()
def argmax(self):
return self._preds.argmax().item()
def topk(self, k):
values, indices = self._preds.topk(k)
return list(zip(indices.tolist(), values.tolist()))
def as_tuple(self):
return tuple(self._preds.tolist())
def __repr__(self):
return repr(self.as_tuple())
class NamedClassificationPredictionItem(PredictionItem):
""" Model Prediction with classes names
"""
def __init__(self, preds, names=None):
super(NamedClassificationPredictionItem, self).__init__(preds)
self.names = names
if self._preds.ndim == 0:
self._preds = self._preds.unsqueeze(-1)
def max(self):
return self._preds.max().item()
def argmax(self):
return self.names[self._preds.argmax().item()]
def topk(self, k):
values, indices = self._preds.topk(k)
names = map(self.names.__getitem__, indices.tolist())
return list(zip(names, values.tolist()))
def as_dict(self):
return dict(zip(self.names, self._preds.tolist()))
def as_tuple(self):
        return tuple(self._preds.tolist())
def __repr__(self):
return repr(self.as_dict())
class ClassificationPredictionsResult(PredictionsResult):
""" List of model classification predictions
"""
def __init__(self, preds, names=None):
self.names = names
if self.names is None:
pred_class = UnamedClassificationPredictionItem
else:
pred_class = partial(NamedClassificationPredictionItem, names=self.names)
super(ClassificationPredictionsResult, self).__init__(preds, pred_class=pred_class)
def as_dict(self):
return [pred.as_dict() for pred in self._preds]
def as_tuple(self):
return [pred.as_tuple() for pred in self._preds]
def max(self):
return [pred.max() for pred in self._preds]
def argmax(self):
return [pred.argmax() for pred in self._preds]
def topk(self, k):
return [pred.topk(k) for pred in self._preds]
def as_df(self):
import pandas as pd
return pd.DataFrame.from_records(self.as_dict())
class BinaryClassificationModel(Model):
def __init__(self, model, use_logits=True, threshold=0.5, labels=None):
""" Constructor
Arguments:
model (nn.Module): Model to be wrapped
use_logits (bool): Set this as `True` if your model does **not**
contain sigmoid as activation in the final layer (preferable)
or 'False' otherwise
threshold (float): Threshold used for metrics and predictions to determine if a prediction is true
"""
super(BinaryClassificationModel, self).__init__(model)
self.use_logits = use_logits
self.threshold = threshold
self.labels = labels
@property
def config(self):
config = super(BinaryClassificationModel, self).config
config['labels'] = self.labels
return config
def init_from_config(self, config):
super(BinaryClassificationModel, self).init_from_config(config)
self.labels = config['labels']
def compile(self, optimizer, loss=None, metrics=None, hparams={}, callbacks=[], val_metrics=None):
""" Compile this model with a optimizer a loss and set of given metrics
Arguments:
optimizer (str or instance of torch.optim.Optimizer): Optimizer to train the model
loss (str or instance of torch.nn.Module, optional): Loss (criterion) to be minimized.
By default 'binary_cross_entropy_wl' (logits are already calculated on the loss)
                if use_logits else 'binary_cross_entropy' (logits are not calculated on the loss)
metrics (list or dict of `torchero.meters.BaseMeter`): A list of metrics
or dictionary of metrics names and meters to record for training set.
                By default accuracy, recall, precision and F1 score (with or without logits depending on ``use_logits``)
hparams (list or dict of `torchero.meters.BaseMeter`, optional): A list of meters
or dictionary of metrics names and hyperparameters to record
val_metrics (list or dict of `torchero.meters.BaseMeter`, optional): Same as metrics argument
                but only used for the validation set. If None it uses the same metrics as the `metrics` argument.
callbacks (list of `torchero.callbacks.Callback`): List of callbacks to use in trainings
"""
if loss is None:
loss = 'binary_cross_entropy_wl' if self.use_logits else 'binary_cross_entropy'
if metrics is None:
metrics = ([meters.BinaryWithLogitsAccuracy(threshold=self.threshold),
meters.Recall(threshold=self.threshold, with_logits=True),
meters.Precision(threshold=self.threshold, with_logits=True),
meters.F1Score(threshold=self.threshold, with_logits=True)]
if self.use_logits else
[meters.BinaryAccuracy(threshold=self.threshold),
meters.Recall(threshold=self.threshold, with_logits=False),
meters.Precision(threshold=self.threshold, with_logits=False),
meters.F1Score(threshold=self.threshold, with_logits=False)])
return super(BinaryClassificationModel, self).compile(optimizer=optimizer,
loss=loss,
metrics=metrics,
hparams=hparams,
callbacks=callbacks,
val_metrics=val_metrics)
def pred_class(self, preds):
return ClassificationPredictionsResult(preds, names=self.labels)
def classification_report(self,
ds,
batch_size=None,
collate_fn=None,
sampler=None):
clf_report = meters.binary_scores.BinaryClassificationReport(threshold=self.threshold,
with_logits=self.use_logits,
names=self.labels)
metrics = self.evaluate(ds,
metrics={'clf': clf_report},
batch_size=batch_size,
collate_fn=collate_fn,
sampler=sampler)
return metrics['clf']
def _predict_batch(self, *X, output_probas=True):
preds = super(BinaryClassificationModel, self)._predict_batch(*X)
if self.use_logits:
preds = torch.sigmoid(preds)
if not output_probas:
preds = preds > self.threshold
return preds
class ClassificationModel(Model):
""" Model Class for Classification (for categorical targets) tasks
"""
def __init__(self, model, use_softmax=True, classes=None):
""" Constructor
Arguments:
model (nn.Module): Model to be wrapped
use_softmax (bool): Set this as `True` if your model does **not**
contain softmax as activation in the final layer (preferable)
or 'False' otherwise
"""
super(ClassificationModel, self).__init__(model)
self.use_softmax = use_softmax
self.classes = classes
@property
def config(self):
config = super(ClassificationModel, self).config
config['classes'] = self.classes
return config
def init_from_config(self, config):
super(ClassificationModel, self).init_from_config(config)
self.classes = config['classes']
def compile(self, optimizer, loss=None, metrics=None, hparams={}, callbacks=[], val_metrics=None):
""" Compile this model with a optimizer a loss and set of given metrics
Arguments:
optimizer (str or instance of torch.optim.Optimizer): Optimizer to train the model
loss (str or instance of torch.nn.Module, optional): Loss (criterion) to be minimized.
                By default 'cross_entropy' if use_softmax else 'nll'
metrics (list or dict of `torchero.meters.BaseMeter`): A list of metrics
or dictionary of metrics names and meters to record for training set.
By default ['accuracy', 'balanced_accuracy']
hparams (list or dict of `torchero.meters.BaseMeter`, optional): A list of meters
or dictionary of metrics names and hyperparameters to record
val_metrics (list or dict of `torchero.meters.BaseMeter`, optional): Same as metrics argument
                but only used for the validation set. If None it uses the same metrics as the `metrics` argument.
callbacks (list of `torchero.callbacks.Callback`): List of callbacks to use in trainings
"""
if loss is None:
loss = 'cross_entropy' if self.use_softmax else 'nll'
if metrics is None:
metrics = [meters.CategoricalAccuracy(), meters.BalancedAccuracy()]
return super(ClassificationModel, self).compile(optimizer=optimizer,
loss=loss,
metrics=metrics,
hparams=hparams,
callbacks=callbacks,
val_metrics=val_metrics)
def pred_class(self, preds):
return ClassificationPredictionsResult(preds, names=self.classes)
def _predict_batch(self, *X):
preds = super(ClassificationModel, self)._predict_batch(*X)
if self.use_softmax:
preds = torch.softmax(preds, dim=-1)
return preds
class RegressionModel(Model):
""" Model Class for regression tasks
"""
def compile(self, optimizer, loss='mse', metrics=None, hparams={}, callbacks=[], val_metrics=None):
""" Compile this model with a optimizer a loss and set of given metrics
Arguments:
optimizer (str or instance of torch.optim.Optimizer): Optimizer to train the model
loss (str or instance of torch.nn.Module, optional): Loss (criterion) to be minimized. Default: 'mse'
metrics (list or dict of `torchero.meters.BaseMeter`): A list of metrics
or dictionary of metrics names and meters to record for training set.
By default RMSE
hparams (list or dict of `torchero.meters.BaseMeter`, optional): A list of meters
or dictionary of metrics names and hyperparameters to record
val_metrics (list or dict of `torchero.meters.BaseMeter`, optional): Same as metrics argument
                but only used for the validation set. If None it uses the same metrics as the `metrics` argument.
callbacks (list of `torchero.callbacks.Callback`): List of callbacks to use in trainings
"""
if metrics is None:
metrics = [meters.RMSE()]
return super(RegressionModel, self).compile(optimizer=optimizer,
loss=loss,
metrics=metrics,
hparams=hparams,
callbacks=callbacks,
val_metrics=val_metrics)
def load_model_from_file(path_or_fp, net=None):
return Model.load_from_file(path_or_fp, net)
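# Illustrative end-to-end sketch (an assumption about typical usage, not an
# official example; `net` stands for any user-defined nn.Module and 'adam' for
# whatever optimizer string or instance compile() accepts):
#
#   model = BinaryClassificationModel(net, use_logits=True, labels=['neg', 'pos'])
#   model.compile(optimizer='adam')
#   model.fit(train_ds, val_ds, epochs=5, batch_size=32)
#   preds = model.predict(test_ds)
#   model.save('model.zip')
#   restored = load_model_from_file('model.zip', net=net)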
| 2.484375
| 2
|
Hackerrank_python/6.itertools/51.itertools.combinations_with_replacement().py
|
manish1822510059/Hackerrank
| 39
|
12774933
|
<gh_stars>10-100
# Enter your code here. Read input from STDIN. Print output to STDOUT
from itertools import combinations_with_replacement
x=input().split()
s,p=x[0],int(x[1])
y=combinations_with_replacement(sorted(s),p)
for i in y:
    print(*i, sep="")
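# Illustrative run (hypothetical input): for the input line "HACK 2" the sorted
# letters are A C H K, and the script prints each size-2 combination with
# replacement on its own line: AA, AC, AH, AK, CC, CH, CK, HH, HK, KK.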
| 3.03125
| 3
|
Data Visualization/titanic/Missing Value5.py
|
ALDOR99/Python
| 2
|
12774934
|
<reponame>ALDOR99/Python<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 18 14:34:17 2021
@author: ali_d
"""
#Missing Value
# -Find Missing Value
# -Fill Missing Value
#Load and Check Data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("seaborn-whitegrid")
import seaborn as sns
from collections import Counter
import warnings
warnings.filterwarnings("ignore")
train_df = pd.read_csv("train.csv")
test_df = pd.read_csv("test.csv")
test_passengerId = test_df["PassengerId"]
train_df_len = len(train_df)
print(train_df_len)
train_df = pd.concat([train_df,test_df],axis = 0).reset_index(drop = True)
print(train_df.head())
# Find Missing Value
print(train_df.columns[train_df.isnull().any()])
print("-"*30)
print(train_df.isnull().sum())
print("-"*30)
# Fill Missing Value
b = train_df[train_df["Embarked"].isnull()]
print(b)
train_df.boxplot(column="Fare",by = "Embarked")
plt.show()
print()
train_df["Embarked"] = train_df["Embarked"].fillna("C")
print("-"*30)
train_df[train_df["Fare"].isnull()]
d = train_df[train_df["Pclass"] == 3]["Fare"]
print(np.mean(d))
train_df["Fare"]=train_df["Fare"].fillna(np.mean(d))
print(train_df[train_df["Fare"].isnull()])
| 3.34375
| 3
|
nnet/losses.py
|
c-ma13/sepTFNet
| 1
|
12774935
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import torch
from itertools import permutations
def loss_calc(est, ref, loss_type):
    """
    time-domain loss: sisdr
    frequency-domain losses: mse, log_mse
    """
    if loss_type == "sisdr":
        # time domain (wav input)
        loss = batch_SDR_torch(est, ref)
    elif loss_type == "mse":
        loss = batch_mse_torch(est, ref)
    elif loss_type == "log_mse":
        loss = batch_log_mse_torch(est, ref)
    else:
        raise ValueError("Unsupported loss type: {}".format(loss_type))
    return loss
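# Expected shapes (taken from the docstrings below): "sisdr" works on
# time-domain waveforms of shape (batch, nsource, nsample), while "mse" and
# "log_mse" work on spectrogram-like tensors of shape
# (batch, nsource, frames, freq_bins) with nsource = 2.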
def calc_sdr_torch(estimation, origin, mask=None):
"""
    batch-wise SDR calculation for one audio file on pytorch Variables.
estimation: (batch, nsample)
origin: (batch, nsample)
mask: optional, (batch, nsample), binary
"""
if mask is not None:
origin = origin * mask
estimation = estimation * mask
origin_power = torch.pow(origin, 2).sum(1, keepdim=True) + 1e-8 # (batch, 1)
scale = torch.sum(origin*estimation, 1, keepdim=True) / origin_power # (batch, 1)
est_true = scale * origin # (batch, nsample)
est_res = estimation - est_true # (batch, nsample)
true_power = torch.pow(est_true, 2).sum(1)
res_power = torch.pow(est_res, 2).sum(1)
return 10*torch.log10(true_power) - 10*torch.log10(res_power) # (batch, 1)
def batch_SDR_torch(estimation, origin, mask=None):
"""
    batch-wise SDR calculation for multiple audio files.
estimation: (batch, nsource, nsample)
origin: (batch, nsource, nsample)
mask: optional, (batch, nsample), binary
"""
batch_size_est, nsource_est, nsample_est = estimation.size()
batch_size_ori, nsource_ori, nsample_ori = origin.size()
assert batch_size_est == batch_size_ori, "Estimation and original sources should have same shape."
assert nsource_est == nsource_ori, "Estimation and original sources should have same shape."
assert nsample_est == nsample_ori, "Estimation and original sources should have same shape."
assert nsource_est < nsample_est, "Axis 1 should be the number of sources, and axis 2 should be the signal."
batch_size = batch_size_est
nsource = nsource_est
nsample = nsample_est
# zero mean signals
estimation = estimation - torch.mean(estimation, 2, keepdim=True).expand_as(estimation)
origin = origin - torch.mean(origin, 2, keepdim=True).expand_as(estimation)
# possible permutations
perm = list(set(permutations(np.arange(nsource))))
# pair-wise SDR
SDR = torch.zeros((batch_size, nsource, nsource)).type(estimation.type())
for i in range(nsource):
for j in range(nsource):
SDR[:,i,j] = calc_sdr_torch(estimation[:,i], origin[:,j], mask)
# choose the best permutation
SDR_max = []
SDR_perm = []
for permute in perm:
sdr = []
for idx in range(len(permute)):
sdr.append(SDR[:,idx,permute[idx]].view(batch_size,-1))
sdr = torch.sum(torch.cat(sdr, 1), 1)
SDR_perm.append(sdr.view(batch_size, 1))
SDR_perm = torch.cat(SDR_perm, 1)
SDR_max, _ = torch.max(SDR_perm, dim=1)
return - SDR_max / nsource
# def calc_mse_torch(estimation, origin):
# return torch.mean(torch.pow(estimation-origin,2),1).mean(1)
def batch_mse_torch(estimation, origin):
"""
    batch-wise mse calculation for multiple audio files.
estimation: (batch, nsource, frames, freq_bins)
origin: (batch, nsource, frames, freq_bins)
nsource = 2
"""
mse1 = torch.sqrt(torch.pow(estimation - origin, 2).mean([3])).mean([1,2])
mse2 = torch.sqrt(torch.pow(estimation - origin.flip([1]), 2).mean([3])).mean([1,2])
return torch.stack((mse1, mse2),1).min(1)[0]
def batch_log_mse_torch(estimation, origin):
"""
    batch-wise mse calculation for multiple audio files.
estimation: (batch, nsource, frames, freq_bins)
origin: (batch, nsource, frames, freq_bins)
nsource = 2
"""
# eps = 1e-20
# mse1 = torch.log10(torch.sqrt(torch.pow(estimation - origin, 2).mean([3])).mean([1,2])+eps)
# mse2 = torch.log10(torch.sqrt(torch.pow(estimation - origin.flip([1]), 2).mean([3])).mean([1,2])+eps)
mse1 = torch.log10(torch.pow(estimation - origin, 2).mean([3])).mean([1,2])
mse2 = torch.log10(torch.pow(estimation - origin.flip([1]), 2).mean([3])).mean([1,2])
return torch.stack((mse1, mse2),1).min(1)[0]
if __name__ == "__main__":
est = torch.rand(10, 2, 32, 1000)
ref = torch.rand(10, 2, 32, 1000)
out = loss_calc(est, ref, "mse")
print(out.shape)
print(out)
| 2.265625
| 2
|
Parser.py
|
DoubleNy/WADE-HACKATON
| 0
|
12774936
|
<reponame>DoubleNy/WADE-HACKATON
import xlrd
from xlrd.sheet import ctype_text
class Parser:
def __init__(self):
self.parsed_sheets = dict()
self.parsed_sheets_names = []
def get_parsed(self):
return self.parsed_sheets_names, self.parsed_sheets
def parse(self, file):
workbook = xlrd.open_workbook(file, encoding_override='cp1252')
self.parsed_sheets_names = workbook.sheet_names()
for sheet_number in range(0, len(self.parsed_sheets_names)):
sheet = workbook.sheet_by_name(self.parsed_sheets_names[sheet_number])
start_row = 2
num_cols = sheet.ncols
fields = []
for col_idx in range(0, num_cols):
cell_obj = sheet.cell(start_row, col_idx)
fields.append(cell_obj.value)
# print(fields)
# print(num_cols)
new_parsed_sheet = []
for row_idx in range(start_row + 1, sheet.nrows):
new_obj = dict()
for col_idx in range(0, num_cols): # Iterate through columns
cell_obj = sheet.cell(row_idx, col_idx)
new_obj[fields[col_idx]] = (cell_obj.value, cell_obj.ctype)
new_parsed_sheet.append(new_obj)
self.parsed_sheets[self.parsed_sheets_names[sheet_number]] = new_parsed_sheet
| 2.953125
| 3
|
python/import.py
|
mkanenobu/trashbox
| 2
|
12774937
|
<gh_stars>1-10
#!/usr/bin/python3
# Load name_main.py as a module
import name_main
| 1.398438
| 1
|
backend/src/__init__.py
|
fjacob21/mididecweb
| 0
|
12774938
|
from .event import Event
__all__ = [Event]
| 1.085938
| 1
|
pyRVtest/construction.py
|
chrissullivanecon/pyRVtest
| 0
|
12774939
|
<gh_stars>0
"""Data construction."""
from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Union
import numpy as np
from numpy.linalg import inv
from . import exceptions, options
from .configurations.formulation import Formulation
from .utilities.basics import Array, Groups, RecArray, extract_matrix, interact_ids, structure_matrices, get_indices
from .utilities.algebra import precisely_invert
def build_ownership_testing(
product_data: Mapping, firm_col: str, kappa_specification: Optional[Union[str, Callable[[Any, Any], float]]] = None) -> Array:
r"""Build ownership matrices, :math:`O`.
Ownership or product holding matrices are defined by their cooperation matrix counterparts, :math:`\kappa`. For each
market :math:`t`, :math:`\mathscr{H}_{jk} = \kappa_{fg}` where :math:`j \in J_{ft}`, the set of products
    produced by firm :math:`f` in the market, and similarly, :math:`k \in J_{gt}`, the set produced by firm :math:`g`.
Parameters
----------
product_data : `structured array-like`
Each row corresponds to a product. Markets can have differing numbers of products. The following fields are
required (except for ``firm_ids`` when ``kappa_specification`` is one of the special cases):
- **market_ids** : (`object`) - IDs that associate products with markets.
    firm_col : `str`
        Column in ``product_data`` with firm IDs that associate products with firms. This field is ignored if
``kappa_specification`` is one of the special cases and not a function.
kappa_specification : `str or callable, optional`
Specification for each market's cooperation matrix, :math:`\kappa`, which can either be a general function or a
    string that implements a special case. The general function is of the following form::
kappa(f, g) -> value
where ``value`` is :math:`\mathscr{H}_{jk}` and both ``f`` and ``g`` are firm IDs from the ``firm_ids`` field of
``product_data``.
    The default specification, ``lambda f, g: int(f == g)``, constructs traditional ownership matrices. That is,
    :math:`\kappa = I`, the identity matrix, implies that :math:`\mathscr{H}_{jk}` is :math:`1` if the same firm
produces products :math:`j` and :math:`k`, and is :math:`0` otherwise.
If ``firm_ids`` happen to be indices for an actual :math:`\kappa` matrix, ``lambda f, g: kappa[f, g]`` will
build ownership matrices according to the matrix ``kappa``.
When one of the special cases is specified, ``firm_ids`` in ``product_data`` are not required and if specified
will be ignored:
- ``'monopoly'`` - Monopoly ownership matrices are all ones: :math:`\mathscr{H}_{jk} = 1` for all :math:`j`
and :math:`k`.
- ``'single'`` - Single product firm ownership matrices are identity matrices: :math:`\mathscr{H}_{jk} = 1`
if :math:`j = k` and :math:`0` otherwise.
Returns
-------
`ndarray`
Stacked :math:`J_t \times J_t` ownership matrices, :math:`\mathscr{H}`, for each market :math:`t`. If a market
has fewer products than others, extra columns will contain ``numpy.nan``.
Examples
--------
.. raw:: latex
\begin{examplenotebook}
.. toctree::
/_notebooks/api/build_ownership.ipynb
.. raw:: latex
\end{examplenotebook}
"""
# validate or use the default kappa specification
if kappa_specification is None:
kappa_specification = lambda f, g: np.where(f == g, 1, 0).astype(options.dtype)
elif callable(kappa_specification):
kappa_specification = np.vectorize(kappa_specification, [options.dtype])
elif kappa_specification not in {'monopoly', 'single'}:
raise ValueError("kappa_specification must be None, callable, 'monopoly', or 'single'.")
# extract and validate IDs
market_ids = extract_matrix(product_data, 'market_ids')
firm_ids = extract_matrix(product_data, firm_col)
if market_ids is None:
raise KeyError("product_data must have a market_ids field.")
if market_ids.shape[1] > 1:
raise ValueError("The market_ids field of product_data must be one-dimensional.")
if callable(kappa_specification):
if firm_ids is None:
raise KeyError("product_data must have a field named firm_col when kappa_specification is not a special case.")
if firm_ids.shape[1] > 1:
raise ValueError("The firm_ids field of product_data must be one-dimensional.")
# determine the overall number of products and the maximum number in a market
market_indices = get_indices(market_ids)
N = market_ids.size
max_J = max(i.size for i in market_indices.values())
# construct the ownership matrices
ownership = np.full((N, max_J), np.nan, options.dtype)
for indices_t in market_indices.values():
if kappa_specification == 'monopoly':
ownership[indices_t, :indices_t.size] = 1
elif kappa_specification == 'single':
ownership[indices_t, :indices_t.size] = np.eye(indices_t.size)
else:
assert callable(kappa_specification) and firm_ids is not None
ids_t = firm_ids[indices_t]
tiled_ids_t = np.tile(np.c_[ids_t], ids_t.size)
ownership[indices_t, :indices_t.size] = kappa_specification(tiled_ids_t, tiled_ids_t.T)
return ownership
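# Hypothetical usage sketch (the firm-ID column name 'firm_ids' is an assumption, not a
# requirement of this function): standard ownership matrices and a profit-weight variant.
#
#   ownership = build_ownership_testing(product_data, 'firm_ids')
#   ownership_pw = build_ownership_testing(
#       product_data, 'firm_ids', kappa_specification=lambda f, g: 1.0 if f == g else 0.5)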
def build_blp_instruments(formulation: Formulation, product_data: Mapping) -> Array:
r"""Construct "sums of characteristics" excluded BLP instruments.
Traditional "sums of characteristics" BLP instruments are
.. math:: Z^\text{BLP}(X) = [Z^\text{BLP,Other}(X), Z^\text{BLP,Rival}(X)],
in which :math:`X` is a matrix of product characteristics, :math:`Z^\text{BLP,Other}(X)` is a second matrix that
consists of sums over characteristics of non-rival goods, and :math:`Z^\text{BLP,Rival}(X)` is a third matrix that
consists of sums over rival goods. All three matrices have the same dimensions.
.. note::
To construct simpler, firm-agnostic instruments that are sums over characteristics of other goods, specify a
constant column of firm IDs and keep only the first half of the instrument columns.
Let :math:`x_{jt}` be the vector of characteristics in :math:`X` for product :math:`j` in market :math:`t`, which is
produced by firm :math:`f`. That is, :math:`j \in J_{ft}`. Then,
.. math::
Z_{jt}^\text{BLP,Other}(X) = \sum_{k \in J_{ft} \setminus \{j\}} x_{kt}, \\
Z_{jt}^\text{BLP,Rival}(X) = \sum_{k \notin J_{ft}} x_{kt}.
.. note::
Usually, any supply or demand shifters are added to these excluded instruments, depending on whether they are
meant to be used for demand- or supply-side estimation.
Parameters
----------
formulation : `Formulation`
:class:`Formulation` configuration for :math:`X`, the matrix of product characteristics used to build excluded
instruments. Variable names should correspond to fields in ``product_data``.
product_data : `structured array-like`
Each row corresponds to a product. Markets can have differing numbers of products. The following fields are
required:
- **market_ids** : (`object`) - IDs that associate products with markets.
- **firm_ids** : (`object`) - IDs that associate products with firms.
Along with ``market_ids`` and ``firm_ids``, the names of any additional fields can be used as variables in
``formulation``.
Returns
-------
`ndarray`
Traditional "sums of characteristics" BLP instruments, :math:`Z^\text{BLP}(X)`.
Examples
--------
.. raw:: latex
\begin{examplenotebook}
.. toctree::
/_notebooks/api/build_blp_instruments.ipynb
.. raw:: latex
\end{examplenotebook}
"""
# load IDs
market_ids = extract_matrix(product_data, 'market_ids')
firm_ids = extract_matrix(product_data, 'firm_ids')
if market_ids is None or firm_ids is None:
raise KeyError("product_data must have market_ids and firm_ids fields.")
if market_ids.shape[1] > 1:
raise ValueError("The market_ids field of product_data must be one-dimensional.")
if firm_ids.shape[1] > 1:
raise ValueError("The firm_ids field of product_data must be one-dimensional.")
# initialize grouping objects
market_groups = Groups(market_ids)
paired_groups = Groups(interact_ids(market_ids, firm_ids))
# build the instruments
X = build_matrix(formulation, product_data)
other = paired_groups.expand(paired_groups.sum(X)) - X
rival = market_groups.expand(market_groups.sum(X)) - X - other
return np.ascontiguousarray(np.c_[other, rival])
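# Hypothetical usage sketch (the characteristic names are assumed fields of product_data):
#
#   blp_instruments = build_blp_instruments(Formulation('0 + hpwt + air + mpd'), product_data)
#
# The first half of the returned columns sums characteristics over same-firm products and
# the second half over rival products, market by market.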
def build_differentiation_instruments(
formulation: Formulation, product_data: Mapping, version: str = 'local', interact: bool = False) -> Array:
r"""Construct excluded differentiation instruments.
Differentiation instruments in the spirit of :ref:`references:Gandhi and Houde (2017)` are
.. math:: Z^\text{Diff}(X) = [Z^\text{Diff,Other}(X), Z^\text{Diff,Rival}(X)],
in which :math:`X` is a matrix of product characteristics, :math:`Z^\text{Diff,Other}(X)` is a second matrix that
consists of sums over functions of differences between non-rival goods, and :math:`Z^\text{Diff,Rival}(X)` is a
third matrix that consists of sums over rival goods. Without optional interaction terms, all three matrices have the
same dimensions.
.. note::
To construct simpler, firm-agnostic instruments that are sums over functions of differences between all different
goods, specify a constant column of firm IDs and keep only the first half of the instrument columns.
Let :math:`x_{jt\ell}` be characteristic :math:`\ell` in :math:`X` for product :math:`j` in market :math:`t`, which
is produced by firm :math:`f`. That is, :math:`j \in J_{ft}`. Then in the "local" version of
:math:`Z^\text{Diff}(X)`,
.. math::
:label: local_instruments
Z_{jt\ell}^\text{Local,Other}(X) =
\sum_{k \in J_{ft} \setminus \{j\}} 1(|d_{jkt\ell}| < \text{SD}_\ell), \\
Z_{jt\ell}^\text{Local,Rival}(X) =
\sum_{k \notin J_{ft}} 1(|d_{jkt\ell}| < \text{SD}_\ell),
where :math:`d_{jkt\ell} = x_{kt\ell} - x_{jt\ell}` is the difference between products :math:`j` and :math:`k` in
terms of characteristic :math:`\ell`, :math:`\text{SD}_\ell` is the standard deviation of these pairwise differences
computed across all markets, and :math:`1(|d_{jkt\ell}| < \text{SD}_\ell)` indicates that products :math:`j` and
:math:`k` are close to each other in terms of characteristic :math:`\ell`.
The intuition behind this "local" version is that demand for products is often most influenced by a small number of
other goods that are very similar. For the "quadratic" version of :math:`Z^\text{Diff}(X)`, which uses a more
continuous measure of the distance between goods,
.. math::
:label: quadratic_instruments
        Z_{jt\ell}^\text{Quad,Other}(X) = \sum_{k \in J_{ft} \setminus\{j\}} d_{jkt\ell}^2, \\
        Z_{jt\ell}^\text{Quad,Rival}(X) = \sum_{k \notin J_{ft}} d_{jkt\ell}^2.
With interaction terms, which reflect covariances between different characteristics, the summands for the "local"
versions are :math:`1(|d_{jkt\ell}| < \text{SD}_\ell) \times d_{jkt\ell'}` for all characteristics :math:`\ell'`,
and the summands for the "quadratic" versions are :math:`d_{jkt\ell} \times d_{jkt\ell'}` for all
:math:`\ell' \geq \ell`.
.. note::
Usually, any supply or demand shifters are added to these excluded instruments, depending on whether they are
meant to be used for demand- or supply-side estimation.
Parameters
----------
formulation : `Formulation`
:class:`Formulation` configuration for :math:`X`, the matrix of product characteristics used to build excluded
instruments. Variable names should correspond to fields in ``product_data``.
product_data : `structured array-like`
Each row corresponds to a product. Markets can have differing numbers of products. The following fields are
required:
- **market_ids** : (`object`) - IDs that associate products with markets.
- **firm_ids** : (`object`) - IDs that associate products with firms.
Along with ``market_ids`` and ``firm_ids``, the names of any additional fields can be used as variables in
``formulation``.
version : `str, optional`
The version of differentiation instruments to construct:
- ``'local'`` (default) - Construct the instruments in :eq:`local_instruments` that consider only the
characteristics of "close" products in each market.
- ``'quadratic'`` - Construct the more continuous instruments in :eq:`quadratic_instruments` that consider
all products in each market.
interact : `bool, optional`
Whether to include interaction terms between different product characteristics, which can help capture
covariances between product characteristics.
Returns
-------
`ndarray`
Excluded differentiation instruments, :math:`Z^\text{Diff}(X)`.
Examples
--------
.. raw:: latex
\begin{examplenotebook}
.. toctree::
/_notebooks/api/build_differentiation_instruments.ipynb
.. raw:: latex
\end{examplenotebook}
"""
# load IDs
market_ids = extract_matrix(product_data, 'market_ids')
firm_ids = extract_matrix(product_data, 'firm_ids')
if market_ids is None or firm_ids is None:
raise KeyError("product_data must have market_ids and firm_ids fields.")
if market_ids.shape[1] > 1:
raise ValueError("The market_ids field of product_data must be one-dimensional.")
if firm_ids.shape[1] > 1:
raise ValueError("The firm_ids field of product_data must be one-dimensional.")
# identify markets
market_indices = get_indices(market_ids)
# build the matrix and count its dimensions
X = build_matrix(formulation, product_data)
N, K = X.shape
# for the local version, do a first pass to compute standard deviations of pairwise differences across all markets
sd_mapping: Dict[int, Array] = {}
if version == 'local':
for k in range(K):
distances_count = distances_sum = squared_distances_sum = 0
for t, indices_t in market_indices.items():
x = X[indices_t][:, [k]]
distances = x - x.T
np.fill_diagonal(distances, 0)
distances_count += distances.size - x.size
distances_sum += np.sum(distances)
squared_distances_sum += np.sum(distances**2)
sd_mapping[k] = np.sqrt(squared_distances_sum / distances_count - (distances_sum / distances_count)**2)
# build instruments market-by-market to conserve memory
other_blocks: List[List[Array]] = []
rival_blocks: List[List[Array]] = []
for t, indices_t in market_indices.items():
# build distance matrices for all characteristics
distances_mapping: Dict[int, Array] = {}
for k in range(K):
x = X[indices_t][:, [k]]
distances_mapping[k] = x - x.T
np.fill_diagonal(distances_mapping[k], 0 if version == 'quadratic' else np.inf)
def generate_instrument_terms() -> Iterator[Array]:
"""Generate terms that will be summed to create instruments."""
for k1 in range(K):
if version == 'quadratic':
for k2 in range(k1, K if interact else k1 + 1):
yield distances_mapping[k1] * distances_mapping[k2]
elif version == 'local':
with np.errstate(invalid='ignore'):
close = (np.abs(distances_mapping[k1]) < sd_mapping[k1]).astype(np.float64)
if not interact:
yield close
else:
for k2 in range(K):
yield close * np.nan_to_num(distances_mapping[k2])
else:
raise ValueError("version must be 'local' or 'quadratic'.")
# append instrument blocks
other_blocks.append([])
rival_blocks.append([])
ownership = (firm_ids[indices_t] == firm_ids[indices_t].T).astype(np.float64)
nonownership = 1 - ownership
for term in generate_instrument_terms():
other_blocks[-1].append((ownership * term).sum(axis=1, keepdims=True))
rival_blocks[-1].append((nonownership * term).sum(axis=1, keepdims=True))
return np.c_[np.block(other_blocks), np.block(rival_blocks)]
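# Hypothetical usage sketch (the characteristic names are assumed fields of product_data):
#
#   local_instruments = build_differentiation_instruments(
#       Formulation('0 + hpwt + mpd'), product_data)
#   quadratic_instruments = build_differentiation_instruments(
#       Formulation('0 + hpwt + mpd'), product_data, version='quadratic', interact=True)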
def build_matrix(formulation: Formulation, data: Mapping) -> Array:
r"""Construct a matrix according to a formulation.
Parameters
----------
formulation : `Formulation`
:class:`Formulation` configuration for the matrix. Variable names should correspond to fields in ``data``. The
``absorb`` argument of :class:`Formulation` can be used to absorb fixed effects after the matrix has been
constructed.
data : `structured array-like`
Fields can be used as variables in ``formulation``.
Returns
-------
`ndarray`
The built matrix.
Examples
--------
.. raw:: latex
\begin{examplenotebook}
.. toctree::
/_notebooks/api/build_matrix.ipynb
.. raw:: latex
\end{examplenotebook}
"""
if not isinstance(formulation, Formulation):
raise TypeError("formulation must be a Formulation instance.")
matrix = formulation._build_matrix(data)[0]
if formulation._absorbed_terms:
absorb = formulation._build_absorb(formulation._build_ids(data))
matrix, errors = absorb(matrix)
if errors:
raise exceptions.MultipleErrors(errors)
return matrix
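# Hypothetical usage sketch (the variable names are assumed fields of the data):
#
#   X = build_matrix(Formulation('1 + prices + shares'), product_data)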
def data_to_dict(data: RecArray, ignore_empty: bool = True) -> Dict[str, Array]:
r"""Convert a NumPy record array into a dictionary.
Most data in PyBLP are structured as NumPy record arrays (e.g., :attr:`Problem.products` and
:attr:`SimulationResults.product_data`) which can be cumbersome to work with when working with data types that can't
represent matrices, such as the :class:`pandas.DataFrame`.
This function converts record arrays created by PyBLP into dictionaries that map field names to one-dimensional
arrays. Matrices in the original record array (e.g., ``demand_instruments``) are split into as many fields as there
are columns (e.g., ``demand_instruments0``, ``demand_instruments1``, and so on).
Parameters
----------
data : `recarray`
Record array created by PyBLP.
ignore_empty : `bool, optional`
Whether to ignore matrices with zero size. By default, these are ignored.
Returns
-------
`dict`
The data re-structured as a dictionary.
Examples
--------
.. raw:: latex
\begin{examplenotebook}
.. toctree::
/_notebooks/api/data_to_dict.ipynb
.. raw:: latex
\end{examplenotebook}
"""
if not isinstance(data, np.recarray):
raise TypeError("data must be a NumPy record array.")
mapping: Dict[str, Array] = {}
for key in data.dtype.names:
if len(data[key].shape) > 2:
raise ValueError("Arrays with more than two dimensions are not supported.")
if ignore_empty and data[key].size == 0:
continue
if len(data[key].shape) == 1 or data[key].shape[1] == 1 or data[key].size == 0:
mapping[key] = data[key].flatten()
continue
for index in range(data[key].shape[1]):
new_key = f'{key}{index}'
if new_key in data.dtype.names:
raise KeyError(f"'{key}' cannot be split into columns because '{new_key}' is already a field.")
mapping[new_key] = data[key][:, index].flatten()
return mapping
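# Hypothetical usage sketch (pandas is assumed to be available to the caller): flatten a
# PyBLP-style record array into a DataFrame-friendly dictionary.
#
#   import pandas as pd
#   product_df = pd.DataFrame(data_to_dict(problem.products))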
def build_markups_all(products: RecArray, demand_results: Mapping, model_downstream: Array, ownership_downstream: Array,
model_upstream: Optional[Array] = None, ownership_upstream: Optional[Array] = None, vertical_integration: Optional[Array] = None)-> Array:
## This function computes markups for a large set of standard models. These include:
## standard bertrand with ownership matrix based on firm id
## price setting with arbitrary ownership matrix (e.g. profit weight model)
## standard cournot with ownership matrix based on firm id
## quantity setting with arbitrary ownership matrix (e.g. profit weight model)
## monopoly
## bilateral oligopoly with any combination of the above models upstream and downstream
## bilateral oligopoly as above but with subset of products vertically integrated
## Any of the above with consumer surplus weights (maybe)
    ## Inputs: product_data used for pyBLP demand estimation
    ## results structure from pyBLP demand estimation
    ## model_downstream in ['bertrand', 'cournot', 'monopoly']. If model_upstream is not specified, this is the model without vertical integration
    ## ownership_downstream (optional, default is standard ownership) ownership matrix for price or quantity setting
    ## model_upstream in [None (default), 'bertrand', 'cournot', 'monopoly']. Upstream firm's model
    ## ownership_upstream (optional, default is standard ownership) ownership matrix for price or quantity setting of upstream firms
    ## vertical_integration = vector indicating which product_ids are vertically integrated (i.e. store brands). Default is missing, meaning no vertical integration
    ## Notes: for models w/o vertical integration, firm_ids must be defined in product_data
    ## for vertical integration models, a firm_ids_upstream and firm_ids (=firm_ids_downstream) must be defined
## Compute downstream markups
N = np.size(products.prices)
elas = demand_results.compute_elasticities()
M = len(model_downstream)
markups = [None]*M
markups_upstream = [None]*M
markups_downstream = [None]*M
for kk in range(M):
markups_downstream[kk] = np.zeros((N,1))
markups_upstream[kk] = np.zeros((N,1))
mkts = np.unique(products.market_ids)
if not model_upstream is None:
CP = demand_results.compute_probabilities()
#get alpha for each draw
NS_all = len(demand_results.problem.agents)
sigma_price = np.zeros((NS_all,1))
pi_price = np.zeros((NS_all,1))
for kk in range(len(demand_results.beta)):
if demand_results.beta_labels[kk] == 'prices':
alpha = demand_results.beta[kk]
if demand_results.problem.K2 > 0:
for kk in range(len(demand_results.sigma)):
if demand_results.sigma_labels[kk] == 'prices':
if not np.all((demand_results.sigma[kk] == 0)):
sigma_price = demand_results.problem.agents.nodes@np.transpose(demand_results.sigma[kk])
sigma_price = sigma_price.reshape(NS_all,1)
if demand_results.problem.D > 0:
if not np.all((demand_results.pi[kk] == 0)):
pi_price = demand_results.problem.agents.demographics@np.transpose(demand_results.pi[kk])
pi_price = pi_price.reshape(NS_all,1)
alpha_i = alpha + sigma_price + pi_price
# Compute Markups market-by-market
for mm in mkts:
ind_mm = np.where(demand_results.problem.products['market_ids'] == mm)[0]
p = products.prices[ind_mm]
s = products.shares[ind_mm]
elas_mm = elas[ind_mm]
elas_mm = elas_mm[:, ~np.isnan(elas_mm).all(axis=0)]
dsdp = elas_mm*np.outer(s,1/p)
for kk in range(M):
#Compute downstream markups
O_mm = ownership_downstream[kk][ind_mm]
O_mm = O_mm[:, ~np.isnan(O_mm).all(axis=0)]
if model_downstream[kk] == 'bertrand':
markups_mm = -inv(O_mm*dsdp)@s
elif model_downstream[kk] == 'cournot':
markups_mm = -(O_mm*inv(dsdp))@s
elif model_downstream[kk] == 'monopoly':
markups_mm = -inv(dsdp)@s
markups_downstream[kk][ind_mm] = markups_mm
#Compute upstream markups (if applicable) following formula in Villas-Boas (2007)
if not all(model_upstream[ll] is None for ll in range(M)):
P_ii = CP[ind_mm]
P_ii = P_ii[:, ~np.isnan(P_ii).all(axis=0)]
J = len(p)
indA_mm = np.where(demand_results.problem.agents['market_ids'] == mm)[0]
alpha_mi = np.repeat(np.transpose(alpha_i[indA_mm]),J,axis = 0)
alpha2_mi = alpha_mi**2
H = np.transpose(O_mm*dsdp)
g = np.zeros((J,J))
if len(demand_results.rho) == 0:
Weights = demand_results.problem.agents.weights[indA_mm]
NS = len(Weights)
Weights = Weights.reshape(NS,1)
Weights_a = np.repeat(Weights,J,axis=1)
for kk in range(J):
tmp1 = np.zeros((J,J))
tmp4 = np.zeros((J,J))
P_ik = P_ii[kk].reshape(NS,1)
s_iis_ik= (alpha2_mi*P_ii)@(P_ik*Weights)
tmp1[kk] = np.transpose(s_iis_ik) #corresponds to i = k, put integ(s_jls_kl) in kth row
tmp2 = np.transpose(tmp1) #corresponds to j = k, put integ(s_jls_kl) in kth col
tmp3 = np.diagflat(s_iis_ik) #corresponds to i = j, put integ(s_ils_kl) on main diag
a2s = (alpha2_mi*P_ii)@Weights
tmp4[kk,kk] = a2s[kk] #corresponds to i = j = k, matrix of zeros with s_i at (k,k)
P_ik = np.repeat(P_ik,J,axis=1)
P_ij = np.transpose(P_ii)
s_iis_jis_ki = (alpha2_mi*P_ii)@(P_ij*P_ik*Weights_a)
d2s_idpjpk = (2*s_iis_jis_ki-tmp1-tmp2-tmp3+tmp4)
g[kk] = np.transpose(markups_mm)@(O_mm*d2s_idpjpk)
g = np.transpose(g)
G = dsdp + H + g
dpdp_u=inv(G)@H
dsdp_u=np.transpose(dpdp_u)@dsdp
for ii in range(M):
if not model_upstream[ii] is None:
                    #Compute upstream markups
Ou_mm = ownership_upstream[ii][ind_mm]
Ou_mm = Ou_mm[:, ~np.isnan(Ou_mm).all(axis=0)]
if model_upstream[ii] == 'bertrand':
markups_umm = -inv(Ou_mm*dsdp_u)@s
elif model_upstream[ii] == 'cournot':
markups_umm = -(Ou_mm*inv(dsdp_u))@s
elif model_upstream[ii] == 'monopoly':
markups_umm = -inv(dsdp_u)@s
markups_upstream[ii][ind_mm] = markups_umm
#Compute total markups as sum of upstream and downstream mkps, taking into account vertical integration
for kk in range(M):
if vertical_integration[kk] is None:
vi = np.ones((N,1))
else:
vi = (vertical_integration[kk]-1)**2
markups[kk] = markups_downstream[kk] + vi*markups_upstream[kk]
return(markups,markups_downstream,markups_upstream)
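## Hypothetical usage sketch (all object names are assumptions): downstream Bertrand markups
## for a single candidate model, with no upstream model and no vertical integration.
##
##   ownership = [build_ownership_testing(product_data, 'firm_ids')]
##   markups, markups_down, markups_up = build_markups_all(
##       products, demand_results, model_downstream=['bertrand'],
##       ownership_downstream=ownership, model_upstream=[None],
##       ownership_upstream=[None], vertical_integration=[None])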
| 2.515625
| 3
|
code/src/main/python/store/mongo_store.py
|
DynamicCodeSearch/CodeSeer
| 5
|
12774940
|
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "bigfatnoob"
from store import base_store, mongo_driver
from utils import logger, lib
import properties
import re
LOGGER = logger.get_logger(os.path.basename(__file__.split(".")[0]))
class InputStore(base_store.InputStore):
def __init__(self, dataset, **kwargs):
base_store.InputStore.__init__(self, dataset, **kwargs)
def load_inputs(self, args_key):
arguments = mongo_driver.get_collection(self.dataset, "fuzzed_args").find_one({"key": args_key})["args"]
assert len(arguments) == properties.FUZZ_ARGUMENT_SIZE
if self.is_array(arguments):
key_args = arguments
else:
key_args = [[] for _ in range(len(arguments[0]))]
            # transpose: key_args[i] collects the i-th value from each fuzzed argument set
            for i in range(len(arguments[0])):
                for arg in arguments:
                    key_args[i].append(arg[i])
return key_args
class FunctionStore(base_store.FunctionStore):
def __init__(self, dataset, **kwargs):
self.is_test = None
base_store.FunctionStore.__init__(self, dataset, **kwargs)
def load_function(self, function_name):
collection_name = "test_functions_executed" if self.is_test else "functions_executed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
return collection.find_one({"name": function_name})
def load_functions(self):
collection_name = "test_functions_executed" if self.is_test else "functions_executed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
return collection.find()
def load_metadata(self, funct):
return mongo_driver.get_collection(self.dataset, "functions_metadata").find_one({"name": funct["name"]})
def update_function_arg_type(self, function_name, function_arg_types):
collection = mongo_driver.get_collection(self.dataset, "py_functions_arg_types")
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "name")
collection.insert({
"name": function_name,
"types": function_arg_types
})
def load_function_arg_type(self, function_name):
try:
return mongo_driver.get_collection(self.dataset, "py_functions_arg_types").find_one({"name": function_name})
except Exception as e:
LOGGER.critical("Failed to load args for function: '%s'. Returning None."
"\nMessage: %s" % (function_name, e.message))
return None
def save_py_function(self, function_json):
collection_name = "test_py_functions_executed" if self.is_test else "py_functions_executed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "name")
try:
collection.insert(function_json)
except Exception:
del function_json['outputs']
self.save_failed_py_function(function_json)
def load_py_function(self, function_name):
collection_name = "test_py_functions_executed" if self.is_test else "py_functions_executed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
return collection.find_one({"name": function_name})
def exists_py_function(self, function_name):
return self.load_py_function(function_name) is not None
def save_failed_py_function(self, function_json):
collection_name = "test_py_functions_failed" if self.is_test else "py_functions_failed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "name")
collection.insert(function_json)
def is_invalid_py_function(self, function_name):
collection_name = "test_py_functions_failed" if self.is_test else "py_functions_failed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
return collection.find_one({"name": function_name}) is not None
def load_py_functions(self):
collection_name = "test_py_functions_executed" if self.is_test else "py_functions_executed"
collection = mongo_driver.get_collection(self.dataset, collection_name)
return collection.find()
def save_py_metadata(self, func_json):
collection = mongo_driver.get_collection(self.dataset, "py_functions_metadata")
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "name")
if mongo_driver.contains_document(collection, "name", func_json["name"]):
mongo_driver.delete_document(collection, "name", func_json["name"])
collection.insert(func_json)
def load_py_metadata(self, function_name):
try:
collection = mongo_driver.get_collection(self.dataset, "py_functions_metadata")
return collection.find_one({"name": function_name})
except Exception:
LOGGER.exception("Failed to metadata for function: '%s'. Returning None" % function_name)
return None
def get_executed_functions(self, language):
collection = mongo_driver.get_collection(self.dataset, "language_executed_functions")
document = collection.find_one({"language": language})
if document is None:
return None
return document['names']
class PyFileMetaStore(base_store.PyFileMetaStore):
def __init__(self, dataset, **kwargs):
base_store.PyFileMetaStore.__init__(self, dataset, **kwargs)
def load_meta(self, file_name):
sep_positions = [m.start() for m in re.finditer(os.sep, file_name)]
if sep_positions and len(sep_positions) > 3:
fp_regex = file_name[sep_positions[2]:]
else:
fp_regex = file_name
collection = mongo_driver.get_collection(self.dataset, "py_file_meta")
return collection.find_one({"file_path": {"$regex": fp_regex}})
def save_meta(self, bson_dict):
collection = mongo_driver.get_collection(self.dataset, "py_file_meta")
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "file_path")
collection.insert(bson_dict)
class ArgumentStore(base_store.ArgumentStore):
def __init__(self, dataset, **kwargs):
self.is_test = None
base_store.ArgumentStore.__init__(self, dataset, **kwargs)
def load_args(self, args_key):
collection_name = "test_fuzzed_args" if self.is_test else "fuzzed_args"
collection = mongo_driver.get_collection(self.dataset, collection_name)
try:
return collection.find_one({"key": args_key})
except Exception as e:
LOGGER.exception("Failed to load args with key: '%s'. Returning None" % args_key)
return None
class ExecutionStore(base_store.ExecutionStore):
def __init__(self, dataset, **kwargs):
base_store.ExecutionStore.__init__(self, dataset, **kwargs)
def save_language_executed_function_names(self, language, names):
collection = mongo_driver.get_collection(self.dataset, "language_executed_functions")
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "language")
if mongo_driver.contains_document(collection, "language", language):
mongo_driver.delete_document(collection, "language", language)
collection.insert({
"language": language,
"names": names
})
def save_cloned_function_names(self, name, clones):
collection = mongo_driver.get_collection(self.dataset, "cloned_functions")
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "_function_name_")
if mongo_driver.contains_document(collection, "_function_name_", name):
mongo_driver.delete_document(collection, "_function_name_", name)
clones["_function_name_"] = name
collection.insert(clones)
def load_cloned_function_names(self, name):
collection = mongo_driver.get_collection(self.dataset, "cloned_functions")
return mongo_driver.get_document(collection, "_function_name_", name)
class ClusterStore(base_store.ClusterStore):
def __init__(self, dataset, **kwargs):
base_store.ClusterStore.__init__(self, dataset, **kwargs)
def save_clusters(self, clusters, suffix):
collection_name = "clusters_%s" % suffix
collection = mongo_driver.get_collection(self.dataset, collection_name)
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "cluster_id")
for cluster_id, functions in clusters.items():
LOGGER.info("Saving cluster: '%d', with %d functions" % (cluster_id, len(functions)))
cluster = {
"cluster_id": cluster_id,
"functions": [lib.to_json(f) for f in functions]
}
collection.insert(cluster)
| 2.203125
| 2
|
blog/migrations/0010_auto_20200309_1619.py
|
thecodeblogs/django-tcp-blog
| 1
|
12774941
|
<filename>blog/migrations/0010_auto_20200309_1619.py<gh_stars>1-10
# Generated by Django 3.0.3 on 2020-03-09 16:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0009_auto_20200309_1619'),
]
operations = [
migrations.RenameField('Comment', 'entry', 'entry_envelope'),
migrations.RenameField('EntryEnvelope', 'contents', 'entry'),
]
| 1.460938
| 1
|
okonomiyaki/runtimes/runtime_schemas.py
|
enthought/okonomiyaki
| 1
|
12774942
|
# flake8: noqa
_JULIA_V1 = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "PythonRuntimeMetadata v1.0",
"description": "PythonRuntimeMetadata runtime/metadata.json schema.",
"type": "object",
"properties": {
"metadata_version": {
"description": "The metadata version.",
"type": "string"
},
"implementation": {
"description": "The implementation (e.g. cpython)",
"type": "string"
},
"version": {
"description": "The implementation version, e.g. pypy 2.6.1 would report 2.6.1 as the 'upstream' part.",
"type": "string"
},
"abi": {
"description": "The runtime's ABI, e.g. 'msvc2008' or 'gnu'.",
"type": "string"
},
"language_version": {
"description": "This is the 'language' version, e.g. pypy 2.6.1 would report 2.7.10 here.",
"type": "string"
},
"platform": {
"description": ("The platform string (as can be parsed by"
"EPDPlatform.from_epd_string"),
"type": "string"
},
"build_revision": {
"description": "Build revision (internal only).",
"type": "string",
},
"executable": {
"description": "The full path to the actual runtime executable.",
"type": "string",
},
"paths": {
"description": "The list of path to have access to this runtime.",
"type": "array",
"items": {"type": "string"},
},
"post_install": {
"description": ("The command (as a list) to execute after "
"installation."),
"type": "array",
"items": {"type": "string"},
},
},
"required": [
"metadata_version",
"implementation",
"version",
"abi",
"language_version",
"platform",
"build_revision",
"executable",
"paths",
"post_install",
]
}
_PYTHON_V1 = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "PythonRuntimeMetadata v1.0",
"description": "PythonRuntimeMetadata runtime/metadata.json schema.",
"type": "object",
"properties": {
"metadata_version": {
"description": "The metadata version.",
"type": "string"
},
"implementation": {
"description": "The implementation (e.g. cpython)",
"type": "string"
},
"version": {
"description": "The implementation version, e.g. pypy 2.6.1 would report 2.6.1 as the 'upstream' part.",
"type": "string"
},
"abi": {
"description": "The runtime's ABI, e.g. 'msvc2008' or 'gnu'.",
"type": "string"
},
"language_version": {
"description": "This is the 'language' version, e.g. pypy 2.6.1 would report 2.7.10 here.",
"type": "string"
},
"platform": {
"description": ("The platform string (as can be parsed by"
"EPDPlatform.from_epd_string"),
"type": "string"
},
"build_revision": {
"description": "Build revision (internal only).",
"type": "string",
},
"executable": {
"description": "The full path to the actual runtime executable.",
"type": "string",
},
"paths": {
"description": "The list of path to have access to this runtime.",
"type": "array",
"items": {"type": "string"},
},
"post_install": {
"description": ("The command (as a list) to execute after "
"installation."),
"type": "array",
"items": {"type": "string"},
},
"scriptsdir": {
"description": "Full path to scripts directory.",
"type": "string",
},
"site_packages": {
"description": "The full path to the python site packages.",
"type": "string",
},
"python_tag": {
"description": "The python tag, as defined in PEP 425.",
"type": "string",
},
},
"required": [
"metadata_version",
"implementation",
"version",
"abi",
"language_version",
"platform",
"build_revision",
"executable",
"paths",
"post_install",
"scriptsdir",
"site_packages",
"python_tag",
]
}
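# Hypothetical usage sketch (the jsonschema package is assumed to be available to callers
# of this module): validate a parsed runtime/metadata.json payload against a schema.
#
#   import jsonschema
#   jsonschema.validate(metadata_dict, _PYTHON_V1)  # raises ValidationError on mismatch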
| 1.898438
| 2
|
python_sandbox/python_sandbox/tests/effective_python/test_item11.py
|
jduan/cosmos
| 0
|
12774943
|
<gh_stars>0
import unittest
from itertools import zip_longest
class TestItem11(unittest.TestCase):
def test1(self):
names = ['Cecilia', 'Lise', 'Marie']
letters = [len(n) for n in names]
max_letters = 0
longest_name = None
for name, count in zip(names, letters):
if count > max_letters:
max_letters = count
longest_name = name
self.assertEqual('Cecilia', longest_name)
def test2(self):
"""
zip truncates its output silently if you supply it with iterators of different lengths.
Use zip_longest from itertools if you don't like the truncating behavior.
:return:
:rtype:
"""
names = ['Cecilia', 'Lise', 'Marie']
ages = [30, 50, 20, 99]
names_and_ages = list(zip(names, ages))
self.assertEqual([('Cecilia', 30), ('Lise', 50), ('Marie', 20)], names_and_ages)
# the default fillvalue is None
names_and_ages2 = list(zip_longest(names, ages, fillvalue=''))
self.assertEqual(
[('Cecilia', 30), ('Lise', 50), ('Marie', 20), ('', 99)],
names_and_ages2
)
| 3.5625
| 4
|
python/pymxp/pymxp/messages/program_fragment.py
|
MoysheBenRabi/setp
| 1
|
12774944
|
# Copyright 2009 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is an auto-generated file. Do not edit it directly.
# Instead edit messagegen.pl and descr.txt
class ProgramFragment(object):
def __init__(self):
self.max_program_name = 25
self.program_name = ''
self.program_major_version = 0
self.program_minor_version = 0
self.protocol_major_version = 0
self.protocol_minor_version = 0
self.protocol_source_revision = 0
def clear(self):
self.program_name = ''
self.program_major_version = 0
self.program_minor_version = 0
self.protocol_major_version = 0
self.protocol_minor_version = 0
self.protocol_source_revision = 0
super(ProgramFragment,self).clear()
def frame_data_size(self, frame_index):
result = 0
result += 1
result += 1
result += 1
result += 1
result += 4
return result
def serialize(self, writer):
writer.writeRange(self.program_name,self.max_program_name,'chr')
writer.write(self.program_major_version,'byte')
writer.write(self.program_minor_version,'byte')
writer.write(self.protocol_major_version,'byte')
writer.write(self.protocol_minor_version,'byte')
writer.write(self.protocol_source_revision,'uint')
def deserialize(self, reader):
(self.program_name, c) = reader.readRange(self.max_program_name,'chr',1)
(self.program_major_version, c) = reader.read('byte')
(self.program_minor_version, c) = reader.read('byte')
(self.protocol_major_version, c) = reader.read('byte')
(self.protocol_minor_version, c) = reader.read('byte')
(self.protocol_source_revision, c) = reader.read('uint')
def __str__(self):
return 'ProgramFragment('+self.program_name \
+ str(self.program_major_version) \
+ str(self.program_minor_version) \
+ str(self.protocol_major_version) \
+ str(self.protocol_minor_version) \
+ str(self.protocol_source_revision)+')'
def __eq__(self,other):
return True and \
(self.program_name == other.program_name) and \
self.program_major_version == other.program_major_version and \
self.program_minor_version == other.program_minor_version and \
self.protocol_major_version == other.protocol_major_version and \
self.protocol_minor_version == other.protocol_minor_version and \
self.protocol_source_revision == other.protocol_source_revision
def __ne__(self,other):
        return (self.program_name != other.program_name) or \
self.program_major_version != other.program_major_version or \
self.program_minor_version != other.program_minor_version or \
self.protocol_major_version != other.protocol_major_version or \
self.protocol_minor_version != other.protocol_minor_version or \
self.protocol_source_revision != other.protocol_source_revision
| 2.015625
| 2
|
Server/app/views/v1/mixed/post/faq.py
|
moreal/DMS-Backend
| 27
|
12774945
|
<reponame>moreal/DMS-Backend
from flask import Blueprint
from flask_restful import Api
from app.views.v1 import auth_required
from app.models.post import FAQModel
from app.views.v1.mixed.post import PostAPIResource
api = Api(Blueprint('faq-api', __name__))
@api.resource('/faq')
class FAQList(PostAPIResource):
@auth_required
def get(self):
"""
        Retrieve the FAQ list
"""
return self.get_list_as_response(FAQModel)
@api.resource('/faq/<post_id>')
class FAQItem(PostAPIResource):
@auth_required
def get(self, post_id):
"""
        Retrieve FAQ content
"""
return self.get_item_as_response(FAQModel, post_id)
| 2.25
| 2
|
python/Coffee Machine/coffeeMachine.py
|
ninefyi/hacktoberfest2021
| 0
|
12774946
|
<filename>python/Coffee Machine/coffeeMachine.py<gh_stars>0
# In need of coffee but it's lockdown,
# so here I bring a digital coffee machine...
import os
import coffeeMachine_art
from coffeeMachine_data import MENU, resources
money_in_machine = 0
machine_ON = True
def make_transaction():
print("Please insert coins.")
money = float(input("how many quarters($0.25)?: ")) * 0.25
money += float(input("how many dimes($0.10)?: ")) * 0.10
money += float(input("how many nickles($0.05)?: ")) * 0.05
money += float(input("how many pennies($0.01)?: ")) * 0.01
return money
def resources_check(key):
if resources['water'] < MENU[key]['ingredients']['water']:
print(f"Sorry there is not enough water.")
return False
elif resources['coffee'] < MENU[key]['ingredients']['coffee']:
print(f"Sorry there is not enough coffee.")
return False
elif key != 'espresso' and resources['milk'] < MENU[key]['ingredients']['milk']:
print(f"Sorry there is not enough water.")
return False
else:
resources['water'] -= MENU[key]['ingredients']['water']
resources['coffee'] -= MENU[key]['ingredients']['coffee']
if key != 'espresso':
resources['milk'] -= MENU[key]['ingredients']['milk']
return True
def make_coffee(key):
global money_in_machine
money_required = MENU[key]["cost"]
if resources_check(key):
print(f"Cost: ${MENU[key]['cost']}")
money_received = make_transaction()
if money_received < money_required:
print("Sorry that's not enough money. Money refunded.")
else:
money_in_machine += money_required
money_return = round(money_received - money_required, 2)
print(f"Here is ${money_return} change.")
print(f"Here is your {key} ☕. Enjoy!")
os.system('clear')
print(coffeeMachine_art.coffee_art)
print(coffeeMachine_art.logo)
while machine_ON:
print('''
What would you like?
1. 'Espresso'
2. 'Latte'
3. 'Cappuccino'
4. 'Report' (for knowing current status of resources in the machine)
5. 'Off' (for turning off the machine)
''')
choice = input("Type your desired option: ").lower()
if choice == 'off':
machine_ON = False
print("Thank you for using coffee machine...")
elif choice == 'report':
print(f"Water: {resources['water']}ml")
print(f"Milk: {resources['milk']}ml")
print(f"Coffee: {resources['coffee']}gm")
print(f"Money: ${money_in_machine}")
input("Press Enter to continue...")
elif choice == 'espresso' or choice == 'latte' or choice == 'cappuccino':
make_coffee(choice)
input("Press Enter to continue...")
else:
print("Invalid choice...")
input("Press Enter to continue...")
| 3.796875
| 4
|
Code/photometry_functions.py
|
MichaelDAlbrow/pyDIA
| 10
|
12774947
|
import sys
import os
import numpy as np
from astropy.io import fits
from pyraf import iraf
from io_functions import read_fits_file, write_image
from image_functions import compute_saturated_pixel_mask, subtract_sky
def transform_coeffs(deg,dx,xx,yy):
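    # Least-squares fit of a 2D polynomial surface of total degree `deg` to the offsets
    # dx over normalised coordinates (xx, yy): build the normal equations M c = v and
    # return the coefficients as a (deg+1, deg+1) matrix a[m, n].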
a = np.zeros((deg+1,deg+1))
nterms = (deg+1)*(deg+2)/2
M = np.zeros((nterms,nterms))
v = np.zeros(nterms)
i = 0
for m in range(deg+1):
for n in range(deg+1-m):
v[i] = np.sum(dx* xx**m * yy**n)
j = 0
for p in range(deg+1):
for q in range(deg+1-p):
M[i,j] = np.sum(xx**(m+p) * yy**(n+q))
j += 1
i += 1
c = np.linalg.solve(M,v)
i = 0
for m in range(deg+1):
for n in range(deg+1-m):
a[m,n] = c[i]
i += 1
return a
def compute_xy_shift(pos1,pos2,threshold,dx=0.0,dy=0.0,degree=0):
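    # Iteratively match the star lists pos1 and pos2: for each polynomial degree up to
    # `degree`, pair each star with its nearest neighbour under the current transform,
    # keep matches within a shrinking distance threshold, and refit the x/y offset
    # polynomials a and b on the retained pairs.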
x1 = pos1[:,0]
y1 = pos1[:,1]
x2 = pos2[:,0]
y2 = pos2[:,1]
xx = (x1 - np.mean(x1))/np.mean(x1)
yy = (y1 - np.mean(y1))/np.mean(y1)
print 'Matching positions for',len(x1),'stars'
match = np.zeros_like(x1,dtype=np.int32)
deltax = np.zeros_like(x1)
deltay = np.zeros_like(x1)
for deg in range(degree+1):
a = np.zeros((deg+1,deg+1))
b = np.zeros((deg+1,deg+1))
if deg == 0:
a[0,0] = dx
b[0,0] = dy
else:
for m in range(deg):
for n in range(deg-m):
a[m,n] = a_prev[m,n]
b[m,n] = b_prev[m,n]
for scale in range(21,0,-4):
xoffset = np.zeros_like(x1)
yoffset = np.zeros_like(x1)
for m in range(deg+1):
for n in range(deg+1-m):
xoffset += a[m,n]*(xx**m)*(yy**n)
yoffset += b[m,n]*(xx**m)*(yy**n)
for j1 in range(len(x1)):
r2 = (x1[j1]-x2-xoffset[j1])**2 + (y1[j1]-y2-yoffset[j1])**2
mm = np.where(r2 == np.min(r2))
try:
match[j1] = np.where(r2 == np.min(r2))[0][0]
except:
print r2
print np.min(np.sqrt(r2))
print mm
print mm[0]
sys.exit(0)
deltax[j1] = x1[j1] - x2[match[j1]] - xoffset[j1]
deltay[j1] = y1[j1] - y2[match[j1]] - yoffset[j1]
deltar = np.sqrt(deltax**2 + deltay**2)
good = np.where(deltar<scale*threshold)[0]
dx = x1 - x2[match]
dy = y1 - y2[match]
a = transform_coeffs(deg,dx[good],xx[good],yy[good])
b = transform_coeffs(deg,dy[good],xx[good],yy[good])
print 'degree', deg, 'using',good.shape[0],'stars'
print 'threshold = ',scale*threshold,'pixels'
print 'a = ',a
print 'b = ',b
print 'std = ',np.std(deltar),'(all) ',np.std(deltar[good]),'(matched)'
a_prev = a
b_prev = b
return a, b
def detect_stars(f,params):
print 'Detecting stars in',f.name
print 'Current directory is', os.getcwd()
fp = params.loc_output+os.path.sep
fn = f.fullname
iraf.digiphot()
iraf.daophot()
print 'FWHM = ',f.fw
nstars = 0
thresh = 100
while (nstars < 2*params.nstamps) and (thresh > 1.5):
print 'thresh = ',thresh
for d in ['temp.stars','temp.phot']:
if os.path.exists(fp+d):
os.system('/bin/rm '+fp+d)
iraf.daofind(image=fn,output=fp+'temp.stars',interactive='no',verify='no',
threshold=thresh,sigma=30,fwhmpsf=f.fw,
datamin=params.pixel_min,datamax=params.pixel_max,
epadu=params.gain,readnoise=params.readnoise,
noise='poisson')
iraf.phot(image=fn,output=fp+'temp.phot',coords=fp+'temp.stars',interactive='no',
verify='no',
sigma=30,fwhmpsf=f.fw,datamin=params.pixel_min,
datamax=params.pixel_max,epadu=params.gain,
readnoise=params.readnoise,noise='poisson',Stdout='/dev/null')
nstars = 0
if os.path.exists(fp+'temp.phot'):
iraf.psort(infiles=fp+'temp.phot',field='MAG')
iraf.prenumber(infile=fp+'temp.phot')
s = iraf.pdump(infiles=fp+'temp.phot',Stdout=1,fields='ID,XCENTER,YCENTER,MAG',
expr='yes')
stars = np.zeros([len(s),3])
i = 0
for line in s:
mag = line.split()[3]
if not(mag == 'INDEF'):
stars[i,:] = np.array(map(float,line.split()[1:4]))
i += 1
nstars = i
thresh = thresh*0.5
if nstars == 0:
print 'Error: could not detect stars in',fn
return None
stars = stars[:i,:].copy()
sys.old_stdout = sys.stdout
return stars
def choose_stamps(f,params):
mask = compute_saturated_pixel_mask(f.image,params)
stars = detect_stars(f,params)
(xmax,ymax) = f.image.shape
n_good = 0
snum = np.zeros(params.nstamps).astype(np.int)
md = params.stamp_edge_distance
q = np.where((stars[:,0] > md) & (stars[:,0] < xmax-md) &
(stars[:,1] > md) & (stars[:,1] < ymax-md))
if len(q[0]) >= params.nstamps:
gstars = stars[q]
else:
print 'Warning: using stamps close to edge of detector'
gstars = stars
md = int(params.stamp_half_width)
i = 0
while (n_good < params.nstamps) & (i<gstars.shape[0]):
if ((gstars[i,0] > md) & (gstars[i,0] < xmax-md) & (gstars[i,1] > md) &
(gstars[i,1] < ymax-md)):
mstamp = mask[int(gstars[i,0]+0.5)-md:int(gstars[i,0]+0.5)+md,int(gstars[i,1]+0.5)-md:int(gstars[i,1]+0.5)+md]
q = np.where(mstamp<1)
if len(q[0]) == 0:
snum[n_good] = i
n_good += 1
i += 1
if n_good < params.nstamps:
print 'Warning: stamps may contain saturated pixels'
stamps = gstars[:params.nstamps,:]
else:
stamps = gstars[snum]
return stamps
def rewrite_psg(file1,file2):
min_separation = 100.0
q = open(file2,'w')
lastgroup = -1
for line in open(file1,'r'):
if line[0] == '#':
q.write(line)
else:
group = int(line.split()[1])
if group > lastgroup:
lastgroup = group
x0 = float(line.split()[2])
                y0 = float(line.split()[3])
else:
x = float(line.split()[2])
                y = float(line.split()[3])
separation = np.sqrt((x-x0)**2 + (y-y0)**2)
if separation < min_separation:
min_separation = separation
q.write(line)
q.close()
return int(min_separation)
def compute_psf_image(params,g,psf_deg=1,psf_rad=8,
star_file='phot.mags',psf_image='psf.fits',edge_dist=5):
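    # DAOPHOT/IRAF pipeline: detect stars (or transform positions from params.star_file),
    # run aperture photometry, select PSF stars, subtract their neighbours, build the PSF
    # image, and finally run ALLSTAR to write profile-fitting magnitudes to ref.mags.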
iraf.digiphot()
iraf.daophot()
fp = params.loc_output+os.path.sep
f_im = g.image*g.mask
f = fp+'temp.ref.fits'
write_image(f_im,f)
g.fw = np.max([1.5,g.fw])
g.fw = np.min([0.5*params.psf_max_radius,g.fw])
logfile = fp+'psf.log'
fd = fits.getdata(f)
xmax = fd.shape[0] - edge_dist
ymax = fd.shape[1] - edge_dist
for d in ['temp.stars','temp.phot','temp.phot1','temp.phot2','temp.pst',
'temp.opst','temp.opst2',
'temp.psf.fits','temp.psf1.fits','temp.psf2.fits','temp.psg',
'temp.psg2','temp.psg3','temp.psg5','temp.rej','temp.rej2',
'temp.sub.fits','temp.sub1.fits',
'temp.sub2.fits','temp.opst1','temp.opst3','temp.rej3',
'temp.nst','temp.stars1','ref.mags',psf_image,'temp.als',
'temp.als2']:
if os.path.exists(fp+d):
os.remove(fp+d)
# locate stars
iraf.daofind(image=f,output=fp+'temp.stars',interactive='no',verify='no',
threshold=3,sigma=params.star_detect_sigma,fwhmpsf=g.fw,
datamin=1,datamax=params.pixel_max,
epadu=params.gain,readnoise=params.readnoise,
noise='poisson')
if params.star_file:
als_recenter = 'no'
all_template_stars = np.genfromtxt(params.star_file)
all_new_stars = np.genfromtxt(fp+'temp.stars')
if all_new_stars.shape[0] > params.star_file_number_match:
new_stars = all_new_stars[all_new_stars[:,2].argsort()][:params.star_file_number_match]
else:
new_stars = all_new_stars
if all_template_stars.shape[0] > params.star_file_number_match:
template_stars = all_template_stars[all_template_stars[:,3].argsort()][:params.star_file_number_match]
else:
template_stars = all_template_stars
tx, ty = compute_xy_shift(new_stars,template_stars[:,1:3],0.5,
degree=params.star_file_transform_degree)
if params.star_file_has_magnitudes:
star_positions = all_template_stars[:,1:4]
xx = (star_positions[:,0]-np.mean(new_stars[:,0]))/np.mean(new_stars[:,0])
yy = (star_positions[:,1]-np.mean(new_stars[:,1]))/np.mean(new_stars[:,1])
for m in range(params.star_file_transform_degree+1):
for n in range(params.star_file_transform_degree+1-m):
star_positions[:,0] += tx[m,n]* xx**m * yy**n
star_positions[:,1] += ty[m,n]* xx**m * yy**n
np.savetxt(fp+'temp.stars.1',star_positions,fmt='%10.3f %10.3f %10.3f')
else:
star_positions = all_template_stars[:,1:3]
xx = (star_positions[:,0]-np.mean(new_stars[:,0]))/np.mean(new_stars[:,0])
yy = (star_positions[:,1]-np.mean(new_stars[:,1]))/np.mean(new_stars[:,1])
for m in range(params.star_file_transform_degree+1):
for n in range(params.star_file_transform_degree+1-m):
star_positions[:,0] += tx[m,n]* xx**m * yy**n
star_positions[:,1] += ty[m,n]* xx**m * yy**n
np.savetxt(fp+'temp.stars.1',star_positions,fmt='%10.3f %10.3f')
all_template_stars[:,1] = star_positions[:,0]
all_template_stars[:,2] = star_positions[:,1]
else:
als_recenter = 'yes'
star_positions = np.genfromtxt(fp+'temp.stars')
np.savetxt(fp+'temp.stars.1',star_positions[:,:2],fmt='%10.3f %10.3f')
iraf.phot(image=f,output=fp+'temp.phot',coords=fp+'temp.stars.1',interactive='no',
verify='no',
sigma=params.star_detect_sigma,fwhmpsf=g.fw,apertures=g.fw,
datamin=1,
datamax=2*params.pixel_max,epadu=params.gain,annulus=3*g.fw,
dannulus=3.0,
readnoise=params.readnoise,noise='poisson')
print 'fw = ',g.fw
#fw = np.max([4.0,fw])
#print 'fw = ',fw
# select PSF stars
iraf.pstselect(image=f,photfile=fp+'temp.phot',pstfile=fp+'temp.pst',maxnpsf=40,
interactive='no',verify='no',datamin=1,fitrad=2.0,
datamax=params.pixel_max,epadu=params.gain,psfrad=np.max([3.0,g.fw]),
readnoise=params.readnoise,noise='poisson')
if params.star_file and params.star_file_has_magnitudes:
# We don't need to do the photometry - only make the PSF
# Initial PSF estimate to generate PSF groups
#psfrad=3*np.max([g.fw,1.8])
iraf.psf(image=f,photfile=fp+'temp.phot',pstfile=fp+'temp.pst',psfimage=fp+'temp.psf',
function=params.psf_profile_type,opstfile=fp+'temp.opst',
groupfile=fp+'temp.psg',
interactive='no',
verify='no',varorder=0 ,psfrad=2*np.max([g.fw,1.8]),
datamin=-10000,datamax=0.95*params.pixel_max,
scale=1.0)
# construct a file of the psf neighbour stars
slist = []
psf_stars = np.loadtxt(fp+'temp.opst',usecols=(0,1,2))
for star in range(psf_stars.shape[0]):
xp = psf_stars[star,1]
yp = psf_stars[star,2]
xmin = np.max([np.int(xp-10*g.fw),0])
xmax = np.min([np.int(xp+10*g.fw),f_im.shape[0]])
ymin = np.max([np.int(yp-10*g.fw),0])
ymax = np.min([np.int(yp+10*g.fw),f_im.shape[1]])
p = star_positions[np.logical_and(np.logical_and(star_positions[:,0]>xmin,
star_positions[:,0]<xmax),
np.logical_and(star_positions[:,1]>ymin,
star_positions[:,1]<ymax))]
slist.append(p)
group_stars = np.concatenate(slist)
np.savetxt(fp+'temp.nst',group_stars,fmt='%10.3f %10.3f %10.3f')
# subtract PSF star neighbours
iraf.substar(image=f,photfile=fp+'temp.nst',psfimage=fp+'temp.psf',
exfile=fp+'temp.opst',fitrad=2.0,
subimage=fp+'temp.sub1',verify='no',datamin=1,
datamax=params.pixel_max,epadu=params.gain,
readnoise=params.readnoise,noise='poisson')
# final PSF
iraf.psf(image=fp+'temp.sub1',photfile=fp+'temp.phot',pstfile=fp+'temp.opst',
psfimage=psf_image,psfrad=2*g.fw,
function=params.psf_profile_type,opstfile=fp+'temp.opst2',
groupfile=fp+'temp.psg2',
interactive='no',
verify='no',varorder=0,
datamin=1,datamax=0.95*params.pixel_max,
scale=1.0)
np.savetxt(fp+'ref.mags',all_template_stars,fmt='%7d %10.3f %10.3f %10.3f')
stars = all_template_stars
else:
# initial PSF estimate
iraf.psf(image=f,photfile=fp+'temp.phot',pstfile=fp+'temp.pst',psfimage=fp+'temp.psf',
function=params.psf_profile_type,opstfile=fp+'temp.opst',
groupfile=fp+'temp.psg1',
interactive='no',
verify='no',varorder=0 ,psfrad=2*g.fw,
datamin=1,datamax=0.95*params.pixel_max,
scale=1.0)
# separation distance of near neighbours
separation = np.max([rewrite_psg(fp+'temp.psg1',fp+'temp.psg2'),3])
print 'separation = ',separation
# subtract all stars using truncated PSF
iraf.allstar(image=f,photfile=fp+'temp.phot',psfimage=fp+'temp.psf',
allstarfile=fp+'temp.als',rejfile='',
subimage=fp+'temp.sub',verify='no',psfrad=2*g.fw,fitrad=2.0,
recenter='yes',groupsky='yes',fitsky='yes',sannulus=7,wsannulus=10,
datamin=1,datamax=params.pixel_max,
epadu=params.gain,readnoise=params.readnoise,
noise='poisson')
if params.star_file:
os.system('cp '+fp+'temp.phot '+fp+'temp.phot2')
else:
# locate new stars
iraf.daofind(image=fp+'temp.sub',output=fp+'temp.stars1',interactive='no',verify='no',
threshold=3,sigma=params.star_detect_sigma,fwhmpsf=2*g.fw,
datamin=1,datamax=params.pixel_max,
epadu=params.gain,readnoise=params.readnoise,
noise='poisson')
# magnitudes for new stars
iraf.phot(image=fp+'temp.sub',output=fp+'temp.phot1',coords=fp+'temp.stars1',
interactive='no',
verify='no',sigma=params.star_detect_sigma,
fwhmpsf=g.fw,datamin=1,
datamax=params.pixel_max,epadu=params.gain,
readnoise=params.readnoise,noise='poisson')
# join star lists together
iraf.pconcat(infiles=fp+'temp.phot,'+fp+'temp.phot1',outfile=fp+'temp.phot2')
# new PSF estimate to generate PSF groups
iraf.psf(image=f,photfile=fp+'temp.phot2',pstfile=fp+'temp.pst',psfimage=fp+'temp.psf2',
function=params.psf_profile_type,opstfile=fp+'temp.opst2',
groupfile=fp+'temp.psg3',
interactive='no',
verify='no',varorder=0 ,psfrad=2*g.fw,
datamin=-10000,datamax=0.95*params.pixel_max,
scale=1.0)
# magnitudes for PSF group stars
iraf.nstar(image=f,groupfile=fp+'temp.psg3',psfimage=fp+'temp.psf2',
nstarfile=fp+'temp.nst',
rejfile='',verify='no',psfrad=2*g.fw,fitrad=2.0,
recenter='no',
groupsky='yes',fitsky='yes',sannulus=7,wsannulus=10,
datamin=1,datamax=params.pixel_max,
epadu=params.gain,readnoise=params.readnoise,noise='poisson')
# subtract PSF star neighbours
iraf.substar(image=f,photfile=fp+'temp.nst',psfimage=fp+'temp.psf2',
exfile=fp+'temp.opst2',fitrad=2.0,
subimage=fp+'temp.sub1',verify='no',datamin=1,
datamax=params.pixel_max,epadu=params.gain,
readnoise=params.readnoise,noise='poisson')
# final PSF
iraf.psf(image=fp+'temp.sub1',photfile=fp+'temp.phot2',
pstfile=fp+'temp.opst2',
psfimage=psf_image,psfrad=2*g.fw,
function=params.psf_profile_type,opstfile=fp+'temp.opst3',
groupfile=fp+'temp.psg5',
interactive='no',
verify='no',varorder=0,
datamin=1,datamax=0.95*params.pixel_max,
scale=1.0)
# final photometry
iraf.allstar(image=g.fullname,photfile=fp+'temp.phot2',psfimage=psf_image,
allstarfile=fp+'temp.als2',rejfile='',
subimage=fp+'temp.sub2',verify='no',psfrad=2*g.fw,
recenter=als_recenter,groupsky='yes',fitsky='yes',sannulus=7,
wsannulus=10,fitrad=2.0,
datamin=params.pixel_min,datamax=params.pixel_max,
epadu=params.gain,readnoise=params.readnoise,
noise='poisson')
psfmag = 10.0
for line in open(fp+'temp.als2','r'):
sline = line.split()
if sline[1] == 'PSFMAG':
psfmag = float(sline[3])
break
if params.star_file:
iraf.psort(infiles=fp+'temp.als2',field='ID')
os.system('cp '+fp+'temp.als2 '+fp+'temp.als3')
else:
selection = 'XCE >= '+str(edge_dist)+' && XCE <= '+str(xmax)+' && YCE >= '+str(edge_dist)+' && YCE <= '+str(ymax)+' && MAG != INDEF'
iraf.pselect(infiles=fp+'temp.als2',outfiles=fp+'temp.als3',expr=selection)
iraf.psort(infiles=fp+'temp.als3',field='MAG')
iraf.prenumber(infile=fp+'temp.als3')
s = iraf.pdump(infiles=fp+'temp.als3',Stdout=1,
fields='ID,XCENTER,YCENTER,MAG,MERR,MSKY,SHARPNESS,CHI',expr='yes')
sf = [k.replace('INDEF','22.00') for k in s]
stars = np.zeros([len(sf),5])
for i, line in enumerate(sf):
stars[i,:] = np.array(map(float,sf[i].split()[1:6]))
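    # columns of stars are XCENTER, YCENTER, MAG, MERR, MSKY
    # (fields 1:6 of each pdump record above)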
s = iraf.pdump(infiles=fp+'temp.als3',Stdout=1,
fields='ID,XCENTER,YCENTER,MAG,MERR,SHARPNESS,CHI,MSKY',expr='yes')
sf = [k.replace('INDEF','22.00') for k in s]
with open(fp+'ref.mags','w') as fid:
for s in sf:
fid.write(s+'\n')
return stars
def group_stars_ccd(params,star_positions,reference):
print 'grouping stars'
d, h = read_fits_file(reference)
ccd_size = d.shape
print d.shape
xpos = np.abs(star_positions[:,0])
ypos = np.abs(star_positions[:,1])
g_size = params.ccd_group_size
n_groups_x = (ccd_size[1]-1)/g_size + 1
n_groups_y = (ccd_size[0]-1)/g_size + 1
print np.min(xpos), np.min(ypos)
print np.max(xpos), np.max(ypos)
print n_groups_x, n_groups_y
indx = (xpos*0).astype(np.int)
c = 0
k = 0
mposx = np.zeros(n_groups_x*n_groups_y)
mposy = np.zeros(n_groups_x*n_groups_y)
g_bound = np.zeros(n_groups_x*n_groups_y).astype(np.int)
for i in range(n_groups_x):
for j in range(n_groups_y):
print 'group',i,j,i*g_size,(i+1)*g_size,j*g_size,(j+1)*g_size
mposx[k] = (i+0.5)*g_size
mposy[k] = (j+0.5)*g_size
p = np.where((xpos>=i*g_size) & (xpos<(i+1)*g_size) &
(ypos>=j*g_size) & (ypos<(j+1)*g_size))[0]
if p.shape[0]:
pn = p.shape[0]
indx[c:c+pn] = p
c += pn
print k, pn, c
g_bound[k] = c
k += 1
return indx,g_bound,mposx,mposy
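# Illustrative sketch (not called anywhere here): how the values returned by
# group_stars_ccd are typically unpacked.  indx lists star indices ordered by
# group, g_bound[k] is the cumulative end of group k, and (mposx[k], mposy[k])
# is the centre of group k.  The argument names are placeholders.
#
#   indx, g_bound, mposx, mposy = group_stars_ccd(params, star_positions, reference)
#   start = 0
#   for k in range(g_bound.shape[0]):
#       members = indx[start:g_bound[k]]    # indices of the stars in group k
#       print 'group centre', mposx[k], mposy[k], ':', members.shape[0], 'stars'
#       start = g_bound[k]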
| 2.25
| 2
|
label_studio/utils/functions.py
|
sdadas/label-studio
| 0
|
12774948
|
<reponame>sdadas/label-studio
# assorted helpers: label config parsing, sample task generation, host/protocol settings
import os
import numpy as np
import pandas as pd
from collections import defaultdict
from urllib.parse import urlencode
from lxml import etree
try:
    import ujson as json  # prefer ujson when available (faster drop-in replacement)
except ImportError:
    import json
# examples for import tasks
_DATA_EXAMPLES = None
# label config validation schema
_LABEL_CONFIG_SCHEMA = os.path.join(os.path.dirname(__file__), 'schema', 'label_config_schema.json')
with open(_LABEL_CONFIG_SCHEMA) as f:
_LABEL_CONFIG_SCHEMA_DATA = json.load(f)
PROTOCOL = ''
HOSTNAME = ''
def get_task_from_labeling_config(config):
    """ Extract task data, completions & predictions from a JSON block embedded
        as an XML comment at the top of the label config: <!-- {...} -->
    """
    task_data, completions, predictions = {}, None, None
    # locate the JSON payload: support both '<!-- {' and '<!--{' spellings
    start = config.find('<!-- {')
    start = start if start >= 0 else config.find('<!--{')
    start += 4  # skip over '<!--'; if no marker was found, start is now 3
    end = config[start:].find('-->') if start >= 0 else -1
    # start > 3 means a marker was really found, end > 0 means the comment is closed
    if 3 < start < start + end:
        try:
            body = json.loads(config[start:start + end])
        except ValueError:
            # malformed JSON inside the comment: ignore it
            pass
        else:
            dont_use_root = 'predictions' in body or 'completions' in body
            task_data = body['data'] if 'data' in body else (None if dont_use_root else body)
            predictions = body['predictions'] if 'predictions' in body else None
            completions = body['completions'] if 'completions' in body else None
    return task_data, completions, predictions
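# Illustrative sketch of the kind of comment this function looks for (the tag
# names and data values are made up):
#   <View>
#     <!-- {"data": {"text": "hello"}, "completions": [], "predictions": []} -->
#     <Text name="txt" value="$text"/>
#   </View>
# would yield task_data={'text': 'hello'}, completions=[], predictions=[].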
def data_examples(mode):
""" Data examples for editor preview and task upload examples
"""
global _DATA_EXAMPLES
if _DATA_EXAMPLES is None:
with open(os.path.join(os.path.dirname(__file__), 'schema', 'data_examples.json')) as f:
_DATA_EXAMPLES = json.load(f)
roots = ['editor_preview', 'upload']
for root in roots:
for key, value in _DATA_EXAMPLES[root].items():
if isinstance(value, str):
_DATA_EXAMPLES[root][key] = value.replace('<HOSTNAME>', HOSTNAME)
return _DATA_EXAMPLES[mode]
def generate_sample_task_without_check(label_config, mode='upload'):
""" Generate sample task only
"""
# load config
parser = etree.XMLParser()
xml = etree.fromstring(label_config, parser)
if xml is None:
raise etree.XMLSchemaParseError('Project config is empty or incorrect')
# make examples pretty
examples = data_examples(mode=mode)
# iterate over xml tree and find values with '$'
task = {}
parent = xml.findall('.//*[@value]') # take all tags with value attribute
for p in parent:
value = p.get('value')
value_type = p.get('valueType', p.get('valuetype', None))
# process List
if p.tag == 'List':
key = p.get('elementValue').replace('$', '')
examples['List'] = [{key: 'Hello world'}, {key: 'Goodbye world'}]
# valueType="url"
examples['Text'] = examples['TextUrl'] if value_type == 'url' else examples['TextRaw']
examples['TimeSeries'] = examples['TimeSeriesUrl'] if value_type == 'url' or value_type is None else examples['TimeSeriesRaw']
if value and value[0] == '$':
# try get example by variable name
by_name = examples.get(value, None)
# not found by name, try get example by type
task[value[1:]] = examples.get(p.tag, 'Something') if by_name is None else by_name
# TimeSeries special case
for ts_tag in xml.findall('.//TimeSeries'):
time_column = ts_tag.get('timeColumn')
value_columns = []
for ts_child in ts_tag:
if ts_child.tag != 'Channel':
continue
value_columns.append(ts_child.get('column'))
sep = ts_tag.get('sep')
time_format = ts_tag.get('timeFormat')
tag_value = ts_tag.attrib['value'].lstrip('$')
ts_task = task[tag_value]
if isinstance(ts_task, str):
# data is URL
params = {'time': time_column, 'values': ','.join(value_columns)}
if sep:
params['sep'] = sep
if time_format:
params['tf'] = time_format
task[tag_value] = '/samples/time-series.csv?' + urlencode(params)
elif isinstance(ts_task, dict):
# data is JSON
task[tag_value] = generate_time_series_json(time_column, value_columns, time_format)
return task
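# Illustrative sketch (the config string is hypothetical): each $variable in a
# value attribute is mapped to an example of the matching tag type, so
#   generate_sample_task_without_check('<View><Text name="t" value="$my_text"/></View>')
# returns something like {'my_text': <the 'Text' example from data_examples()>}.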
def _is_strftime_string(s):
# very dumb but works
return '%' in s
def generate_time_series_json(time_column, value_columns, time_format=None):
n = 100
if time_format is not None and not _is_strftime_string(time_format):
time_fmt_map = {
'yyyy-MM-dd': '%Y-%m-%d'
}
time_format = time_fmt_map.get(time_format)
if time_format is None:
times = np.arange(n).tolist()
else:
times = pd.date_range('2020-01-01', periods=n, freq='D').strftime(time_format).tolist()
ts = {time_column: times}
for value_col in value_columns:
ts[value_col] = np.random.randn(n).tolist()
return ts
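# Illustrative sketch (column names are arbitrary): the result is a dict of
# parallel lists with 100 points, e.g.
#   ts = generate_time_series_json('time', ['velocity'], time_format='yyyy-MM-dd')
#   # ts['time']     -> ['2020-01-01', '2020-01-02', ...]
#   # ts['velocity'] -> 100 random floats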
def generate_sample_task(project):
""" Generate task example for upload and check it with serializer validation
:param project: project with label config
:return: task dict
"""
task = generate_sample_task_without_check(project.label_config)
return task
def set_full_hostname(hostname):
global HOSTNAME
HOSTNAME = hostname
def get_full_hostname():
global HOSTNAME
return HOSTNAME
def get_web_protocol():
""" http or https
"""
global PROTOCOL
return PROTOCOL
def set_web_protocol(protocol):
""" http or https
"""
global PROTOCOL
PROTOCOL = protocol
| 2.34375
| 2
|
posturlgenerator/post_url_generator.py
|
sean-bailey/image-to-svg
| 1
|
12774949
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import boto3
import logging
import json
logger = logging.getLogger("handler_logger")
logger.setLevel(logging.DEBUG)
def handler(event, context):
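    # Expects 'fileName' and 'operation' ("upload", or anything else for a
    # download URL) in the event, plus the environment variables 'bucket_name'
    # and 'expire_time' (presigned-POST expiry in seconds).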
statuscode=200
bodydata=None
try:
file_name = event.get('fileName')
operation_type=event.get('operation')
bucket_name=os.environ['bucket_name']
timeout=int(os.environ['expire_time'])
s3=boto3.client('s3')
if operation_type=="upload":
bodydata=json.dumps({
'returndata' : s3.generate_presigned_post(Bucket=bucket_name,Key=file_name,ExpiresIn=timeout),
#'returndata':{
#'url':s3.generate_presigned_url(ClientMethod="put_object",Params={'Bucket':bucket_name,'Key':file_name}, ExpiresIn=30,HttpMethod="POST")
#},
'successcode':'0'
})
else:
bodydata= json.dumps({
'returndata' : s3.generate_presigned_url('get_object',
Params={'Bucket':bucket_name,
'Key': file_name},
ExpiresIn=3600),
'successcode':'1'
})
except Exception as e:
logger.error(e)
bodydata = json.dumps({
'returndata':"",
'successcode': '2'
})
finalresponse={}
finalresponse["headers"]={
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
}
finalresponse['statusCode']=statuscode
finalresponse['body']=bodydata
return finalresponse
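# Illustrative sketch of an invocation (bucket and file names are placeholders):
#
#   os.environ['bucket_name'] = 'my-upload-bucket'
#   os.environ['expire_time'] = '300'
#   response = handler({'fileName': 'drawing.png', 'operation': 'upload'}, None)
#   # response['body'] is a JSON string holding 'returndata' (the presigned
#   # POST fields) and 'successcode' ('0' upload, '1' download, '2' error).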
| 2.015625
| 2
|
pauli_tm.py
|
johnkerl/sack
| 6
|
12774950
|
<reponame>johnkerl/sack<gh_stars>1-10
#!/usr/bin/python -Wall
# ================================================================
# Please see LICENSE.txt in the same directory as this file.
# <NAME>
# <EMAIL>
# 2007-05-31
# ================================================================
# Type module for the group of Pauli matrices.
import re
# sigmax = 0 1
# 1 0
#
# sigmay = 0 -i
# i 0
#
# sigmaz = 1 0
# 0 -1
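# The pauli_t class below stores a 2x2 matrix row-major as (a, b, c, d):
#   a b
#   c d
# so, matching the strings accepted by scan(), sigmax is pauli_t(0,1,1,0),
# sigmay is pauli_t(0,-1j,1j,0) and sigmaz is pauli_t(1,0,0,-1).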
# ----------------------------------------------------------------
def sanitize1(x):
if (type(x) == type(0)):
return x
elif (type(x) == type(0.0)):
return x
elif (x == x.conjugate()):
return x.real
else:
return x
# ----------------------------------------------------------------
class pauli_t:
def sanitize(self):
self.a = sanitize1(self.a)
self.b = sanitize1(self.b)
self.c = sanitize1(self.c)
self.d = sanitize1(self.d)
def __init__(self, a, b, c, d):
self.a = a
self.b = b
self.c = c
self.d = d
self.sanitize()
def __mul__(X,Y):
# a b a b
# c d c d
za = X.a*Y.a + X.b*Y.c
zb = X.a*Y.b + X.b*Y.d
zc = X.c*Y.a + X.d*Y.c
zd = X.c*Y.b + X.d*Y.d
Z = pauli_t(za, zb, zc, zd)
return Z
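	# Worked check: with sx = pauli_t(0,1,1,0) and sy = pauli_t(0,-1j,1j,0),
	# sx * sy gives pauli_t(1j,0,0,-1j), i.e. i*sigmaz, as expected.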
def __eq__(X,Y):
if (X.a != Y.a): return 0
if (X.b != Y.b): return 0
if (X.c != Y.c): return 0
if (X.d != Y.d): return 0
return 1
def __ne__(X,Y):
return not (X == Y)
	# Lexicographic ordering on the matrix entries (a, b, c, d).  Entries with
	# a nonzero imaginary part (e.g. those of sigmay) are not orderable and
	# will raise, as with any complex comparison.
	def __lt__(X,Y):
		if (X.a != Y.a): return X.a < Y.a
		if (X.b != Y.b): return X.b < Y.b
		if (X.c != Y.c): return X.c < Y.c
		if (X.d != Y.d): return X.d < Y.d
		return 0
	def __le__(X,Y):
		return (X < Y) or (X == Y)
	def __gt__(X,Y):
		return (Y < X)
	def __ge__(X,Y):
		return (Y < X) or (X == Y)
def inv(X):
det = X.a*X.d - X.b*X.c
Z = pauli_t(X.d/det, -X.b/det, -X.c/det, X.a/det)
return Z
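	# Worked check: for sx = pauli_t(0,1,1,0) the determinant is -1 and inv()
	# returns pauli_t(0,1,1,0) again, so sx.inv() == sx and sx * sx.inv() is
	# the identity (the Pauli matrices are involutory).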
	def scan(self, string):
		if (string == "I"):
			self.__init__(1,0,0,1)
		elif (string == "sx"):
			self.__init__(0,1,1,0)
		elif (string == "sy"):
			self.__init__(0,-1j,1j,0)
		elif (string == "sz"):
			self.__init__(1,0,0,-1)
		else:
			# parse the slash-delimited form produced by __str__, e.g. "1/0/0/-1"
			fields = string.split('/')
			if (len(fields) != 4):
				raise IOError
			try:
				[a, b, c, d] = [complex(field) for field in fields]
			except ValueError:
				raise IOError
			self.__init__(a, b, c, d)
def __str__(self):
return str(self.a) + "/" + str(self.b) + "/" + str(self.c) + "/" + str(self.d)
def __repr__(self):
return self.__str__()
def params_from_string(params_string):
# xxx check empty
return 0
def from_string(value_string, params_string):
not_used = params_from_string(params_string)
obj = pauli_t(0,0,0,0)
obj.scan(value_string)
return obj
## ----------------------------------------------------------------
#from sackgrp import *
#X=from_string("sx",""); print X
#Y=from_string("sy",""); print Y
#Z=from_string("sz",""); print Z
#XX=X*X;print XX
#YY=Y*Y;print YY
#ZZ=Z*Z;print ZZ
#print
#G=[X,Y,Z]
#close_group(G)
#for g in G:
# print g
#print
#print_cayley_table(G)
#print
#orders = get_orders(G)
#n = len(G)
#for k in range(0, n):
# print G[k], orders[k]
# ================================================================
import unittest
if __name__ == '__main__':
class test_cases(unittest.TestCase):
def test_sanitize1(self):
pass # to be implemented
def test_sanitize(self):
pass # to be implemented
def test___init__(self):
pass # to be implemented
def test___mul__(self):
pass # to be implemented
def test___eq__(self):
pass # to be implemented
def test___ne__(self):
pass # to be implemented
def test___lt__(self):
pass # to be implemented
def test___le__(self):
pass # to be implemented
def test___gt__(self):
pass # to be implemented
def test___ge__(self):
pass # to be implemented
def test_inv(self):
pass # to be implemented
def test_scan(self):
pass # to be implemented
def test___str__(self):
pass # to be implemented
def test___repr__(self):
pass # to be implemented
def test_params_from_string(self):
pass # to be implemented
def test_from_string(self):
pass # to be implemented
# ----------------------------------------------------------------
unittest.main()
| 2.40625
| 2
|