content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
import logging
from gamechangerml.api.fastapi.model_config import Config
logger = logging.getLogger()
| [
11748,
28686,
198,
11748,
18931,
198,
6738,
983,
354,
2564,
4029,
13,
15042,
13,
7217,
15042,
13,
19849,
62,
11250,
1330,
17056,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
3419,
628
] | 3.352941 | 34 |
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import warnings
from ...extern import six
from ...utils.compat.numpycompat import NUMPY_LT_1_10
from ...utils.exceptions import AstropyUserWarning
if not six.PY2:
# Stuff to do if Python 3
# Make the decode_ascii utility function actually work
from . import util
import numpy
util.encode_ascii = encode_ascii
util.decode_ascii = decode_ascii
# Here we monkey patch (yes, I know) numpy to fix a few numpy Python 3
# bugs. The only behavior that's modified is that bugs are fixed, so that
# should be OK.
# Fix chararrays; this is necessary in numpy 1.9.x and below
# The fix for this is in https://github.com/numpy/numpy/pull/5982 and is
# available as of Numpy 1.10
if NUMPY_LT_1_10:
_chararray = numpy.char.chararray
for m in [numpy, numpy.char, numpy.core.defchararray,
numpy.core.records]:
m.chararray = chararray
| [
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
532,
766,
350,
56,
37,
29722,
13,
81,
301,
198,
198,
11748,
14601,
198,
6738,
2644,
1069,
759,
1330,
2237,
198,
6738,
2644,
26791,
13,
5589,
265,
13,
77,
32152,
5589,
... | 2.567708 | 384 |
# -*- coding: utf-8 -*-
"""
Script Name: GraphicPathItem.py
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
from PySide2.QtWidgets import QGraphicsPathItem
from pyPLM.models import DamgSignals
from pyPLM.settings import AppSettings
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 3/12/2019 - 4:12 AM
# © 2017 - 2018 DAMGteam. All rights reserved | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
198,
7391,
6530,
25,
43029,
15235,
7449,
13,
9078,
198,
13838,
25,
2141,
33822,
71,
14,
40335,
532,
513,
35,
6802,
13,
198,
198,
11828,
25,
198,
198,
37811... | 3.913907 | 151 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @trojanzhex
from pyrogram import filters
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from config import Config
from script import Script
@trojanz.on_message(filters.private & (filters.document | filters.video))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
2488,
23528,
13881,
89,
33095,
201,
198,
201,
198,
201,
198,
6738,
12972,
39529,
1330,
16628,
201,
1... | 2.866667 | 120 |
from paraview.simple import *
# Load the distributed plugin.
LoadDistributedPlugin("VTKmFilters" , remote=False, ns=globals())
assert VTKmContour
| [
6738,
1582,
615,
769,
13,
36439,
1330,
1635,
198,
198,
2,
8778,
262,
9387,
13877,
13,
198,
8912,
20344,
6169,
37233,
7203,
36392,
42,
76,
11928,
1010,
1,
837,
6569,
28,
25101,
11,
36545,
28,
4743,
672,
874,
28955,
198,
198,
30493,
3... | 3.083333 | 48 |
from __future__ import print_function
import os
import time
import sys
sys.path.append("..")
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
gpu_frac = 0.7
def get_session(gpu_fraction=gpu_frac):
'''Assume that you have 6GB of GPU memory and want to allocate ~2GB then gpu_frac=0.3 '''
num_threads = os.environ.get('OMP_NUM_THREADS')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
if num_threads:
return tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
else:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
## attempt to limit the memory usage of TF
KTF.set_session(get_session(gpu_fraction=gpu_frac))
import keras
from keras.utils import plot_model
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import numpy as np
import cv2
import matplotlib.pyplot as plt
from utils import *
from get_config import *
from get_models import *
from data_loader import *
class ETATimer:
''' Simple class to store a timer for the ETA of one epoch/validation run '''
if __name__ == '__main__':
train_with_multibatch()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
640,
198,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7203,
492,
4943,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
41927,
292,
13... | 2.769231 | 494 |
#!/usr/bin/env python
import argparse
import subprocess
import sqlite3 as sql
from LLC_Membranes.llclib import topology, file_rw
from LLC_Membranes.analysis import solute_partitioning
from LLC_Membranes.setup import lc_class, equil, solvate_tails
import mdtraj as md
import numpy as np
import os
if __name__ == "__main__":
os.environ["GMX_MAXBACKUP"] = "-1" # stop GROMACS from making backups
args = initialize().parse_args()
sys = System(args.build_monomer, args.weight_percent, args.ratio, solute='HOH', nopores=args.nopores,
ncolumns=args.ncolumns, monomers_per_column=args.monomers_per_column, p2p=args.p2p,
parallel_displaced=args.parallel_displaced, dbwl=args.dbwl, random_seed=args.random_seed,
mpi=args.mpi, nproc=args.nproc, tolerance=args.tolerance)
while not sys.converged:
sys.query_database(database='water_content.db', guess_range=args.guess_range, guess_stride=args.guess_stride)
sys.equilibrate(input_files=True, length=args.length_nvt, force=args.forces[0],
restraint_residue=args.restraint_residue, restraint_axis=args.restraint_axis,
ring_restraints=args.ring_restraints)
sys.calculate_pore_water(args.forces[0])
sys.write_final_pore_configuration()
sys.place_water_tails(args.output)
sys.full_equilibration(args.forces, fully_solvated=args.output, l_berendsen=args.length_berendsen,
l_nvt=args.length_nvt, l_pr=args.length_Parrinello_Rahman,
restraint_axis=args.restraint_axis, restraint_residue=args.restraint_residue,
ring_restraints=args.ring_restraints)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
850,
14681,
198,
11748,
44161,
578,
18,
355,
44161,
198,
6738,
11419,
62,
13579,
1671,
7305,
13,
297,
565,
571,
1330,
1353,
1435,
11,
2393,
62,
316... | 2.252618 | 764 |
from anarcho import app
from flask import jsonify
from flask_swagger import swagger
@app.route('/swagger/spec.json', methods=['GET'])
def spec():
"""
Returns the swagger spec.
Read more by this link https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md
:return:
"""
swag = swagger(app)
swag['info']['title'] = app.name
swag['consumes'] = ['application/json']
swag['produces'] = ['application/json']
return jsonify(swag)
| [
6738,
14061,
78,
1330,
598,
198,
6738,
42903,
1330,
33918,
1958,
198,
6738,
42903,
62,
2032,
7928,
1330,
1509,
7928,
628,
198,
31,
1324,
13,
38629,
10786,
14,
2032,
7928,
14,
16684,
13,
17752,
3256,
5050,
28,
17816,
18851,
6,
12962,
1... | 2.637363 | 182 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, Yutong Xie, UIUC.
Using Two Pointers to find the interval intersection.
'''
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
220,
220,
220,
15069,
12131,
11,
575,
315,
506,
48989,
11,
12454,
9598,
13,
198,
220,
220,
220,
8554,
... | 2.62069 | 58 |
import asyncio
import json
import os
import sys
import multiprocessing
import webbrowser
import requests
import requests.cookies
import logging as log
import subprocess
import time
from galaxy.api.consts import LocalGameState, Platform
from galaxy.api.plugin import Plugin, create_and_run_plugin
from galaxy.api.types import Achievement, Game, LicenseInfo, LocalGame
from galaxy.api.errors import ( AuthenticationRequired,
BackendTimeout, BackendNotAvailable, BackendError, NetworkError, UnknownError, InvalidCredentials
)
from version import __version__ as version
from process import ProcessProvider
from local_client_base import ClientNotInstalledError
from local_client import LocalClient
from backend import BackendClient, AccessTokenExpired
from definitions import Blizzard, DataclassJSONEncoder, License_Map, ClassicGame
from consts import SYSTEM
from consts import Platform as pf
from http_client import AuthenticatedHttpClient
# async def get_unlocked_achievements(self, game_id):
# if not self.website_client.is_authenticated():
# raise AuthenticationRequired()
# try:
# if game_id == "21298":
# return await self._get_sc2_achievements()
# elif game_id == "5730135":
# return await self._get_wow_achievements()
# else:
# return []
# except requests.Timeout:
# raise BackendTimeout()
# except requests.ConnectionError:
# raise NetworkError()
# except Exception as e:
# log.exception(str(e))
# return []
if __name__ == "__main__":
main()
| [
11748,
30351,
952,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
18540,
305,
919,
278,
198,
11748,
3992,
40259,
198,
11748,
7007,
198,
11748,
7007,
13,
27916,
444,
198,
11748,
18931,
355,
2604,
198,
11748,
850,
1468... | 2.874564 | 574 |
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' synthesis filter bank Op unit-test '''
import os
from pathlib import Path
import tensorflow as tf
from absl import logging
from delta.layers.ops import py_x_ops
from delta.data import feat as feat_lib
class SfbOpTest(tf.test.TestCase):
''' synthesis filter bank op unittest'''
def setUp(self):
'''set up'''
self.wavpath = str(
Path(os.environ['MAIN_ROOT']).joinpath(
'delta/layers/ops/data/sm1_cln.wav'))
def tearDown(self):
'''tear down'''
def test_sfb(self):
''' test sfb op'''
with self.session():
sample_rate, input_data = feat_lib.load_wav(self.wavpath, sr=16000)
power_spc, phase_spc = py_x_ops.analyfiltbank(input_data, sample_rate)
logging.info('power_spc: {}'.format(power_spc.eval().shape))
logging.info('phase_spc: {}'.format(phase_spc.eval().shape))
output = py_x_ops.synthfiltbank(power_spc.eval(), phase_spc.eval(),
sample_rate)
self.assertEqual(tf.rank(output).eval(), 1)
# beginning 400 samples are different, due to the overlap and add
self.assertAllClose(
output.eval().flatten()[500:550],
input_data[500:550],
rtol=1e-4,
atol=1e-4)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
tf.test.main()
| [
2,
15069,
357,
34,
8,
2177,
11618,
7731,
72,
22385,
8987,
290,
7712,
1766,
1539,
43,
8671,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198... | 2.700394 | 761 |
from django.urls import path
from rest_framework.authtoken import views as drf_views
from . import views
from . import api_views
urlpatterns = [
path('users/', api_views.UserList.as_view()),
path('users/<int:id>/', api_views.UserRetrieveUpdate.as_view()),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
1334,
62,
30604,
13,
18439,
30001,
1330,
5009,
355,
1553,
69,
62,
33571,
198,
6738,
764,
1330,
5009,
198,
6738,
764,
1330,
40391,
62,
33571,
198,
198,
6371,
33279,
82,
796,
685,... | 2.810526 | 95 |
import numpy as np
import pandas as pd
class Cake:
"""example"""
x=2
class Datacleaner:
""""class for cleaning our data"""
def replace_values(self, value=np.nan, new=0):
"""replace values in a dataframe"""
self.df = self.df.replace(value, new)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
4871,
30799,
25,
220,
198,
220,
220,
220,
37227,
20688,
37811,
198,
220,
220,
220,
2124,
28,
17,
198,
198,
4871,
16092,
6008,
272,
263,
25,
220,
198,
220... | 2.46087 | 115 |
# -*- coding: utf-8 -*-
import unittest
from unittest import mock
from pastepwn.analyzers.googleapikeyanalyzer import GoogleApiKeyAnalyzer
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
1613,
538,
675,
13,
38200,
47031,
13,
13297,
499,
522,
4121,
3400,
9107,
1330,
3012,
32,
1... | 2.661972 | 71 |
from django.contrib import admin
from .models import Entry, Topic
admin.site.register(Topic)
admin.site.register(Entry)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
21617,
11,
47373,
198,
198,
28482,
13,
15654,
13,
30238,
7,
33221,
8,
198,
28482,
13,
15654,
13,
30238,
7,
30150,
8,
198
] | 3.297297 | 37 |
# uploads.py - Discourse only sends the real addresses of some embedded
# images in the 'cooked' HTML. This module helps to extract them from
# there and turn them into real links in the Markdown.
import html.parser
import io
import commonmark
import commonmark_extensions.plaintext
| [
2,
9516,
82,
13,
9078,
532,
8444,
9047,
691,
12800,
262,
1103,
9405,
286,
617,
14553,
198,
2,
4263,
287,
262,
705,
46591,
6,
11532,
13,
220,
770,
8265,
5419,
284,
7925,
606,
422,
198,
2,
612,
290,
1210,
606,
656,
1103,
6117,
287,
... | 3.931507 | 73 |
#!/bin/env python
import profile
from pstats import Stats
from Main import main
s = profile.run('main()', 'pksampler.profile')
Stats('pksampler.profile').sort_stats('calls').print_stats()
| [
2,
48443,
8800,
14,
24330,
21015,
198,
198,
11748,
7034,
198,
6738,
279,
34242,
1330,
20595,
198,
6738,
8774,
1330,
1388,
198,
198,
82,
796,
7034,
13,
5143,
10786,
12417,
3419,
3256,
705,
79,
591,
321,
20053,
13,
13317,
11537,
198,
29... | 3.064516 | 62 |
from piece import piece
| [
6738,
3704,
1330,
3704,
628
] | 5 | 5 |
#!/usr/bin/env python3
from nlptools.text.tokenizer import Tokenizer_BERT
from nlptools.utils import zload
import sys
s = Tokenizer_BERT(bert_model_name='/home/pzhu/.pytorch_pretrained_bert/bert-base-uncased')
txt = 'Who was Jim Henson ? Jim Henson was a puppeteer'
print(s.seg(txt))
print(s.vocab)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
299,
75,
457,
10141,
13,
5239,
13,
30001,
7509,
1330,
29130,
7509,
62,
13246,
51,
198,
6738,
299,
75,
457,
10141,
13,
26791,
1330,
1976,
2220,
198,
11748,
25064,
198,
198,
... | 2.525 | 120 |
from __future__ import unicode_literals, print_function, division
import requests
from datetime import datetime
from bs4 import BeautifulSoup
import re
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
3601,
62,
8818,
11,
7297,
198,
11748,
7007,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
302,
628
] | 3.923077 | 39 |
# -*- coding: utf-8 -*-
"""
Stack Floating Buttons
======================
Copyright © 2010-2018 HeaTTheatR
For suggestions and questions:
<kivydevelopment@gmail.com>
This file is distributed under the terms of the same license,
as the Kivy framework.
Example
-------
from kivy.app import App
from kivy.lang import Builder
from kivy.factory import Factory
from kivymd.toast import toast
from kivymd.theming import ThemeManager
from kivymd.stackfloatingbuttons import MDStackFloatingButtons
Builder.load_string('''
#:import Toolbar kivymd.toolbar.Toolbar
<ExampleFloatingButtons@BoxLayout>
orientation: 'vertical'
Toolbar:
title: 'Stack Floating Buttons'
md_bg_color: app.theme_cls.primary_color
elevation: 10
left_action_items: [['menu', lambda x: None]]
''')
class Example(App):
theme_cls = ThemeManager()
theme_cls.primary_palette = 'Teal'
title = "Example Stack Floating Buttons"
create_stack_floating_buttons = False
floating_data = {
'Python': 'language-python',
'Php': 'language-php',
'C++': 'language-cpp'}
def set_my_language(self, instance_button):
toast(instance_button.icon)
def build(self):
screen = Factory.ExampleFloatingButtons()
# Use this condition otherwise the stack will be created each time.
if not self.create_stack_floating_buttons:
screen.add_widget(MDStackFloatingButtons(
icon='lead-pencil',
floating_data={
'Python': 'language-python',
'Php': 'language-php',
'C++': 'language-cpp'},
callback=self.set_my_language))
self.create_stack_floating_buttons = True
return screen
Example().run()
"""
from kivy.animation import Animation
from kivy.core.window import Window
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.properties import StringProperty, DictProperty, ObjectProperty
from kivy.metrics import dp
from kivymd.cards import MDCard
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import MDFloatingActionButton kivymd.button.MDFloatingActionButton
<FloatingButton@MDFloatingActionButton>
x: Window.width - (self.width + dp(21))
y: dp(25)
size_hint: None, None
size: dp(46), dp(46)
elevation: 5
md_bg_color: app.theme_cls.primary_color
on_release: self.parent.callback(self)
<MDFloatingLabel>
size_hint: None, None
height: dp(20)
width: label.texture_size[0]
border_color_a: .5
md_bg_color: app.theme_cls.primary_color
x: -self.width
Label:
id: label
color: 0, 0, 0, 1
bold: True
markup: True
text: ' %s ' % root.text
<MDStackFloatingButtons>
FloatingButton:
id: f_btn_1
icon: list(root.floating_data.values())[0]
FloatingButton:
id: f_btn_2
icon: list(root.floating_data.values())[1]
FloatingButton:
id: f_btn_3
icon: list(root.floating_data.values())[2]
MDFloatingLabel:
id: f_lbl_1
text: list(root.floating_data.keys())[0]
y: dp(117)
MDFloatingLabel:
id: f_lbl_2
text: list(root.floating_data.keys())[1]
y: dp(170)
MDFloatingLabel:
id: f_lbl_3
text: list(root.floating_data.keys())[2]
y: dp(226)
MDFloatingActionButton:
icon: root.icon
size: dp(56), dp(56)
x: Window.width - (self.width + dp(15))
md_bg_color: app.theme_cls.primary_color
y: dp(15)
on_release: root.show_floating_buttons()
''')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
25896,
49768,
887,
27288,
198,
4770,
50155,
198,
198,
15269,
10673,
3050,
12,
7908,
679,
64,
51,
464,
265,
49,
198,
198,
1890,
11776,
290,
2683,
25,
1... | 2.25353 | 1,629 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from threading import Event
from twitter.common import log
from .health_check import StatusHealthCheck
from .task_util import StatusHelper
from gen.apache.aurora.api.ttypes import ScheduleStatus, TaskQuery
| [
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4866,
286,
262,
13789,
379,... | 3.802956 | 203 |
"""
Hydrus helper is module containing various functions for working with Hydrus
output files and processing them in Jupyter Notebooks
For now the functions are:
- copy_output():
It will copy the selected files from temporary working directory of a
Hydrus project to a Hydrus/project_name subdirectory.
"""
# some imports first
import glob
import os
import sys
import pathlib
from shutil import copyfile
import pandas as pd
from tkinter import *
from tkinter.filedialog import askopenfilename
def copy_output():
"""
Copy specified Hydrus output file(s) for a specified project
to a working folder of this function (script),
(if other destination is not specified. #should be implemented later)
Works only for Temp working directory,
which exists only if some project is open.
Parameters
----------
(project_name) : string
Need to be entered after a prompt of this function.
All avaible projet names will be listed.
(file_name) : string
Need to be entered after a prompt of this function.
All avaible output files will be listed
Returns
-------
copy of the original file(s)
"""
working_path = "C:\\HYDRUS\\Temp\\~Hydrus3D_2xx\\"
cwd = os.getcwd()
# creating and printing list of all projects in Hydrus working folder
out_p = [p.split("\\")[-1] for p in glob.glob(working_path + "*")]
out_p.remove('Hydrus3D_2xx.tmp')
print("List of projects in the Hydrus working directory:")
for p in out_p:
print(p)
print("")
# choice of project with desired output files
project = input("Enter the projet name from the printed list: ")
print("")
while project not in out_p:
print("There is no such project name %s" % project)
project = input(
"Check the list again and enter an existing projet name: "
)
print("")
# creating and printing list of all output files
out_f = [f.split("\\")[-1] for f in glob.glob(
working_path + project + "\\" + "*.out")]
print("List of output files in the %s working directory" % project)
for f in out_f:
print(f.split("\\")[-1])
print("")
# creating the list of files to copy from user input
files = input("Enter the file name(s) you want to copy: ")
if "," in files:
files = [x.strip() for x in files.split(',')]
else:
files = [x.strip() for x in files.split(' ')]
# check if all the file names are right
result = all(elem in out_f for elem in files)
print("")
# if not run it again with check
while not result:
print("Some error is in file(s) name(s)")
files = input(
"Check the list again and enter the output files names: ")
print("")
if "," in files:
files = [x.strip() for x in files.split(',')]
else:
files = [x.strip() for x in files.split(' ')]
result = all(elem in out_f for elem in files)
# finally the copy of files will be done
for name in files:
print("Filename: %s" % name)
source = working_path + project + "\\" + name
print("Source: %s" % source)
pathlib.Path(
cwd + "\\hydrus\\" + project + "\\"
).mkdir(parents=True, exist_ok=True)
destination = cwd + "\\hydrus\\" + project + "\\" + name
print("Destination: %s" % destination)
copyfile(source, destination)
print("file %s succesefuly copied to %s" % (name, destination))
def read_file(proc_type='flow'):
"""
Function will read the specific composition of a hydrus output file and
convert it to a dataframe.
Now it works only for the "v_Mean.out" file type
args:
proc_type: string
Optional argument for better processing of the Hydrus output according
your simulation type.
Values for choice are:
- flow - only flow simulation (default value)
- tracer - solution simulation with one solute (tracer)
- cwm1 - biokinetic simulation with CWM1 model
- cw2d - biokinetic simulation with CW2D model
Parameters
----------
filepath : string
Full or relative path to the file
Returns
-------
pandas dataframe
"""
proc_types = ['flow', 'tracer', 'cwm1', 'cw2d']
if proc_type not in proc_types:
raise ValueError(
"Invalid process type. Expected one of: %s" % proc_types
)
root = Tk()
root.update()
filepath = askopenfilename()
root.destroy()
filepath = filepath.replace('/', '\\')
# dictionaries for renaming columns in the returned dataframe
v_mean_col = {
"Time": "time",
"rAtm": "pot_surface_flux_atm",
"rRoot": "pot_transp_rate",
"vAtm": "act_surface_flux_atm",
"vRoot": "act_transp_rate",
"vKode3": "total_bottom flux",
"vKode1": "total_boundary_flux",
"vSeep": "total_seepage_flux",
"vKode5": "total_b_node_flux",
"Runoff": "average_surface_ runoff",
"Evapor": "average_evapor_flux",
"Infiltr": "average_infil_flux",
"SnowLayer": "surface_snow_layer"
}
cum_q_col = {
"Time": "time",
"CumQAP": "c_pot_surface_flux_atm",
"CumQRP": "c_pot_transp_rate",
"CumQA": "c_act_surface_flux_atm",
"CumQR": "c_act_transp_rate",
"CumQ3": "c_total_bottom flux",
"CumQ1": "c_total_boundary_flux",
"CumQS": "c_total_seepage_flux",
"CumQ5": "c_total_b_node_flux",
"cRunoff": "c_surface_ runoff",
"cEvapor": "c_evapor_flux",
"cInfiltr": "c_infil_flux",
}
obsnode_col = {
"hNew": "hNew.0",
"theta": "theta.0",
"Temp": "Temp.0",
"Conc": "Conc.0",
"Sorb": "Sorb.0"
}
col_cwm1 = {
"Conc.0": "oxygen.1",
"Conc.1": "readillyCOD.1",
"Conc.2": "acetat.1",
"Conc.3": "in_sol_COD.1",
"Conc.4": "NH4.1",
"Conc.5": "NO3.1",
"Conc.6": "SSO4.1",
"Conc.7": "H2S.1",
"Conc.8": "slowlyCOD.1",
"Conc.9": "in_part_COD.1",
"Sorb.10": "heterotrophic.1",
"Sorb.11": "autotrophic.1",
"Sorb.12": "fermenting.1",
"Sorb.13": "methanogenic.1",
"Sorb.14": "sulphate_reducing.1",
"Sorb.15": "sulphide_oxidising.1",
}
if "v_Mean.out" in filepath:
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.loc[:, (data != 0).any(axis=0)]
data = data.rename(v_mean_col, axis='columns')
print("Data from %s were read into the Pandas DataFrame" % filepath)
return data
elif "Cum_Q.out" in filepath:
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.loc[:, (data != 0).any(axis=0)]
data = data.rename(cum_q_col, axis='columns')
print("Data from %s were read into the Pandas DataFrame" % filepath)
return data
elif "ObsNod.out" in filepath:
if proc_type == "cwm1":
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.rename(obsnode_col, axis='columns')
data = data.rename(col_cwm1, axis='columns')
data = data.loc[:, (data != 0).any(axis=0)]
else:
data = pd.read_csv(filepath,
engine='python',
skiprows=[0, 1, 2, 3, 4],
sep=" ",
skipinitialspace=True,
skipfooter=1
)
data = data.rename(obsnode_col, axis='columns')
data = data.loc[:, (data != 0).any(axis=0)]
print("Data from %s were read into the Pandas DataFrame" % filepath)
return data
else:
print("Sorry, data reader for this file type is not yet implemented.")
| [
37811,
201,
198,
40436,
14932,
31904,
318,
8265,
7268,
2972,
5499,
329,
1762,
351,
15084,
14932,
201,
198,
22915,
3696,
290,
7587,
606,
287,
449,
929,
88,
353,
5740,
12106,
201,
198,
201,
198,
1890,
783,
262,
5499,
389,
25,
201,
198,
... | 1.968655 | 4,594 |
from telethon import events
from iahr.reg import TextSender, VoidSender, MultiArgs, any_send
from iahr.config import IahrConfig
from iahr.utils import AccessList, EventService
from .utils import mention, local, CHAT_TAG
##################################################
# Routines themselves
##################################################
@VoidSender(about=local['tagall']['about'], tags={CHAT_TAG})
| [
6738,
5735,
400,
261,
1330,
2995,
198,
198,
6738,
220,
9520,
81,
13,
2301,
1330,
8255,
50,
2194,
11,
18331,
50,
2194,
11,
15237,
42035,
11,
597,
62,
21280,
198,
6738,
220,
9520,
81,
13,
11250,
1330,
314,
993,
81,
16934,
198,
6738,
... | 3.761468 | 109 |
# -*- coding: UTF-8 -*-
# Copyright (c) 2008 - 2014, Pascal Volk
# See COPYING for distribution information.
"""
vmm.ext.postconf
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Wrapper class for Postfix's postconf.
Postconf instances can be used to read actual values of configuration
parameters or edit the value of a configuration parameter.
postconf.read(parameter) -> value
postconf.edit(parameter, value)
"""
import re
from gettext import gettext as _
from subprocess import Popen, PIPE
from vmm.errors import VMMError
from vmm.constants import VMM_ERROR
class Postconf:
"""Wrapper class for Postfix's postconf."""
__slots__ = ("_bin", "_val")
_parameter_re = re.compile(r"^\w+$", re.ASCII)
_variables_re = re.compile(r"\$\b\w+\b", re.ASCII)
def __init__(self, postconf_bin):
"""Creates a new Postconf instance.
Argument:
`postconf_bin` : str
absolute path to the Postfix postconf binary.
"""
self._bin = postconf_bin
self._val = ""
def edit(self, parameter, value):
"""Set the `parameter`'s value to `value`.
Arguments:
`parameter` : str
the name of a Postfix configuration parameter
`value` : str
the parameter's new value.
"""
self._check_parameter(parameter)
stderr = Popen(
(self._bin, "-e", parameter + "=" + str(value)), stderr=PIPE
).communicate()[1]
if stderr:
raise VMMError(stderr.strip().decode(), VMM_ERROR)
def read(self, parameter, expand_vars=True):
"""Returns the parameter's value.
If expand_vars is True (default), all variables in the value will be
expanded:
e.g. mydestination: mail.example.com, localhost.example.com, localhost
Otherwise the value may contain one or more variables.
e.g. mydestination: $myhostname, localhost.$mydomain, localhost
Arguments:
`parameter` : str
the name of a Postfix configuration parameter.
`expand_vars` : bool
indicates if variables should be expanded or not, default True
"""
self._check_parameter(parameter)
self._val = self._read(parameter)
if expand_vars:
self._expand_vars()
return self._val
def _check_parameter(self, parameter):
"""Check that the `parameter` looks like a configuration parameter.
If not, a VMMError will be raised."""
if not self.__class__._parameter_re.match(parameter):
raise VMMError(
_(
"The value '%s' does not look like a valid "
"Postfix configuration parameter name."
)
% parameter,
VMM_ERROR,
)
def _expand_vars(self):
"""Expand the $variables in self._val to their values."""
while True:
pvars = set(self.__class__._variables_re.findall(self._val))
if not pvars:
break
if len(pvars) > 1:
self._expand_multi_vars(self._read_multi(pvars))
continue
pvars = pvars.pop()
self._val = self._val.replace(pvars, self._read(pvars[1:]))
def _expand_multi_vars(self, old_new):
"""Replace all $vars in self._val with their values."""
for old, new in old_new.items():
self._val = self._val.replace("$" + old, new)
def _read(self, parameter):
"""Ask postconf for the value of a single configuration parameter."""
stdout, stderr = Popen(
[self._bin, "-h", parameter], stdout=PIPE, stderr=PIPE
).communicate()
if stderr:
raise VMMError(stderr.strip().decode(), VMM_ERROR)
return stdout.strip().decode()
def _read_multi(self, parameters):
"""Ask postconf for multiple configuration parameters. Returns a dict
parameter: value items."""
cmd = [self._bin]
cmd.extend(parameter[1:] for parameter in parameters)
stdout, stderr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
if stderr:
raise VMMError(stderr.strip().decode(), VMM_ERROR)
par_val = {}
for line in stdout.decode().splitlines():
par, val = line.split(" = ")
par_val[par] = val
return par_val
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
3648,
532,
1946,
11,
35163,
4709,
74,
198,
2,
4091,
27975,
45761,
329,
6082,
1321,
13,
198,
37811,
198,
220,
220,
220,
410,
3020,
13,
2302,
13,
73... | 2.238579 | 1,970 |
""" Sixth version, make the code easier and more modifiable """
# Define the main programme
from funcs import store_namespace
from funcs import load_namespace
from funcs import emulate_jmod
import os
import datetime
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#from multiprocessing import Pool
from mpcpy import units
from mpcpy import variables
from mpcpy import models_mod as models
from scipy.optimize import curve_fit
from scipy.linalg import expm
from numpy.linalg import inv
from sklearn.metrics import mean_squared_error, r2_score
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.graphics.tsaplots import plot_acf
from Simulator_HP_mod3 import SimHandler
if __name__ == "__main__":
# Naming conventions for the simulation
community = 'ResidentialCommunityUK_rad_2elements'
sim_id = 'MinEne'
model_id = 'R2CW_HP'
bldg_list = load_namespace(os.path.join('file_path_to_folder', 'teaser_bldgs_residential'))
bldg_index_start = 0
bldg_index_end = 30
emulate = 0 # Emulate or not?
old_sim_param = 1 # Choose initial guesses
# Overall options
date = '1/1/2017 '
start = '1/1/2017 00:00:00'
end = '1/9/2017 00:00:00'
train_start = start
valid_start = '1/6/2017 00:00:00'
train_end = valid_start
valid_end = '1/9/2017 00:00:00'
meas_sampl = '300'
mon = 'jan'
folder = 'path_to_file\\results_sysid_new_'+mon
# Features to use in training
exog = ['weaTDryBul_delay1', 'weaHGloHor_delay1','PowerCompr', 'PowerCompr_delay1', 'T_in_delay13']
target = 'TAir'
features_dict = {}
exog_list = []
j = 0
for item in exog:
exog_list.append(item)
ind_exog = item
ar = []
for i in range(5):
ar.append('T_in_delay'+str(i+1))
features_dict['ARX_lag_'+str(i+1)+'_exog'+str(j)] = exog_list + ar
features_dict['ARX_lag_'+str(i+1)+'_'+ind_exog] = [ind_exog] + ar
j += 1
# Instantiate Simulator
Sim_list = []
i = 0
for bldg in bldg_list[bldg_index_start:bldg_index_end]:
i = i+1
Sim = SimHandler(sim_start = start,
sim_end = end,
meas_sampl = meas_sampl
)
Sim.building = bldg+'_'+model_id
Sim.fmupath_emu = os.path.join(Sim.simu_path, 'fmus', community, community+'_'+bldg+'_'+bldg+'_Models_'+bldg+'_House_mpc.fmu')
Sim.fmupath_ref = os.path.join(Sim.simu_path, 'fmus', community, community+'_'+bldg+'_'+bldg+'_Models_'+bldg+'_House_PI.fmu')
Sim.moinfo_emu = (os.path.join(Sim.mod_path, community, bldg,bldg+'_Models',bldg+'_House_mpc.mo'), community+'.'+bldg+'.'+bldg+'_Models.'+bldg+'_House_mpc',
{}
)
Sim.moinfo_emu_ref = (os.path.join(Sim.mod_path, community, bldg,bldg+'_Models',bldg+'_House_PI.mo'), community+'.'+bldg+'.'+bldg+'_Models.'+bldg+'_House_PI',
{}
)
if emulate == 1:
# Initialise exogenous data sources
if i == 1:
Sim.update_weather(start, end)
index = pd.date_range(start, end, freq = meas_sampl+'S', tz=Sim.weather.tz_name)
else:
Sim.weather = Sim_list[i-2].weather
#Sim.sim_start= '1/1/2017 00:00'
Sim.get_control()
#Sim.sim_start= start
Sim.get_other_input(start,end)
Sim.get_constraints(start,end,upd_control=1)
Sim.param_file = os.path.join(Sim.simu_path,'csvs','Parameters_R2CW.csv')
Sim.get_params()
if i > 1:
Sim.control = Sim_list[i-2].control
store_namespace(os.path.join(folder, 'sysid_control_'+Sim.building+'_'+mon), Sim.control)
else:
store_namespace(os.path.join(folder, 'sysid_control_'+Sim.building+'_'+mon), Sim.control)
# Initialise models
Sim.init_models(use_ukf=1, use_fmu_mpc=1, use_fmu_emu=1) # Use for initialising
# Add to list of simulations
Sim_list.append(Sim)
index = pd.date_range(start, end, freq = meas_sampl+'S')
train_dict = {}
test_dict = {}
results_dict = {}
for Sim in Sim_list:
if emulate == 1:
# Emlate to get data
emulate_jmod(Sim.emu, Sim.meas_vars_emu, Sim.meas_sampl, start, end)
# Handle data
print(Sim.emu.display_measurements('Measured'))
measurements = Sim.emu.display_measurements('Measured')
index = pd.to_datetime(measurements.index)
measurements.index = index
weather = Sim.weather.display_data().resample(meas_sampl+'S').ffill()
#print(weather)
weather.index = index
df = pd.concat([measurements, weather],axis=1)[start:end]
df['PowerCompr'] = df['PowerCompr']/1000.0
df['TAir'] = df['TAir']-273.15
for j in range(1,20):
df['T_in_delay'+str(j)] = df['TAir'].shift(periods=j)
df['PowerCompr_delay'+str(j)] = df['PowerCompr'].shift(periods=j)
df['weaTDryBul_delay'+str(j)] = df['weaTDryBul'].shift(periods=j)
df = df.fillna(method='bfill')
# Remove the lags from the beginning
train_start = datetime.datetime.strptime(start, '%m/%d/%Y %H:%M:%S') + datetime.timedelta(seconds = 10*float(meas_sampl))
# Split the dataset
df_train = df[train_start:train_end]
df_test = df[valid_start:valid_end]
train_dict[bldg] = df_train
test_dict[bldg] = df_test
store_namespace(os.path.join(folder, 'all_data_'+mon+'_'+Sim.building), df)
store_namespace(os.path.join(folder, 'train_data_'+mon+'_'+Sim.building), df_train)
store_namespace(os.path.join(folder, 'test_data_'+mon+'_'+Sim.building), df_test)
else:
df = load_namespace(os.path.join('results_sysid_test_'+mon, 'all_data_'+mon+'_'+Sim.building))
# Remove the lags from the beginning
#train_start = start.strftime('%m/%d/%Y %H:%M:%S')
train_start = datetime.datetime.strptime(start, '%m/%d/%Y %H:%M:%S') + datetime.timedelta(seconds = 10*float(meas_sampl))
for j in range(1,20):
df['T_in_delay'+str(j)] = df['TAir'].shift(periods=j)
df['PowerCompr_delay'+str(j)] = df['PowerCompr'].shift(periods=j)
df['weaTDryBul_delay'+str(j)] = df['weaTDryBul'].shift(periods=j)
df['weaHGloHor_delay'+str(j)] = df['weaHGloHor'].shift(periods=j)
df = df.fillna(method='bfill')
# Split the dataset
df_train = df[train_start:train_end]
df_test = df[valid_start:valid_end]
train_dict[bldg] = df_train
test_dict[bldg] = df_test
store_namespace(os.path.join(folder, 'train_data_'+mon+'_'+Sim.building), df_train)
store_namespace(os.path.join(folder, 'test_data_'+mon+'_'+Sim.building), df_test)
#print(df_train['weaTDryBul'])
'''Identify parameters '''
train_data = df_train
i = 0
for case in features_dict.keys():
while True:
try:
features = features_dict[case]
feats = features + [target]
Sim.init_ARX_model(features, target, train_data)
Sim.ARX_model.evaluate()
# Make some predictions
test_x = df_train[features].values
Sim.ARX_model.predict(test_x)
preds = Sim.ARX_model.predictions
if not os.path.exists(os.path.join(folder,case)):
os.makedirs(os.path.join(folder,case))
store_namespace(os.path.join(folder, case, 'sysid_ARXmodel_'+mon+'_'+Sim.building), Sim.ARX_model)
store_namespace(os.path.join(folder, case, 'sysid_ARXparams_results_'+mon+'_'+Sim.building), Sim.ARX_model.fit_results.params)
store_namespace(os.path.join(folder, case, 'sysid_ARXparams_IDpreds_'+mon+'_'+Sim.building), preds)
results_dict[case+'_'+Sim.building] = {}
results_dict[case+'_'+Sim.building]['AIC'] = Sim.ARX_model.fit_results.aic
results_dict[case+'_'+Sim.building]['BIC'] = Sim.ARX_model.fit_results.bic
results_dict[case+'_'+Sim.building]['MSE-total'] = Sim.ARX_model.fit_results.mse_total
results_dict[case+'_'+Sim.building]['MSE-model'] = Sim.ARX_model.fit_results.mse_model
results_dict[case+'_'+Sim.building]['MSE-resid'] = Sim.ARX_model.fit_results.mse_resid
results_dict[case+'_'+Sim.building]['R2-ID'] = Sim.ARX_model.fit_results.rsquared
'''Validate'''
# Make some predictions
test_x = df_test[features].values
Sim.ARX_model.predict(test_x)
preds = Sim.ARX_model.predictions
mse = mean_squared_error(df_test['TAir'].values, preds)
rscore = r2_score(df_test['TAir'].values, preds)
print('Mean squared error - validation')
print(mean_squared_error(df_test['TAir'].values, preds))
print('R^2 score - validation')
print(r2_score(df_test['TAir'].values, preds))
results_dict[case+'_'+Sim.building]['MSE-model-valid'] = mse
results_dict[case+'_'+Sim.building]['R2-valid'] = rscore
store_namespace(os.path.join(folder, case, 'sysid_ARXparams_validpreds_'+mon+'_'+Sim.building), preds)
store_namespace(os.path.join(folder, case, 'sysid_ARXparams_validMSE_'+mon+'_'+Sim.building), mse)
store_namespace(os.path.join(folder, case, 'sysid_ARXparams_validR2_'+mon+'_'+Sim.building), rscore)
break
except:
print('%%%%%%%%%%%%%%%%%% Failed, trying again! %%%%%%%%%%%%%%%%%%%%%%')
continue
results_pd = pd.DataFrame.from_dict(results_dict, orient='index')
results_pd.to_csv(os.path.join(folder, 'model_selection_all.csv'))
| [
37811,
28980,
2196,
11,
787,
262,
2438,
4577,
290,
517,
953,
16823,
37227,
198,
2,
2896,
500,
262,
1388,
11383,
198,
198,
6738,
1257,
6359,
1330,
3650,
62,
14933,
10223,
198,
6738,
1257,
6359,
1330,
3440,
62,
14933,
10223,
198,
6738,
... | 1.811902 | 6,167 |
# Generated by Django 3.2.3 on 2021-06-02 14:44
from typing import List, Tuple
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
18,
319,
33448,
12,
3312,
12,
2999,
1478,
25,
2598,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 3.05 | 40 |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_smapi_model.v1.skill.manifest.distribution_countries import DistributionCountriesV1
from ask_smapi_model.v1.skill.manifest.skill_manifest_localized_publishing_information import SkillManifestLocalizedPublishingInformationV1
from ask_smapi_model.v1.skill.manifest.distribution_mode import DistributionModeV1
from ask_smapi_model.v1.skill.manifest.manifest_gadget_support import ManifestGadgetSupportV1
class SkillManifestPublishingInformation(object):
"""
Defines the structure for publishing information in the skill manifest.
:param name: Name of the skill that is displayed to customers in the Alexa app.
:type name: (optional) str
:param description: Description of the skill's purpose and feature and how it works. Should describe any prerequisites like hardware or account requirements and detailed steps for the customer to get started. For Flash Briefing skill list the feeds offered within the skill. Use a conversational tone and correct grammar and punctuation. This description displays to customers on the skill detail card in the Alexa app.
:type description: (optional) str
:param locales: Defines the structure for locale specific publishing information in the skill manifest.
:type locales: (optional) dict(str, ask_smapi_model.v1.skill.manifest.skill_manifest_localized_publishing_information.SkillManifestLocalizedPublishingInformation)
:param is_available_worldwide: True if the skill should be distributed in all countries where Amazon distributes skill false otherwise.
:type is_available_worldwide: (optional) bool
:param distribution_mode:
:type distribution_mode: (optional) ask_smapi_model.v1.skill.manifest.distribution_mode.DistributionMode
:param gadget_support:
:type gadget_support: (optional) ask_smapi_model.v1.skill.manifest.manifest_gadget_support.ManifestGadgetSupport
:param testing_instructions: Special instructions provided by the developer to test the skill.
:type testing_instructions: (optional) str
:param category: Category that best describes a skill. Indicates the filter category for the skill in the Alexa App.
:type category: (optional) str
:param distribution_countries: Selected list of countries provided by the skill owner where Amazon can distribute the skill.
:type distribution_countries: (optional) list[ask_smapi_model.v1.skill.manifest.distribution_countries.DistributionCountries]
"""
deserialized_types = {
'name': 'str',
'description': 'str',
'locales': 'dict(str, ask_smapi_model.v1.skill.manifest.skill_manifest_localized_publishing_information.SkillManifestLocalizedPublishingInformation)',
'is_available_worldwide': 'bool',
'distribution_mode': 'ask_smapi_model.v1.skill.manifest.distribution_mode.DistributionMode',
'gadget_support': 'ask_smapi_model.v1.skill.manifest.manifest_gadget_support.ManifestGadgetSupport',
'testing_instructions': 'str',
'category': 'str',
'distribution_countries': 'list[ask_smapi_model.v1.skill.manifest.distribution_countries.DistributionCountries]'
} # type: Dict
attribute_map = {
'name': 'name',
'description': 'description',
'locales': 'locales',
'is_available_worldwide': 'isAvailableWorldwide',
'distribution_mode': 'distributionMode',
'gadget_support': 'gadgetSupport',
'testing_instructions': 'testingInstructions',
'category': 'category',
'distribution_countries': 'distributionCountries'
} # type: Dict
supports_multiple_types = False
def __init__(self, name=None, description=None, locales=None, is_available_worldwide=None, distribution_mode=None, gadget_support=None, testing_instructions=None, category=None, distribution_countries=None):
# type: (Optional[str], Optional[str], Optional[Dict[str, SkillManifestLocalizedPublishingInformationV1]], Optional[bool], Optional[DistributionModeV1], Optional[ManifestGadgetSupportV1], Optional[str], Optional[str], Optional[List[DistributionCountriesV1]]) -> None
"""Defines the structure for publishing information in the skill manifest.
:param name: Name of the skill that is displayed to customers in the Alexa app.
:type name: (optional) str
:param description: Description of the skill's purpose and feature and how it works. Should describe any prerequisites like hardware or account requirements and detailed steps for the customer to get started. For Flash Briefing skill list the feeds offered within the skill. Use a conversational tone and correct grammar and punctuation. This description displays to customers on the skill detail card in the Alexa app.
:type description: (optional) str
:param locales: Defines the structure for locale specific publishing information in the skill manifest.
:type locales: (optional) dict(str, ask_smapi_model.v1.skill.manifest.skill_manifest_localized_publishing_information.SkillManifestLocalizedPublishingInformation)
:param is_available_worldwide: True if the skill should be distributed in all countries where Amazon distributes skill false otherwise.
:type is_available_worldwide: (optional) bool
:param distribution_mode:
:type distribution_mode: (optional) ask_smapi_model.v1.skill.manifest.distribution_mode.DistributionMode
:param gadget_support:
:type gadget_support: (optional) ask_smapi_model.v1.skill.manifest.manifest_gadget_support.ManifestGadgetSupport
:param testing_instructions: Special instructions provided by the developer to test the skill.
:type testing_instructions: (optional) str
:param category: Category that best describes a skill. Indicates the filter category for the skill in the Alexa App.
:type category: (optional) str
:param distribution_countries: Selected list of countries provided by the skill owner where Amazon can distribute the skill.
:type distribution_countries: (optional) list[ask_smapi_model.v1.skill.manifest.distribution_countries.DistributionCountries]
"""
self.__discriminator_value = None # type: str
self.name = name
self.description = description
self.locales = locales
self.is_available_worldwide = is_available_worldwide
self.distribution_mode = distribution_mode
self.gadget_support = gadget_support
self.testing_instructions = testing_instructions
self.category = category
self.distribution_countries = distribution_countries
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, SkillManifestPublishingInformation):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
198,
2,
15069,
13130,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
... | 2.85396 | 3,232 |
import xquantum as xq
import math
from qiskit.circuit.library.standard_gates import RYGate
from qiskit import QuantumCircuit, execute, Aer, IBMQ
# https://algassert.com/quirk#circuit={%22cols%22:[[1,{%22id%22:%22Ryft%22,%22arg%22:%221.982313%22}],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.714144%22},%22%E2%80%A2%22],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.910633%22},%22%E2%97%A6%22],[],[%22Amps2%22],[],[1,1,%22H%22],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/2%22},1,%22%E2%80%A2%22],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[1,{%22id%22:%22Ryft%22,%22arg%22:%22pi/3%22},%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/4%22},1,%22%E2%80%A2%22],[%22Z%22,1,%22%E2%80%A2%22],[1,1,%22X%22],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/2+pi%22},1,%22%E2%80%A2%22],[],[%22%E2%80%A2%22,%22X%22,%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[1,{%22id%22:%22Ryft%22,%22arg%22:%22pi/3%22},%22%E2%80%A2%22],[%22X%22,%22%E2%80%A2%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/4%22},1,%22%E2%80%A2%22],[1,1,%22X%22],[1,1,%22H%22],[%22Density%22,%22Density%22,%22Density%22]],%22init%22:[0,0,1]}
# https://algassert.com/quirk#circuit={%22cols%22:[[1,{%22id%22:%22Ryft%22,%22arg%22:%221.982313%22}],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.714144%22},%22%E2%80%A2%22],[],[{%22id%22:%22Ryft%22,%22arg%22:%221.910633%22},%22%E2%97%A6%22],[],[%22Amps2%22],[],[%22%E2%80%A2%22,%22X%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/2%22}],[%22%E2%80%A2%22,%22X%22],[%22X%22,%22%E2%80%A2%22],[1,{%22id%22:%22Ryft%22,%22arg%22:%22pi/3%22}],[%22X%22,%22%E2%80%A2%22],[{%22id%22:%22Ryft%22,%22arg%22:%22pi/4%22}],[%22Amps2%22]]}
| [
11748,
2124,
40972,
388,
355,
2124,
80,
198,
11748,
10688,
198,
6738,
10662,
1984,
270,
13,
21170,
5013,
13,
32016,
13,
20307,
62,
70,
689,
1330,
371,
56,
22628,
198,
6738,
10662,
1984,
270,
1330,
29082,
31560,
5013,
11,
12260,
11,
15... | 1.577601 | 1,134 |
from bot.Bot import Bot
from bot.api_ai import find_respond
import json
# change = Change()
# jso = {
#
# "respond": "Bye"
# }
# result = change.respond(jso)
# print(result)
| [
6738,
10214,
13,
20630,
1330,
18579,
198,
6738,
10214,
13,
15042,
62,
1872,
1330,
1064,
62,
5546,
198,
11748,
33918,
628,
198,
198,
2,
1487,
796,
9794,
3419,
198,
2,
474,
568,
796,
1391,
198,
2,
198,
2,
220,
220,
366,
5546,
1298,
... | 2.623188 | 69 |
import data_w
from data_w import widedata
from data_w import*
import model_w
from model_w import*
from model_w import WideCNN
#datset=widedata(ligand_path, protein_path,keys,motif_path,affinity_path)
dataset = widedata(ligand_path, protein_path,keys,motif_path,affinity_path)
train_loader, test_loader = load_splitset(dataset, .2)
modelw=WideCNN()
trainwide=train_w(modelw,train_loader)
torch.save(modelw,'wide.pt')
if __name__ == '__main__':
print(trainwide[0])
print(trainwide[1])
| [
11748,
1366,
62,
86,
201,
198,
6738,
1366,
62,
86,
1330,
266,
1384,
1045,
201,
198,
6738,
1366,
62,
86,
1330,
9,
201,
198,
11748,
2746,
62,
86,
201,
198,
6738,
2746,
62,
86,
1330,
9,
201,
198,
6738,
2746,
62,
86,
1330,
23399,
18... | 2.317181 | 227 |
import math | [
11748,
10688
] | 5.5 | 2 |
from itertools import cycle
import sys
# from qtpy import QtWidgets, QtCore
from PyQt5 import QtWidgets, QtCore
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
ex = ExampleWindow()
ex.show()
sys.exit(app.exec_())
| [
6738,
340,
861,
10141,
1330,
6772,
198,
11748,
25064,
198,
198,
2,
422,
10662,
83,
9078,
1330,
33734,
54,
312,
11407,
11,
33734,
14055,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
54,
312,
11407,
11,
33734,
14055,
628,
198,
198,
361,
... | 2.428571 | 105 |
from os import system
| [
6738,
28686,
1330,
1080,
201,
198,
201,
198
] | 3.125 | 8 |
"""
Module with tests for Highlight
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from ...tests.base import TestsBase
from ..highlight import Highlight2Html, Highlight2Latex
from IPython.config import Config
import xml
#-----------------------------------------------------------------------------
# Class
#-----------------------------------------------------------------------------
highlight2html = Highlight2Html()
highlight2latex = Highlight2Latex()
c = Config()
c.Highlight2Html.default_language='ruby'
highlight2html_ruby = Highlight2Html(config=c)
class TestHighlight(TestsBase):
"""Contains test functions for highlight.py"""
#Hello world test, magics test, blank string test
tests = [
"""
#Hello World Example
def say(text):
print(text)
end
say('Hello World!')
""",
"""
%%pylab
plot(x,y, 'r')
"""
]
tokens = [
['Hello World Example', 'say', 'text', 'print', 'def'],
['pylab', 'plot']]
def test_highlight2html(self):
"""highlight2html test"""
for index, test in enumerate(self.tests):
self._try_highlight(highlight2html, test, self.tokens[index])
def test_highlight2latex(self):
"""highlight2latex test"""
for index, test in enumerate(self.tests):
self._try_highlight(highlight2latex, test, self.tokens[index])
def _try_highlight(self, method, test, tokens):
"""Try highlighting source, look for key tokens"""
results = method(test)
for token in tokens:
assert token in results
| [
37811,
198,
26796,
351,
5254,
329,
3334,
2971,
198,
37811,
198,
198,
2,
10097,
32501,
198,
2,
15069,
357,
66,
8,
2211,
11,
262,
6101,
7535,
7712,
4816,
13,
198,
2,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
40499,
347,
10305,
1... | 3.076923 | 689 |
"""Here, I'd like to gather utility function / classes related to `pywin32com`.
Notice that this library is not library, but recipes for reading codes of `pywin32com`.
"""
from win32com.client import DispatchEx, CDispatch
from win32com.client.selecttlb import EnumTlbs
from win32com.client.makepy import GenerateFromTypeLibSpec
from win32com import __gen_path__
# print(__gen_path__) # In this folder, the generated object resides.
def assure_generation():
"""We would like to `generated` pywin32 PowerPoint module.
"""
if _required_generation():
_gen()
assure_generation()
| [
37811,
4342,
11,
314,
1549,
588,
284,
6431,
10361,
2163,
1220,
6097,
3519,
284,
4600,
9078,
5404,
2624,
785,
44646,
220,
198,
26396,
326,
428,
5888,
318,
407,
5888,
11,
475,
14296,
329,
3555,
12416,
286,
4600,
9078,
5404,
2624,
785,
4... | 3.223404 | 188 |
from selenium import webdriver
from time import sleep
import os
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ['enable-automation'])
# プラウザ起動(Chrome)
driver = webdriver.Chrome(r'chromedriver.exe', chrome_options=options) # chromedriver.exeを使う
# リストからURLをひとつづつ処理
print('file:///{}/tmp.png'.format(os.getcwd()))
driver.get('file:///{}/tmp.png'.format(os.getcwd()))
while True:
sleep(1)
driver.refresh() | [
6738,
384,
11925,
1505,
1330,
3992,
26230,
201,
198,
6738,
640,
1330,
3993,
201,
198,
11748,
28686,
201,
198,
201,
198,
25811,
796,
3992,
26230,
13,
1925,
5998,
29046,
3419,
201,
198,
25811,
13,
2860,
62,
23100,
9134,
62,
18076,
7203,
... | 2.244019 | 209 |
# gcode runner
import io, sys
import socket
import time
import re
import argparse
ADDR="192.168.1.18"
PORT=23
LAST_SENT=""
# Should be ok, error, etc.
LAST_RESPONSE=""
# Optional [Caution: Unlocked] in response to $X
LAST_RESPONSE_MSG=""
# Global values set by get_status()
STATUS=""
MPOS=[0.0, 0.0, 0.0]
WPOS=[0.0, 0.0, 0.0]
FEEDS=[0.0, 0.0]
TIMEOUT_COUNT=0
PASS_TIMEOUT_COUNT=0
# Default response for command timeout is 5 minutes
# Get available text with specified timeout in ms
#### Main entry ####
# Parse command line
TARGET_PASSES=1
# Actions to take
WITH_HOME=0
WITH_INIT=1
WITH_FILE=0
WITH_POST=1
# Arbitrary init string
#INIT_CMD='G0 X500 Y800 F2000 G0 Z0 F200\n'
#INIT_CMD='G30 Z2.0\n'
#INIT_CMD='G0 Z0\n'
INIT_CMD='G0 Z10 X10 Y10 F2000\n'
INIT_CMD='G92 Z10 X10 Y10\n'
# Input file
INPUT_FILE='limit-test1-faster.gcode'
# Post-run command
POST_CMD='G0 Z10 X10 Y10 F2000\n'
# Attempt to read entire gcode file. This may fail on really large files.
# Must test with 10's of MB and up.
try:
ifile = open(INPUT_FILE, 'r')
GCode = ifile.readlines()
ifile.close()
except:
print('Failed to open gcode input {0}'.format(INPUT_FILE))
sys.exit(1)
# Analyze for comments
total_lines = len(GCode)
comment_lines = 0
for line in GCode:
if line.startswith('('):
comment_lines = comment_lines + 1
print('Input file {0} has {1} comment lines, {2} out of {3} active (comments will not be sent)'.format(INPUT_FILE, comment_lines, total_lines - comment_lines, total_lines))
start_run = time.monotonic()
line_number = 0
try:
print('Attempting connection via {0} at {1}:{2}'.format('TCP', ADDR, PORT))
socket.setdefaulttimeout(60)
msock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
started = time.monotonic()
cres = msock.connect( (ADDR, PORT) )
elapsed = time.monotonic() - started
print('Connection time:', elapsed)
# Flush any greeting, usually Smoothie command shell
time.sleep(2)
s = get_text(msock, 5000)
print('Starting text:', s.strip())
s = get_text(msock, 1000)
if s != "":
print('Still starting:', s.strip())
start_run = time.monotonic()
# Query status - if alarm, send $X to clear and try again
#timed_cmd(msock, b'get status\n')
# Smoothie will send <status|mpos|wpos>\n[GC:...] in response to ?$G
#timed_cmd(msock, b'?$G\n')
s = get_text(msock, 1000)
if s != "":
print('Additional text: {0}'.format(s))
# Supposed to be time in milliseconds - Smoothie interprets it as seconds
#timed_cmd(msock, b'G4 P10\n')
for rpass in range(1, 1 + TARGET_PASSES):
print('starting pass', rpass, 'of', TARGET_PASSES)
start_pass = time.monotonic()
get_status(msock)
# If we interrupt a run, we may get an empty status
if STATUS == '':
print('Trying status again:')
get_status(msock, 6000)
# Try again if we timed out
if STATUS == 'Timeout':
print('Status timeout, trying again:')
get_status(msock, 10000)
print('Status:', STATUS)
if STATUS == 'Alarm':
print('Need to clear alarm')
timed_cmd(msock, '$X\n')
if LAST_RESPONSE != 'ok':
print('Did not get ok:', LAST_RESPONSE)
sys.exit(1)
elif STATUS.startswith('Failed'):
# A previous operation failed. Attempt a wait
print('A previous operation failed, attempting to clear failure...')
timed_cmd(msock, 'M400\n')
get_status(msock)
print('Response from wait: {0} {1} status: {2}'.format(LAST_RESPONSE, LAST_RESPONSE_MSG, STATUS))
if STATUS != 'Idle':
print('Unable to clear failure')
sys.exit(1)
elif STATUS != 'Idle':
print('Status must be idle, got:', STATUS)
#sys.exit(1)
break
PASS_TIMEOUT_COUNT = 0
#get_status(msock)
#if STATUS != 'Idle':
# print('Non-idle status:', STATUS)
# break
if WITH_HOME:
print('Homing...')
timed_cmd(msock, 'G28.2 X0 Y0\n')
if LAST_RESPONSE == 'error':
break
timed_cmd(msock, 'G92 X0 Y0\n')
# Wait for motion to complete
timed_cmd(msock, 'M400\n')
get_status(msock)
print('New status after home/reset: {0}\n'.format(STATUS))
if WITH_INIT and rpass == 1:
print('Sending init cmd: {0}'.format(INIT_CMD.strip()))
timed_cmd(msock, INIT_CMD)
timed_cmd(msock, 'M400\n')
get_status(msock)
print('New status after init: {0}\n'.format(STATUS))
line_number = 1
if WITH_FILE:
for line in GCode:
if not line.startswith('('):
# Smoothie switches on if spindle configured in switch mode for ANY value of S, including 0
if line.startswith('M3'):
print('Spindle control: {0}'.format(line.strip()))
# FIXME use longer timeout for M400
timed_cmd(msock, line)
if LAST_RESPONSE == 'error':
print('Exiting, error condition at line {0}'.format(line_number))
sys.exit(1)
line_number = line_number + 1
elapsed_pass = time.monotonic() - start_pass
print('pass {0} total time {1:.4f}s, timeouts: {2}'.format(rpass, elapsed_pass, PASS_TIMEOUT_COUNT))
if WITH_POST:
print('Final pass completed, sending post-run command {0}'.format(POST_CMD))
timed_cmd(msock, POST_CMD)
timed_cmd(msock, 'M400\n')
get_status(msock)
print('New status after post-run cmd: {0}\n'.format(STATUS))
elapsed_run = time.monotonic() - start_run
print('Final pass completed in {0:.4f}s, total timeout count: {1}'.format(elapsed_run, TIMEOUT_COUNT))
except OSError as e:
print('Exception:', e)
print('last cmd:', LAST_SENT, 'line number:', line_number)
elapsed_run = time.monotonic() - start_run
print('Elapsed time: {0:.4f}s'.format(elapsed_run))
sys.exit(1)
msock.close()
print('Completed')
sys.exit(0)
| [
2,
308,
8189,
17490,
198,
11748,
33245,
11,
25064,
198,
11748,
17802,
198,
11748,
640,
198,
11748,
302,
198,
11748,
1822,
29572,
198,
198,
2885,
7707,
2625,
17477,
13,
14656,
13,
16,
13,
1507,
1,
198,
15490,
28,
1954,
198,
43,
11262,
... | 2.130258 | 2,948 |
#4. 定义一个函数,完成以下功能:
# 1) 输入两个整型数,例如输入的是3, 5
# 2) 此函数要计算的是3 + 33 + 333 + 3333 + 33333(到5个为止)
a, b = eval(input("请输入两个整型数"))
print(sum1(a, b))
| [
2,
19,
13,
10263,
106,
248,
20046,
231,
31660,
10310,
103,
49035,
121,
46763,
108,
171,
120,
234,
22522,
234,
22755,
238,
20015,
98,
10310,
233,
27950,
253,
47797,
121,
25,
198,
2,
220,
220,
352,
8,
5525,
122,
241,
17739,
98,
10310,... | 0.99375 | 160 |
# test plugin
from bot.pluginDespatch import Plugin
import re
import datetime
import logging
from models import CapturedUrls
from logos.settings import LOGGING
logger = logging.getLogger(__name__)
logging.config.dictConfig(LOGGING)
| [
2,
1332,
13877,
198,
6738,
10214,
13,
33803,
5960,
17147,
1330,
42636,
198,
11748,
302,
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
6738,
4981,
1330,
6790,
1522,
16692,
7278,
198,
198,
6738,
29645,
13,
33692,
1330,
41605,
38,
2751,
... | 2.435897 | 117 |
from django.contrib.auth.models import User
from rest_framework import serializers
from odata.models import Product, Customer, Category, Shipper, Order, OrderDetail | [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
267,
7890,
13,
27530,
1330,
8721,
11,
22092,
11,
21743,
11,
911,
14710,
11,
8284,
11,
8284,
11242,
603
] | 4 | 41 |
#! /usr/bin/env python2
'''
This script converts the Ensembl.dat file to the GO reference file used in the topGO R program.
#Example input:
ID 1 standard; DNA; HTG; 122678785 BP.
XX
AC chromosome:CanFam3.1:1:1:122678785:1
XX
SV 1.CanFam3.1
XX
DT 4-SEP-2018
XX
DE Canis lupus familiaris chromosome 1 CanFam3.1 full sequence 1..122678785
DE annotated by Ensembl
XX
KW .
XX
OS Canis lupus familiaris (dog)
OC Eukaryota; Opisthokonta; Metazoa; Eumetazoa; Bilateria; Deuterostomia;
OC Chordata; Craniata; Vertebrata; Gnathostomata; Teleostomi; Euteleostomi;
OC Sarcopterygii; Dipnotetrapodomorpha; Tetrapoda; Amniota; Mammalia; Theria;
OC Eutheria; Boreoeutheria; Laurasiatheria; Carnivora; Caniformia; Canidae;
OC Canis lupus.
XX
CC This sequence was annotated by Ensembl(www.ensembl.org). Please visit the
CC Ensembl or EnsemblGenomes web site, http://www.ensembl.org/ or
CC http://www.ensemblgenomes.org/ for more information.
XX
CC All feature locations are relative to the first (5') base of the sequence
CC in this file. The sequence presented is always the forward strand of the
CC assembly. Features that lie outside of the sequence contained in this file
CC have clonal location coordinates in the format: <clone
CC accession>.<version>:<start>..<end>
XX
CC The /gene indicates a unique id for a gene, /note="transcript_id=..." a
CC unique id for a transcript, /protein_id a unique id for a peptide and
CC note="exon_id=..." a unique id for an exon. These ids are maintained
CC wherever possible between versions.
XX
CC All the exons and transcripts in Ensembl are confirmed by similarity to
CC either protein or cDNA sequences.
XX
FH Key Location/Qualifiers
FT source 1..122678785
FT /organism="Canis lupus familiaris"
FT /db_xref="taxon:9615"
FT gene 722179..735934
FT /gene=ENSCAFG00000000008.3
FT /locus_tag="TXNL4A"
FT /note="thioredoxin like 4A [Source:VGNC
FT Symbol;Acc:VGNC:48019]"
FT mRNA join(722179..722324,722691..722877,731542..731645,
FT 734838..735934)
FT /gene="ENSCAFG00000000008.3"
FT /standard_name="ENSCAFT00000000009.3"
FT CDS join(722725..722877,731542..731645,734838..735009)
FT /gene="ENSCAFG00000000008.3"
FT /protein_id="ENSCAFP00000000008.3"
FT /note="transcript_id=ENSCAFT00000000009.3"
FT /db_xref="RefSeq_mRNA_predicted:XM_005615276"
FT /db_xref="RefSeq_mRNA_predicted:XM_533363"
FT /db_xref="RefSeq_peptide_predicted:XP_005615333"
FT /db_xref="RefSeq_peptide_predicted:XP_022263670"
FT /db_xref="RefSeq_peptide_predicted:XP_533363"
FT /db_xref="Uniprot/SPTREMBL:E2R204"
FT /db_xref="EMBL:AAEX03000011"
FT /db_xref="GO:0000398"
FT /db_xref="GO:0000398"
FT /db_xref="GO:0005634"
FT /db_xref="GO:0005682"
FT /db_xref="GO:0005829"
FT /db_xref="GO:0031965"
FT /db_xref="GO:0046540"
FT /db_xref="GO:0046540"
FT /db_xref="GO:0046540"
FT /db_xref="GO:0071005"
FT /db_xref="VGNC_trans_name:TXNL4A-201"
FT /db_xref="Reactome:R-CFA-72163"
FT /db_xref="Reactome:R-CFA-72165"
FT /db_xref="Reactome:R-CFA-72172"
FT /db_xref="Reactome:R-CFA-72203"
FT /db_xref="Reactome:R-CFA-8953854"
FT /db_xref="UniParc:UPI0000447A0B"
FT /translation="MSYMLPHLHNGWQVDQAILSEEDRVVVIRFGHDWDPTCMKMDEVL
FT YSIAEKVKNFAVIYLVDITEVPDFNKMYELYDPCTVMFFFRNKHIMIDLGTGNNNKINW
FT AMEDKQEMIDIIETVYRGARKGRGLVVSPKDYSTKYRY"
FT gene complement(744461..746178)
FT /gene=ENSCAFG00000031133.1
FT /locus_tag="HSBP1L1"
FT /note="heat shock factor binding protein 1 like 1
FT [Source:VGNC Symbol;Acc:VGNC:53725]"
FT mRNA join(complement(746112..746178),complement(744461..744552))
FT /gene="ENSCAFG00000031133.1"
FT /standard_name="ENSCAFT00000045122.1"
FT CDS join(complement(746112..746178),complement(744461..744552))
FT /gene="ENSCAFG00000031133.1"
FT /protein_id="ENSCAFP00000038592.1"
FT /note="transcript_id=ENSCAFT00000045122.1"
FT /db_xref="RefSeq_mRNA_predicted:XM_003432558"
FT /db_xref="RefSeq_peptide_predicted:XP_003432606"
FT /db_xref="Uniprot/SPTREMBL:J9NZ72"
FT /db_xref="EMBL:AAEX03000011"
FT /db_xref="GO:0003714"
FT /db_xref="GO:0005634"
FT /db_xref="GO:0005737"
FT /db_xref="GO:0005829"
FT /db_xref="GO:0070370"
FT /db_xref="GO:1903507"
FT /db_xref="VGNC_trans_name:HSBP1L1-201"
FT /db_xref="UniParc:UPI00027479F7"
FT /translation="AENLFQELQEHFQALIATLNLRMEEMGSRLEDLQKNVNDLMVQAG
FT VEDPVSEQ"
FT gene complement(829658..866436)
FT /gene=ENSCAFG00000039493.1
FT misc_RNA join(complement(865924..866436),complement(846078..846445),
FT complement(841686..841787),complement(829658..829667))
FT /gene="ENSCAFG00000039493.1"
FT /db_xref="RNAcentral:URS0000A92009"
FT /note="lincRNA"
FT /standard_name="ENSCAFT00000053567.1"
FT misc_RNA join(complement(865924..866436),complement(846078..846445),
FT complement(843054..843163),complement(841759..841787))
FT /gene="ENSCAFG00000039493.1"
FT /db_xref="RNAcentral:URS0000A94E9D"
FT /note="lincRNA"
FT /standard_name="ENSCAFT00000055264.1"
FT misc_RNA join(complement(865924..866436),complement(846078..846445),
FT complement(845283..845439))
FT /gene="ENSCAFG00000039493.1"
FT /db_xref="RNAcentral:URS0000AA5288"
FT /note="lincRNA"
FT /standard_name="ENSCAFT00000058711.1"
FT gene complement(886083..886640)
FT /gene=ENSCAFG00000028976.1
FT mRNA complement(886083..886640)
FT /gene="ENSCAFG00000028976.1"
FT /standard_name="ENSCAFT00000043602.1"
FT CDS complement(886083..886640)
FT /gene="ENSCAFG00000028976.1"
FT /protein_id="ENSCAFP00000039366.1"
FT /note="transcript_id=ENSCAFT00000043602.1"
FT /db_xref="Uniprot/SPTREMBL:J9P1E2"
FT /db_xref="EMBL:AAEX03000016"
FT /db_xref="UniParc:UPI000274763D"
FT /translation="MWTGWPMGVPEHCTAPAPYTGRSAQGPSPTSGSAPGPPHTHGPPA
FT LGIPPRGPLSTQDYPPTWPPAPRTPLMWAPQQPGPPTQATSTEDHPHATPQHPGLPHPH
FT PRGPSAPRTPPCGPSHGSPALGTPPCRPLSTKDPLPPPHPKSYGGWFPGSLFRVLPGPQ
FT EDSPPNRAADAQSQHLVAFRCF"
#Example output:
ENSCAFG00000000008 GO:0000398, GO:0000398, GO:0005634, GO:0005682, GO:0005829, GO:0031965, GO:0046540, GO:0046540, GO:0046540, GO:0071005
ENSCAFG00000031133 GO:0003714, GO:0005634, GO:0005737, GO:0005829, GO:0070370, GO:1903507
#command:
$ python Ensembl.dat-to-topGO.db.py -i input.table -o output.tab
#contact:
Dmytro Kryvokhyzha dmytro.kryvokhyzha@evobio.eu
'''
############################# modules #############################
import calls # my custom module
############################# options #############################
parser = calls.CommandLineParser()
parser.add_argument('-i', '--input', help = 'name of the input file', type=str, required=True)
parser.add_argument('-o', '--output', help = 'name of the output file', type=str, required=True)
args = parser.parse_args()
############################# program #############################
geneName = "geneName"
dicts = {geneName: []}
outfile = open(args.output, 'w')
with open(args.input) as datafile:
for line in datafile:
if line.startswith("FT"):
words = line.split()
if '/gene="' in words[1]:
if dicts[geneName] != []:
GOprint = ', '.join(str(e) for e in dicts[geneName])
outfile.write("%s\t%s\n" % (list(dicts.keys())[0], GOprint))
geneName = words[1].split(".")[0].replace('/gene="', '')
dicts = {geneName: []}
elif '/db_xref="GO' in words[1]:
GO = words[1].replace('/db_xref="', '').replace('"', '')
dicts[geneName].append(GO)
datafile.close()
outfile.close()
print('Done!')
# dicts = {}
# keys = range(4)
# values = ["Hi", "I", "am", "John"]
# for i in keys:
# dicts[i] = values[i]
# print(dicts) | [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
198,
7061,
6,
198,
1212,
4226,
26161,
262,
2039,
4428,
75,
13,
19608,
2393,
284,
262,
10351,
4941,
2393,
973,
287,
262,
1353,
11230,
371,
1430,
13,
198,
198,
2,
16281,
5128,
... | 1.818611 | 5,083 |
import os
import numpy as np
import scipy.spatial as scsp
import scipy.linalg as scli
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
13,
2777,
34961,
355,
629,
2777,
198,
11748,
629,
541,
88,
13,
75,
1292,
70,
355,
264,
44506,
628,
628,
628,
628,
628
] | 2.638889 | 36 |
import json
| [
11748,
33918,
198
] | 4 | 3 |
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest2
from mock import patch, MagicMock
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from fuel_plugin.ostf_adapter import config
from fuel_plugin.ostf_adapter.nose_plugin.nose_discovery import discovery
from fuel_plugin.ostf_adapter.storage import models
from fuel_plugin.ostf_adapter import mixins
TEST_PATH = 'fuel_plugin/testing/fixture/dummy_tests'
| [
2,
220,
220,
220,
15069,
2211,
7381,
20836,
11,
3457,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
... | 3.300654 | 306 |
from CHECLabPy.core.io import TIOReader
from TargetCalibSB.pedestal import PedestalTargetCalib
import fitsio
from tqdm import tqdm
from matplotlib import pyplot as plt
import numpy as np
if __name__ == '__main__':
main()
| [
6738,
5870,
2943,
17822,
20519,
13,
7295,
13,
952,
1330,
31598,
1581,
1329,
263,
198,
6738,
12744,
9771,
571,
16811,
13,
9124,
395,
282,
1330,
13457,
395,
282,
21745,
9771,
571,
198,
11748,
11414,
952,
198,
6738,
256,
80,
36020,
1330,
... | 2.814815 | 81 |
import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
import pyarrow as pa
import pyarrow.parquet as pq
from sklearn.model_selection import train_test_split
import argparse
from scipy.special import erfinv
from scipy.sparse import csr_matrix
from sklearn.preprocessing import StandardScaler
from utils import *
X, X_test, _ = read_train_test_data()
# drop label encoding
lbl_cols = [col for col in X.columns if "_labelencod" in col]
X.drop(lbl_cols, axis=1, inplace=True)
X_test.drop(lbl_cols, axis=1, inplace=True)
# nan index
train_nan_idx = csr_matrix((np.isnan(X)).astype(int))
test_nan_idx = csr_matrix((np.isnan(X_test)).astype(int))
X = X.fillna(X.median())#X.fillna(X.median()) # X.fillna(0)
X = X.replace(np.inf, 99999.999)
X = X.replace(-np.inf, -99999.999)
X = X.values
X_test = X_test.fillna(X_test.median())#X_test.fillna(X_test.median())
X_test = X_test.replace(np.inf, 9999.999)
X_test = X_test.replace(-np.inf, -9999.999)
X_test = X_test.values
train_size = X.shape[0]
print("scale data")
scaler = StandardScaler()#StandardScaler() # GaussRankScaler()
X_all = scaler.fit_transform(np.r_[X, X_test])
del X, X_test; gc.collect()
# X = pd.DataFrame(X_all[:train_size,:])
X = pd.DataFrame(X_all[:train_size,:] * np.array((train_nan_idx.todense()==0).astype(int)))
del train_nan_idx
print("Done scaling train data...")
# X_test = pd.DataFrame(X_all[train_size:,:])
X_test = pd.DataFrame(X_all[train_size:,:] * np.array((test_nan_idx.todense()==0).astype(int)))
print("Done scaling test data...")
# del X_all; gc.collect()
del X_all, test_nan_idx;gc.collect()
X_test = X_test.values
X = X.values
y = pd.read_csv("../input/train.csv")["deal_probability"].values
oof_sgd(X, X_test, y, "stacking_elasticnet")
### no oof features
X, X_test, _ = read_train_test_data_all()
# drop label encoding
lbl_cols = [col for col in X.columns if "_labelencod" in col or "oof_" in col]
X.drop(lbl_cols, axis=1, inplace=True)
X_test.drop(lbl_cols, axis=1, inplace=True)
# nan index
train_nan_idx = csr_matrix((np.isnan(X)).astype(int))
test_nan_idx = csr_matrix((np.isnan(X_test)).astype(int))
X = X.fillna(X.median())#X.fillna(X.median()) # X.fillna(0)
X = X.replace(np.inf, 99999.999)
X = X.replace(-np.inf, -99999.999)
X = X.values
X_test = X_test.fillna(X_test.median())#X_test.fillna(X_test.median())
X_test = X_test.replace(np.inf, 9999.999)
X_test = X_test.replace(-np.inf, -9999.999)
X_test = X_test.values
train_size = X.shape[0]
print("scale data")
scaler = StandardScaler()#StandardScaler() # GaussRankScaler()
X_all = scaler.fit_transform(np.r_[X, X_test])
del X, X_test; gc.collect()
# X = pd.DataFrame(X_all[:train_size,:])
X = pd.DataFrame(X_all[:train_size,:] * np.array((train_nan_idx.todense()==0).astype(int)))
del train_nan_idx
print("Done scaling train data...")
# X_test = pd.DataFrame(X_all[train_size:,:])
X_test = pd.DataFrame(X_all[train_size:,:] * np.array((test_nan_idx.todense()==0).astype(int)))
print("Done scaling test data...")
# del X_all; gc.collect()
del X_all, test_nan_idx;gc.collect()
X_test = X_test.values
X = X.values
y = pd.read_csv("../input/train.csv")["deal_probability"].values
oof_sgd(X, X_test, y, "stacking_elasticnet_nooof")
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2124,
70,
39521,
355,
2124,
22296,
198,
11748,
1657,
70,
20475,
355,
300,
22296,
198,
11748,
12972,
6018,
355,
14187,
198,
11748,
12972,
6018,
13,
1845,
21... | 2.324891 | 1,382 |
argo["salinity"].sel(level=10).plot.line() | [
9448,
14692,
21680,
6269,
1,
4083,
741,
7,
5715,
28,
940,
737,
29487,
13,
1370,
3419
] | 2.625 | 16 |
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
import cv2
# 参考記事:https://qiita.com/haru1843/items/00de955790d3a22a217b
img = io.imread("./dataset/images/imori_256x256.png")
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
th = getThreshold(gray_img)
binary_img = gray2binary(gray_img, th)
plt.figure(figsize=(12, 4))
plt.subplot(1, 3, 1)
plt.title("input")
plt.imshow(img)
plt.subplot(1, 3, 2)
plt.title("gray")
plt.imshow(gray_img, cmap='gray')
plt.subplot(1, 3, 3)
plt.title("binary")
plt.imshow(binary_img, cmap="gray")
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
1341,
9060,
1330,
33245,
198,
11748,
269,
85,
17,
628,
198,
2,
10263,
237,
224,
32003,
225,
164,
101,
246,
12859,
233,
171,
120,
... | 2.076923 | 273 |
from sqlite3 import Row
from pydantic import BaseModel
| [
6738,
44161,
578,
18,
1330,
11314,
198,
198,
6738,
279,
5173,
5109,
1330,
7308,
17633,
628,
198
] | 3.411765 | 17 |
from .boxloss import *
from .focalloss import *
from .multitaskloss import *
| [
6738,
764,
3524,
22462,
1330,
1635,
198,
6738,
764,
69,
420,
439,
793,
1330,
1635,
198,
6738,
764,
16680,
270,
2093,
22462,
1330,
1635,
198
] | 3.08 | 25 |
import os
import time
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
import sys
sys.path.append('..')
from utils import calHarDist
DATA_DIR = '../datasets'
for filename in ['modified_train.csv']:
print('reading training data from %s ...' % filename)
df = pd.read_csv(os.path.join(DATA_DIR, filename))
c = df.corr().abs()
print(c)
y = df['DURATION']
# df['displacement'] = df.apply(lambda row : calHarDist(row['ORIGIN_LAT'], row['ORIGIN_LNG'], row['CUT_OFF_LAT'], row['CUT_OFF_LNG']), axis = 1)
df.drop(['TRIP_ID','TIMESTAMP','DATE', 'END_TIME', 'ORIGIN_CALL', 'ORIGIN_STAND', 'DURATION'], axis=1, inplace=True)
# values = {'ORIGIN_CALL': -1, 'ORIGIN_STAND': -1}
# df = df.fillna(value=values)
X = np.array(df, dtype=np.float)
# th1 = np.percentile(df['displacement'], [99.9])[0]
# relevant_rows = (df['displacement'] < th1)
# df.drop(['displacement'], axis=1, inplace=True)
# X = df.loc[relevant_rows]
# y = y.loc[relevant_rows]
t0 = time.time()
reg = LinearRegression().fit(X, y)
print(reg.score(X, y))
print('Done in %.1f sec.' % (time.time() - t0))
df = pd.read_csv(os.path.join(DATA_DIR, filename.replace('train', 'test')))
ids = df['TRIP_ID']
df.drop(['TRIP_ID','TIMESTAMP','DATE', 'END_TIME', 'ORIGIN_CALL', 'ORIGIN_STAND'], axis=1, inplace=True)
# values = {'ORIGIN_CALL': -1, 'ORIGIN_STAND': -1}
# df = df.fillna(value=values)
X_tst = np.array(df, dtype=np.float)
y_pred = reg.predict(X_tst)
submission = pd.DataFrame(ids, columns=['TRIP_ID'])
submission['TRAVEL_TIME'] = y_pred
submission.to_csv('../datasets/my_submission.csv', index=False)
| [
11748,
28686,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
44800,
8081,
2234,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
3272,
62... | 2.213235 | 816 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 15 14:01:24 2021
@author: liang
"""
'''
AWS personalize model
'''
import boto3
import json
import os
class PersonalizePredictHandler():
"""
""" | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
7653,
1315,
1478,
25,
486,
25,
1731,
33448,
198,
198,
31,
9800,
25,
7649,
648,
198... | 2.516854 | 89 |
# Copyright 2016 Bridgewater Associates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.alerters.custom_alerter
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <opensource@bwater.com>
"""
from security_monkey import app
alerter_registry = []
| [
2,
220,
220,
220,
220,
15069,
1584,
48218,
29306,
198,
2,
198,
2,
220,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
220,
220,
345,
743,
407,
779,
428,
2393,
... | 3.290698 | 258 |
import cloudmesh
cloudmesh.shell("help")
print cloudmesh.version()
| [
11748,
6279,
76,
5069,
198,
17721,
76,
5069,
13,
29149,
7203,
16794,
4943,
198,
4798,
6279,
76,
5069,
13,
9641,
3419,
628
] | 3.090909 | 22 |
import json
import numpy as np
import torch
from torch import nn
| [
11748,
33918,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198
] | 3.473684 | 19 |
#
# PySNMP MIB module Juniper-IKE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-IKE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:52:12 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
juniMibs, = mibBuilder.importSymbols("Juniper-MIBs", "juniMibs")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
ObjectIdentity, Counter32, Bits, Integer32, ModuleIdentity, NotificationType, MibIdentifier, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, Counter64, Unsigned32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Counter32", "Bits", "Integer32", "ModuleIdentity", "NotificationType", "MibIdentifier", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "Counter64", "Unsigned32", "TimeTicks")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
juniIkeMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71))
juniIkeMIB.setRevisions(('2005-11-22 16:15', '2004-01-23 15:12', '2004-04-06 22:26',))
if mibBuilder.loadTexts: juniIkeMIB.setLastUpdated('200404062226Z')
if mibBuilder.loadTexts: juniIkeMIB.setOrganization('Juniper Networks, Inc.')
juniIkeObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1))
juniIke = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1))
juniIkePolicyRuleTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1), )
if mibBuilder.loadTexts: juniIkePolicyRuleTable.setStatus('obsolete')
juniIkePolicyRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkePolicyRulePriority"))
if mibBuilder.loadTexts: juniIkePolicyRuleEntry.setStatus('obsolete')
juniIkePolicyRulePriority = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000)))
if mibBuilder.loadTexts: juniIkePolicyRulePriority.setStatus('obsolete')
juniIkePolicyRuleAuthMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 2), JuniIkeAuthenticationMethod().clone('preSharedKeys')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleAuthMethod.setStatus('obsolete')
juniIkePolicyRuleEncryptMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 3), JuniIkeEncryptionMethod().clone('tripleDes')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleEncryptMethod.setStatus('obsolete')
juniIkePolicyRulePfsGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 4), JuniIkeGroup().clone('group2')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRulePfsGroup.setStatus('obsolete')
juniIkePolicyRuleHashMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 5), JuniIkeHashMethod().clone('sha')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleHashMethod.setStatus('obsolete')
juniIkePolicyRuleLifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(60, 86400)).clone(28800)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleLifetime.setStatus('obsolete')
juniIkePolicyRuleNegotiationMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 7), JuniIkeNegotiationMode().clone('aggressive')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleNegotiationMode.setStatus('obsolete')
juniIkePolicyRuleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleRowStatus.setStatus('obsolete')
juniIkePolicyRuleV2Table = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6), )
if mibBuilder.loadTexts: juniIkePolicyRuleV2Table.setStatus('current')
juniIkePolicyRuleV2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkePolicyRuleV2Priority"))
if mibBuilder.loadTexts: juniIkePolicyRuleV2Entry.setStatus('current')
juniIkePolicyRuleV2Priority = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000)))
if mibBuilder.loadTexts: juniIkePolicyRuleV2Priority.setStatus('current')
juniIkePolicyRuleV2AuthMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 2), JuniIkeAuthenticationMethod().clone('preSharedKeys')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2AuthMethod.setStatus('current')
juniIkePolicyRuleV2EncryptMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 3), JuniIkeEncryptionMethod().clone('tripleDes')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2EncryptMethod.setStatus('current')
juniIkePolicyRuleV2PfsGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 4), JuniIkeGroup().clone('group2')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2PfsGroup.setStatus('current')
juniIkePolicyRuleV2HashMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 5), JuniIkeHashMethod().clone('sha')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2HashMethod.setStatus('current')
juniIkePolicyRuleV2Lifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(60, 86400)).clone(28800)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2Lifetime.setStatus('current')
juniIkePolicyRuleV2NegotiationMode = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 7), JuniIkeNegotiationV2Mode().clone('aggressiveNotAllowed')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2NegotiationMode.setStatus('current')
juniIkePolicyRuleV2IpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 8), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2IpAddress.setStatus('current')
juniIkePolicyRuleV2RouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 9), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2RouterIndex.setStatus('current')
juniIkePolicyRuleV2RowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 6, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkePolicyRuleV2RowStatus.setStatus('current')
juniIkeIpv4PresharedKeyTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2), )
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyTable.setStatus('current')
juniIkeIpv4PresharedKeyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeIpv4PresharedRemoteIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeIpv4PresharedRouterIdx"))
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyEntry.setStatus('current')
juniIkeIpv4PresharedRemoteIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 1), IpAddress())
if mibBuilder.loadTexts: juniIkeIpv4PresharedRemoteIpAddr.setStatus('current')
juniIkeIpv4PresharedRouterIdx = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 2), Unsigned32())
if mibBuilder.loadTexts: juniIkeIpv4PresharedRouterIdx.setStatus('current')
juniIkeIpv4PresharedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 200))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyStr.setStatus('current')
juniIkeIpv4PresharedMaskedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 300))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeIpv4PresharedMaskedKeyStr.setStatus('current')
juniIkeIpv4PresharedKeyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeIpv4PresharedKeyRowStatus.setStatus('current')
juniIkeFqdnPresharedKeyTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3), )
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyTable.setStatus('current')
juniIkeFqdnPresharedKeyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeFqdnPresharedRemote"), (0, "Juniper-IKE-MIB", "juniIkeFqdnPresharedRouterIndex"))
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyEntry.setStatus('current')
juniIkeFqdnPresharedRemote = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80)))
if mibBuilder.loadTexts: juniIkeFqdnPresharedRemote.setStatus('current')
juniIkeFqdnPresharedRouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 2), Unsigned32())
if mibBuilder.loadTexts: juniIkeFqdnPresharedRouterIndex.setStatus('current')
juniIkeFqdnPresharedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 200))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyStr.setStatus('current')
juniIkeFqdnPresharedMaskedKeyStr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 300))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeFqdnPresharedMaskedKeyStr.setStatus('current')
juniIkeFqdnPresharedKeyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 3, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: juniIkeFqdnPresharedKeyRowStatus.setStatus('current')
juniIkeSaTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4), )
if mibBuilder.loadTexts: juniIkeSaTable.setStatus('obsolete')
juniIkeSaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeSaRemoteIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaLocalIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaRouterIndex"), (0, "Juniper-IKE-MIB", "juniIkeSaDirection"))
if mibBuilder.loadTexts: juniIkeSaEntry.setStatus('obsolete')
juniIkeSaRemoteIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 1), IpAddress())
if mibBuilder.loadTexts: juniIkeSaRemoteIpAddr.setStatus('obsolete')
juniIkeSaLocalIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 2), IpAddress())
if mibBuilder.loadTexts: juniIkeSaLocalIpAddr.setStatus('obsolete')
juniIkeSaRouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 3), Unsigned32())
if mibBuilder.loadTexts: juniIkeSaRouterIndex.setStatus('obsolete')
juniIkeSaDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 4), JuniIpsecPhase1SaDirection())
if mibBuilder.loadTexts: juniIkeSaDirection.setStatus('obsolete')
juniIkeSaState = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 5), JuniIpsecPhase1SaState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSaState.setStatus('obsolete')
juniIkeSaRemaining = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 4, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSaRemaining.setStatus('obsolete')
juniIkeSa2Table = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5), )
if mibBuilder.loadTexts: juniIkeSa2Table.setStatus('current')
juniIkeSa2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1), ).setIndexNames((0, "Juniper-IKE-MIB", "juniIkeSa2RemoteIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaRemotePort"), (0, "Juniper-IKE-MIB", "juniIkeSa2LocalIpAddr"), (0, "Juniper-IKE-MIB", "juniIkeSaLocalPort"), (0, "Juniper-IKE-MIB", "juniIkeSa2RouterIndex"), (0, "Juniper-IKE-MIB", "juniIkeSa2Direction"), (0, "Juniper-IKE-MIB", "juniIkeSaNegotiationDone"))
if mibBuilder.loadTexts: juniIkeSa2Entry.setStatus('current')
juniIkeSa2RemoteIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 1), IpAddress())
if mibBuilder.loadTexts: juniIkeSa2RemoteIpAddr.setStatus('current')
juniIkeSaRemotePort = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 2), Unsigned32())
if mibBuilder.loadTexts: juniIkeSaRemotePort.setStatus('current')
juniIkeSa2LocalIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 3), IpAddress())
if mibBuilder.loadTexts: juniIkeSa2LocalIpAddr.setStatus('current')
juniIkeSaLocalPort = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 4), Unsigned32())
if mibBuilder.loadTexts: juniIkeSaLocalPort.setStatus('current')
juniIkeSa2RouterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 5), Unsigned32())
if mibBuilder.loadTexts: juniIkeSa2RouterIndex.setStatus('current')
juniIkeSa2Direction = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("responder", 0), ("initiator", 1))))
if mibBuilder.loadTexts: juniIkeSa2Direction.setStatus('current')
juniIkeSaNegotiationDone = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("negotiationNotDone", 0), ("negotiationDone", 1))))
if mibBuilder.loadTexts: juniIkeSaNegotiationDone.setStatus('current')
juniIkeSa2State = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 8), JuniIpsecPhase1SaState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSa2State.setStatus('current')
juniIkeSa2Remaining = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: juniIkeSa2Remaining.setStatus('current')
juniRemoteCookie = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniRemoteCookie.setStatus('current')
juniLocalCookie = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 1, 1, 5, 1, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: juniLocalCookie.setStatus('current')
juniIkeMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2))
juniIkeMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1))
juniIkeMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2))
juniIkeCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1, 1)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleGroup"), ("Juniper-IKE-MIB", "juniIkeIpv4PreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeFqdnPreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeSaGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeCompliance = juniIkeCompliance.setStatus('obsolete')
juniIkeCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1, 2)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleGroup"), ("Juniper-IKE-MIB", "juniIkeIpv4PreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeFqdnPreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeSa2Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeCompliance2 = juniIkeCompliance2.setStatus('obsolete')
juniIkeCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 1, 3)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleV2Group"), ("Juniper-IKE-MIB", "juniIkeIpv4PreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeFqdnPreSharedKeyGroup"), ("Juniper-IKE-MIB", "juniIkeSa2Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeCompliance3 = juniIkeCompliance3.setStatus('current')
juniIkePolicyRuleGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 1)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleAuthMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleEncryptMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRulePfsGroup"), ("Juniper-IKE-MIB", "juniIkePolicyRuleHashMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleLifetime"), ("Juniper-IKE-MIB", "juniIkePolicyRuleNegotiationMode"), ("Juniper-IKE-MIB", "juniIkePolicyRuleRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkePolicyRuleGroup = juniIkePolicyRuleGroup.setStatus('obsolete')
juniIkeIpv4PreSharedKeyGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 2)).setObjects(("Juniper-IKE-MIB", "juniIkeIpv4PresharedKeyStr"), ("Juniper-IKE-MIB", "juniIkeIpv4PresharedMaskedKeyStr"), ("Juniper-IKE-MIB", "juniIkeIpv4PresharedKeyRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeIpv4PreSharedKeyGroup = juniIkeIpv4PreSharedKeyGroup.setStatus('current')
juniIkeFqdnPreSharedKeyGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 3)).setObjects(("Juniper-IKE-MIB", "juniIkeFqdnPresharedKeyStr"), ("Juniper-IKE-MIB", "juniIkeFqdnPresharedMaskedKeyStr"), ("Juniper-IKE-MIB", "juniIkeFqdnPresharedKeyRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeFqdnPreSharedKeyGroup = juniIkeFqdnPreSharedKeyGroup.setStatus('current')
juniIkeSaGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 4)).setObjects(("Juniper-IKE-MIB", "juniIkeSaState"), ("Juniper-IKE-MIB", "juniIkeSaRemaining"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeSaGroup = juniIkeSaGroup.setStatus('obsolete')
juniIkeSa2Group = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 5)).setObjects(("Juniper-IKE-MIB", "juniIkeSa2State"), ("Juniper-IKE-MIB", "juniIkeSa2Remaining"), ("Juniper-IKE-MIB", "juniRemoteCookie"), ("Juniper-IKE-MIB", "juniLocalCookie"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkeSa2Group = juniIkeSa2Group.setStatus('current')
juniIkePolicyRuleV2Group = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 71, 2, 2, 6)).setObjects(("Juniper-IKE-MIB", "juniIkePolicyRuleV2AuthMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2EncryptMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2PfsGroup"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2HashMethod"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2Lifetime"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2NegotiationMode"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2IpAddress"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2RouterIndex"), ("Juniper-IKE-MIB", "juniIkePolicyRuleV2RowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
juniIkePolicyRuleV2Group = juniIkePolicyRuleV2Group.setStatus('current')
mibBuilder.exportSymbols("Juniper-IKE-MIB", juniIkeSaRemaining=juniIkeSaRemaining, juniIkePolicyRuleV2Priority=juniIkePolicyRuleV2Priority, juniIkePolicyRuleV2AuthMethod=juniIkePolicyRuleV2AuthMethod, juniIkeSaRemotePort=juniIkeSaRemotePort, juniIkeSa2Group=juniIkeSa2Group, juniIkeSaNegotiationDone=juniIkeSaNegotiationDone, juniIkeSa2RemoteIpAddr=juniIkeSa2RemoteIpAddr, juniIkeSa2Entry=juniIkeSa2Entry, juniIkeFqdnPresharedKeyEntry=juniIkeFqdnPresharedKeyEntry, juniIkeSaState=juniIkeSaState, juniIkeSa2RouterIndex=juniIkeSa2RouterIndex, juniIkeIpv4PresharedRemoteIpAddr=juniIkeIpv4PresharedRemoteIpAddr, juniIkeSaLocalPort=juniIkeSaLocalPort, juniIkePolicyRuleLifetime=juniIkePolicyRuleLifetime, juniIkeMIB=juniIkeMIB, juniIkeSaEntry=juniIkeSaEntry, juniIkePolicyRuleEntry=juniIkePolicyRuleEntry, juniIkePolicyRuleTable=juniIkePolicyRuleTable, juniIkeSa2State=juniIkeSa2State, juniIkePolicyRuleV2PfsGroup=juniIkePolicyRuleV2PfsGroup, juniIkeCompliance3=juniIkeCompliance3, juniIkePolicyRuleV2IpAddress=juniIkePolicyRuleV2IpAddress, juniIkePolicyRuleAuthMethod=juniIkePolicyRuleAuthMethod, juniIkeSaGroup=juniIkeSaGroup, JuniIkeGroup=JuniIkeGroup, juniIkeSaLocalIpAddr=juniIkeSaLocalIpAddr, juniIkeSaRemoteIpAddr=juniIkeSaRemoteIpAddr, juniIkePolicyRuleHashMethod=juniIkePolicyRuleHashMethod, juniIkeIpv4PreSharedKeyGroup=juniIkeIpv4PreSharedKeyGroup, juniIkePolicyRuleV2HashMethod=juniIkePolicyRuleV2HashMethod, juniIke=juniIke, juniIkePolicyRuleV2RouterIndex=juniIkePolicyRuleV2RouterIndex, juniIkeFqdnPresharedKeyRowStatus=juniIkeFqdnPresharedKeyRowStatus, juniIkeSa2Table=juniIkeSa2Table, juniIkeSa2Direction=juniIkeSa2Direction, JuniIkeEncryptionMethod=JuniIkeEncryptionMethod, juniIkeIpv4PresharedMaskedKeyStr=juniIkeIpv4PresharedMaskedKeyStr, juniIkeSaTable=juniIkeSaTable, JuniIkeHashMethod=JuniIkeHashMethod, JuniIpsecPhase1SaState=JuniIpsecPhase1SaState, juniIkePolicyRuleV2NegotiationMode=juniIkePolicyRuleV2NegotiationMode, juniIkeFqdnPresharedRemote=juniIkeFqdnPresharedRemote, 
juniIkeMIBCompliances=juniIkeMIBCompliances, juniIkeMIBConformance=juniIkeMIBConformance, juniIkeSa2Remaining=juniIkeSa2Remaining, juniLocalCookie=juniLocalCookie, juniIkeSaRouterIndex=juniIkeSaRouterIndex, juniIkeFqdnPreSharedKeyGroup=juniIkeFqdnPreSharedKeyGroup, juniIkeIpv4PresharedKeyRowStatus=juniIkeIpv4PresharedKeyRowStatus, juniIkePolicyRulePriority=juniIkePolicyRulePriority, JuniIkeNegotiationV2Mode=JuniIkeNegotiationV2Mode, juniIkeSaDirection=juniIkeSaDirection, juniIkePolicyRuleV2Lifetime=juniIkePolicyRuleV2Lifetime, juniIkePolicyRuleGroup=juniIkePolicyRuleGroup, juniIkePolicyRuleV2Entry=juniIkePolicyRuleV2Entry, juniIkeFqdnPresharedKeyStr=juniIkeFqdnPresharedKeyStr, juniIkeFqdnPresharedMaskedKeyStr=juniIkeFqdnPresharedMaskedKeyStr, juniIkeCompliance=juniIkeCompliance, JuniIpsecPhase1SaDirection=JuniIpsecPhase1SaDirection, juniIkeSa2LocalIpAddr=juniIkeSa2LocalIpAddr, juniIkePolicyRuleV2Group=juniIkePolicyRuleV2Group, juniIkeIpv4PresharedRouterIdx=juniIkeIpv4PresharedRouterIdx, juniIkePolicyRuleV2RowStatus=juniIkePolicyRuleV2RowStatus, juniRemoteCookie=juniRemoteCookie, PYSNMP_MODULE_ID=juniIkeMIB, juniIkePolicyRuleNegotiationMode=juniIkePolicyRuleNegotiationMode, juniIkePolicyRuleEncryptMethod=juniIkePolicyRuleEncryptMethod, juniIkeIpv4PresharedKeyEntry=juniIkeIpv4PresharedKeyEntry, JuniIkeAuthenticationMethod=JuniIkeAuthenticationMethod, juniIkePolicyRuleV2Table=juniIkePolicyRuleV2Table, juniIkeFqdnPresharedKeyTable=juniIkeFqdnPresharedKeyTable, juniIkeMIBGroups=juniIkeMIBGroups, juniIkeIpv4PresharedKeyStr=juniIkeIpv4PresharedKeyStr, juniIkeFqdnPresharedRouterIndex=juniIkeFqdnPresharedRouterIndex, juniIkeIpv4PresharedKeyTable=juniIkeIpv4PresharedKeyTable, juniIkeCompliance2=juniIkeCompliance2, juniIkePolicyRulePfsGroup=juniIkePolicyRulePfsGroup, juniIkeObjects=juniIkeObjects, JuniIkeNegotiationMode=JuniIkeNegotiationMode, juniIkePolicyRuleRowStatus=juniIkePolicyRuleRowStatus, juniIkePolicyRuleV2EncryptMethod=juniIkePolicyRuleV2EncryptMethod)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
7653,
9346,
12,
40,
7336,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
14490,
14,
67,
615,
... | 2.381318 | 9,774 |
# Based on CreateQuadGeometry example
import simpl
import simplpy as d3d
import simpl_helpers as sh
import simpl_test_dirs as sd
if __name__ == '__main__':
CreateQuadGeometryTest()
| [
2,
13403,
319,
13610,
4507,
324,
10082,
15748,
1672,
198,
198,
11748,
7106,
198,
11748,
7106,
9078,
355,
288,
18,
67,
198,
11748,
7106,
62,
16794,
364,
355,
427,
198,
11748,
7106,
62,
9288,
62,
15908,
82,
355,
45647,
198,
198,
361,
... | 3.032787 | 61 |
"""
PDF Enlarge
===========
Scale the contents of a PDF document 2x.
- Split each page into two parts along the perpendicular axis (e.g. one
A4 page into two A5 pages).
- Scale each split part 2 times (e.g. A5 -> A4)
- Merge scaled parts into a new PDF document.
"""
import sys
def main() -> int:
"""Console script entry point function."""
print('PDF Enlarge')
return 0
if __name__ == '__main__':
sys.exit(main())
| [
37811,
198,
20456,
2039,
11664,
198,
2559,
18604,
198,
198,
29990,
262,
10154,
286,
257,
12960,
3188,
362,
87,
13,
198,
198,
12,
27758,
1123,
2443,
656,
734,
3354,
1863,
262,
47190,
16488,
357,
68,
13,
70,
13,
530,
198,
220,
317,
19... | 3.027586 | 145 |
# Copyright (C) 2011-2012 Canonical Services Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from twisted.internet.protocol import (
DatagramProtocol, Factory)
from twisted.protocols.basic import LineReceiver
class StatsDServerProtocol(DatagramProtocol):
"""A Twisted-based implementation of the StatsD server.
Data is received via UDP for local aggregation and then sent to a Graphite
server via TCP.
"""
def datagramReceived(self, data, (host, port)):
"""Process received data and store it locally."""
if data == self.monitor_message:
# Send the expected response to the
# monitoring agent.
return self.transport.write(
self.monitor_response, (host, port))
return self.transport.reactor.callLater(
0, self.processor.process, data)
class StatsDTCPServerProtocol(LineReceiver):
"""A Twisted-based implementation of the StatsD server over TCP.
Data is received via TCP for local aggregation and then sent to a Graphite
server via TCP.
"""
def lineReceived(self, data):
"""Process received data and store it locally."""
if data == self.monitor_message:
# Send the expected response to the
# monitoring agent.
return self.transport.write(self.monitor_response)
return self.transport.reactor.callLater(
0, self.processor.process, data)
| [
2,
15069,
357,
34,
8,
2813,
12,
6999,
19507,
605,
6168,
12052,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
198,
2,
257,
4866,
286,
428,
3788,
290,
3917,
10314,
3696,
357,
1169,
198... | 3.159178 | 779 |
from __future__ import print_function
# -*- coding: utf-8 -*-
from six.moves import range
from acq4.util import Qt
import acq4.pyqtgraph as pg
#import acq4.pyqtgraph.TreeWidget as TreeWidget
import acq4.util.flowchart.EventDetection as FCEventDetection
import acq4.util.debug as debug
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
2237,
13,
76,
5241,
1330,
2837,
198,
198,
6738,
936,
80,
19,
13,
22602,
1330,
33734,
198,
11748,
936,
80... | 2.813725 | 102 |
from shakenfist.client import apiclient
from shakenfist_ci import base
| [
6738,
27821,
69,
396,
13,
16366,
1330,
2471,
291,
75,
1153,
198,
198,
6738,
27821,
69,
396,
62,
979,
1330,
2779,
628
] | 3.318182 | 22 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 2.891892 | 37 |
# -*- coding: utf-8 -*- # Lint as: python3
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The deployments command group for the Apigee CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class Deployments(base.Group):
"""Manage deployments of Apigee API proxies in runtime environments."""
detailed_help = {
"DESCRIPTION": """
{description}
`{command}` contains commands for enumerating and checking the status
of deployments of proxies to runtime environments.
""",
"EXAMPLES": """
To list all deployments for the active Cloud Platform project, run:
$ {command} list
To list all deployments in a particular environment of a particular
Apigee organization, run:
$ {command} list --environment=ENVIRONMENT --organization=ORG_NAME
To get the status of a specific deployment as a JSON object, run:
$ {command} describe --api=API_NAME --environment=ENVIRONMENT --format=json
""",
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
1303,
406,
600,
355,
25,
21015,
18,
198,
2,
15069,
12131,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
35... | 3.133087 | 541 |
import math
def code(radius: int = 15, num_sides: int = 7) -> str:
"""
Example G-code module, regular polygon.
Please simulate first, before milling.
"""
result = ["G90"]
for i in range(num_sides):
x = radius - radius * math.cos(2 * math.pi * i / num_sides)
y = radius * math.sin(2 * math.pi * i / num_sides)
result.append("G0 X%0.2f Y%0.2f" % (
x,
y
))
result.append("G0 X0 Y0")
return '\n'.join(result)
| [
11748,
10688,
628,
198,
4299,
2438,
7,
42172,
25,
493,
796,
1315,
11,
997,
62,
82,
1460,
25,
493,
796,
767,
8,
4613,
965,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
17934,
402,
12,
8189,
8265,
11,
3218,
7514,
14520,
13,
... | 2.083682 | 239 |
import numpy as np
from scipy.io import loadmat
from scipy.fft import fft
def make_point_clouds(vertices, temperature):
"""[summary]
Parameters
----------
vertices : ndarray of shape (n_vertices, 3)
Vertices of the mesh as 3D points.
temperature : ndarray of shape (n_vertices, n_functions)
A collection of functions defined on the vertices of the mesh, such as SIHKS or other spectral descriptor.
Returns
-------
point_clouds : ndarray of shape (n_functions, n_vertices, 4)
Collection of point clouds formed by concatenating the vertex coordinates and the corresponding
temperature for each given function.
"""
n_vertices = vertices.shape[0]
n_functions = temperature.shape[1]
# Repeat points n_function times [n_functions, n_vertices, 3]
vertices = np.tile(vertices, reps=(n_functions, 1))
vertices = vertices.reshape(n_functions, n_vertices, 3)
# Reshape temperature [n_functions, n_vertices, 1]
temperature = np.expand_dims(temperature.T, axis=-1)
# Concatenate coordinates and temperature
point_clouds = np.concatenate([vertices, temperature], axis=-1)
return point_clouds
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
629,
541,
88,
13,
952,
1330,
3440,
6759,
198,
6738,
629,
541,
88,
13,
487,
83,
1330,
277,
701,
628,
198,
198,
4299,
787,
62,
4122,
62,
17721,
82,
7,
1851,
1063,
11,
5951,
2599,
198,
... | 2.803279 | 427 |
from argparse import (
Action,
Namespace,
HelpFormatter,
OPTIONAL,
REMAINDER,
SUPPRESS,
PARSER,
ONE_OR_MORE,
ZERO_OR_MORE,
)
from .actions import *
from .cli import *
from .core import *
from .deprecated import *
from .formatters import *
from .jsonnet import *
from .jsonschema import *
from .optionals import *
from .signatures import *
from .typehints import *
from .util import *
__version__ = '3.19.2'
| [
6738,
1822,
29572,
1330,
357,
198,
220,
220,
220,
7561,
11,
198,
220,
220,
220,
28531,
10223,
11,
198,
220,
220,
220,
10478,
8479,
1436,
11,
198,
220,
220,
220,
39852,
2849,
1847,
11,
198,
220,
220,
220,
22657,
32,
12115,
1137,
11,
... | 2.602339 | 171 |
#!@PYTHON_EXECUTABLE@
#ckwg +28
# Copyright 2011-2013 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
if __name__ == '__main__':
import os
import sys
if not len(sys.argv) == 4:
test_error("Expected three arguments")
sys.exit(1)
testname = sys.argv[1]
os.chdir(sys.argv[2])
sys.path.append(sys.argv[3])
from sprokit.test.test import *
run_test(testname, find_tests(locals()))
| [
2,
0,
31,
47,
56,
4221,
1340,
62,
6369,
2943,
3843,
17534,
31,
198,
2,
694,
86,
70,
1343,
2078,
198,
2,
15069,
2813,
12,
6390,
416,
10897,
1574,
11,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290... | 3.124386 | 611 |
import hashlib
from corehq.apps.integration.models import (
DialerSettings,
GaenOtpServerSettings,
HmacCalloutSettings,
)
| [
11748,
12234,
8019,
198,
198,
6738,
4755,
71,
80,
13,
18211,
13,
18908,
1358,
13,
27530,
1330,
357,
198,
220,
220,
220,
21269,
263,
26232,
11,
198,
220,
220,
220,
12822,
268,
46,
34788,
10697,
26232,
11,
198,
220,
220,
220,
367,
202... | 2.679245 | 53 |
#!/usr/bin/env python
import os, sys, warnings, logging
logger = logging.getLogger(__name__)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dashboard.settings")
from django.core.management import execute_from_command_line
# Regex for which modules to ignore warnings from
IGNORE_MODULES = 'djangosaml2'
warnings.filterwarnings("ignore", module=IGNORE_MODULES, category=DeprecationWarning)
execute_from_command_line(sys.argv)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
11,
25064,
11,
14601,
11,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
... | 2.847059 | 170 |
import torch
from torch import autograd, nn
import torch.nn.functional as functional
import utils
| [
11748,
28034,
198,
6738,
28034,
1330,
1960,
519,
6335,
11,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
10345,
198,
198,
11748,
3384,
4487,
628,
198,
220,
220,
220,
220,
220,
220,
220,
220,
628
] | 3 | 37 |
import os
basedir = os.path.abspath(os.path.dirname(__file__)) | [
11748,
28686,
198,
3106,
343,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008
] | 2.48 | 25 |
import re
import yaml
def split_markup(markup):
"""
Given some markup, return a tuple containing the decoded data and the
template code.
"""
match = re.search(r'\n={3,}\n', markup)
if match:
start, end = match.span()
ctx = yaml.load(markup[:start])
template_code = markup[end:]
else:
ctx = {}
template_code = markup
return ctx, template_code
| [
11748,
302,
198,
198,
11748,
331,
43695,
628,
198,
4299,
6626,
62,
4102,
929,
7,
4102,
929,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
11259,
617,
41485,
11,
1441,
257,
46545,
7268,
262,
875,
9043,
1366,
290,
262,
198,
220... | 2.335196 | 179 |
# CÓDIGO DE PRUEBA
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pymongo import MongoClient
from bson.objectid import ObjectId
from nltk.tokenize import TweetTokenizer
from datetime import datetime as dt
try:
client = MongoClient()
print("Connected to MongoDB\n")
except pymongo.errors.ConnectionFailure as e:
print("Could not connect to MongoDB",e)
db = client.sept2017_db
tweets = db.sept2017_collection
fecha1 = "Tue Sep 19 00:00:01 +0000 2017"
fecha_inicio = dt.strptime(fecha1,'%a %b %d %H:%M:%S +0000 %Y')
print(fecha_inicio)
fecha2 = "Tue Sep 26 23:59:59 +0000 2017"
fecha_fin = dt.strptime(fecha2,'%a %b %d %H:%M:%S +0000 %Y')
print(fecha_fin)
tknzr = TweetTokenizer(preserve_case=False, # Convertir a minúsculas
reduce_len=True, # Reducir caracteres repetidos
strip_handles=False) # Mostrar @usuarios
id_tweet = tknzr.tokenize(tweets.find_one({'_id': ObjectId('59e55c370e0bab1d26640d94') }).get('text'))
#fecha_tweet = tknzr.tokenize(tweets.find_one({"created_at": {"$gte":dt(2017,9,19),"$lt":dt(2017,9,26)}}).get('text'))
print(id_tweet)
| [
2,
327,
127,
241,
35,
3528,
46,
5550,
4810,
8924,
4339,
198,
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
67... | 2.131579 | 532 |
__all__ = ["PairedNote"]
class PairedNote:
"""This class holds a DebitNote together with its matching
CreditNote(s)
"""
def __init__(self, debit_note, credit_note):
"""Construct from the matching pair of notes"""
from Acquire.Accounting import CreditNote as _CreditNote
from Acquire.Accounting import DebitNote as _DebitNote
if not isinstance(debit_note, _DebitNote):
raise TypeError("The debit_note must be of type DebitNote!")
if not isinstance(credit_note, _CreditNote):
raise TypeError("The credit_note must be of type CreditNote!")
if credit_note.debit_note_uid() != debit_note.uid():
raise ValueError(
"You must pair up DebitNote (%s) with a "
"matching CreditNote (%s)" % (debit_note.uid(), credit_note.debit_note_uid())
)
self._debit_note = debit_note
self._credit_note = credit_note
def debit_note(self):
"""Return the debit note"""
return self._debit_note
def credit_note(self):
"""Return the credit note"""
return self._credit_note
@staticmethod
def create(debit_notes, credit_notes):
"""Return a list of PairedNotes that pair together the passed
debit notes and credit notes
"""
try:
debit_note = debit_notes[0]
except:
debit_notes = [debit_notes]
if not isinstance(credit_notes, dict):
try:
credit_notes[0]
except:
credit_notes = [credit_notes]
d = {}
for credit_note in credit_notes:
d[credit_note.debit_note_uid()] = credit_note
credit_notes = d
pairs = []
missing = []
for debit_note in debit_notes:
if debit_note.uid() in credit_notes:
pairs.append(PairedNote(debit_note, credit_notes[debit_note.uid()]))
else:
missing.append(debit_note)
if len(missing) > 0 or len(credit_notes) != len(debit_notes):
from Acquire.Accounting import UnbalancedLedgerError
raise UnbalancedLedgerError(
"Cannot balance the ledger as the debit do not match the "
"credits %s versus %s" % (str(debit_notes), str(credit_notes))
)
return pairs
| [
834,
439,
834,
796,
14631,
47,
9820,
6425,
8973,
628,
198,
4871,
350,
9820,
6425,
25,
198,
220,
220,
220,
37227,
1212,
1398,
6622,
257,
1024,
2545,
6425,
1978,
351,
663,
12336,
198,
220,
220,
220,
10504,
6425,
7,
82,
8,
198,
220,
... | 2.200366 | 1,093 |
from nltk import *
from nltk.corpus import *
ans = 'Y'
while((ans=='y')|(ans=='Y')):
input1 = input("Write a scentence")
lang = detect_language(input1)
print(input1+"\t Langauge: "+ lang)
ans = input("to do this again enter (y/Y)")
| [
6738,
299,
2528,
74,
1330,
1635,
201,
198,
6738,
299,
2528,
74,
13,
10215,
79,
385,
1330,
1635,
201,
198,
504,
796,
705,
56,
6,
201,
198,
4514,
19510,
504,
855,
6,
88,
11537,
91,
7,
504,
855,
6,
56,
11537,
2599,
201,
198,
220,
... | 2.265487 | 113 |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../test/integration/pyxl/"))
from pyxl.codec.register import pyxl_transform_string
for i in xrange(100):
# pyxl/tests/test_if_1.py
pyxl_transform_string(
'''
from pyxl import html
def test():
assert str(<frag><if cond="{True}">true</if><else>false</else></frag>) == "true"
assert str(<frag><if cond="{False}">true</if><else>false</else></frag>) == "false"
''')
for i in xrange(100):
# pyxl/tests/test_curlies_in_attrs_1.py
pyxl_transform_string(
'''
from pyxl import html
def test():
# kannan thinks this should be different
assert str(<frag><img src="{'foo'}" /></frag>) == """<img src="foo" />"""
''')
for i in xrange(100):
# pyxl/tests/test_rss.py
pyxl_transform_string(
'''
import datetime
from unittest2 import TestCase
from pyxl import html
from pyxl import rss
class RssTests(TestCase):
def test_decl(self):
decl = <rss.rss_decl_standalone />.to_string()
self.assertEqual(decl, u'<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>')
def test_rss(self):
r = <rss.rss version="2.0" />.to_string()
self.assertEqual(r, u'<rss version="2.0"></rss>')
def test_channel(self):
c = (
<rss.rss version="2.0">
<rss.channel />
</rss.rss>
).to_string()
self.assertEqual(c, u'<rss version="2.0"><channel></channel></rss>')
def test_channel_with_required_elements(self):
channel = (
<frag>
<rss.rss_decl_standalone />
<rss.rss version="2.0">
<rss.channel>
<rss.title>A Title</rss.title>
<rss.link>https://www.dropbox.com</rss.link>
<rss.description>A detailed description</rss.description>
</rss.channel>
</rss.rss>
</frag>
)
expected = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0">
<channel>
<title>A Title</title>
<link>https://www.dropbox.com</link>
<description>A detailed description</description>
</channel>
</rss>
"""
expected = u''.join(l.strip() for l in expected.splitlines())
self.assertEqual(channel.to_string(), expected)
def test_channel_with_optional_elements(self):
channel = (
<frag>
<rss.rss_decl_standalone />
<rss.rss version="2.0">
<rss.channel>
<rss.title>A Title</rss.title>
<rss.link>https://www.dropbox.com</rss.link>
<rss.description>A detailed description</rss.description>
<rss.ttl>60</rss.ttl>
<rss.language>en-us</rss.language>
</rss.channel>
</rss.rss>
</frag>
)
expected = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0">
<channel>
<title>A Title</title>
<link>https://www.dropbox.com</link>
<description>A detailed description</description>
<ttl>60</ttl>
<language>en-us</language>
</channel>
</rss>
"""
expected = u''.join(l.strip() for l in expected.splitlines())
self.assertEqual(channel.to_string(), expected)
def test_item_with_common_elements(self):
item = (
<rss.item>
<rss.title>Item Title</rss.title>
<rss.description>
{html.rawhtml('<![CDATA[ ')}
This is a really interesting description
{html.rawhtml(']]>')}
</rss.description>
<rss.link>https://www.dropbox.com/somewhere</rss.link>
</rss.item>
)
expected = """
<item>
<title>Item Title</title>
<description><![CDATA[ This is a really interesting description ]]></description>
<link>https://www.dropbox.com/somewhere</link>
</item>
"""
expected = u''.join(l.strip() for l in expected.splitlines())
self.assertEqual(item.to_string(), expected)
def test_guid(self):
self.assertEqual(<rss.guid>foo</rss.guid>.to_string(), u'<guid>foo</guid>')
self.assertEqual(<rss.guid is-perma-link="{False}">foo</rss.guid>.to_string(),
u'<guid isPermaLink="false">foo</guid>')
self.assertEqual(<rss.guid is-perma-link="{True}">foo</rss.guid>.to_string(),
u'<guid isPermaLink="true">foo</guid>')
def test_date_elements(self):
dt = datetime.datetime(2013, 12, 17, 23, 54, 14)
self.assertEqual(<rss.pubDate date="{dt}" />.to_string(),
u'<pubDate>Tue, 17 Dec 2013 23:54:14 GMT</pubDate>')
self.assertEqual(<rss.lastBuildDate date="{dt}" />.to_string(),
u'<lastBuildDate>Tue, 17 Dec 2013 23:54:14 GMT</lastBuildDate>')
def test_rss_document(self):
dt = datetime.datetime(2013, 12, 17, 23, 54, 14)
dt2 = datetime.datetime(2013, 12, 18, 11, 54, 14)
doc = (
<frag>
<rss.rss_decl_standalone />
<rss.rss version="2.0">
<rss.channel>
<rss.title>A Title</rss.title>
<rss.link>https://www.dropbox.com</rss.link>
<rss.description>A detailed description</rss.description>
<rss.ttl>60</rss.ttl>
<rss.language>en-us</rss.language>
<rss.lastBuildDate date="{dt}" />
<rss.item>
<rss.title>Item Title</rss.title>
<rss.description>
{html.rawhtml('<![CDATA[ ')}
This is a really interesting description
{html.rawhtml(']]>')}
</rss.description>
<rss.link>https://www.dropbox.com/somewhere</rss.link>
<rss.pubDate date="{dt}" />
<rss.guid is-perma-link="{False}">123456789</rss.guid>
</rss.item>
<rss.item>
<rss.title>Another Item</rss.title>
<rss.description>
{html.rawhtml('<![CDATA[ ')}
This is another really interesting description
{html.rawhtml(']]>')}
</rss.description>
<rss.link>https://www.dropbox.com/nowhere</rss.link>
<rss.pubDate date="{dt2}" />
<rss.guid is-perma-link="{False}">ABCDEFGHIJ</rss.guid>
</rss.item>
</rss.channel>
</rss.rss>
</frag>
)
expected = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<rss version="2.0">
<channel>
<title>A Title</title>
<link>https://www.dropbox.com</link>
<description>A detailed description</description>
<ttl>60</ttl>
<language>en-us</language>
<lastBuildDate>Tue, 17 Dec 2013 23:54:14 GMT</lastBuildDate>
<item>
<title>Item Title</title>
<description><![CDATA[ This is a really interesting description ]]></description>
<link>https://www.dropbox.com/somewhere</link>
<pubDate>Tue, 17 Dec 2013 23:54:14 GMT</pubDate>
<guid isPermaLink="false">123456789</guid>
</item>
<item>
<title>Another Item</title>
<description><![CDATA[ This is another really interesting description ]]></description>
<link>https://www.dropbox.com/nowhere</link>
<pubDate>Wed, 18 Dec 2013 11:54:14 GMT</pubDate>
<guid isPermaLink="false">ABCDEFGHIJ</guid>
</item>
</channel>
</rss>
"""
expected = ''.join(l.strip() for l in expected.splitlines())
self.assertEqual(doc.to_string(), expected)
''')
| [
11748,
28686,
198,
11748,
25064,
198,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
366,
40720,
9288,
14,
18908,
1358,
14,
9078,
87,
75,
30487,
4008,
198,
... | 1.878815 | 4,423 |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
from future.utils import with_metaclass
standard_library.install_aliases()
from abc import ABCMeta
from abc import abstractmethod
class Env(with_metaclass(ABCMeta, object)):
"""RL learning environment.
This serves a minimal interface for RL agents.
"""
@abstractmethod
@abstractmethod
@abstractmethod
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
2003,
1330,
3210,
... | 3.493243 | 148 |
#!/usr/bin/env python3
import os
import sys
import argparse
import logging
from typing import List, Optional, Union, Dict, Tuple
from datetime import datetime, timedelta
from sqlalchemy import create_engine
import rx
import rx.operators as ops
from rx.subject import AsyncSubject, Subject, BehaviorSubject, ReplaySubject
from rx.core.observable import Observable
from typing import List, Optional, NoReturn
from collections import defaultdict
from dateutil.parser import parse
import numpy as np
import pandas as pd
from ibapi import wrapper
from ibapi.common import TickerId, BarData
from ibapi.client import EClient
from ibapi.contract import Contract
from ibapi.utils import iswrapper
ContractList = List[Contract]
BarDataList = List[BarData]
OptionalDate = Optional[datetime]
def make_download_path(args: argparse.Namespace, contract: Contract) -> str:
"""Make path for saving csv files.
Files to be stored in base_directory/<security_type>/<size>/<symbol>/
"""
path = os.path.sep.join([
args.base_directory,
args.security_type,
args.size.replace(" ", "_"),
contract.symbol,
])
return path
if __name__ == "__main__":
main()
# download_bars.py --size "5 min" --start-date 20110804 --end-date 20110904 AAPL
# download_bars.py --size "1 day" --duration "1 Y" --end-date 20210808 ABNB
# stated @ 2021-08-04 23:35:45.267262
# end @ 2021-08-04 23:35:46.107792 | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
18931,
198,
6738,
19720,
1330,
7343,
11,
32233,
11,
4479,
11,
360,
713,
11,
309,
29291,
198,
6738,
4818,
... | 2.970954 | 482 |
# Line Ghanem 27280076
# Anthony Iatropoulos 40028246
# Mikael Samvelian 40003178
import re
# V = 0
# V = 1
# V = 2
# helper function
| [
2,
6910,
402,
7637,
368,
2681,
2078,
405,
4304,
198,
2,
9953,
314,
265,
1773,
19537,
7337,
2078,
26912,
198,
2,
17722,
3010,
3409,
626,
666,
30123,
18,
23188,
198,
198,
11748,
302,
628,
220,
220,
220,
1303,
569,
796,
657,
628,
220,
... | 2.384615 | 65 |
from time import time
from threading import Lock
class PaceMaker(object):
'''
Implementation of https://en.wikipedia.org/wiki/Token_bucket#Algorithm
Args:
no_token_sleep_in_seconds: Seconds to nap when there are no tokens to spend.
Defaults to 1.
'''
@classmethod
def set_rate_per_second(self, rate_per_second):
'''
Sets the rate/sec
Args:
rate_per_second: rate/sec
'''
with self.lock:
self.rate_per_second = rate_per_second
self.tokens = self.rate_per_second
def consume(self, tokens=1):
'''
Consumes the tokens and returns sleep time
Args:
tokens: Number of tokens to consume. Defaults to 1
'''
with self.lock:
# if the rate_per_second is set to 0, throw exception
if self.rate_per_second == 0:
raise Exception('Cannot use the pace maker without setting the heart rate_per_second!!!')
now = self._epoch_in_seconds()
time_lapsed = now - self.last
self.last = now
# Add rate_per_second x seconds lapsed
self.tokens += time_lapsed * self.rate_per_second
# If the bucket is full, discard
if self.tokens > self.rate_per_second:
self.tokens = self.rate_per_second
# subtract the number of tokens being consumed
self.tokens -= tokens
if self.tokens > 0:
# Calculate the pace based on the tokens left
return round(self.tokens/self.rate_per_second, 3)
else:
return self.no_token_sleep_in_seconds
| [
6738,
640,
1330,
640,
198,
6738,
4704,
278,
1330,
13656,
628,
198,
4871,
44111,
48890,
7,
15252,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
220,
220,
220,
220,
46333,
286,
3740,
1378,
268,
13,
31266,
13,
2398,
14,
1546... | 2.038813 | 876 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import warnings
import pandas as pd
import pytest
from covsirphy import PolicyMeasures
from covsirphy import SIRF, Scenario
# Skip this test at this time
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
14601,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
9288,
198,
6738,
39849,
82,
343,
6883,
1330... | 2.861111 | 72 |
from django.contrib import admin
from .models import Animal, Raca, NomeCientifico, Origem, NomePopular
admin.site.register(Animal)
admin.site.register(Raca)
admin.site.register(Origem)
admin.site.register(NomeCientifico)
admin.site.register(NomePopular)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
201,
198,
6738,
764,
27530,
1330,
13792,
11,
371,
22260,
11,
399,
462,
34,
1153,
811,
78,
11,
6913,
368,
11,
399,
462,
16979,
934,
201,
198,
201,
198,
28482,
13,
15654,
13,
30238,
7,
... | 2.71134 | 97 |
password="pbkdf2(1000,20,sha512)$a4e3c0f67fc691f5$70638e315cc3ad9d7aed2f01edba2f36a5cbe486"
| [
28712,
2625,
40842,
74,
7568,
17,
7,
12825,
11,
1238,
11,
26270,
25836,
8,
3,
64,
19,
68,
18,
66,
15,
69,
3134,
16072,
49541,
69,
20,
3,
35402,
2548,
68,
27936,
535,
18,
324,
24,
67,
22,
8432,
17,
69,
486,
276,
7012,
17,
69,
... | 1.703704 | 54 |
#
# PySNMP MIB module Wellfleet-GRE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Wellfleet-GRE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:40:19 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, Unsigned32, Bits, IpAddress, ObjectIdentity, Counter32, iso, NotificationType, Gauge32, Counter64, MibIdentifier, ModuleIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "Bits", "IpAddress", "ObjectIdentity", "Counter32", "iso", "NotificationType", "Gauge32", "Counter64", "MibIdentifier", "ModuleIdentity", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
wfGreGroup, = mibBuilder.importSymbols("Wellfleet-COMMON-MIB", "wfGreGroup")
wfGreInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1), )
if mibBuilder.loadTexts: wfGreInterfaceTable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreInterfaceTable.setDescription('Parameters in wfGreInterfaceTable')
wfGreInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1), ).setIndexNames((0, "Wellfleet-GRE-MIB", "wfGreIntfIpAddr"), (0, "Wellfleet-GRE-MIB", "wfGreIntfCct"))
if mibBuilder.loadTexts: wfGreInterfaceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreInterfaceEntry.setDescription('An entry in wfGreTable.')
wfGreIntfCreate = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("create", 1), ("delete", 2))).clone('create')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfCreate.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfCreate.setDescription('Create/Delete parameter. Default is created. Users perform a set operation on this object in order to create/delete an wfGreEntry instance.')
wfGreIntfEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfEnable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfEnable.setDescription('Enable/Disable parameter. Default is enabled. Users perform a set operation on this object in order to enable/disable GRE .')
wfGreIntfState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("init", 3), ("notpres", 4))).clone('notpres')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreIntfState.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfState.setDescription('The current state of GRE interface.')
wfGreIntfIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreIntfIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfIpAddr.setDescription('The IP interface to run GRE on.')
wfGreIntfCct = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreIntfCct.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfCct.setDescription('Circuit number of the GRE interface')
wfGreIntfStatsEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfStatsEnable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfStatsEnable.setDescription('Enable/Disable parameter. Default is enabled. Users perform a set operation on this object in order to enable/disable mib statistics for GRE interface.')
wfGreIntfDebugLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 1, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfGreIntfDebugLevel.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreIntfDebugLevel.setDescription('A parameter to specify which messages to be printed in to the log.')
wfGreTunnelTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2), )
if mibBuilder.loadTexts: wfGreTunnelTable.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelTable.setDescription('Parameters in wfGreTunnelTable')
wfGreTunnelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1), ).setIndexNames((0, "Wellfleet-GRE-MIB", "wfGreTunnelLocalAddr"), (0, "Wellfleet-GRE-MIB", "wfGreTunnelPeerAddress"), (0, "Wellfleet-GRE-MIB", "wfGreTunnelLocalIndex"))
if mibBuilder.loadTexts: wfGreTunnelEntry.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelEntry.setDescription('An entry in wfGreTunnelTable.')
wfGreTunnelLocalAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelLocalAddr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelLocalAddr.setDescription('IP Address of local interface.')
wfGreTunnelLocalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelLocalIndex.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelLocalIndex.setDescription('This tunnel index is assigned by the GRE process. It is used to index into the GRE mapping table.')
wfGreTunnelType = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("generic", 1), ("udas", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelType.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelType.setDescription('Indicate whether a tunnel peer has assigned a tunnel ID.')
wfGreTunnelId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelId.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelId.setDescription('This tunnel ID is assigned by the tunnel peer.')
wfGreTunnelPeerAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPeerAddress.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPeerAddress.setDescription('Address of the tunnel peer.')
wfGreRemotePayloadAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(10, 10)).setFixedLength(10)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreRemotePayloadAddress.setStatus('deprecated')
if mibBuilder.loadTexts: wfGreRemotePayloadAddress.setDescription('The address of the remote node.')
wfGreTunnelState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelState.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelState.setDescription('The state of the GRE tunnel.')
wfGreVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreVersion.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreVersion.setDescription('Reserved for future use')
wfGreProtoMap = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 9), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreProtoMap.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreProtoMap.setDescription('This will be set to the protocol type of the payload. GRE_PROTO_IP 1 GRE_PROTO_IPX 2')
wfGreTunnelPktsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsTx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsTx.setDescription('Number of packets transmitted ')
wfGreTunnelPktsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsRx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsRx.setDescription('Number of packets received ')
wfGreTunnelBytesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelBytesTx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelBytesTx.setDescription('Number of bytes transmitted ')
wfGreTunnelBytesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelBytesRx.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelBytesRx.setDescription('Number of bytes received')
wfGreTunnelPktsTxDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsTxDropped.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsTxDropped.setDescription('Number of outgoing packets dropped')
wfGreTunnelPktsRxDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelPktsRxDropped.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelPktsRxDropped.setDescription('Number of incoming packets dropped')
wfGreTunnelXsumErr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelXsumErr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelXsumErr.setDescription('Number of inbound checksum errors')
wfGreTunnelSeqNumErr = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelSeqNumErr.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelSeqNumErr.setDescription('Number of sequence errors')
wfGreTunnelMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 3, 20, 2, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4500))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfGreTunnelMtu.setStatus('mandatory')
if mibBuilder.loadTexts: wfGreTunnelMtu.setDescription('The MTU of the GRE tunnel')
mibBuilder.exportSymbols("Wellfleet-GRE-MIB", wfGreTunnelPktsTxDropped=wfGreTunnelPktsTxDropped, wfGreTunnelLocalIndex=wfGreTunnelLocalIndex, wfGreTunnelEntry=wfGreTunnelEntry, wfGreTunnelState=wfGreTunnelState, wfGreTunnelType=wfGreTunnelType, wfGreTunnelBytesRx=wfGreTunnelBytesRx, wfGreTunnelPktsRxDropped=wfGreTunnelPktsRxDropped, wfGreIntfCreate=wfGreIntfCreate, wfGreIntfState=wfGreIntfState, wfGreIntfIpAddr=wfGreIntfIpAddr, wfGreTunnelPeerAddress=wfGreTunnelPeerAddress, wfGreTunnelLocalAddr=wfGreTunnelLocalAddr, wfGreIntfEnable=wfGreIntfEnable, wfGreRemotePayloadAddress=wfGreRemotePayloadAddress, wfGreIntfStatsEnable=wfGreIntfStatsEnable, wfGreTunnelBytesTx=wfGreTunnelBytesTx, wfGreProtoMap=wfGreProtoMap, wfGreTunnelXsumErr=wfGreTunnelXsumErr, wfGreTunnelId=wfGreTunnelId, wfGreTunnelTable=wfGreTunnelTable, wfGreInterfaceTable=wfGreInterfaceTable, wfGreVersion=wfGreVersion, wfGreTunnelPktsTx=wfGreTunnelPktsTx, wfGreInterfaceEntry=wfGreInterfaceEntry, wfGreIntfDebugLevel=wfGreIntfDebugLevel, wfGreTunnelPktsRx=wfGreTunnelPktsRx, wfGreIntfCct=wfGreIntfCct, wfGreTunnelMtu=wfGreTunnelMtu, wfGreTunnelSeqNumErr=wfGreTunnelSeqNumErr)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
3894,
33559,
12,
28934,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
14490,
14,
67,
615,
4756... | 2.617186 | 4,783 |
nFeatures = {
"EE": 182, #22,
"SD": 126, #24,
"SR": 330, #23,
"OF": 0,
"CR": 97, #22, #91
}
params = {
'SD': {
'MoveThroughDoorway_Method2':
{'r2': {'nOutputs': 4, 'nInputs': 150, 'pos': -1, 'decoder': DecodeRobot_SD}},
'Recover_Method1':
{'r2': {'nOutputs': 4, 'nInputs': 128, 'pos': -1, 'decoder': DecodeRobot_SD}},
},
'OF': {
'Order_Method1': {
'm': {'nOutputs': 5, 'nInputs': 613, 'pos': -2, 'decoder': DecodeMachine_OF},
'objList': {'nOutputs': 10, 'nInputs': 613, 'pos': -1, 'decoder': DecodeObjList_OF},
},
'Order_Method2': {
'm': {'nOutputs': 5, 'nInputs': 613, 'pos': -3, 'decoder': DecodeMachine_OF},
'objList': {'nOutputs': 10, 'nInputs': 613, 'pos': -2, 'decoder': DecodeObjList_OF},
'p': {'nOutputs': 4, 'nInputs': 613, 'pos': -1, 'decoder': DecodePalate_OF},
},
'PickupAndLoad_Method1':
{'r': {'nOutputs': 7, 'nInputs': 637, 'pos': -1, 'decoder': DecodeRobot_OF}},
'UnloadAndDeliver_Method1':
{'r': {'nOutputs': 7, 'nInputs': 625, 'pos': -1, 'decoder': DecodeRobot_OF}},
'MoveToPallet_Method1':
{'r': {'nOutputs': 7, 'nInputs': 633, 'pos': -1, 'decoder': DecodeRobot_OF}},
},
} | [
198,
77,
23595,
796,
1391,
198,
220,
220,
220,
366,
6500,
1298,
28581,
11,
1303,
1828,
11,
198,
220,
220,
220,
366,
10305,
1298,
19710,
11,
1303,
1731,
11,
198,
220,
220,
220,
366,
12562,
1298,
25508,
11,
1303,
1954,
11,
198,
220,
... | 1.811066 | 741 |
#!/usr/bin/python3
import pandas
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
19798,
292,
628
] | 2.5 | 14 |
import sys
print("----------------------- pythonsv project init -----------------------")
sys.path.append(".")
sys.path.append(r'C:\PythonSV\icelakex')
from icelakex.starticx import *
from icelakex.toolext import pysv_config
from svtools.common.pysv_config import CFG
if __name__ == '__main__':
itp, sv = pythonsv_init()
# x = cpuid(0x7,0)
# print(x)
# print("ECX data: %s" % (hex(x['ecx'])))
# ECX_BIN = "{0:08b}".format(x['ecx'])
# print(ECX_BIN[-14] == "1")
# ECX_DEC = x['ecx']
# MASK_14 = 1 << 14
# print(ECX_DEC, MASK_14)
# EXPECT_MASK_14 = 0b1 << 14
# print((ECX_DEC & MASK_14) == EXPECT_MASK_14)
# x = cpuid(0x80000008,0)
# print(x)
# post_80 = itp.threads[0].port(0x80)
# post_81 = itp.threads[0].port(0x81)
# print("POST CODE: %s%s" % (post_80, post_81))
x = itp.threads[0].msr(0x981)
print("MSR 0x981: %s" % x)
# pythonsv_exit()
| [
11748,
25064,
198,
198,
4798,
7203,
19351,
6329,
279,
5272,
684,
85,
1628,
2315,
41436,
6329,
4943,
198,
17597,
13,
6978,
13,
33295,
7203,
19570,
198,
17597,
13,
6978,
13,
33295,
7,
81,
6,
34,
7479,
37906,
50,
53,
59,
291,
417,
539,... | 2.037528 | 453 |
import sys
import threading
import code
import socket
import code
import debugthread
import io
foo="xxx"
s = socket.socket(socket.AF_INET)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('127.0.0.1', 4711))
s.listen(0)
while True:
ss, addr = s.accept()
stdin =io.TextIOWrapper(ss.makefile('rb', 0), encoding='utf8')
stdout = io.TextIOWrapper(ss.makefile('wb', 0), encoding='utf8')
debugthread.shell(stdin, stdout, stdout, locals())
| [
11748,
25064,
198,
11748,
4704,
278,
198,
11748,
2438,
198,
11748,
17802,
198,
11748,
2438,
198,
11748,
14257,
16663,
198,
11748,
33245,
198,
198,
21943,
2625,
31811,
1,
198,
198,
82,
796,
17802,
13,
44971,
7,
44971,
13,
8579,
62,
1268,... | 2.452632 | 190 |
import pytest
from tartiflette.language.ast import InterfaceTypeDefinitionNode
@pytest.mark.parametrize(
"interface_type_definition_node,other,expected",
[
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
Ellipsis,
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionNameBis",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescriptionBis",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectivesBis",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFieldsBis",
location="interfaceTypeDefinitionLocation",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocationBis",
),
False,
),
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
True,
),
],
)
@pytest.mark.parametrize(
"interface_type_definition_node,expected",
[
(
InterfaceTypeDefinitionNode(
name="interfaceTypeDefinitionName",
description="interfaceTypeDefinitionDescription",
directives="interfaceTypeDefinitionDirectives",
fields="interfaceTypeDefinitionFields",
location="interfaceTypeDefinitionLocation",
),
"InterfaceTypeDefinitionNode("
"description='interfaceTypeDefinitionDescription', "
"name='interfaceTypeDefinitionName', "
"directives='interfaceTypeDefinitionDirectives', "
"fields='interfaceTypeDefinitionFields', "
"location='interfaceTypeDefinitionLocation')",
)
],
)
| [
11748,
12972,
9288,
198,
198,
6738,
35842,
361,
21348,
13,
16129,
13,
459,
1330,
26491,
6030,
36621,
19667,
628,
198,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
198,
220,
220,
220,
366,
39994,
62,
4906,
62,
46758,
... | 2.308144 | 2,554 |
"""
Main Gofra programming language source code.
"""
__author__ = "Kirill Zhosul @kirillzhosul"
__license__ = "MIT"
from typing import Generator
from os.path import basename
from sys import argv
import gofra
from gofra.core.danger import *
from gofra.core.stack import Stack
# MAJOR WARNING FOR ALL READERS.
# This code is not refactored,
# currently I am working on refactoring and splitting into the gofra module,
# there is a lot of stuff, that will be reworked.
# Also, want to say that bytecode is not finished, and interpretation will be
# converted to gofra.core.vm that will be run bytecode for own,
# as internal interpretation method (if you want to use C++ VM which is may not be finished also yet,
# see that https://github.com/gofralang/vm/)
# Lexer.
def lexer_tokenize(lines: List[str], file_parent: str) -> Generator[Token, None, None]:
    """ Tokenizes the given source lines into a stream of Tokens.

    :param lines: source lines of the file to tokenize.
    :param file_parent: path of the source file; only its basename is kept for token locations.
    :yields: Token objects of type CHARACTER, STRING, INTEGER, KEYWORD or WORD
             (comments are consumed and produce no tokens).
    """

    # Check that there is no changes in token type.
    assert len(TokenType) == 6, "Please update implementation after adding new TokenType!"

    # Keep only the basename (used in (file, line, column) locations).
    file_parent = basename(file_parent)

    # Current line index.
    current_line_index = 0

    # Get lines count.
    lines_count = len(lines)

    # Check that there is more than zero lines.
    if lines_count == 0:
        # If there is no lines.

        # Error.
        gofra.core.errors.message_verbosed(Stage.LEXER, (file_parent, 1, 1), "Error",
                                           "There is no lines found in the given file "
                                           "are you given empty file?", True)

    while current_line_index < lines_count:
        # Loop over lines.

        # Get line.
        current_line = lines[current_line_index]

        # Find first non-space char.
        current_collumn_index = gofra.core.lexer.find_collumn(current_line, 0, lambda char: not char.isspace())

        # Get current line length.
        current_line_length = len(current_line)

        # Column where the current token ends (updated per token below).
        current_collumn_end_index = 0

        while current_collumn_index < current_line_length:
            # Iterate over line.

            # Get the location.
            current_location = (file_parent, current_line_index + 1, current_collumn_index + 1)

            if current_line[current_collumn_index] == EXTRA_CHAR:
                # If we got character quote*.

                # Index of the column end.
                # (Trying to find closing quote*
                # NOTE(review): an escaped quote inside the literal (e.g. '\'') would end
                # this scan early, before unescape() runs — confirm intended.
                current_collumn_end_index = gofra.core.lexer.find_collumn(current_line, current_collumn_index + 1,
                                                                          lambda char: char == EXTRA_CHAR)

                if current_collumn_end_index >= len(current_line) or \
                        current_line[current_collumn_end_index] != EXTRA_CHAR:
                    # If we got not EXTRA_CHAR or exceed current line length.

                    # Error.
                    gofra.core.errors.message_verbosed(Stage.LEXER, current_location, "Error",
                                                       "There is unclosed character literal. "
                                                       f"Do you forgot to place `{EXTRA_CHAR}`?", True)

                # Get current token text.
                current_token_text = current_line[current_collumn_index + 1: current_collumn_end_index]

                # Get current char value (UTF-8 bytes of the unescaped literal).
                current_char_value = gofra.core.lexer.unescape(current_token_text).encode("UTF-8")

                if len(current_char_value) != 1:
                    # If there is 0 or more than 1 characters*.
                    # (Multi-byte UTF-8 characters are rejected here as well.)

                    # Error.
                    gofra.core.errors.message_verbosed(Stage.LEXER, current_location, "Error",
                                                       "Unexpected number of characters in the character literal."
                                                       "Only one character is allowed in character literal", True)

                # Return character token (value is the single byte as an int).
                yield Token(
                    type=TokenType.CHARACTER,
                    text=current_token_text,
                    location=current_location,
                    value=current_char_value[0]
                )

                # Find first non-space char.
                current_collumn_index = gofra.core.lexer.find_collumn(current_line, current_collumn_end_index + 1,
                                                                      lambda char: not char.isspace())
            elif current_line[current_collumn_index] == EXTRA_STRING:
                # If this is string.

                # String buffer for strings.
                current_string_buffer = ""

                while current_line_index < len(lines):
                    # While we don`t reach end of the lines.

                    # Get string start.
                    string_start_collumn_index = current_collumn_index

                    if current_string_buffer == "":
                        # If we not start writing string buffer.

                        # Increment by one for quote.
                        string_start_collumn_index += len(EXTRA_STRING)
                    else:
                        # If we started.

                        # Just grab line.
                        current_line = lines[current_line_index]

                    # Get string end.
                    string_end_collumn_index = gofra.core.lexer.find_string_end(current_line, string_start_collumn_index)

                    if string_end_collumn_index >= len(current_line) or \
                            current_line[string_end_collumn_index] != EXTRA_STRING:
                        # If got end of current line, or not found closing string.

                        # Add current line.
                        # NOTE(review): lines are concatenated without the newline character,
                        # so a multi-line string loses its line breaks — confirm intended.
                        current_string_buffer += current_line[string_start_collumn_index:]

                        # Reset and move next line.
                        current_line_index += 1
                        current_collumn_index = 0
                    else:
                        # If current line.

                        # Add final buffer.
                        current_string_buffer += current_line[string_start_collumn_index:string_end_collumn_index]
                        current_collumn_end_index = string_end_collumn_index

                        # End lexing string.
                        break

                if current_line_index >= len(lines):
                    # If we exceed current lines length.

                    # Error.
                    gofra.core.errors.message_verbosed(Stage.LEXER, current_location, "Error",
                                                       "There is unclosed string literal. "
                                                       f"Do you forgot to place `{EXTRA_STRING}`?", True)

                # Error?.
                # NOTE(review): this asserts the char at the token *start* column; for a string
                # continued onto later lines, current_collumn_index was reset to 0 — confirm
                # that multi-line strings satisfy this assertion.
                assert current_line[current_collumn_index] == EXTRA_STRING, "Got non string closing character!"

                # Increase end index.
                current_collumn_end_index += 1

                # Get current token text.
                current_token_text = current_string_buffer

                # Return string token.
                yield Token(
                    type=TokenType.STRING,
                    text=current_token_text,
                    location=current_location,
                    value=gofra.core.lexer.unescape(current_token_text)
                )

                # Find first non-space char.
                current_collumn_index = gofra.core.lexer.find_collumn(current_line, current_collumn_end_index,
                                                                      lambda char: not char.isspace())
            else:
                # Index of the column end.
                current_collumn_end_index = gofra.core.lexer.find_collumn(current_line, current_collumn_index,
                                                                          lambda char: char.isspace())

                # Get current token text.
                current_token_text = current_line[current_collumn_index: current_collumn_end_index]

                try:
                    # Try convert token integer.
                    current_token_integer = int(current_token_text)
                except ValueError:
                    # If there is invalid value for integer.

                    if current_token_text in KEYWORD_NAMES_TO_TYPE:
                        # If this is keyword.

                        # Return keyword token.
                        yield Token(
                            type=TokenType.KEYWORD,
                            text=current_token_text,
                            location=current_location,
                            value=KEYWORD_NAMES_TO_TYPE[current_token_text]
                        )
                    else:
                        # Not keyword.

                        # If this is comment - break.
                        # TODO: Try to fix something like 0//0 (comment not at the start) will lex not as should.
                        if current_token_text.startswith(EXTRA_COMMENT):
                            break

                        # Return word token.
                        yield Token(
                            type=TokenType.WORD,
                            text=current_token_text,
                            location=current_location,
                            value=current_token_text
                        )
                else:
                    # If all ok.

                    # Return token.
                    yield Token(
                        type=TokenType.INTEGER,
                        text=current_token_text,
                        location=current_location,
                        value=current_token_integer
                    )

                # Find first non-space char.
                current_collumn_index = gofra.core.lexer.find_collumn(current_line, current_collumn_end_index,
                                                                      lambda char: not char.isspace())

        # Increment current line.
        current_line_index += 1
# Parser.
def parser_parse(tokens: List[Token], context: ParserContext, path: str):
    """ Parses tokens (as produced by lexer_tokenize()) into operators inside the parser context.

    :param tokens: tokens to parse, in source order.
    :param context: parser context that accumulates resulting operators and parser state.
    :param path: path of the source file, used only for error reporting.
    """

    # Check that there is no changes in operator type.
    assert len(OperatorType) == 10, "Please update implementation after adding new OperatorType!"

    # Check that there is no changes in keyword type.
    assert len(Keyword) == 8, "Please update implementation after adding new Keyword!"

    # Check that there is no changes in token type.
    assert len(TokenType) == 6, "Please update implementation after adding new TokenType!"

    # Tokens are consumed from the end with pop(), so reverse them first.
    reversed_tokens: List[Token] = list(reversed(tokens))

    # Name -> definition / memory / variable tables.
    definitions: Dict[str, Definition] = dict()
    memories: Dict[str, Memory] = dict()
    variables: Dict[str, Variable] = dict()

    # Byte offsets of the next variable / memory allocation.
    variables_offset = 0
    memories_offset = 0

    if len(reversed_tokens) == 0:
        gofra.core.errors.message_verbosed(Stage.PARSER, (basename(path), 1, 1), "Error",
                                           "There is no tokens found, are you given empty file?", True)

    while len(reversed_tokens) > 0:
        # While there is any token.

        # Get current token.
        current_token: Token = reversed_tokens.pop()

        if current_token.type == TokenType.WORD:
            assert isinstance(current_token.value, str), "Type error, lexer level error?"

            if current_token.value in INTRINSIC_NAMES_TO_TYPE:
                # Intrinsic call.
                context.operators.append(Operator(
                    type=OperatorType.INTRINSIC,
                    token=current_token,
                    operand=INTRINSIC_NAMES_TO_TYPE[current_token.value]
                ))
                context.operator_index += 1
                continue

            if current_token.text in definitions:
                # Expand definition tokens in place (macro-style expansion).
                reversed_tokens += reversed(definitions[current_token.text].tokens)
                continue

            if current_token.text in memories:
                # A memory name pushes its pointer offset.
                memory = memories[current_token.text]
                context.operators.append(Operator(
                    type=OperatorType.PUSH_INTEGER,
                    token=current_token,
                    operand=memory.ptr_offset
                ))
                context.operator_index += 1
                continue

            if current_token.text in variables:
                # A variable name pushes its pointer offset.
                variable = variables[current_token.text]
                context.operators.append(Operator(
                    type=OperatorType.PUSH_INTEGER,
                    token=current_token,
                    operand=variable.ptr_offset
                ))
                context.operator_index += 1
                continue

            if current_token.text.startswith(EXTRA_DIRECTIVE):
                # Compiler / linter directive.
                directive = current_token.text[len(EXTRA_DIRECTIVE):]
                if directive == "LINTER_SKIP":
                    if context.directive_linter_skip:
                        gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                           f"Directive `{EXTRA_DIRECTIVE}{directive}` defined twice!", True)
                    context.directive_linter_skip = True
                elif directive == "PYTHON_COMMENTS_SKIP":
                    if context.directive_python_comments_skip:
                        gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                           f"Directive `{EXTRA_DIRECTIVE}{directive}` defined twice!",
                                                           True)
                    context.directive_python_comments_skip = True
                else:
                    if directive.startswith("MEM_BUF_BYTE_SIZE="):
                        # Memory buffer byte size redefinition.

                        # Get directive value from all directive text.
                        directive_value = directive[len("MEM_BUF_BYTE_SIZE="):]

                        # Get new memory size
                        try:
                            new_memory_bytearray_size = int(directive_value)
                        except ValueError:
                            gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                               f"Directive `{EXTRA_DIRECTIVE}{directive}` "
                                                               f"passed invalid size `{directive_value}`!", True)
                        else:
                            # Change size of the bytearray.
                            context.memory_bytearray_size = new_memory_bytearray_size
                    else:
                        gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                           f"Unknown directive `{EXTRA_DIRECTIVE}{directive}`", True)
                continue

            gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                               f"Unknown WORD `{current_token.text}`, "
                                               f"are you misspelled something?", True)
        elif current_token.type == TokenType.INTEGER:
            # Integer literal.
            assert isinstance(current_token.value, int), "Type error, lexer level error?"
            context.operators.append(Operator(
                type=OperatorType.PUSH_INTEGER,
                token=current_token,
                operand=current_token.value
            ))
            context.operator_index += 1
        elif current_token.type == TokenType.STRING:
            # String literal.
            assert isinstance(current_token.value, str), "Type error, lexer level error?"
            context.operators.append(Operator(
                type=OperatorType.PUSH_STRING,
                token=current_token,
                operand=current_token.value
            ))
            context.operator_index += 1
        elif current_token.type == TokenType.CHARACTER:
            # Character literal (pushed as its integer byte value).
            assert isinstance(current_token.value, int), "Type error, lexer level error?"
            context.operators.append(Operator(
                type=OperatorType.PUSH_INTEGER,
                token=current_token,
                operand=current_token.value
            ))
            context.operator_index += 1
        elif current_token.type == TokenType.KEYWORD:
            if current_token.value == Keyword.IF:
                # `if` opens a conditional block; its jump target is patched at `else` / `end`.
                context.operators.append(Operator(
                    type=OperatorType.IF,
                    token=current_token
                ))
                context.memory_stack.append(context.operator_index)
                context.operator_index += 1
            elif current_token.value == Keyword.WHILE:
                # `while` opens a loop condition block; crossreferenced by `do`.
                context.operators.append(Operator(
                    type=OperatorType.WHILE,
                    token=current_token
                ))
                context.memory_stack.append(context.operator_index)
                context.operator_index += 1
            elif current_token.value == Keyword.DO:
                # `do` starts the loop body of the innermost `while`.
                if len(context.memory_stack) == 0:
                    # If there is nothing on the memory stack.
                    gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                       "`do` should used after the `while` block!", True)

                context.operators.append(Operator(
                    type=OperatorType.DO,
                    token=current_token
                ))

                # Get `WHILE` operator from the memory stack.
                block_operator_index = context.memory_stack.pop()
                block_operator = context.operators[block_operator_index]

                if block_operator.type != OperatorType.WHILE:
                    # If this is not while.
                    gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                       "`do` should used after the `while` block!", True)

                # Say that we crossreference WHILE block.
                context.operators[context.operator_index].operand = block_operator_index

                context.memory_stack.append(context.operator_index)
                context.operator_index += 1
            elif current_token.value == Keyword.ELSE:
                if len(context.memory_stack) == 0:
                    # If there is nothing on the memory stack.
                    gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                       "`else` should used after the `if` block!", True)

                # Get `IF` operator from the memory stack.
                block_operator_index = context.memory_stack.pop()
                block_operator = context.operators[block_operator_index]

                if block_operator.type == OperatorType.IF:
                    # The IF jumps just past this ELSE when its condition is false.
                    context.operators[block_operator_index].operand = context.operator_index + 1

                    context.memory_stack.append(context.operator_index)
                    context.operators.append(Operator(
                        type=OperatorType.ELSE,
                        token=current_token
                    ))
                    context.operator_index += 1
                else:
                    # If not `IF`.
                    error_location = block_operator.token.location
                    gofra.core.errors.message_verbosed(Stage.PARSER, error_location, "Error",
                                                       "`else` can only used after `if` block!", True)
            elif current_token.value == Keyword.END:
                # `end` closes the innermost `if` / `else` / `do` block.

                if len(context.memory_stack) == 0:
                    # BUGFIX: previously a stray `end` popped an empty stack and crashed
                    # with IndexError instead of reporting a parser error.
                    gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                       "`end` can only close `if`, `else` or `do` block!", True)

                # Get block operator from the stack.
                block_operator_index = context.memory_stack.pop()
                block_operator = context.operators[block_operator_index]

                if block_operator.type == OperatorType.IF:
                    context.operators.append(Operator(
                        type=OperatorType.END,
                        token=current_token
                    ))

                    # Say that start IF block refers to this END block.
                    context.operators[block_operator_index].operand = context.operator_index

                    # Say that this END block refers to next operator index.
                    context.operators[context.operator_index].operand = context.operator_index + 1
                elif block_operator.type == OperatorType.ELSE:
                    context.operators.append(Operator(
                        type=OperatorType.END,
                        token=current_token
                    ))

                    # Say that owner block (If/Else) should jump to us.
                    context.operators[block_operator_index].operand = context.operator_index

                    # Say that we should jump to the next position.
                    context.operators[context.operator_index].operand = context.operator_index + 1
                elif block_operator.type == OperatorType.DO:
                    assert block_operator.operand is not None, "DO operator has unset operand! Parser level error?"
                    assert isinstance(block_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"

                    context.operators.append(Operator(
                        type=OperatorType.END,
                        token=current_token
                    ))

                    # Say that DO crossreference to the WHILE block.
                    context.operators[context.operator_index].operand = block_operator.operand

                    # Say that WHILE should jump in the DO body.
                    context.operators[block_operator.operand].operand = context.operator_index + 1
                else:
                    # If we call end not after the if, else or do.
                    error_location = block_operator.token.location
                    gofra.core.errors.message_verbosed(Stage.PARSER, error_location, "Error",
                                                       "`end` can only close `if`, `else` or `do` block!", True)

                context.operator_index += 1
            elif current_token.value == Keyword.DEFINE:
                # `define NAME ... end` registers a token-level macro.

                if len(reversed_tokens) == 0:
                    gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                       "`define` should have name after the keyword, "
                                                       "do you has unfinished definition?", True)

                # Get name for definition.
                definition_name = reversed_tokens.pop()

                if definition_name.type != TokenType.WORD:
                    gofra.core.errors.message_verbosed(Stage.PARSER, definition_name.location, "Error",
                                                       "`define` name, should be of type WORD, sorry, but you can`t use something that you give as name for the definition!", True)

                if definition_name.text in definitions:
                    # BUGFIX: the message previously contained a literal unformatted `{}`.
                    gofra.core.errors.message_verbosed(Stage.PARSER, definition_name.location, "Error",
                                                       f"Definition with name {definition_name.text} was already defined!", False)
                    gofra.core.errors.message_verbosed(Stage.PARSER, definitions[definition_name.text].location, "Error",
                                                       "Original definition was here...", True)

                if definition_name.text in INTRINSIC_NAMES_TO_TYPE or definition_name.text in KEYWORD_NAMES_TO_TYPE:
                    # If default item.
                    gofra.core.errors.message_verbosed(Stage.PARSER, definition_name.location, "Error",
                                                       "Can`t define definition with language defined name!", True)

                # Create blank new definition and register it immediately.
                definition = Definition(current_token.location, [])
                definitions[definition_name.text] = definition

                # How many nested blocks still need a closing `end`.
                required_end_count = 0

                while len(reversed_tokens) > 0:
                    # If there is still tokens.

                    # Get new token.
                    current_token = reversed_tokens.pop()

                    if current_token.type == TokenType.KEYWORD:
                        if current_token.text in KEYWORD_NAMES_TO_TYPE:
                            if current_token.text == KEYWORD_TYPES_TO_NAME[Keyword.END]:
                                if required_end_count <= 0:
                                    # This `end` closes the definition itself.
                                    break

                                # Decrease required end counter.
                                required_end_count -= 1

                            if KEYWORD_NAMES_TO_TYPE[current_token.text] in \
                                    (Keyword.IF, Keyword.DEFINE, Keyword.DO):
                                # These keywords require a matching `end`.
                                required_end_count += 1

                            if KEYWORD_NAMES_TO_TYPE[current_token.text] == Keyword.ELSE:
                                # `else` does not require its own `end`.
                                pass
                        else:
                            # Invalid keyword.
                            assert False, "Got invalid keyword!"

                    # Append token.
                    definition.tokens.append(current_token)

                if required_end_count != 0:
                    # BUGFIX (message): "cloing" -> "closing".
                    gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                       f"There is {required_end_count} unclosed blocks, "
                                                       "that requires closing `end` keyword inside `define` definition. ",
                                                       True)

                if not (current_token.type == TokenType.KEYWORD and
                        current_token.text == KEYWORD_TYPES_TO_NAME[Keyword.END]):
                    # If got not end at end of definition.
                    gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                       "`define` should have `end` at the end of definition, "
                                                       "but it was not founded!", True)
            elif current_token.value == Keyword.MEMORY:
                # `memory NAME SIZE end` allocates a named memory region.

                if len(reversed_tokens) == 0:
                    gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                       "`memory` should have name after the keyword, "
                                                       "do you has unfinished memory definition?", True)

                name_token = reversed_tokens.pop()

                if name_token.type != TokenType.WORD:
                    gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
                                                       "`memory` name, should be of type WORD, sorry, but "
                                                       "you can`t use something that you give as name "
                                                       "for the memory!", True)

                # CONSISTENCY FIX: also reject names already taken by variables (the `var`
                # branch already checks all three tables).
                if name_token.text in memories or name_token.text in definitions or name_token.text in variables:
                    # BUGFIX: the collision error is now always fatal; previously a collision
                    # with a memory/variable printed a non-fatal error and silently redefined it.
                    gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
                                                       f"Definition or memory with name {name_token.text} "
                                                       f"was already defined!",
                                                       name_token.text not in definitions)
                    if name_token.text in definitions:
                        gofra.core.errors.message_verbosed(Stage.PARSER, definitions[name_token.text].location,
                                                           "Error", "Original definition was here...", True)

                # TODO: Memory location report.
                if name_token.text in INTRINSIC_NAMES_TO_TYPE or name_token.text in KEYWORD_NAMES_TO_TYPE:
                    # If default item.
                    gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
                                                       "Can`t define memories with language defined name!", True)

                if len(reversed_tokens) <= 0:
                    gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
                                                       "`memory` requires size for memory definition, "
                                                       "which was not given!", True)

                memory_size_token = reversed_tokens.pop()

                if memory_size_token.type != TokenType.INTEGER:
                    gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
                                                       "`var` size, should be of type INTEGER, sorry, but "
                                                       "you can`t use something that you give as size "
                                                       "for the memory!", True)

                # TODO: Proper evaluation.

                # Create blank new memory and advance the allocation offset.
                memory_name = name_token.text
                memories[memory_name] = Memory(memory_name, memory_size_token.value, memories_offset)
                memories_offset += memory_size_token.value

                # BUGFIX: was `if len(reversed_tokens) >= 0`, which is always true and
                # crashed with IndexError on pop() when the file ended right after the size.
                if len(reversed_tokens) > 0:
                    end_token = reversed_tokens.pop()
                    if end_token.type == TokenType.KEYWORD and \
                            end_token.text == KEYWORD_TYPES_TO_NAME[Keyword.END]:
                        continue

                # If got not end at end of definition (or ran out of tokens).
                gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                   "`memory` should have `end` at the end of memory definition, "
                                                   "but it was not founded!", True)
            elif current_token.value == Keyword.VARIABLE:
                # `var NAME` allocates a named variable slot.

                if len(reversed_tokens) == 0:
                    gofra.core.errors.message_verbosed(Stage.PARSER, current_token.location, "Error",
                                                       "`var` should have name after the keyword, "
                                                       "do you has unfinished variable definition?", True)

                name_token = reversed_tokens.pop()

                if name_token.type != TokenType.WORD:
                    gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
                                                       "`var` name, should be of type WORD, sorry, but "
                                                       "you can`t use something that you give as name "
                                                       "for the variable!", True)

                if name_token.text in variables or name_token.text in definitions or name_token.text in memories:
                    # BUGFIX: the collision error is now always fatal (see `memory` branch).
                    gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
                                                       f"Definition or variable with name {name_token.text} "
                                                       f"was already defined!",
                                                       name_token.text not in definitions)
                    if name_token.text in definitions:
                        gofra.core.errors.message_verbosed(Stage.PARSER, definitions[name_token.text].location,
                                                           "Error", "Original definition was here...", True)

                # TODO: Memory / variable location report.
                if name_token.text in INTRINSIC_NAMES_TO_TYPE or name_token.text in KEYWORD_NAMES_TO_TYPE:
                    # If default item.
                    gofra.core.errors.message_verbosed(Stage.PARSER, name_token.location, "Error",
                                                       "Can`t define variable with language defined name!", True)

                # Create blank new variable and advance the allocation offset.
                variable_name = name_token.text
                variables[variable_name] = Variable(variable_name, variables_offset)
                variables_offset += VARIABLE_SIZE
            else:
                # If unknown keyword type.
                assert False, "Unknown keyword type! (How?)"
        else:
            # If unknown operator type.
            assert False, "Unknown operator type! (How?)"

    if len(context.memory_stack) > 0:
        # Some block was never closed: report it at its opening token.
        error_operator = context.operators[context.memory_stack.pop()]
        error_location = error_operator.token.location
        gofra.core.errors.message_verbosed(Stage.PARSER, error_location, "Error",
                                           f"Unclosed block \"{error_operator.token.text}\"!", True)

    if context.directive_linter_skip:
        # Warn loudly when the linter was disabled by directive.
        gofra.core.errors.message_verbosed(Stage.PARSER, (basename(path), 1, 1), "Warning",
                                           "#LINTER_SKIP DIRECTIVE! THIS IS UNSAFE, PLEASE DISABLE IT!")
# Interpretator.
def interpretator_run(source: Source,
                      bytearray_size: int = MEMORY_BYTEARRAY_SIZE):
    """ Interpretates (executes) the operators of the given parsed source.

    All runtime memory lives in a single bytearray: the general purpose
    region (``bytearray_size`` bytes) comes first, followed by an equally
    sized string storage region, followed by the ``memory`` / ``var``
    definition regions.

    :param source: parsed source whose operators are executed.
    :param bytearray_size: size in bytes of the general purpose memory region.
    """
    assert len(OperatorType) == 10, "Please update implementation after adding new OperatorType!"
    assert len(Intrinsic) == 30, "Please update implementation after adding new Intrinsic!"

    # Runtime data stack.
    memory_execution_stack = Stack()

    # Pointers of already-stored strings, keyed by the index of the operator
    # that produced them (each operator stores its string at most once).
    memory_string_pointers: Dict[OPERATOR_ADDRESS, TYPE_POINTER] = dict()
    memory_string_size = bytearray_size
    memory_string_size_ponter = 0  # Bump offset inside the string region.

    # Single backing buffer for every memory region.
    memory_bytearray = bytearray(bytearray_size + memory_string_size + MEMORY_MEMORIES_SIZE + MEMORY_VARIABLES_SIZE)

    operators_count = len(source.operators)
    current_operator_index = 0

    if operators_count == 0:
        # Parsing produced no operators - nothing to execute.
        gofra.core.errors.message_verbosed(Stage.RUNNER, ("__RUNNER__", 1, 1), "Error",
                                           "There is no operators found in given file after parsing, "
                                           "are you given empty file or file without resulting operators?", True)

    while current_operator_index < operators_count:
        # Fetch the operator to execute.
        current_operator: Operator = source.operators[current_operator_index]

        try:
            # Try / Catch to convert unexpected Python errors into runner errors.
            if current_operator.type == OperatorType.PUSH_INTEGER:
                # Push the integer operand onto the stack.
                assert isinstance(current_operator.operand, int), "Type error, parser level error?"

                memory_execution_stack.push(current_operator.operand)
                current_operator_index += 1
            elif current_operator.type == OperatorType.PUSH_STRING:
                # Store the string in the string region (once per operator),
                # then push its pointer and length.
                assert isinstance(current_operator.operand, str), "Type error, parser level error?"

                string_value = current_operator.operand.encode("UTF-8")
                string_length = len(string_value)

                if current_operator_index not in memory_string_pointers:
                    # String was not stored yet.
                    # BUGFIX: check overflow BEFORE writing, and against the
                    # remaining (cumulative) space of the string region instead
                    # of only the single string's length.
                    if memory_string_size_ponter + string_length > memory_string_size:
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           "Trying to push string, when there is memory string buffer overflow!"
                                                           " Try use memory size directive, to increase size!", True)

                    # The string region starts right after the general purpose memory.
                    string_pointer: TYPE_POINTER = memory_string_size + 1 + memory_string_size_ponter
                    memory_string_pointers[current_operator_index] = string_pointer

                    # Write the bytes and advance the bump pointer.
                    memory_bytearray[string_pointer: string_pointer + string_length] = string_value
                    memory_string_size_ponter += string_length

                # Push string pointer, then string length.
                found_string_pointer = memory_string_pointers[current_operator_index]
                memory_execution_stack.push(found_string_pointer)
                memory_execution_stack.push(string_length)

                current_operator_index += 1
            elif current_operator.type == OperatorType.INTRINSIC:
                # Intrinsic operator.
                if current_operator.operand == Intrinsic.PLUS:
                    # [.., b, a] -> [.., b + a]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(operand_b + operand_a)
                elif current_operator.operand == Intrinsic.DIVIDE:
                    # [.., b, a] -> [.., b // a]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(operand_b // operand_a)
                elif current_operator.operand == Intrinsic.MODULUS:
                    # [.., b, a] -> [.., b % a]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(int(operand_b % operand_a))
                elif current_operator.operand == Intrinsic.MINUS:
                    # [.., b, a] -> [.., b - a]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(operand_b - operand_a)
                elif current_operator.operand == Intrinsic.MULTIPLY:
                    # [.., b, a] -> [.., b * a]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(operand_b * operand_a)
                elif current_operator.operand == Intrinsic.EQUAL:
                    # [.., b, a] -> [.., int(b == a)]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(int(operand_b == operand_a))
                elif current_operator.operand == Intrinsic.NOT_EQUAL:
                    # [.., b, a] -> [.., int(b != a)]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(int(operand_b != operand_a))
                elif current_operator.operand == Intrinsic.LESS_THAN:
                    # [.., b, a] -> [.., int(b < a)]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(int(operand_b < operand_a))
                elif current_operator.operand == Intrinsic.GREATER_THAN:
                    # [.., b, a] -> [.., int(b > a)]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(int(operand_b > operand_a))
                elif current_operator.operand == Intrinsic.LESS_EQUAL_THAN:
                    # [.., b, a] -> [.., int(b <= a)]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(int(operand_b <= operand_a))
                elif current_operator.operand == Intrinsic.GREATER_EQUAL_THAN:
                    # [.., b, a] -> [.., int(b >= a)]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(int(operand_b >= operand_a))
                elif current_operator.operand == Intrinsic.SWAP:
                    # [.., b, a] -> [.., a, b]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(operand_a)
                    memory_execution_stack.push(operand_b)
                elif current_operator.operand == Intrinsic.COPY:
                    # [.., a] -> [.., a, a]
                    operand_a = memory_execution_stack.pop()
                    memory_execution_stack.push(operand_a)
                    memory_execution_stack.push(operand_a)
                elif current_operator.operand == Intrinsic.COPY2:
                    # [.., b, a] -> [.., b, a, b, a]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(operand_b)
                    memory_execution_stack.push(operand_a)
                    memory_execution_stack.push(operand_b)
                    memory_execution_stack.push(operand_a)
                elif current_operator.operand == Intrinsic.COPY_OVER:
                    # [.., b, a] -> [.., b, a, b]
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()
                    memory_execution_stack.push(operand_b)
                    memory_execution_stack.push(operand_a)
                    memory_execution_stack.push(operand_b)
                elif current_operator.operand == Intrinsic.DECREMENT:
                    # [.., a] -> [.., a - 1]
                    operand_a = memory_execution_stack.pop()
                    memory_execution_stack.push(operand_a - 1)
                elif current_operator.operand == Intrinsic.INCREMENT:
                    # [.., a] -> [.., a + 1]
                    operand_a = memory_execution_stack.pop()
                    memory_execution_stack.push(operand_a + 1)
                elif current_operator.operand == Intrinsic.FREE:
                    # [.., a] -> [..] (value is discarded).
                    memory_execution_stack.pop()
                elif current_operator.operand == Intrinsic.SHOW:
                    # Pop and print the top of the stack.
                    operand_a = memory_execution_stack.pop()
                    print(operand_a)
                elif current_operator.operand == Intrinsic.MEMORY_WRITE:
                    # Write one byte: operand_b is the address, operand_a the value.
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()

                    # BUGFIX: address == len(memory_bytearray) is also out of
                    # range, so compare with >= instead of >.
                    if operand_b >= len(memory_bytearray):
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Trying to write at memory address {operand_b} "
                                                           f"that overflows memory buffer size {(len(memory_bytearray))}"
                                                           " bytes (MemoryBufferOverflow)", True)
                    elif operand_b < 0:
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Trying to write at memory address {operand_b} "
                                                           f"that underflows memory buffer size {(len(memory_bytearray))}"
                                                           " bytes (MemoryBufferUnderflow)", True)

                    try:
                        memory_bytearray[operand_b] = operand_a
                    except IndexError:
                        # Defensive second line - reached only if the checks
                        # above did not terminate execution.
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Memory buffer (over|under)flow "
                                                           f"(Write to pointer {operand_b} when there is memory buffer "
                                                           f"with size {len(memory_bytearray)} bytes)!", True)
                    except ValueError:
                        # A bytearray cell holds exactly one byte (0..255).
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Memory buffer cell can only contain 1 byte (8 bit) "
                                                           f"that must be in range (0, 256),\nbut you passed number "
                                                           f"{operand_a} which is not fits in the 1 byte cell! (ByteOverflow)",
                                                           True)
                elif current_operator.operand == Intrinsic.MEMORY_WRITE4BYTES:
                    # Write a 32-bit little-endian value at address operand_b.
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()

                    # Convert the value to 4 little-endian bytes.
                    try:
                        operand_a = operand_a.to_bytes(length=4, byteorder="little", signed=(operand_a < 0))
                    except OverflowError:
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Memory buffer cell can only contain 4 byte (32 bit) "
                                                           f"that must be in range (0, 4294967295),\nbut you passed number "
                                                           f"{operand_a} which is not fits in the 4 byte cell! (ByteOverflow)",
                                                           True)

                    # BUGFIX: the last touched index is operand_b + 3, which must
                    # be strictly below the buffer length (>= instead of >).
                    if operand_b + 4 - 1 >= len(memory_bytearray):
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Trying to write 4 bytes to memory address from {operand_b} to "
                                                           f"{operand_b + 4 - 1} "
                                                           f"that overflows memory buffer size {(len(memory_bytearray))}"
                                                           " bytes (MemoryBufferOverflow)", True)
                    elif operand_b < 0:
                        # BUGFIX: the reported range end was "operand_b + 2" for
                        # a 4 byte write; it is operand_b + 3.
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Trying to write at memory address "
                                                           f"from {operand_b} to {operand_b + 4 - 1} "
                                                           f"that underflows memory buffer size {(len(memory_bytearray))}"
                                                           " bytes (MemoryBufferUnderflow)", True)

                    try:
                        memory_bytearray[operand_b:operand_b + 4] = operand_a
                    except IndexError:
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Memory buffer (over|under)flow "
                                                           f"(Write to pointer from "
                                                           f"{operand_b} to {operand_b + 4 - 1} "
                                                           f"when there is memory buffer with size "
                                                           f"{len(memory_bytearray)} bytes)!", True)
                    except ValueError:
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Memory buffer cell can only contain 4 byte (32 bit) "
                                                           f"that must be in range (0, 4294967295),\nbut you passed number "
                                                           f"{operand_a} which is not fits in the 4 byte cell! (ByteOverflow)",
                                                           True)
                elif current_operator.operand == Intrinsic.MEMORY_READ4BYTES:
                    # Read a 32-bit little-endian value from address operand_a.
                    operand_a = memory_execution_stack.pop()

                    # BUGFIX: last read index is operand_a + 3 (>= instead of >).
                    if operand_a + 4 - 1 >= len(memory_bytearray):
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Trying to read from memory address "
                                                           f"{operand_a} to {operand_a + 4 - 1} "
                                                           f"that overflows memory buffer size {(len(memory_bytearray))}"
                                                           " bytes (MemoryBufferOverflow)", True)
                    elif operand_a < 0:
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Trying to read from memory address "
                                                           f"{operand_a} to {operand_a + 4 - 1}"
                                                           f"that underflows memory buffer size {(len(memory_bytearray))}"
                                                           " bytes (MemoryBufferUnderflow)", True)

                    try:
                        memory_bytes = int.from_bytes(memory_bytearray[operand_a:operand_a + 4], byteorder="little")
                    except IndexError:
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Memory buffer (over|under)flow "
                                                           f"(Read from pointer {operand_a} to {operand_a + 4 - 1} "
                                                           f"when there is memory buffer with size "
                                                           f"{len(memory_bytearray)} bytes)!", True)
                    else:
                        memory_execution_stack.push(memory_bytes)
                elif current_operator.operand == Intrinsic.MEMORY_READ:
                    # Read one byte from address operand_a.
                    operand_a = memory_execution_stack.pop()

                    # BUGFIX: address == len(memory_bytearray) is also out of
                    # range (>= instead of >).
                    if operand_a >= len(memory_bytearray):
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Trying to read from memory address {operand_a} "
                                                           f"that overflows memory buffer size {(len(memory_bytearray))}"
                                                           " bytes (MemoryBufferOverflow)", True)
                    elif operand_a < 0:
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Trying to read from memory address {operand_a} "
                                                           f"that underflows memory buffer size {(len(memory_bytearray))}"
                                                           " bytes (MemoryBufferUnderflow)", True)

                    try:
                        memory_byte = memory_bytearray[operand_a]
                    except IndexError:
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Memory buffer (over|under)flow "
                                                           f"(Read from pointer {operand_a} when there is memory buffer "
                                                           f"with size {len(memory_bytearray)} bytes)!", True)
                    else:
                        memory_execution_stack.push(memory_byte)
                elif current_operator.operand == Intrinsic.MEMORY_SHOW_CHARACTERS:
                    # Print operand_a bytes starting at address operand_b,
                    # decoded as UTF-8, without a trailing newline.
                    operand_a = memory_execution_stack.pop()
                    operand_b = memory_execution_stack.pop()

                    memory_string: bytes = b""

                    if operand_b + operand_a > len(memory_bytearray):
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Trying to read from memory address "
                                                           f"from {operand_b} to {operand_b + operand_a} "
                                                           f"that overflows memory buffer size {(len(memory_bytearray))}"
                                                           " bytes (MemoryBufferOverflow)", True)
                    elif operand_b < 0 or operand_a < 0:
                        # BUGFIX: a negative START address also underflows the
                        # buffer, not only a negative length.
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Trying to read from memory address"
                                                           f"from {operand_b} to {operand_b + operand_a} "
                                                           f"that underflows memory buffer size {(len(memory_bytearray))}"
                                                           " bytes (MemoryBufferUnderflow)", True)

                    try:
                        memory_string = memory_bytearray[operand_b: operand_b + operand_a]
                    except IndexError:
                        gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                           f"Memory buffer (over|under)flow "
                                                           f"(Read from {operand_b} to {operand_b + operand_a} "
                                                           f"when there is memory "
                                                           f"buffer with size {len(memory_bytearray)} bytes)!", True)

                    print(memory_string.decode("UTF-8"), end="")
                elif current_operator.operand == Intrinsic.MEMORY_POINTER:
                    # Push the null pointer of the general purpose memory region.
                    memory_execution_stack.push(MEMORY_BYTEARRAY_NULL_POINTER)
                elif current_operator.operand == Intrinsic.NULL:
                    # Push zero.
                    memory_execution_stack.push(0)
                elif current_operator.operand == Intrinsic.IO_READ_STRING:
                    # Read a line from stdin and store it like PUSH_STRING does.
                    string_value = input().encode("UTF-8")
                    string_length = len(string_value)

                    # NOTE(review): the string is only stored on the FIRST
                    # execution of this operator; later executions push the new
                    # length but keep the previously stored bytes - confirm
                    # whether re-reading at the same operator should rewrite.
                    if current_operator_index not in memory_string_pointers:
                        # BUGFIX: check overflow BEFORE writing, cumulatively
                        # (see PUSH_STRING).
                        if memory_string_size_ponter + string_length > memory_string_size:
                            gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                                               "Trying to push I/O string, "
                                                               "when there is memory string buffer overflow! "
                                                               "Try use memory size directive, to increase size!", True)

                        # BUGFIX: use the same string-region base as PUSH_STRING.
                        # The region offset was missing here, so I/O strings were
                        # written into the general purpose memory region and could
                        # collide with user data.
                        string_pointer: TYPE_POINTER = memory_string_size + 1 + memory_string_size_ponter
                        memory_string_pointers[current_operator_index] = string_pointer

                        memory_bytearray[string_pointer: string_pointer + string_length] = string_value
                        memory_string_size_ponter += string_length

                    # Push string pointer, then string length.
                    found_string_pointer = memory_string_pointers[current_operator_index]
                    memory_execution_stack.push(found_string_pointer)
                    memory_execution_stack.push(string_length)
                elif current_operator.operand == Intrinsic.IO_READ_INTEGER:
                    # Read an integer from stdin; push -1 on invalid input.
                    try:
                        integer_value = int(input())
                    except ValueError:
                        integer_value = -1
                    memory_execution_stack.push(integer_value)
                else:
                    # If unknown intrinsic type.
                    assert False, "Unknown instrinsic! (How?)"

                current_operator_index += 1
            elif current_operator.type == OperatorType.IF:
                # Conditional jump: falsy top-of-stack jumps to the operand
                # address stored by the parser; truthy falls into the branch.
                operand_a = memory_execution_stack.pop()

                assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"

                if operand_a == 0:
                    current_operator_index = current_operator.operand
                else:
                    current_operator_index += 1
            elif current_operator.type == OperatorType.ELSE:
                # Unconditional jump over the else-branch to the stored address.
                assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
                current_operator_index = current_operator.operand
            elif current_operator.type == OperatorType.DO:
                # Loop condition check: falsy jumps past the matching END,
                # truthy falls into the loop body.
                operand_a = memory_execution_stack.pop()

                assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"

                if operand_a == 0:
                    # The operand references the operator whose own operand is
                    # the jump target after the loop.
                    end_jump_operator_index = source.operators[current_operator.operand].operand
                    assert isinstance(end_jump_operator_index, OPERATOR_ADDRESS), \
                        "Type error, parser level error?"
                    current_operator_index = int(end_jump_operator_index)
                else:
                    current_operator_index += 1
            elif current_operator.type == OperatorType.WHILE:
                # Loop head: just fall into the condition expression.
                current_operator_index += 1
            elif current_operator.type == OperatorType.END:
                # Jump to the address stored by the parser.
                assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
                current_operator_index = current_operator.operand
            elif current_operator.type == OperatorType.DEFINE:
                # Definitions must be expanded away before the runner stage.
                assert False, "Got definition operator at runner stage, parser level error?"
            elif current_operator.type == OperatorType.MEMORY:
                assert False, "Got memory operator at runner stage, parser level error?"
            else:
                # If unknown operator type.
                assert False, "Unknown operator type! (How?)"
        except IndexError:
            # Popping from an empty Stack raises IndexError.
            gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                               f"Stack error! This is may caused by popping from empty stack!"
                                               f"Do you used {EXTRA_DIRECTIVE}LINTER_SKIP directive? IndexError, (From: "
                                               f"{current_operator.token.text})", True)
        except KeyboardInterrupt:
            # User aborted the interpretation.
            gofra.core.errors.message_verbosed(Stage.RUNNER, current_operator.token.location, "Error",
                                               "Interpretation was stopped by keyboard interrupt!", True)

    if len(memory_execution_stack) > 0:
        # Leftover values usually indicate unbalanced pushes / pops.
        gofra.core.errors.message_verbosed(Stage.RUNNER, ("__runner__", 1, 1), "Warning",
                                           "Stack is not empty after running the interpretation!")
# Linter.
def linter_type_check(source: Source):
""" Linter static type check. """
# TODO: IF/WHILE anylyse fixes.
# Check that there is no new operator type.
assert len(OperatorType) == 10, "Please update implementation after adding new OperatorType!"
# Check that there is no new instrinsic type.
assert len(Intrinsic) == 30, "Please update implementation after adding new Intrinsic!"
# Create empty linter stack.
memory_linter_stack = Stack()
# Get source operators count.
operators_count = len(source.operators)
# Current operator index from the source.
current_operator_index = 0
# Check that there is more than zero operators in context.
if operators_count == 0:
# If there is no operators in the final parser context.
# Error.
gofra.core.errors.message_verbosed(Stage.LINTER, ("__linter__", 1, 1), "Error",
"There is no operators found in given file after parsing, "
"are you given empty file or file without resulting operators?", True)
while current_operator_index < operators_count:
# While we not run out of the source operators list.
# Get current operator from the source.
current_operator: Operator = source.operators[current_operator_index]
# Grab our operator
if current_operator.type == OperatorType.PUSH_INTEGER:
# PUSH INTEGER operator.
# Type check.
assert isinstance(current_operator.operand, int), "Type error, lexer level error?"
# Push operand type to the stack.
memory_linter_stack.push(int)
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.PUSH_STRING:
# PUSH STRING operator.
# Type check.
assert isinstance(current_operator.operand, str), "Type error, lexer level error?"
# Push operand types to the stack.
memory_linter_stack.push(int) # String size.
memory_linter_stack.push(TYPE_POINTER) # String pointer.
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.MEMORY:
assert False, "Got memory operator at linter stage, parser level error?"
elif current_operator.type == OperatorType.INTRINSIC:
# Intrinsic operator.
if current_operator.operand == Intrinsic.PLUS:
# Intristic plus operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.DIVIDE:
# Intristic divide operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MODULUS:
# Intristic modulus operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MINUS:
# Intristic minus operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MULTIPLY:
# Intristic multiply operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.EQUAL:
# Intristic equal operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.NOT_EQUAL:
# Intristic not equal operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.LESS_THAN:
# Intristic less than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.GREATER_THAN:
# Intristic greater than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.LESS_EQUAL_THAN:
# Intristic less equal than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.GREATER_EQUAL_THAN:
# Intristic greater equal than operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.SWAP:
# Intristic swap operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Push swapped to the stack.
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_b)
elif current_operator.operand == Intrinsic.COPY:
# Intristic copy operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Push copy to the stack.
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_a)
elif current_operator.operand == Intrinsic.COPY2:
# Intristic copy2 operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Push copy to the stack.
memory_linter_stack.push(operand_b)
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_b)
memory_linter_stack.push(operand_a)
elif current_operator.operand == Intrinsic.COPY_OVER:
# Intristic copy over operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Push copy to the stack.
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_a)
memory_linter_stack.push(operand_b)
elif current_operator.operand == Intrinsic.DECREMENT:
# Intristic decrement operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.INCREMENT:
# Intristic increment operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.FREE:
# Intristic free operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Free operand.
memory_linter_stack.pop()
elif current_operator.operand == Intrinsic.SHOW:
# Intristic show operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
elif current_operator.operand == Intrinsic.MEMORY_WRITE:
# Intristic memory write operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
elif current_operator.operand == Intrinsic.MEMORY_WRITE4BYTES:
# Intristic memory write 4 bytes operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Check type.
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
elif current_operator.operand == Intrinsic.MEMORY_READ4BYTES:
# Intristic memory read 4 bytes operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MEMORY_READ:
# Intristic memory read operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
cli_argument_type_error_message(current_operator, 2, operand_a, int, True)
# Push to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.MEMORY_SHOW_CHARACTERS:
# Intristic memory show bytes as chars operator.
# Check stack size.
if len(memory_linter_stack) < 2:
cli_no_arguments_error_message(current_operator, True)
# Get both operands.
operand_a = memory_linter_stack.pop()
operand_b = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_POINTER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
if operand_b != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 2, operand_b, int, True)
elif current_operator.operand == Intrinsic.MEMORY_POINTER:
# Intristic memory pointer operator.
# Push pointer to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.NULL:
# Intristic null operator.
# Push pointer to the stack.
memory_linter_stack.push(int)
elif current_operator.operand == Intrinsic.IO_READ_STRING:
# I/O read string operator.
# Push operand types to the stack.
memory_linter_stack.push(int) # String size.
memory_linter_stack.push(TYPE_POINTER) # String pointer.
elif current_operator.operand == Intrinsic.IO_READ_INTEGER:
# I/O read integer operator.
# Push operand types to the stack.
memory_linter_stack.push(int) # Integer.
else:
# If unknown instrinsic type.
assert False, "Got unexpected / unknon intrinsic type! (How?)"
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.IF:
# IF operator.
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Increment operator index.
# This is makes jump into the if branch.
current_operator_index += 1
elif current_operator.type == OperatorType.ELSE:
# ELSE operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the operator operand.
# As this is ELSE operator, we should have index + 1, index!
current_operator_index = current_operator.operand
elif current_operator.type == OperatorType.WHILE:
# WHILE operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Increase operator index.
current_operator_index += 1
elif current_operator.type == OperatorType.DO:
# DO operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Check stack size.
if len(memory_linter_stack) < 1:
cli_no_arguments_error_message(current_operator, True)
# Get operand.
operand_a = memory_linter_stack.pop()
# Check type.
if operand_a != TYPE_INTEGER:
cli_argument_type_error_message(current_operator, 1, operand_a, int, True)
# Endif jump operator index.
end_jump_operator_index = source.operators[current_operator.operand].operand
# Type check.
assert isinstance(end_jump_operator_index, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the END from WHILE.
current_operator_index = int(end_jump_operator_index)
elif current_operator.type == OperatorType.END:
# END operator.
# Type check.
assert isinstance(current_operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
# Jump to the operator operand.
# As this is END operator, we should have index + 1, index!
current_operator_index = current_operator.operand
elif current_operator.type == OperatorType.DEFINE:
assert False, "Got definition operator at linter stage, parser level error?"
else:
assert False, "Got unexpected / unknon operator type! (How?)"
if len(memory_linter_stack) != 0:
# If there is any in the stack.
# Get last operator token location.
location: LOCATION = source.operators[current_operator_index - 1].token.location
# Error message.
gofra.core.errors.message_verbosed(Stage.LINTER, location, "Error",
f"Stack is not empty at the type checking stage! "
f"(There is {len(memory_linter_stack)} elements when should be 0)", True)
# Source.
def load_source_from_file(file_path: str) -> tuple[Source, ParserContext]:
    """ Load file, then return ready source and context for it. (Tokenized, Parsed, Linted). """
    # Pull the raw text lines out of the file on disk.
    file_handle, _ = gofra.core.other.try_open_file(file_path, "r", True, encoding="UTF-8")
    raw_lines = file_handle.readlines()
    file_handle.close()

    # Fresh parser context for this translation unit.
    context = ParserContext()

    # Lexing stage: turn raw text into a token stream.
    tokens = list(lexer_tokenize(raw_lines, file_path))
    if not tokens:
        gofra.core.errors.message_verbosed(Stage.LEXER, (basename(file_path), 1, 1), "Error",
                                           "There is no tokens found in given file, are you given empty file?", True)

    # Parsing stage: fills the context with operators.
    parser_parse(tokens, context, file_path)

    # Wrap the parsed operators into a source container.
    resulting_source = Source(context.operators)

    # Linting stage, unless the file disabled it via directive.
    assert isinstance(context.directive_linter_skip, bool), "Expected linter skip directive to be boolean."
    if not context.directive_linter_skip:
        linter_type_check(resulting_source)

    return resulting_source, context
# Python.
def python_generate(source: Source, context: ParserContext, path: str):
    """ Generates Python source code from the source operators and writes it to `path + ".py"`.

    The generated script emulates the Gofra stack machine with a plain Python list,
    optionally allocating a bytearray for memory operators and a string buffer for strings.

    :param source: Source container with the operators to translate.
    :param context: Parser context (directives and memory size settings).
    :param path: Base output path; resulting file is `path + ".py"`.
    """

    # Check that there is no changes in operator type or intrinsic.
    assert len(OperatorType) == 10, "Please update implementation for python generation after adding new OperatorType!"
    assert len(Intrinsic) == 28, "Please update implementation for python generationg after adding new Intrinsic!"

    def __update_indent(value: int):
        """ Updates indent level by given (possibly negative) delta and rebuilds the indent string. """
        # Update level.
        nonlocal current_indent_level  # type: ignore
        current_indent_level += value

        # Update indent string (one tab per level).
        nonlocal current_indent  # type: ignore
        current_indent = "\t" * current_indent_level

    def __write_footer():
        """ Write footer: inserts memory / string-buffer allocation blocks at positions remembered by the header. """
        # Trick: strings live inside the memory bytearray, so enabling either feature forces both blocks.
        nonlocal current_bytearray_should_written, current_string_buffer_should_written
        if current_bytearray_should_written or current_string_buffer_should_written:
            current_string_buffer_should_written = True
            current_bytearray_should_written = True

        if current_bytearray_should_written:
            # If we should write bytearray block.

            # Allocate bytearray.
            current_lines.insert(current_bytearray_insert_position,
                                 f"memory = bytearray("
                                 f"{context.memory_bytearray_size} + strings_size"
                                 f")")

            # Comment allocation.
            # NOTE: inserting at the same position puts this comment line *before* the allocation above.
            if not directive_skip_comments:
                current_lines.insert(current_bytearray_insert_position,
                                     "# Allocate memory buffer (memory + strings)"
                                     "(As you called memory operators): \n")

            # Warn user about using byte operations in python compilation.
            gofra.core.errors.message("Warning", "YOU ARE USING MEMORY OPERATIONS, THAT MAY HAVE EXPLICIT BEHAVIOUR! "
                                                 "IT IS MAY HARDER TO CATCH ERROR IF YOU RUN COMPILED VERSION "
                                                 "(NOT INTERPRETATED)")

        if current_string_buffer_should_written:
            # If we should write string buffer block.

            # Push string function (interned per-operator-index so a repeated push reuses its pointer).
            current_lines.insert(current_string_buffer_insert_position,
                                 "\ndef stack_push_string(stack_str, op_index): \n"
                                 "\tstr_len = len(stack_str)\n"
                                 "\tif op_index not in strings_pointers:\n"
                                 "\t\tglobal strings_size_pointer\n"
                                 "\t\tptr = strings_size + 1 + strings_size_pointer\n"
                                 "\t\tstrings_pointers[op_index] = ptr\n"
                                 "\t\tmemory[ptr: ptr + str_len] = stack_str\n"
                                 "\t\tstrings_size_pointer += str_len\n"
                                 "\t\tif str_len > strings_size:\n"
                                 "\t\t\tprint(\""
                                 "ERROR! Trying to push string, "
                                 "when there is memory string buffer overflow! "
                                 "Try use memory size directive, to increase size!"
                                 "\")\n"
                                 "\t\t\texit(1)\n"
                                 "\tfsp = strings_pointers[op_index]\n"
                                 "\treturn fsp, str_len\n"
                                 )

            # Allocate string buffer.
            current_lines.insert(current_string_buffer_insert_position,
                                 f"strings_pointers = dict()\n"
                                 f"strings_size = {context.memory_bytearray_size}\n"
                                 f"strings_size_pointer = 0")

            # Comment allocation.
            if not directive_skip_comments:
                current_lines.insert(current_string_buffer_insert_position,
                                     "# Allocate strings buffer "
                                     "(As you used strings): \n")

    def __write_header():
        """ Writes header: stack allocation plus remembered insert positions the footer fills in later. """
        # Write auto-generated mention.
        if not directive_skip_comments:
            current_lines.append("# This file is auto-generated by Gofra-Language python subcommand! \n\n")

        # Write stack initialization element.
        if not directive_skip_comments:
            current_lines.append("# Allocate stack (As is Gofra is Stack-Based Language): \n")
        current_lines.append("stack = []\n")

        # Update bytearray insert position.
        nonlocal current_bytearray_insert_position
        current_bytearray_insert_position = len(current_lines)

        # Update string buffer insert position.
        nonlocal current_string_buffer_insert_position
        current_string_buffer_insert_position = len(current_lines)

        # Write file and expression comments.
        if not directive_skip_comments:
            current_lines.append("\n\n")
            current_lines.append(f"# File ({basename(path)}): \n")
            current_lines.append(f"# Expressions: \n")

        # Update while insert position.
        nonlocal current_while_insert_position
        current_while_insert_position = len(current_lines)

        # Write source header.
        if not directive_skip_comments:
            current_lines.append("# Source:\n")

    def __write_operator_intrinsic(operator: Operator):
        """ Writes intrinsic operator code into the output.

        NOTE(review): the body reads `current_operator` from the enclosing scope rather than
        the `operator` parameter; both refer to the same object at the single call site.
        """
        # Check that this is intrinsic operator.
        assert operator.type == OperatorType.INTRINSIC, "Non-INTRINSIC operators " \
                                                        "should be written using __write_operator()!"

        # Type check.
        assert isinstance(current_operator.operand, Intrinsic), f"Type error, parser level error?"

        nonlocal current_bytearray_should_written  # type: ignore

        if current_operator.operand == Intrinsic.PLUS:
            # Intristic plus operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(operand_b + operand_a)")
        elif current_operator.operand == Intrinsic.MINUS:
            # Intristic minus operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(operand_b - operand_a)")
        elif current_operator.operand == Intrinsic.INCREMENT:
            # Intristic increment operator.

            # Write operator data.
            write("stack.append(stack.pop() + 1)")
        elif current_operator.operand == Intrinsic.DECREMENT:
            # Intristic decrement operator.

            # Write operator data.
            write("stack.append(stack.pop() - 1)")
        elif current_operator.operand == Intrinsic.MULTIPLY:
            # Intristic multiply operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(operand_b * operand_a)")
        elif current_operator.operand == Intrinsic.DIVIDE:
            # Intristic divide operator (floor division, matching integer-only stack values).

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(operand_b // operand_a)")
        elif current_operator.operand == Intrinsic.MODULUS:
            # Intristic modulus operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write(f"stack.append(int(operand_b % operand_a))")  # TODO: Check %, remove or left int()
        elif current_operator.operand == Intrinsic.EQUAL:
            # Intristic equal operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(int(operand_b == operand_a))")
        elif current_operator.operand == Intrinsic.GREATER_EQUAL_THAN:
            # Intristic greater equal than operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(int(operand_b >= operand_a))")
        elif current_operator.operand == Intrinsic.GREATER_THAN:
            # Intristic greater than operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(int(operand_b > operand_a))")
        elif current_operator.operand == Intrinsic.LESS_THAN:
            # Intristic less than operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(int(operand_b < operand_a))")
        elif current_operator.operand == Intrinsic.LESS_EQUAL_THAN:
            # Intristic less equal than operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(int(operand_b<= operand_a))")
        elif current_operator.operand == Intrinsic.SWAP:
            # Intristic swap operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(operand_a)")
            write("stack.append(operand_b)")
        elif current_operator.operand == Intrinsic.COPY:
            # Intristic copy operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("stack.append(operand_a)")
            write("stack.append(operand_a)")
        elif current_operator.operand == Intrinsic.SHOW:
            # Intristic show operator.

            # Write operator data.
            write("print(stack.pop())")
        elif current_operator.operand == Intrinsic.FREE:
            # Intristic free operator.

            # Write operator data.
            write("stack.pop()")
        elif current_operator.operand == Intrinsic.NOT_EQUAL:
            # Intristic not equal operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(int(operand_b != operand_a))")
        elif current_operator.operand == Intrinsic.COPY2:
            # Intristic copy2 operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(operand_b)")
            write("stack.append(operand_a)")
            write("stack.append(operand_b)")
            write("stack.append(operand_a)")
        elif current_operator.operand == Intrinsic.COPY_OVER:
            # Intristic copy over operator.

            # Write operator data.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("stack.append(operand_b)")
            write("stack.append(operand_a)")
            write("stack.append(operand_b)")
        elif current_operator.operand == Intrinsic.MEMORY_POINTER:
            # Intrinsic null pointer operator.

            # Write bytearray block.
            # TODO: May be removed, but just OK.
            current_bytearray_should_written = True

            # Write operator data.
            write(f"stack.append({MEMORY_BYTEARRAY_NULL_POINTER})")
        elif current_operator.operand == Intrinsic.NULL:
            # Intrinsic null operator.

            # Write operator data.
            write(f"stack.append(0)")
        elif current_operator.operand == Intrinsic.MEMORY_WRITE:
            # Intrinsic memory write operator.

            # Write bytearray block.
            current_bytearray_should_written = True

            # Write operator data.
            # TODO: More checks at compiled script.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("memory[operand_b] = operand_a")
        elif current_operator.operand == Intrinsic.MEMORY_READ:
            # Intrinsic memory read operator.

            # Write bytearray block.
            current_bytearray_should_written = True

            # Write operator data.
            # TODO: More checks at compiled script.
            write("operand_a = stack.pop()")
            write("memory_byte = memory[operand_a]")
            write("stack.append(memory_byte)")
        elif current_operator.operand == Intrinsic.MEMORY_WRITE4BYTES:
            # Intristic memory write 4 bytes operator.

            # Write bytearray block.
            current_bytearray_should_written = True

            # Write operator data.
            # TODO: More checks at compiled script.
            write("operand_a = stack.pop()")
            write("operand_b = stack.pop()")
            write("memory_bytes = operand_a.to_bytes(length=4, byteorder=\"little\", signed=(operand_a < 0))")
            write("memory[operand_b:operand_b + 4] = memory_bytes")
        elif current_operator.operand == Intrinsic.MEMORY_READ4BYTES:
            # Intristic memory read 4 bytes operator.

            # Write bytearray block.
            current_bytearray_should_written = True

            # Write operator data.
            # TODO: More checks at compiled script.
            write("operand_a = stack.pop()")
            write("memory_bytes = int.from_bytes(memory[operand_a:operand_a + 4], byteorder=\"little\")")
            write("stack.append(memory_bytes)")
        elif current_operator.operand == Intrinsic.MEMORY_SHOW_CHARACTERS:
            # Intrinsic memory show as characters operator.

            # Write bytearray block.
            current_bytearray_should_written = True

            # Write operator data.
            write("memory_length = stack.pop()")
            write("memory_pointer = stack.pop()")
            write("memory_index = 0")
            write("while memory_index < memory_length:")
            write("\tmemory_byte = memory[memory_pointer + memory_index]")
            write("\tprint(chr(memory_byte), end=\"\")")
            write("\tmemory_index += 1")
        else:
            # If unknown instrinsic type.

            # Write node data.
            gofra.core.errors.message_verbosed(Stage.COMPILATOR, current_operator.token.location, "Error",
                                               f"Intrinsic `{INTRINSIC_TYPES_TO_NAME[current_operator.operand]}` "
                                               f"is not implemented for python generation!", True)

    def __write_operator(operator: Operator):
        """ Writes default operator (non-intrinsic). """
        # Nonlocalise while data.
        nonlocal current_while_block  # type: ignore
        nonlocal current_while_defined_name  # type: ignore
        nonlocal current_while_comment  # type: ignore

        # Grab our operator
        if operator.type == OperatorType.INTRINSIC:
            # Intrinsic operator.

            # Error.
            assert False, "Intrinsic operators should be written using __write_operator_intrinsic()!"
        elif operator.type == OperatorType.PUSH_INTEGER:
            # PUSH INTEGER operator.

            # Type check.
            assert isinstance(operator.operand, int), "Type error, parser level error?"

            # Write operator data.
            write(f"stack.append({operator.operand})")
        elif operator.type == OperatorType.PUSH_STRING:
            # PUSH STRING operator.

            # Type check.
            assert isinstance(operator.operand, str), "Type error, parser level error?"

            # Write operator data.
            # TODO: Warn using `current_operator_index`
            write(f"s_str, s_len = stack_push_string({operator.operand.encode('UTF-8')}, {current_operator_index})")
            write(f"stack.append(s_str)")
            write(f"stack.append(s_len)")

            # Write strings buffer block.
            nonlocal current_string_buffer_should_written
            current_string_buffer_should_written = True

            # And memory.
            nonlocal current_bytearray_should_written
            current_bytearray_should_written = True
        elif operator.type == OperatorType.IF:
            # IF operator.

            # Type check.
            assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"

            # Write operator data.
            write("if stack.pop() != 0:")

            # Increase indent level.
            __update_indent(1)
        elif operator.type == OperatorType.WHILE:
            # WHILE operator.

            # Type check.
            assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"

            # Remember name, so we can write "def" at the top of the source in current_while_insert_position.
            current_while_defined_name = f"while_expression_ip{current_operator_index}"

            # Remember comment for while function block.
            # (`comment` comes from the enclosing generation loop for the current operator.)
            current_while_comment = comment

            # Write operator data.
            current_lines.append(f"{current_indent}{comment[2:]}\n"
                                 f"{current_indent}while {current_while_defined_name}()")

            # Set that we in while expression.
            current_while_block = True
        elif operator.type == OperatorType.DO:
            # DO operator.

            # Type check.
            assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"

            if current_while_block:
                # If we close while.

                # Current insert position for lines.
                # (As we don`t want to reset current_while_insert_position)
                while_block_insert_position = current_while_insert_position

                # Insert header.
                function_comment = "" if directive_skip_comments else f"\t# -- Should be called from WHILE.\n"
                current_lines.insert(while_block_insert_position,
                                     f"def {current_while_defined_name}():{current_while_comment}\n" + function_comment)

                for while_stack_line in current_while_lines:
                    # Iterate over while stack lines.

                    # Increment.
                    while_block_insert_position += 1

                    # Insert.
                    current_lines.insert(while_block_insert_position, f"\t{while_stack_line}")

                # Insert return.
                return_comment = "" if directive_skip_comments else f" # -- Return for calling from WHILE ."
                current_lines.insert(while_block_insert_position + 1,
                                     f"\treturn stack.pop()" + return_comment + "\n")
            else:
                # If this is not while.

                # Error.
                gofra.core.errors.message_verbosed(Stage.COMPILATOR, operator.token.location, "Error",
                                                   "Got `do`, when there is no `while` block started! "
                                                   "(Parsing error?)", True)

            # Write operator.
            current_lines.append(f":{comment}\n")

            # Go out the while block expression.
            current_while_block = False

            # Reset current while lines list (stack).
            current_while_lines.clear()

            # Increase indent level.
            __update_indent(1)
        elif operator.type == OperatorType.ELSE:
            # ELSE operator.

            # Type check.
            assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"

            # Write operator data.
            pass_comment = "" if directive_skip_comments else f" # -- Be sure that there is no empty body."
            current_lines.append(current_indent + f"pass{pass_comment}\n")

            # Decrease indent level.
            __update_indent(-1)

            # Write operator data.
            write("else:")

            # Increase indent level.
            __update_indent(1)
        elif operator.type == OperatorType.END:
            # END operator.
            # Actually, there is no END in Python.

            # Type check.
            assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"

            # Write operator data.
            pass_comment = "" if directive_skip_comments else f" # -- Be sure that there is no empty body."
            current_lines.append(current_indent + f"pass{pass_comment}\n")

            # Decrease indent level.
            __update_indent(-1)
        elif operator.type == OperatorType.DEFINE:
            # DEFINE Operator.

            # Error.
            assert False, "Got definition operator at runner stage, parser level error?"
        else:
            # If unknown operator type.
            assert False, f"Got unexpected / unknon operator type! (How?)"

    def write(text: str):
        """ Writes one generated line (text + location comment) to the proper output buffer. """
        if current_while_block:
            # If we are in loop.

            # Add text without indent.
            current_while_lines.append(text + comment + "\n")
        else:
            # Write default text.
            current_lines.append(current_indent + text + comment + "\n")

    # Indentation level.
    current_indent_level = 0  # Indent level for calculating.
    current_indent = ""  # Indent string for writing.

    # While.
    current_while_block = False  # If true, we are in while loop.
    current_while_comment = ""  # While block comment to place in final expression function.
    current_while_defined_name = ""  # While defined name for naming expression function.
    current_while_lines: List[str] = []  # List of while lines to write in expression function.
    current_while_insert_position = 0  # Position to insert while expressions blocks.

    # Bytearray.
    current_bytearray_insert_position = 0  # Position to insert bytearray block if bytearray_should_written is true.
    current_bytearray_should_written = False  # If true, will warn about memory usage and write bytearray block.
    # TODO: Remove, as redundant, there is bytearray insert position above, which is same.

    # Strings.
    # Position to insert string bufer allocation block,
    # if current_string_buffer_should_written is true.
    current_string_buffer_insert_position = 0
    current_string_buffer_should_written = False  # If true, will write string buffer allocation block.

    # Should we skip comments.
    directive_skip_comments = context.directive_python_comments_skip

    # Get source operators count.
    operators_count = len(source.operators)

    # Current operator index from the source.
    current_operator_index = 0

    # Lines.
    current_lines: List[str] = []

    # Check that there is more than zero operators in context.
    if operators_count == 0:
        # If there is no operators in the final parser context.

        # Error.
        gofra.core.errors.message_verbosed(Stage.COMPILATOR, (basename(path), 1, 1), "Error",
                                           "There is no operators found in given file after parsing, "
                                           "are you given empty file or file without resulting operators?", True)

    # Open file.
    file, _ = gofra.core.other.try_open_file(path + ".py", "w", True)

    # Write header.
    __write_header()

    while current_operator_index < operators_count:
        # While we not run out of the source operators list.

        # Get current operator from the source.
        current_operator: Operator = source.operators[current_operator_index]

        # Make comment string.
        location: LOCATION = current_operator.token.location
        location_string: str = f"Line {location[1]}, Row {location[2]}"
        comment = "" if directive_skip_comments else f" # Token: {current_operator.token.text} [{location_string}]"

        if current_operator.type == OperatorType.INTRINSIC:
            # If this is intrinsic.

            # Write intrinsic operator.
            __write_operator_intrinsic(current_operator)
        else:
            # If this is other operator.

            # Write default operator.
            __write_operator(current_operator)

        # Increment current index.
        current_operator_index += 1

    # Write footer.
    __write_footer()

    if len(current_while_lines) != 0:
        # If we have something at the while lines stack.

        # Error.
        gofra.core.errors.message_verbosed(Stage.COMPILATOR, source.operators[-1].token.location, "Error",
                                           "While lines stack is not empty after running python generation! "
                                           "(Compilation error?)", True)

    # Write lines in final file.
    for current_stack_line in current_lines:
        file.write(current_stack_line)

    # Close file.
    file.close()
# Bytecode.
def compile_bytecode(source: Source, _, path: str):
    """ Compiles operators to bytecode and writes it to `path + ".gofbc"`.

    :param source: Source container with the operators to compile.
    :param _: Unused parser context (kept for signature parity with other backends).
    :param path: Base output path; resulting file is `path + ".gofbc"`.
    :return: Path of the written bytecode file.
    """

    # Check that there is no changes in operator type or intrinsic.
    assert len(OperatorType) == 10, \
        "Please update implementation for bytecode compilation after adding new OperatorType!"
    assert len(Intrinsic) == 28, "Please update implementation for bytecode compilation after adding new Intrinsic!"

    def __write_operator_intrinsic(operator: Operator):
        """ Writes one intrinsic operator.
        Fix: uses the `operator` parameter throughout instead of silently reading
        `current_operator` from the enclosing scope (same object at the call site). """
        # Check that this is intrinsic operator.
        assert operator.type == OperatorType.INTRINSIC, "Non-INTRINSIC operators " \
                                                        "should be written using __write_operator()!"

        # Type check.
        assert isinstance(operator.operand, Intrinsic), f"Type error, parser level error?"

        if operator.operand in INTRINSIC_TO_BYTECODE_OPERATOR:
            # Intristic operator.

            # Write operator data.
            write(INTRINSIC_TO_BYTECODE_OPERATOR[operator.operand])
        else:
            gofra.core.errors.message_verbosed(Stage.COMPILATOR, operator.token.location, "Error",
                                               f"Intrinsic `{INTRINSIC_TYPES_TO_NAME[operator.operand]}` "
                                               f"is not implemented for bytecode compilation!", True)

    def __write_operator(operator: Operator):
        """ Writes one non-intrinsic operator (only PUSH_INTEGER is implemented so far). """
        # Grab our operator
        if operator.type == OperatorType.INTRINSIC:
            assert False, "Intrinsic operators should be written using __write_operator_intrinsic()!"
        elif operator.type == OperatorType.PUSH_INTEGER:
            # Type check.
            assert isinstance(operator.operand, int), "Type error, parser level error?"

            # Write operator data (opcode followed by its integer argument).
            write(OPERATOR_TYPE_TO_BYTECODE_OPERATOR[OperatorType.PUSH_INTEGER])
            write(f"{operator.operand}")
        elif operator.type == OperatorType.PUSH_STRING:
            assert isinstance(operator.operand, str), "Type error, parser level error?"
            gofra.core.errors.message("Error", "Strings is not implemented yet in the bytecode!", True)
        elif operator.type == OperatorType.IF:
            assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
            gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
        elif operator.type == OperatorType.WHILE:
            assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
            gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
        elif operator.type == OperatorType.DO:
            assert isinstance(operator.operand, OPERATOR_ADDRESS), f"Type error, parser level error?"
            gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
        elif operator.type == OperatorType.ELSE:
            assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
            gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
        elif operator.type == OperatorType.END:
            assert isinstance(operator.operand, OPERATOR_ADDRESS), "Type error, parser level error?"
            gofra.core.errors.message("Error", "Conditional is not implemented yet in the bytecode!", True)
        elif operator.type == OperatorType.DEFINE:
            assert False, "Got definition operator at runner stage, parser level error?"
        else:
            # If unknown operator type.
            assert False, f"Got unexpected / unknon operator type! (How?)"

        # WIP: non-intrinsic operators end their line.
        current_lines.append("\n")

    def write(text: str):
        """ Appends one bytecode token (space-separated) to the output lines. """
        current_lines.append(text + " ")

    # Get source operators count.
    operators_count = len(source.operators)

    # Current operator index from the source.
    current_operator_index = 0

    # Lines.
    current_lines: List[str] = []

    # Check that there is more than zero operators in context.
    if operators_count == 0:
        # If there is no operators in the final parser context.
        gofra.core.errors.message_verbosed(Stage.COMPILATOR, (basename(path), 1, 1), "Error",
                                           "There is no operators found in given file after parsing, "
                                           "are you given empty file or file without resulting operators?", True)

    # Open file.
    bytecode_path = path + ".gofbc"
    file, _ = gofra.core.other.try_open_file(bytecode_path, "w", True)

    while current_operator_index < operators_count:
        # While we not run out of the source operators list.

        # Get current operator from the source.
        current_operator: Operator = source.operators[current_operator_index]

        if current_operator.type == OperatorType.INTRINSIC:
            # If this is intrinsic.

            # Write intrinsic operator.
            __write_operator_intrinsic(current_operator)
        else:
            # If this is other operator.

            # Write default operator.
            __write_operator(current_operator)

        # Increment current index.
        current_operator_index += 1

    # Write lines in final file.
    for current_stack_line in current_lines:
        file.write(current_stack_line)

    # Close file.
    file.close()

    return bytecode_path
def execute_bytecode(path: str):
    """ Executes bytecode file.

    Loads a compiled `.gofbc` file, splits it into whitespace-separated
    bytecode tokens, reconstructs interpreter operators from those tokens,
    and finally runs the rebuilt source through `interpretator_run`.
    :param path: Path to the bytecode file; must end with `.gofbc`.
    """
    # Check that there is no changes in operator type or intrinsic.
    # (These counts pin the enum sizes this decoder was written against;
    # they fail loudly when a new operator/intrinsic is added upstream.)
    assert len(OperatorType) == 10, "Please update implementation for bytecode execution after adding new OperatorType!"
    assert len(Intrinsic) == 28, "Please update implementation for bytecode execution after adding new Intrinsic!"
    if not path.endswith(".gofbc"):
        # Wrong extension - report and bail out.
        # NOTE(review): presumably `message(..., True)` force-exits; the
        # `return` below is the safety net if it does not - confirm.
        gofra.core.errors.message("Error", f"File \"{path}\" should have extension `.gofbc` for being executed!", True)
        return
    # Open file.
    file, _ = gofra.core.other.try_open_file(path, "r", True)
    # Tokenize operator tokens.
    bc_op_tokens = []
    for line in file.readlines():
        op_tokens = line.split(" ")
        for op_token in op_tokens:
            # Skip bare newlines and whitespace-only tokens produced by the
            # compiler's token separator / line terminator writes.
            if op_token == "\n" or op_token.replace(" ", "") == "":
                continue
            bc_op_tokens.append(op_token)
    # New context.
    parser_context = ParserContext()
    # Convert OPs to interpretator operators.
    current_bc_operator_index = 0
    while current_bc_operator_index < len(bc_op_tokens):
        bc_operator = bc_op_tokens[current_bc_operator_index]
        if bc_operator == OPERATOR_TYPE_TO_BYTECODE_OPERATOR[OperatorType.PUSH_INTEGER]:
            # Integer push: the following token is the literal value,
            # so this instruction consumes two tokens.
            parser_context.operators.append(Operator(
                OperatorType.PUSH_INTEGER,
                Token(TokenType.BYTECODE, bc_operator, (path, -1, -1), bc_operator),
                int(bc_op_tokens[current_bc_operator_index + 1])
            ))
            current_bc_operator_index += 2
            continue
        else:
            if bc_operator in BYTECODE_OPERATOR_NAMES_TO_INTRINSIC:
                # Known intrinsic instruction: consumes a single token.
                parser_context.operators.append(Operator(
                    OperatorType.INTRINSIC,
                    Token(TokenType.BYTECODE, bc_operator, (path, -1, -1), bc_operator),
                    BYTECODE_OPERATOR_NAMES_TO_INTRINSIC[bc_operator]
                ))
            else:
                # Unknown instruction - report a parser-stage error (line and
                # column are unknowable for bytecode, hence the -1, -1 location).
                gofra.core.errors.message_verbosed(Stage.PARSER, ("Bytecode", -1, -1), "Error",
                                                   f"Got unexpected bytecode instruction - {repr(bc_operator)}!", True)
            current_bc_operator_index += 1
            continue
    # Run.
    parser_context_source = Source(parser_context.operators)
    interpretator_run(parser_context_source)
    # Close file.
    file.close()
# CLI.
def cli_no_arguments_error_message(operator: Operator, force_exit: bool = False):
    """ Shows no arguments passed error message to the CLI.

    Reports, via the linter-stage error reporter, that the given operator
    required stack arguments that were not present on the type-checker stack.
    :param operator: Operator missing its stack arguments; must be an
                     INTRINSIC, IF or DO operator (anything else asserts).
    :param force_exit: When True, terminate the process with exit code 1
                       after the message is reported.
    """
    if operator.type == OperatorType.INTRINSIC:
        # Intrinsic Operator.
        # Type check.
        assert isinstance(operator.operand, Intrinsic), "Type error, parser level error?"
        # Error. (Message text fixed: "founded" -> "found".)
        gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
                                           f"`{INTRINSIC_TYPES_TO_NAME[operator.operand]}` "
                                           f"intrinsic should have more arguments at the stack, but it was not found!")
    elif operator.type == OperatorType.IF:
        # IF Operator.
        # Error
        gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
                                           "`IF` operator should have 1 argument at the stack, but it was not found!")
    elif operator.type == OperatorType.DO:
        # DO Operator.
        # Error
        gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error",
                                           "`DO` operator should have 1 argument at the stack, but it was not found!")
    else:
        # Unknown operator.
        assert False, "Tried to call no_arguments_error_message() " \
                      "for operator that does not need arguments! (Type checker error?)"
    # If we should force exit.
    if force_exit:
        exit(1)
def cli_argument_type_error_message(operator: Operator, argument_index: int,
                                    actual_type: type, expected_type: type, force_exit: bool = False):
    """ Shows unexpected argument type passed error message to the CLI.

    Builds the message appropriate for the operator kind, forwards it to the
    linter-stage error reporter, and optionally terminates the process.
    """
    # Compose the message text for the operator kind being complained about.
    if operator.type == OperatorType.INTRINSIC:
        assert isinstance(operator.operand, Intrinsic), "Type error, parser level error?"
        complaint = (f"`{INTRINSIC_TYPES_TO_NAME[operator.operand]}` "
                     f"intrinsic expected {argument_index} argument "
                     f"to be with type {expected_type}, but it has type {actual_type}!")
    elif operator.type == OperatorType.IF:
        complaint = f"`IF` operator expected type {expected_type} but got {actual_type}!"
    elif operator.type == OperatorType.DO:
        complaint = f"`DO` operator expected type {expected_type} but got {actual_type}!"
    else:
        assert False, "Tried to call cli_argument_type_error_message() " \
                      "for unknown operator! (Type checker error?)"
    # Single reporting point shared by all operator kinds.
    gofra.core.errors.message_verbosed(Stage.LINTER, operator.token.location, "Error", complaint)
    if force_exit:
        exit(1)
def cli_validate_argument_vector(argument_vector: List[str]) -> List[str]:
    """ Validates CLI argv (argument vector).

    Normalises `argv` into exactly three elements:
    `[source_path, subcommand, silent_flag]`, where `silent_flag` is either
    `"-silent"` or an empty string. Invalid vectors are reported to the user
    via the error reporter.
    :param argument_vector: Raw `sys.argv`, including the runner file name.
    :return: A three element list `[path, subcommand, flag]`.
    """
    # Check that there is anything in the ARGV.
    assert len(argument_vector) > 0, "There is no source (mspl.py) file path in the ARGV"
    # Get argument vector without source(mspl.py) path.
    argument_runner_filename: str = argument_vector[0]
    argument_vector = argument_vector[1:]
    # Validate ARGV.
    if len(argument_vector) == 0:
        # No arguments at all - the user must at least pass a file path.
        gofra.systems.cli.usage_message(argument_runner_filename)
        gofra.core.errors.message("Error", "Please pass file path to work with (.gof or .gofbc ~)", True)
    elif len(argument_vector) == 1:
        # Just one argument: only `help` is meaningful here.
        if argument_vector[0] != "help":
            # Not help - a subcommand is mandatory after the file path.
            gofra.systems.cli.usage_message(argument_runner_filename)
            gofra.core.errors.message("Error", "Please pass subcommand after the file path!", True)
        # Show usage.
        gofra.systems.cli.usage_message(argument_runner_filename)
        # Exit.
        exit(0)
        # Unreachable in practice (exit above); kept for type checkers.
        return ["", argument_vector[0], ""]
    elif len(argument_vector) == 2:
        # Expected ARGV length - pad the (absent) silent flag.
        return [*argument_vector, ""]
    elif len(argument_vector) == 3:
        # Three arguments are only valid when the last one is `-silent`.
        if argument_vector[2] != "-silent":
            gofra.systems.cli.usage_message(argument_runner_filename)
            gofra.core.errors.message("Error", "Unexpected arguments!", True)
    else:
        # Fix: previously vectors longer than three slipped through this
        # validator unchanged and later crashed on the
        # `assert len(cli_argument_vector) == 3` in `cli_entry_point`.
        gofra.systems.cli.usage_message(argument_runner_filename)
        gofra.core.errors.message("Error", "Unexpected arguments!", True)
    # Return final ARGV.
    return argument_vector
def cli_entry_point():
    """ Entry point for the CLI.

    Validates `argv`, then dispatches to one of the subcommands:
    `run`, `graph`, `python`, `dump`, `compile`, `execute` or `help`.
    Progress messages are suppressed when the `-silent` flag is passed.
    """
    # Get and check size of cli argument vector.
    cli_argument_vector = cli_validate_argument_vector(argv)
    assert len(cli_argument_vector) == 3, "Got unexpected size of argument vector."
    # CLI Options.
    cli_source_path, cli_subcommand, cli_silent = cli_argument_vector
    cli_silent: bool = bool(cli_silent == "-silent")
    # Welcome message.
    if not cli_silent:
        gofra.systems.cli.welcome_message()
    # Load source and check size of it.
    # (`execute` is absent from this tuple: it reads the compiled bytecode
    # file directly instead of parsing source.)
    loaded_file = None
    if cli_subcommand in ("run", "graph", "python", "dump", "compile"):
        loaded_file = load_source_from_file(cli_source_path)
        assert len(loaded_file) == 2, "Got unexpected data from loaded file."
    if cli_subcommand == "run":
        # If this is interpretate subcommand.
        cli_source, cli_context = loaded_file
        interpretator_run(cli_source, cli_context.memory_bytearray_size)
        # Message.
        if not cli_silent:
            print(f"[Info] File \"{basename(cli_source_path)}\" was interpreted!")
    elif cli_subcommand == "graph":
        # If this is graph subcommand.
        # Get source from loaded file.
        cli_source, _ = loaded_file
        # Generate graph file.
        gofra.systems.graph.write(cli_source, cli_source_path)
        # Message.
        if not cli_silent:
            print(f"[Info] .dot file \"{basename(cli_source_path)}.dot\" generated!")
    elif cli_subcommand == "python":
        # If this is python subcommand.
        # Get source and context from loaded file.
        cli_source, cli_context = loaded_file
        # Generate python file.
        python_generate(cli_source, cli_context, cli_source_path)
        # Message.
        if not cli_silent:
            print(f"[Info] .py file \"{basename(cli_source_path)}.py\" generated!")
    elif cli_subcommand == "dump":
        # If this is dump subcommand.
        # Get source from loaded file.
        cli_source, _ = loaded_file
        # Dump print.
        gofra.systems.dump.dump(cli_source.operators)
        # Message.
        if not cli_silent:
            print(f"[Info] File \"{basename(cli_source_path)}\" was dump printed!")
    elif cli_subcommand == "compile":
        # If this is compile subcommand.
        # Get source from loaded file.
        cli_source, cli_context = loaded_file
        # Compile.
        bytecode_path = compile_bytecode(cli_source, cli_context, cli_source_path)
        # Message.
        if not cli_silent:
            print(f"[Info] File \"{basename(cli_source_path)}\" was compiled to \"{basename(bytecode_path)}\"!")
    elif cli_subcommand == "execute":
        # If this is execute subcommand.
        # Execute.
        execute_bytecode(cli_source_path)
        # Message.
        if not cli_silent:
            print(f"[Info] File \"{basename(cli_source_path)}\" was executed!")
    else:
        # If unknown subcommand.
        # Message.
        gofra.systems.cli.usage_message(__file__)
        gofra.core.errors.message("Error", f"Unknown subcommand `{cli_subcommand}`!")
# Script entry point: dispatch to the CLI only when run directly.
if __name__ == "__main__":
    cli_entry_point()
| [
37811,
198,
220,
220,
220,
8774,
1514,
69,
430,
8300,
3303,
2723,
2438,
13,
198,
198,
37811,
198,
198,
834,
9800,
834,
796,
366,
42,
343,
359,
10511,
418,
377,
2488,
74,
343,
359,
23548,
418,
377,
1,
198,
834,
43085,
834,
796,
366... | 1.961445 | 70,238 |
# Global settings for photologue example project.
import os
# Development defaults: Django and template debugging both enabled.
DEBUG = TEMPLATE_DEBUG = True
# Top level folder - the one created by the startproject command.
TOP_FOLDER = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
ADMINS = ()
MANAGERS = ADMINS
# Default dev database is Sqlite. In production I use postgres.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(TOP_FOLDER, 'database.sql3')
    }
}
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
# TODO: setting this to True in Django 1.4 causes runtime warnings, when 1.4
# is end-of-lined in 2014 we can change this setting to True.
USE_TZ = False
# User uploads and collected static assets are both kept under `public/`.
MEDIA_ROOT = os.path.join(TOP_FOLDER, 'public', 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(TOP_FOLDER, 'public', 'static')
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(TOP_FOLDER, 'example_project/static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): committing a secret key is tolerable only because this is an
# example project - never reuse this value in a real deployment.
SECRET_KEY = '3p0f5q)l$=gt++#z0inpfh%bm_ujl6(-yogbzw2)(xea48@70d'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = [
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]
ROOT_URLCONF = 'example_project.urls'
# Photologue ships its own template directory; add it to the search path.
from photologue import PHOTOLOGUE_TEMPLATE_DIR
TEMPLATE_DIRS = (
    os.path.join(TOP_FOLDER, 'example_project/templates'),
    PHOTOLOGUE_TEMPLATE_DIR
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.request',
)
FIXTURE_DIRS = (
    os.path.join(TOP_FOLDER, 'example_project/fixtures'),
)
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'photologue',
    'example_project',
    'south',
]
# Skip South migrations when running the test suite (use syncdb instead).
SOUTH_TESTS_MIGRATE = False
| [
2,
8060,
6460,
329,
2825,
39795,
1672,
1628,
13,
198,
198,
11748,
28686,
198,
198,
30531,
796,
309,
3620,
6489,
6158,
62,
30531,
796,
6407,
198,
198,
2,
5849,
1241,
9483,
532,
262,
530,
2727,
416,
262,
923,
16302,
3141,
13,
198,
352... | 2.514706 | 1,156 |
# Learning rate scheduler
| [
2,
18252,
2494,
6038,
18173,
201,
198,
201,
198
] | 3.222222 | 9 |
import asyncio
import dataclasses
import itertools
import logging
import pathlib
import sys
import traceback
import typing
from typing import Any, Callable, Dict, FrozenSet, Iterable, Set, Text, Tuple, Type
import gamla
import immutables
import toposort
import typeguard
from gamla.optimized import async_functions as opt_async_gamla
from gamla.optimized import sync as opt_gamla
from computation_graph import base_types, graph
_toposort_nodes: Callable[
[base_types.GraphType], Tuple[FrozenSet[base_types.ComputationNode], ...]
] = opt_gamla.compose_left(
opt_gamla.groupby_many(_get_edge_sources),
opt_gamla.valmap(
opt_gamla.compose_left(opt_gamla.map(base_types.edge_destination), set)
),
_transpose_graph,
toposort.toposort,
opt_gamla.map(frozenset),
tuple,
)
_incoming_edge_options = opt_gamla.compose_left(
graph.get_incoming_edges_for_node,
gamla.after(
opt_gamla.compose_left(
opt_gamla.groupby(base_types.edge_key),
opt_gamla.valmap(gamla.sort_by(gamla.attrgetter("priority"))),
dict.values,
opt_gamla.star(itertools.product),
opt_gamla.map(tuple),
)
),
)
_get_args_helper = opt_gamla.compose_left(
opt_gamla.keyfilter(gamla.attrgetter("args")),
dict.values,
gamla.head,
opt_gamla.maptuple(gamla.attrgetter("result")),
)
_get_inner_kwargs = opt_gamla.compose_left(
opt_gamla.keyfilter(base_types.edge_key),
dict.items,
opt_gamla.groupby(opt_gamla.compose_left(gamla.head, base_types.edge_key)),
opt_gamla.valmap(
opt_gamla.compose_left(
gamla.head, gamla.second, gamla.head, gamla.attrgetter("result")
)
),
)
_DecisionsType = Dict[base_types.ComputationNode, base_types.ComputationResult]
_ResultToDecisionsType = Dict[base_types.ComputationResult, _DecisionsType]
_IntermediaryResults = Dict[base_types.ComputationNode, _ResultToDecisionsType]
class _NotCoherent(Exception):
    """Signals that, for a specific set of incoming node edges,
    not all paths agree on the ComputationResult."""
NodeToResults = Callable[[base_types.ComputationNode], _ResultToDecisionsType]
_get_kwargs_from_edges = opt_gamla.compose_left(
opt_gamla.map(base_types.edge_key), opt_gamla.remove(gamla.equals(None)), tuple
)
_ChoiceOfOutputForNode = Tuple[
Tuple[base_types.ComputationResult, _DecisionsType], base_types.ComputationNode,
]
@gamla.curry
_choice_to_value: Callable[
[_ChoiceOfOutputForNode], base_types.ComputationResult
] = opt_gamla.compose_left(gamla.head, gamla.head)
_decisions_from_value_choices = opt_gamla.compose_left(
gamla.concat,
gamla.bifurcate(
opt_gamla.compose_left(
opt_gamla.map(opt_gamla.compose_left(gamla.head, gamla.second)),
opt_gamla.reduce(
opt_gamla.merge_with_reducer(_check_equal_and_take_one),
immutables.Map(),
),
),
opt_gamla.mapdict(opt_gamla.juxt(gamla.second, _choice_to_value)),
),
opt_gamla.merge,
)
_SingleNodeSideEffect = Callable[[base_types.ComputationNode, Any], None]
def _dag_layer_reduce(
    f: Callable[
        [_IntermediaryResults, FrozenSet[base_types.ComputationNode]],
        _IntermediaryResults,
    ]
) -> Callable[[base_types.GraphType], _IntermediaryResults]:
    """Directed acyclic graph reduction.

    Topologically sorts the graph into layers of mutually independent nodes
    and folds `f` over those layers, starting from an empty immutable map.
    """
    return gamla.compose_left(
        _toposort_nodes, gamla.reduce_curried(f, immutables.Map())
    )
_create_node_run_options = opt_gamla.compose_left(
gamla.pack,
gamla.explode(1),
opt_gamla.mapcat(
opt_gamla.compose_left(
gamla.bifurcate(
gamla.head, gamla.second, opt_gamla.star(lambda _, y, z: z(y))
),
gamla.explode(2),
)
),
)
@gamla.curry
_is_graph_async = opt_gamla.compose_left(
opt_gamla.mapcat(lambda edge: (edge.source, *edge.args)),
opt_gamla.remove(gamla.equals(None)),
opt_gamla.map(gamla.attrgetter("func")),
gamla.anymap(asyncio.iscoroutinefunction),
)
to_callable_with_side_effect = gamla.curry(
_to_callable_with_side_effect_for_single_and_multiple
)(_type_check)
# Use the second line if you want to see the winning path in the computation graph (a little slower).
to_callable = to_callable_with_side_effect(gamla.just(gamla.just(None)))
# to_callable = to_callable_with_side_effect(graphviz.computation_trace('utterance_computation.dot'))
| [
11748,
30351,
952,
198,
11748,
4818,
330,
28958,
198,
11748,
340,
861,
10141,
198,
11748,
18931,
198,
11748,
3108,
8019,
198,
11748,
25064,
198,
11748,
12854,
1891,
198,
11748,
19720,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
360,
... | 2.300254 | 1,965 |
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here. | [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
628,
198,
2,
13610,
534,
4981,
994,
13
] | 3.648649 | 37 |
import torch
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import sys
import matplotlib

# Large font plus LaTeX text rendering for publication-quality labels.
font = {"size": 30}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True

# Number of top-enrichment gene sets to annotate by name on the plot.
n_gene_sets_to_name = 2

# Load GSEA results and derive -log10(adjusted p-value) for the y axis.
results_df = pd.read_csv("./out/st_avg_gene_variance_gsea_results.csv", index_col=0)
results_df["logpval"] = -np.log10(results_df.padj.values)

# Volcano-style scatter: enrichment score vs. significance.
plt.figure(figsize=(7, 7))
sns.scatterplot(data=results_df, x="NES", y="logpval", color="black", edgecolor=None)
plt.xlabel("Enrichment score")
plt.ylabel(r"$-\log_{10}$(p-val)")

# Label the n gene sets with the highest NES; the pathway name's leading
# underscore-separated token (collection prefix) is dropped for readability.
sorted_idx = np.argsort(-results_df.NES.values)
for ii in range(n_gene_sets_to_name):
    gs_name = " ".join(results_df.pathway.values[sorted_idx[ii]].split("_")[1:])
    plt.text(
        s=gs_name,
        x=results_df.NES.values[sorted_idx[ii]],
        y=results_df.logpval.values[sorted_idx[ii]],
        ha="right",
    )

plt.tight_layout()
plt.savefig("./out/st_avg_gene_variance_gsea_results.png")
plt.show()
# Fix: removed leftover debugging breakpoint (`import ipdb; ipdb.set_trace()`)
# that halted the script in an interactive debugger after every run (and
# raised ImportError wherever ipdb is not installed).
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
25064,
198,
198,
11748,
... | 2.164211 | 475 |
# Script entry point: configure and start a VIFF multiparty-computation
# runtime, then hand control to the Twisted reactor event loop.
if __name__ == "__main__":
    parser = OptionParser()
    Runtime.add_options(parser)
    options, args = parser.parse_args()
    # NOTE(review): "{{VIFF_CONFIG_LOC}}" looks like a template placeholder -
    # confirm it is substituted with a real config path before this runs.
    pid, players = load_config("{{VIFF_CONFIG_LOC}}")
    # Zp: field over a Blum prime just above 2**65.
    Zp = GF(find_prime(2**65, blum=True))
    runtime_class = make_runtime_class(
        mixins=[ProbabilisticEqualityMixin, ComparisonToft07Mixin]
    )
    pre_runtime = create_runtime(pid, players, 1, options,
                                 runtime_class=runtime_class)
    # Run `protocol` once the runtime is ready; report failures otherwise.
    pre_runtime.addCallback(protocol, Zp)
    pre_runtime.addErrback(report_error)
    reactor.run()
| [
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
30751,
796,
16018,
46677,
3419,
198,
220,
220,
220,
43160,
13,
2860,
62,
25811,
7,
48610,
8,
198,
220,
220,
220,
3689,
11,
26498,
796,
30751,
13,
2957... | 2.447964 | 221 |
"""project urls.

URL configuration for the `project` app: a list view at the app root and a
detail view addressed by a word-character `uid` path segment.
"""
from django.urls import path, re_path
from project.views import ProjectListView, ProjectDetailView
# Namespace used when reversing, e.g. `{% url 'project:index' %}`.
app_name = 'project'
urlpatterns = [
    path('', ProjectListView.as_view(), name='index'),
    # NOTE(review): the regex is unanchored (no ^ or $), so it matches any
    # URL containing word characters followed by a slash - confirm intended.
    re_path(r'(?P<uid>\w+)/', ProjectDetailView.as_view(), name='detail'),
]
| [
37811,
16302,
2956,
7278,
526,
15931,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
302,
62,
6978,
198,
198,
6738,
1628,
13,
33571,
1330,
4935,
8053,
7680,
11,
4935,
11242,
603,
7680,
198,
198,
1324,
62,
3672,
796,
705,
16302... | 2.745283 | 106 |
from flask import Flask, render_template, redirect, url_for, request, session
from flask_restful import Api
from connect import config
from routes.routes import initialize_routes
from my_code.Process import RecommendMovie
from read_file import get_user_id
from read_file import get_rating
# Application and REST API setup; the DB connection (`config()`) and route
# registration happen at import time.
app = Flask(__name__)
api = Api(app)
config()
initialize_routes(api)
@app.route("/")
@app.route("/recommend")
@app.route("/", methods=['POST'])
@app.route("/movie")
if __name__ == "__main__":
    # NOTE(review): hard-coded session secret key - acceptable for local
    # development only; load it from the environment for any real deployment.
    app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
    # Debug server; switch to the plain run() below for non-debug use.
    app.run(debug=True)
    # app.run()
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
18941,
11,
19016,
62,
1640,
11,
2581,
11,
6246,
198,
6738,
42903,
62,
2118,
913,
1330,
5949,
72,
198,
6738,
2018,
1330,
4566,
198,
6738,
11926,
13,
81,
448,
274,
1330,
41216,
62,
81... | 2.648402 | 219 |
from PIL import Image, ImageDraw, ImageFont
# Default bitmap font bundled with Pillow; loaded once at import time.
FONT = ImageFont.load_default()
6738,
350,
4146,
1330,
7412,
11,
7412,
25302,
11,
7412,
23252,
198,
198,
37,
35830,
796,
7412,
23252,
13,
2220,
62,
12286,
3419
] | 3.304348 | 23 |